From a6b85433cbd120a550240fdb64404ea635eed219 Mon Sep 17 00:00:00 2001 From: Ziming Liu Date: Sun, 21 Jul 2024 20:27:22 -0400 Subject: [PATCH] fix device=cuda --- kan/.ipynb_checkpoints/KANLayer-checkpoint.py | 323 +++ kan/.ipynb_checkpoints/LBFGS-checkpoint.py | 493 +++++ kan/.ipynb_checkpoints/MultKAN-checkpoint.py | 1815 +++++++++++++++++ kan/.ipynb_checkpoints/__init__-checkpoint.py | 3 + kan/.ipynb_checkpoints/spline-checkpoint.py | 182 ++ kan/.ipynb_checkpoints/utils-checkpoint.py | 350 ++++ kan/KANLayer.py | 32 +- kan/LBFGS.py | 3 +- kan/MultKAN.py | 62 +- kan/spline.py | 25 +- pykan.egg-info/PKG-INFO | 155 +- pykan.egg-info/SOURCES.txt | 7 + .../API_10_device-checkpoint.ipynb | 173 ++ .../Unchecked_API_10_device-checkpoint.ipynb | 110 - tutorials/API_10_device.ipynb | 173 ++ tutorials/Unchecked_API_10_device.ipynb | 110 - tutorials/model/0.0_cache_data | Bin 840 -> 0 bytes tutorials/model/0.0_config.yml | 51 - tutorials/model/0.0_state | Bin 7992 -> 0 bytes tutorials/model/0.1_cache_data | Bin 17022 -> 0 bytes tutorials/model/0.1_config.yml | 51 - tutorials/model/0.1_state | Bin 107384 -> 0 bytes tutorials/model/0.2_cache_data | Bin 17022 -> 0 bytes tutorials/model/0.2_config.yml | 51 - tutorials/model/0.2_state | Bin 107384 -> 0 bytes tutorials/model/history.txt | 4 - 26 files changed, 3716 insertions(+), 457 deletions(-) create mode 100644 kan/.ipynb_checkpoints/KANLayer-checkpoint.py create mode 100644 kan/.ipynb_checkpoints/LBFGS-checkpoint.py create mode 100644 kan/.ipynb_checkpoints/MultKAN-checkpoint.py create mode 100644 kan/.ipynb_checkpoints/__init__-checkpoint.py create mode 100644 kan/.ipynb_checkpoints/spline-checkpoint.py create mode 100644 kan/.ipynb_checkpoints/utils-checkpoint.py create mode 100644 tutorials/.ipynb_checkpoints/API_10_device-checkpoint.ipynb delete mode 100644 tutorials/.ipynb_checkpoints/Unchecked_API_10_device-checkpoint.ipynb create mode 100644 tutorials/API_10_device.ipynb delete mode 100644 tutorials/Unchecked_API_10_device.ipynb delete mode 100644 tutorials/model/0.0_cache_data delete mode 100644 tutorials/model/0.0_config.yml delete mode 100644 tutorials/model/0.0_state delete mode 100644 tutorials/model/0.1_cache_data delete mode 100644 tutorials/model/0.1_config.yml delete mode 100644 tutorials/model/0.1_state delete mode 100644 tutorials/model/0.2_cache_data delete mode 100644 tutorials/model/0.2_config.yml delete mode 100644 tutorials/model/0.2_state delete mode 100644 tutorials/model/history.txt diff --git a/kan/.ipynb_checkpoints/KANLayer-checkpoint.py b/kan/.ipynb_checkpoints/KANLayer-checkpoint.py new file mode 100644 index 00000000..60c7b0eb --- /dev/null +++ b/kan/.ipynb_checkpoints/KANLayer-checkpoint.py @@ -0,0 +1,323 @@ +import torch +import torch.nn as nn +import numpy as np +from .spline import * +from .utils import sparse_mask + + +class KANLayer(nn.Module): + """ + KANLayer class + + + Attributes: + ----------- + in_dim: int + input dimension + out_dim: int + output dimension + size: int + the number of splines = input dimension * output dimension + k: int + the piecewise polynomial order of splines + grid: 2D torch.float + grid points + noises: 2D torch.float + injected noises to splines at initialization (to break degeneracy) + coef: 2D torch.tensor + coefficients of B-spline bases + scale_base: 1D torch.float + magnitude of the residual function b(x) + scale_sp: 1D torch.float + mangitude of the spline function spline(x) + base_fun: fun + residual function b(x) + mask: 1D torch.float + mask of spline functions. 
setting some element of the mask to zero means setting the corresponding activation to zero function. + grid_eps: float in [0,1] + a hyperparameter used in update_grid_from_samples. When grid_eps = 0, the grid is uniform; when grid_eps = 1, the grid is partitioned using percentiles of samples. 0 < grid_eps < 1 interpolates between the two extremes. + weight_sharing: 1D tensor int + allow spline activations to share parameters + lock_counter: int + counter how many activation functions are locked (weight sharing) + lock_id: 1D torch.int + the id of activation functions that are locked + device: str + device + + Methods: + -------- + __init__(): + initialize a KANLayer + forward(): + forward + update_grid_from_samples(): + update grids based on samples' incoming activations + initialize_grid_from_parent(): + initialize grids from another model + get_subset(): + get subset of the KANLayer (used for pruning) + lock(): + lock several activation functions to share parameters + unlock(): + unlock already locked activation functions + """ + + def __init__(self, in_dim=3, out_dim=2, num=5, k=3, noise_scale=0.1, scale_base=1.0, scale_sp=1.0, base_fun=torch.nn.SiLU(), grid_eps=0.02, grid_range=[-1, 1], sp_trainable=True, sb_trainable=True, save_plot_data = True, device='cpu', sparse_init=False): + '''' + initialize a KANLayer + + Args: + ----- + in_dim : int + input dimension. Default: 2. + out_dim : int + output dimension. Default: 3. + num : int + the number of grid intervals = G. Default: 5. + k : int + the order of piecewise polynomial. Default: 3. + noise_scale : float + the scale of noise injected at initialization. Default: 0.1. + scale_base : float + the scale of the residual function b(x). Default: 1.0. + scale_sp : float + the scale of the base function spline(x). Default: 1.0. + base_fun : function + residual function b(x). Default: torch.nn.SiLU() + grid_eps : float + When grid_eps = 0, the grid is uniform; when grid_eps = 1, the grid is partitioned using percentiles of samples. 0 < grid_eps < 1 interpolates between the two extremes. Default: 0.02. + grid_range : list/np.array of shape (2,) + setting the range of grids. Default: [-1,1]. + sp_trainable : bool + If true, scale_sp is trainable. Default: True. + sb_trainable : bool + If true, scale_base is trainable. Default: True. + device : str + device + + Returns: + -------- + self + + Example + ------- + >>> model = KANLayer(in_dim=3, out_dim=5) + >>> (model.in_dim, model.out_dim) + (3, 5) + ''' + super(KANLayer, self).__init__() + # size + self.out_dim = out_dim + self.in_dim = in_dim + self.num = num + self.k = k + + # shape: (size, num) + ### grid size: (batch, in_dim, out_dim, G + 1) => (batch, in_dim, G + 2*k + 1) + + grid = torch.linspace(grid_range[0], grid_range[1], steps=num + 1)[None,:].expand(self.in_dim, num+1) + grid = extend_grid(grid, k_extend=k) + self.grid = torch.nn.Parameter(grid).requires_grad_(False) + noises = (torch.rand(self.num+1, self.in_dim, self.out_dim) - 1 / 2) * noise_scale / num + # shape: (size, coef) + self.coef = torch.nn.Parameter(curve2coef(self.grid[:,k:-k].permute(1,0), noises, self.grid, k)) + #if isinstance(scale_base, float): + if sparse_init: + mask = sparse_mask(in_dim, out_dim) + else: + mask = 1. 
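+        # illustrative note: ``mask`` here is either a dense 1. or a 0/1 pattern from
+        # sparse_mask(); it multiplies the initial scale_base / scale_sp below, so any
+        # edge zeroed out by sparse_init starts with zero magnitude.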
+ + self.scale_base = torch.nn.Parameter(torch.ones(in_dim, out_dim) * scale_base * mask).requires_grad_(sb_trainable) # make scale trainable + #else: + #self.scale_base = torch.nn.Parameter(scale_base.to(device)).requires_grad_(sb_trainable) + self.scale_sp = torch.nn.Parameter(torch.ones(in_dim, out_dim) * scale_sp * mask).requires_grad_(sp_trainable) # make scale trainable + self.base_fun = base_fun + + self.mask = torch.nn.Parameter(torch.ones(in_dim, out_dim)).requires_grad_(False) + self.grid_eps = grid_eps + + ### remove weight_sharing & lock parts + #self.weight_sharing = torch.arange(out_dim*in_dim).reshape(out_dim, in_dim) + #self.lock_counter = 0 + #self.lock_id = torch.zeros(out_dim*in_dim).reshape(out_dim, in_dim) + + + def forward(self, x): + ''' + KANLayer forward given input x + + Args: + ----- + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + y : 2D torch.float + outputs, shape (number of samples, output dimension) + preacts : 3D torch.float + fan out x into activations, shape (number of sampels, output dimension, input dimension) + postacts : 3D torch.float + the outputs of activation functions with preacts as inputs + postspline : 3D torch.float + the outputs of spline functions with preacts as inputs + + Example + ------- + >>> model = KANLayer(in_dim=3, out_dim=5) + >>> x = torch.normal(0,1,size=(100,3)) + >>> y, preacts, postacts, postspline = model(x) + >>> y.shape, preacts.shape, postacts.shape, postspline.shape + (torch.Size([100, 5]), + torch.Size([100, 5, 3]), + torch.Size([100, 5, 3]), + torch.Size([100, 5, 3])) + ''' + batch = x.shape[0] + # x: shape (batch, in_dim) => shape (size, batch) (size = out_dim * in_dim) + #x = torch.einsum('ij,k->ikj', x, torch.ones(self.out_dim, device=self.device)).reshape(batch, self.size).permute(1, 0) + preacts = x[:,None,:].clone().expand(batch, self.out_dim, self.in_dim) + + base = self.base_fun(x) # (batch, in_dim) + y = coef2curve(x_eval=x, grid=self.grid, coef=self.coef, k=self.k) # y shape: (batch, in_dim, out_dim) + + postspline = y.clone().permute(0,2,1) # postspline shape: (batch, out_dim, in_dim) + + y = self.scale_base[None,:,:] * base[:,:,None] + self.scale_sp[None,:,:] * y + y = self.mask[None,:,:] * y + + postacts = y.clone().permute(0,2,1) + + y = torch.sum(y, dim=1) # shape (batch, out_dim) + return y, preacts, postacts, postspline + + def update_grid_from_samples(self, x): + ''' + update grid from samples + + Args: + ----- + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + None + + Example + ------- + >>> model = KANLayer(in_dim=1, out_dim=1, num=5, k=3) + >>> print(model.grid.data) + >>> x = torch.linspace(-3,3,steps=100)[:,None] + >>> model.update_grid_from_samples(x) + >>> print(model.grid.data) + tensor([[-1.0000, -0.6000, -0.2000, 0.2000, 0.6000, 1.0000]]) + tensor([[-3.0002, -1.7882, -0.5763, 0.6357, 1.8476, 3.0002]]) + ''' + batch = x.shape[0] + #x = torch.einsum('ij,k->ikj', x, torch.ones(self.out_dim, ).to(self.device)).reshape(batch, self.size).permute(1, 0) + x_pos = torch.sort(x, dim=0)[0] + y_eval = coef2curve(x_pos, self.grid, self.coef, self.k) + num_interval = self.grid.shape[1] - 1 - 2*self.k + ids = [int(batch / num_interval * i) for i in range(num_interval)] + [-1] + grid_adaptive = x_pos[ids, :].permute(1,0) + margin = 0.01 + h = (grid_adaptive[:,[-1]] - grid_adaptive[:,[0]])/num_interval + grid_uniform = grid_adaptive[:,[0]] + h * torch.arange(num_interval+1,)[None, :].to(x.device) + grid = 
self.grid_eps * grid_uniform + (1 - self.grid_eps) * grid_adaptive + self.grid.data = extend_grid(grid, k_extend=self.k) + self.coef.data = curve2coef(x_pos, y_eval, self.grid, self.k) + + def initialize_grid_from_parent(self, parent, x): + ''' + update grid from a parent KANLayer & samples + + Args: + ----- + parent : KANLayer + a parent KANLayer (whose grid is usually coarser than the current model) + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + None + + Example + ------- + >>> batch = 100 + >>> parent_model = KANLayer(in_dim=1, out_dim=1, num=5, k=3) + >>> print(parent_model.grid.data) + >>> model = KANLayer(in_dim=1, out_dim=1, num=10, k=3) + >>> x = torch.normal(0,1,size=(batch, 1)) + >>> model.initialize_grid_from_parent(parent_model, x) + >>> print(model.grid.data) + tensor([[-1.0000, -0.6000, -0.2000, 0.2000, 0.6000, 1.0000]]) + tensor([[-1.0000, -0.8000, -0.6000, -0.4000, -0.2000, 0.0000, 0.2000, 0.4000, + 0.6000, 0.8000, 1.0000]]) + ''' + batch = x.shape[0] + # preacts: shape (batch, in_dim) => shape (size, batch) (size = out_dim * in_dim) + #x_eval = torch.einsum('ij,k->ikj', x, torch.ones(self.out_dim, ).to(self.device)).reshape(batch, self.size).permute(1, 0) + x_eval = x + pgrid = parent.grid # (in_dim, G+2*k+1) + pk = parent.k + y_eval = coef2curve(x_eval, pgrid, parent.coef, pk) + + h = (pgrid[:,[-pk]] - pgrid[:,[pk]])/self.num + grid = pgrid[:,[pk]] + torch.arange(self.num+1,) * h + grid = extend_grid(grid, k_extend=self.k) + self.grid.data = grid + self.coef.data = curve2coef(x_eval, y_eval, self.grid, self.k) + + def get_subset(self, in_id, out_id): + ''' + get a smaller KANLayer from a larger KANLayer (used for pruning) + + Args: + ----- + in_id : list + id of selected input neurons + out_id : list + id of selected output neurons + + Returns: + -------- + spb : KANLayer + + Example + ------- + >>> kanlayer_large = KANLayer(in_dim=10, out_dim=10, num=5, k=3) + >>> kanlayer_small = kanlayer_large.get_subset([0,9],[1,2,3]) + >>> kanlayer_small.in_dim, kanlayer_small.out_dim + (2, 3) + ''' + spb = KANLayer(len(in_id), len(out_id), self.num, self.k, base_fun=self.base_fun) + spb.grid.data = self.grid[in_id] + spb.coef.data = self.coef[in_id][:,out_id] + spb.scale_base.data = self.scale_base[in_id][:,out_id] + spb.scale_sp.data = self.scale_sp[in_id][:,out_id] + spb.mask.data = self.mask[in_id][:,out_id] + + spb.in_dim = len(in_id) + spb.out_dim = len(out_id) + return spb + + + def swap(self, i1, i2, mode='in'): + + with torch.no_grad(): + def swap_(data, i1, i2, mode='in'): + if mode == 'in': + data[i1], data[i2] = data[i2].clone(), data[i1].clone() + elif mode == 'out': + data[:,i1], data[:,i2] = data[:,i2].clone(), data[:,i1].clone() + + if mode == 'in': + swap_(self.grid.data, i1, i2, mode='in') + swap_(self.coef.data, i1, i2, mode=mode) + swap_(self.scale_base.data, i1, i2, mode=mode) + swap_(self.scale_sp.data, i1, i2, mode=mode) + swap_(self.mask.data, i1, i2, mode=mode) + diff --git a/kan/.ipynb_checkpoints/LBFGS-checkpoint.py b/kan/.ipynb_checkpoints/LBFGS-checkpoint.py new file mode 100644 index 00000000..212477f2 --- /dev/null +++ b/kan/.ipynb_checkpoints/LBFGS-checkpoint.py @@ -0,0 +1,493 @@ +import torch +from functools import reduce +from torch.optim import Optimizer + +__all__ = ['LBFGS'] + +def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None): + # ported from https://github.com/torch/optim/blob/master/polyinterp.lua + # Compute bounds of interpolation area + if bounds is not None: + xmin_bound, 
xmax_bound = bounds + else: + xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1) + + # Code for most common case: cubic interpolation of 2 points + # w/ function and derivative values for both + # Solution in this case (where x2 is the farthest point): + # d1 = g1 + g2 - 3*(f1-f2)/(x1-x2); + # d2 = sqrt(d1^2 - g1*g2); + # min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2)); + # t_new = min(max(min_pos,xmin_bound),xmax_bound); + d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2) + d2_square = d1**2 - g1 * g2 + if d2_square >= 0: + d2 = d2_square.sqrt() + if x1 <= x2: + min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2)) + else: + min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2)) + return min(max(min_pos, xmin_bound), xmax_bound) + else: + return (xmin_bound + xmax_bound) / 2. + + +def _strong_wolfe(obj_func, + x, + t, + d, + f, + g, + gtd, + c1=1e-4, + c2=0.9, + tolerance_change=1e-9, + max_ls=25): + # ported from https://github.com/torch/optim/blob/master/lswolfe.lua + d_norm = d.abs().max() + g = g.clone(memory_format=torch.contiguous_format) + # evaluate objective and gradient using initial step + f_new, g_new = obj_func(x, t, d) + ls_func_evals = 1 + gtd_new = g_new.dot(d) + + # bracket an interval containing a point satisfying the Wolfe criteria + t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd + done = False + ls_iter = 0 + while ls_iter < max_ls: + # check conditions + #print(f_prev, f_new, g_new) + if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev): + bracket = [t_prev, t] + bracket_f = [f_prev, f_new] + bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] + bracket_gtd = [gtd_prev, gtd_new] + break + + if abs(gtd_new) <= -c2 * gtd: + bracket = [t] + bracket_f = [f_new] + bracket_g = [g_new] + done = True + break + + if gtd_new >= 0: + bracket = [t_prev, t] + bracket_f = [f_prev, f_new] + bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] + bracket_gtd = [gtd_prev, gtd_new] + break + + # interpolate + min_step = t + 0.01 * (t - t_prev) + max_step = t * 10 + tmp = t + t = _cubic_interpolate( + t_prev, + f_prev, + gtd_prev, + t, + f_new, + gtd_new, + bounds=(min_step, max_step)) + + # next step + t_prev = tmp + f_prev = f_new + g_prev = g_new.clone(memory_format=torch.contiguous_format) + gtd_prev = gtd_new + f_new, g_new = obj_func(x, t, d) + ls_func_evals += 1 + gtd_new = g_new.dot(d) + ls_iter += 1 + + + # reached max number of iterations? + if ls_iter == max_ls: + bracket = [0, t] + bracket_f = [f, f_new] + bracket_g = [g, g_new] + + # zoom phase: we now have a point satisfying the criteria, or + # a bracket around it. We refine the bracket until we find the + # exact point satisfying the criteria + insuf_progress = False + # find high and low points in bracket + low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0) + while not done and ls_iter < max_ls: + # line-search bracket is so small + if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change: + break + + # compute new trial value + t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0], + bracket[1], bracket_f[1], bracket_gtd[1]) + + # test that we are making sufficient progress: + # in case `t` is so close to boundary, we mark that we are making + # insufficient progress, and if + # + we have made insufficient progress in the last step, or + # + `t` is at one of the boundary, + # we will move `t` to a position which is `0.1 * len(bracket)` + # away from the nearest boundary point. 
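+        # e.g. (illustrative): with bracket = [0.2, 1.0], eps = 0.08; a trial point
+        # stuck within 0.08 of an endpoint is moved to 0.92 or 0.28 respectively.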
+ eps = 0.1 * (max(bracket) - min(bracket)) + if min(max(bracket) - t, t - min(bracket)) < eps: + # interpolation close to boundary + if insuf_progress or t >= max(bracket) or t <= min(bracket): + # evaluate at 0.1 away from boundary + if abs(t - max(bracket)) < abs(t - min(bracket)): + t = max(bracket) - eps + else: + t = min(bracket) + eps + insuf_progress = False + else: + insuf_progress = True + else: + insuf_progress = False + + # Evaluate new point + f_new, g_new = obj_func(x, t, d) + ls_func_evals += 1 + gtd_new = g_new.dot(d) + ls_iter += 1 + + if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]: + # Armijo condition not satisfied or not lower than lowest point + bracket[high_pos] = t + bracket_f[high_pos] = f_new + bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format) + bracket_gtd[high_pos] = gtd_new + low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0) + else: + if abs(gtd_new) <= -c2 * gtd: + # Wolfe conditions satisfied + done = True + elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0: + # old low becomes new high + bracket[high_pos] = bracket[low_pos] + bracket_f[high_pos] = bracket_f[low_pos] + bracket_g[high_pos] = bracket_g[low_pos] + bracket_gtd[high_pos] = bracket_gtd[low_pos] + + # new point becomes new low + bracket[low_pos] = t + bracket_f[low_pos] = f_new + bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format) + bracket_gtd[low_pos] = gtd_new + + #print(bracket) + if len(bracket) == 1: + t = bracket[0] + f_new = bracket_f[0] + g_new = bracket_g[0] + else: + t = bracket[low_pos] + f_new = bracket_f[low_pos] + g_new = bracket_g[low_pos] + return f_new, g_new, t, ls_func_evals + + + +class LBFGS(Optimizer): + """Implements L-BFGS algorithm. + + Heavily inspired by `minFunc + `_. + + .. warning:: + This optimizer doesn't support per-parameter options and parameter + groups (there can be only one). + + .. warning:: + Right now all parameters have to be on a single device. This will be + improved in the future. + + .. note:: + This is a very memory intensive optimizer (it requires additional + ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory + try reducing the history size, or use a different algorithm. + + Args: + lr (float): learning rate (default: 1) + max_iter (int): maximal number of iterations per optimization step + (default: 20) + max_eval (int): maximal number of function evaluations per optimization + step (default: max_iter * 1.25). + tolerance_grad (float): termination tolerance on first order optimality + (default: 1e-7). + tolerance_change (float): termination tolerance on function + value/parameter changes (default: 1e-9). + history_size (int): update history size (default: 100). + line_search_fn (str): either 'strong_wolfe' or None (default: None). 
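+
+    Example (illustrative sketch; assumes ``model``, ``x``, ``y`` and a
+    ``loss_fn(pred, target)`` are defined by the caller):
+        >>> optimizer = LBFGS(model.parameters(), lr=1., history_size=10,
+        ...                   line_search_fn='strong_wolfe')
+        >>> def closure():
+        ...     optimizer.zero_grad()
+        ...     loss = loss_fn(model(x), y)
+        ...     loss.backward()
+        ...     return loss
+        >>> optimizer.step(closure)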
+ """ + + def __init__(self, + params, + lr=1, + max_iter=20, + max_eval=None, + tolerance_grad=1e-7, + tolerance_change=1e-9, + tolerance_ys=1e-32, + history_size=100, + line_search_fn=None): + if max_eval is None: + max_eval = max_iter * 5 // 4 + defaults = dict( + lr=lr, + max_iter=max_iter, + max_eval=max_eval, + tolerance_grad=tolerance_grad, + tolerance_change=tolerance_change, + tolerance_ys=tolerance_ys, + history_size=history_size, + line_search_fn=line_search_fn) + super().__init__(params, defaults) + + if len(self.param_groups) != 1: + raise ValueError("LBFGS doesn't support per-parameter options " + "(parameter groups)") + + self._params = self.param_groups[0]['params'] + self._numel_cache = None + + def _numel(self): + if self._numel_cache is None: + self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0) + return self._numel_cache + + def _gather_flat_grad(self): + views = [] + for p in self._params: + if p.grad is None: + view = p.new(p.numel()).zero_() + elif p.grad.is_sparse: + view = p.grad.to_dense().view(-1) + else: + view = p.grad.view(-1) + views.append(view) + device = views[0].device + return torch.cat(views, dim=0) + + def _add_grad(self, step_size, update): + offset = 0 + for p in self._params: + numel = p.numel() + # view as to avoid deprecated pointwise semantics + p.add_(update[offset:offset + numel].view_as(p), alpha=step_size) + offset += numel + assert offset == self._numel() + + def _clone_param(self): + return [p.clone(memory_format=torch.contiguous_format) for p in self._params] + + def _set_param(self, params_data): + for p, pdata in zip(self._params, params_data): + p.copy_(pdata) + + def _directional_evaluate(self, closure, x, t, d): + self._add_grad(t, d) + loss = float(closure()) + flat_grad = self._gather_flat_grad() + self._set_param(x) + return loss, flat_grad + + + @torch.no_grad() + def step(self, closure): + """Perform a single optimization step. + + Args: + closure (Callable): A closure that reevaluates the model + and returns the loss. 
+ """ + + torch.manual_seed(0) + + assert len(self.param_groups) == 1 + + # Make sure the closure is always called with grad enabled + closure = torch.enable_grad()(closure) + + group = self.param_groups[0] + lr = group['lr'] + max_iter = group['max_iter'] + max_eval = group['max_eval'] + tolerance_grad = group['tolerance_grad'] + tolerance_change = group['tolerance_change'] + tolerance_ys = group['tolerance_ys'] + line_search_fn = group['line_search_fn'] + history_size = group['history_size'] + + # NOTE: LBFGS has only global state, but we register it as state for + # the first param, because this helps with casting in load_state_dict + state = self.state[self._params[0]] + state.setdefault('func_evals', 0) + state.setdefault('n_iter', 0) + + # evaluate initial f(x) and df/dx + orig_loss = closure() + loss = float(orig_loss) + current_evals = 1 + state['func_evals'] += 1 + + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + + # optimal condition + if opt_cond: + return orig_loss + + # tensors cached in state (for tracing) + d = state.get('d') + t = state.get('t') + old_dirs = state.get('old_dirs') + old_stps = state.get('old_stps') + ro = state.get('ro') + H_diag = state.get('H_diag') + prev_flat_grad = state.get('prev_flat_grad') + prev_loss = state.get('prev_loss') + + n_iter = 0 + # optimize for a max of max_iter iterations + while n_iter < max_iter: + # keep track of nb of iterations + n_iter += 1 + state['n_iter'] += 1 + + ############################################################ + # compute gradient descent direction + ############################################################ + if state['n_iter'] == 1: + d = flat_grad.neg() + old_dirs = [] + old_stps = [] + ro = [] + H_diag = 1 + else: + # do lbfgs update (update memory) + y = flat_grad.sub(prev_flat_grad) + s = d.mul(t) + ys = y.dot(s) # y*s + if ys > tolerance_ys: + # updating memory + if len(old_dirs) == history_size: + # shift history by one (limited-memory) + old_dirs.pop(0) + old_stps.pop(0) + ro.pop(0) + + # store new direction/step + old_dirs.append(y) + old_stps.append(s) + ro.append(1. / ys) + + # update scale of initial Hessian approximation + H_diag = ys / y.dot(y) # (y*y) + + # compute the approximate (L-BFGS) inverse Hessian + # multiplied by the gradient + num_old = len(old_dirs) + + if 'al' not in state: + state['al'] = [None] * history_size + al = state['al'] + + # iteration in L-BFGS loop collapsed to use just one buffer + q = flat_grad.neg() + for i in range(num_old - 1, -1, -1): + al[i] = old_stps[i].dot(q) * ro[i] + q.add_(old_dirs[i], alpha=-al[i]) + + # multiply by initial Hessian + # r/d is the final direction + d = r = torch.mul(q, H_diag) + for i in range(num_old): + be_i = old_dirs[i].dot(r) * ro[i] + r.add_(old_stps[i], alpha=al[i] - be_i) + + if prev_flat_grad is None: + prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format) + else: + prev_flat_grad.copy_(flat_grad) + prev_loss = loss + + ############################################################ + # compute step length + ############################################################ + # reset initial guess for step size + if state['n_iter'] == 1: + t = min(1., 1. 
/ flat_grad.abs().sum()) * lr + else: + t = lr + + # directional derivative + gtd = flat_grad.dot(d) # g * d + + # directional derivative is below tolerance + if gtd > -tolerance_change: + break + + # optional line search: user function + ls_func_evals = 0 + if line_search_fn is not None: + # perform line search, using user function + if line_search_fn != "strong_wolfe": + raise RuntimeError("only 'strong_wolfe' is supported") + else: + x_init = self._clone_param() + + def obj_func(x, t, d): + return self._directional_evaluate(closure, x, t, d) + loss, flat_grad, t, ls_func_evals = _strong_wolfe( + obj_func, x_init, t, d, loss, flat_grad, gtd) + self._add_grad(t, d) + opt_cond = flat_grad.abs().max() <= tolerance_grad + else: + # no line search, simply move with fixed-step + self._add_grad(t, d) + if n_iter != max_iter: + # re-evaluate function only if not in last iteration + # the reason we do this: in a stochastic setting, + # no use to re-evaluate that function here + with torch.enable_grad(): + loss = float(closure()) + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + ls_func_evals = 1 + + # update func eval + current_evals += ls_func_evals + state['func_evals'] += ls_func_evals + + ############################################################ + # check conditions + ############################################################ + if n_iter == max_iter: + break + + if current_evals >= max_eval: + break + + # optimal condition + if opt_cond: + break + + # lack of progress + if d.mul(t).abs().max() <= tolerance_change: + break + + if abs(loss - prev_loss) < tolerance_change: + break + + state['d'] = d + state['t'] = t + state['old_dirs'] = old_dirs + state['old_stps'] = old_stps + state['ro'] = ro + state['H_diag'] = H_diag + state['prev_flat_grad'] = prev_flat_grad + state['prev_loss'] = prev_loss + + return orig_loss diff --git a/kan/.ipynb_checkpoints/MultKAN-checkpoint.py b/kan/.ipynb_checkpoints/MultKAN-checkpoint.py new file mode 100644 index 00000000..39dd62b5 --- /dev/null +++ b/kan/.ipynb_checkpoints/MultKAN-checkpoint.py @@ -0,0 +1,1815 @@ +import torch +import torch.nn as nn +import numpy as np +from .KANLayer import KANLayer +#from .Symbolic_MultKANLayer import * +from .Symbolic_KANLayer import Symbolic_KANLayer +from .LBFGS import * +import os +import glob +import matplotlib.pyplot as plt +from tqdm import tqdm +import random +import copy +#from .MultKANLayer import MultKANLayer +import pandas as pd +from sympy.printing import latex +from sympy import * +import sympy +import yaml +from .spline import curve2coef +from .utils import SYMBOLIC_LIB +from .hypothesis import plot_tree + +class MultKAN(nn.Module): + + # include mult_ops = [] + def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, scale_base_mu=0.0, scale_base_sigma=1.0, base_fun='silu', symbolic_enabled=True, affine_trainable=False, grid_eps=1.0, grid_range=[-1, 1], sp_trainable=True, sb_trainable=True, seed=1, save_act=True, sparse_init=False, auto_save=True, first_init=True, ckpt_path='./model', state_id=0, round=0): + + super(MultKAN, self).__init__() + + torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + + ### initializeing the numerical front ### + + self.act_fun = [] + self.depth = len(width) - 1 + + for i in range(len(width)): + if type(width[i]) == int: + width[i] = [width[i],0] + + self.width = width + + # if mult_arity is just a scalar, we extend it to a list of lists + # e.g, mult_arity = [[2,3],[4]] means that in the first 
hidden layer, 2 mult ops have arity 2 and 3, respectively; + # in the second hidden layer, 1 mult op has arity 4. + if isinstance(mult_arity, int): + self.mult_homo = True # when homo is True, parallelization is possible + else: + self.mult_homo = False # when home if False, for loop is required. + self.mult_arity = mult_arity + + width_in = self.width_in + width_out = self.width_out + + self.base_fun_name = base_fun + if base_fun == 'silu': + base_fun = torch.nn.SiLU() + elif base_fun == 'identity': + base_fun = torch.nn.Identity() + + self.grid_eps = grid_eps + self.grid_range = grid_range + + + for l in range(self.depth): + # splines + scale_base = scale_base_mu * 1 / np.sqrt(width_in[l]) + \ + scale_base_sigma * (torch.randn(width_in[l], width_out[l + 1]) * 2 - 1) * 1/np.sqrt(width_in[l]) + sp_batch = KANLayer(in_dim=width_in[l], out_dim=width_out[l+1], num=grid, k=k, noise_scale=noise_scale, scale_base=scale_base, scale_sp=1., base_fun=base_fun, grid_eps=grid_eps, grid_range=grid_range, sp_trainable=sp_trainable, sb_trainable=sb_trainable, sparse_init=sparse_init) + self.act_fun.append(sp_batch) + + self.node_bias = [] + self.node_scale = [] + self.subnode_bias = [] + self.subnode_scale = [] + + globals()['self.node_bias_0'] = torch.nn.Parameter(torch.zeros(3,1)).requires_grad_(False) + #self.node_bias_0 = torch.nn.Parameter(torch.zeros(3,1)).requires_grad_(False) + exec('self.node_bias_0' + " = torch.nn.Parameter(torch.zeros(3,1)).requires_grad_(False)") + + for l in range(self.depth): + exec(f'self.node_bias_{l} = torch.nn.Parameter(torch.zeros(width_in[l+1],)).requires_grad_(affine_trainable)') + exec(f'self.node_scale_{l} = torch.nn.Parameter(torch.ones(width_in[l+1],)).requires_grad_(affine_trainable)') + exec(f'self.subnode_bias_{l} = torch.nn.Parameter(torch.zeros(width_out[l+1],)).requires_grad_(affine_trainable)') + exec(f'self.subnode_scale_{l} = torch.nn.Parameter(torch.ones(width_out[l+1],)).requires_grad_(affine_trainable)') + exec(f'self.node_bias.append(self.node_bias_{l})') + exec(f'self.node_scale.append(self.node_scale_{l})') + exec(f'self.subnode_bias.append(self.subnode_bias_{l})') + exec(f'self.subnode_scale.append(self.subnode_scale_{l})') + + + self.act_fun = nn.ModuleList(self.act_fun) + + self.grid = grid + self.k = k + self.base_fun = base_fun + + ### initializing the symbolic front ### + self.symbolic_fun = [] + for l in range(self.depth): + sb_batch = Symbolic_KANLayer(in_dim=width_in[l], out_dim=width_out[l+1]) + self.symbolic_fun.append(sb_batch) + + self.symbolic_fun = nn.ModuleList(self.symbolic_fun) + self.symbolic_enabled = symbolic_enabled + self.affine_trainable = affine_trainable + self.sp_trainable = sp_trainable + self.sb_trainable = sb_trainable + + self.save_act = save_act + + self.node_scores = None + self.edge_scores = None + self.subnode_scores = None + + self.cache_data = None + self.acts = None + + self.auto_save = auto_save + self.state_id = 0 + self.ckpt_path = ckpt_path + self.round = round + + if auto_save: + if first_init: + if not os.path.exists(ckpt_path): + # Create the directory + os.makedirs(ckpt_path) + print(f"checkpoint directory created: {ckpt_path}") + print('saving model version 0.0') + + history_path = self.ckpt_path+'/history.txt' + with open(history_path, 'w') as file: + file.write(f'### Round {self.round} ###' + '\n') + file.write('init => 0.0' + '\n') + self.saveckpt(path=self.ckpt_path+'/'+'0.0') + else: + self.state_id = state_id + + self.input_id = torch.arange(self.width_in[0],) + + def 
initialize_from_another_model(self, another_model, x): + another_model(x) # get activations + batch = x.shape[0] + + self.initialize_grid_from_another_model(another_model, x) + + for l in range(self.depth): + spb = self.act_fun[l] + #spb_parent = another_model.act_fun[l] + + # spb = spb_parent + preacts = another_model.spline_preacts[l] + postsplines = another_model.spline_postsplines[l] + self.act_fun[l].coef.data = curve2coef(preacts[:,0,:], postsplines.permute(0,2,1), spb.grid, k=spb.k) + self.act_fun[l].scale_base.data = another_model.act_fun[l].scale_base.data + self.act_fun[l].scale_sp.data = another_model.act_fun[l].scale_sp.data + self.act_fun[l].mask.data = another_model.act_fun[l].mask.data + + for l in range(self.depth): + self.node_bias[l].data = another_model.node_bias[l].data + self.node_scale[l].data = another_model.node_scale[l].data + + self.subnode_bias[l].data = another_model.subnode_bias[l].data + self.subnode_scale[l].data = another_model.subnode_scale[l].data + + for l in range(self.depth): + self.symbolic_fun[l] = another_model.symbolic_fun[l] + + return self.to(device) + + def log_history(self, method_name): + + if self.auto_save: + + # save to log file + #print(func.__name__) + with open(self.ckpt_path+'/history.txt', 'a') as file: + file.write(str(self.round)+'.'+str(self.state_id)+' => '+ method_name + ' => ' + str(self.round)+'.'+str(self.state_id+1) + '\n') + + # update state_id + self.state_id += 1 + + # save to ckpt + self.saveckpt(path=self.ckpt_path+'/'+str(self.round)+'.'+str(self.state_id)) + print('saving model version '+str(self.round)+'.'+str(self.state_id)) + + + def refine(self, new_grid): + + model_new = MultKAN(width=self.width, + grid=new_grid, + k=self.k, + mult_arity=self.mult_arity, + base_fun=self.base_fun_name, + symbolic_enabled=self.symbolic_enabled, + affine_trainable=self.affine_trainable, + grid_eps=self.grid_eps, + grid_range=self.grid_range, + sp_trainable=self.sp_trainable, + sb_trainable=self.sb_trainable, + ckpt_path=self.ckpt_path, + auto_save=True, + first_init=False, + state_id=self.state_id, + round=self.round) + + model_new.initialize_from_another_model(self, self.cache_data) + model_new.cache_data = self.cache_data + model_new.grid = new_grid + + self.log_history('refine') + model_new.state_id += 1 + + return model_new + + + def saveckpt(self, path='model'): + + model = self + + dic = dict( + width = model.width, + grid = model.grid, + k = model.k, + mult_arity = model.mult_arity, + base_fun_name = model.base_fun_name, + symbolic_enabled = model.symbolic_enabled, + affine_trainable = model.affine_trainable, + grid_eps = model.grid_eps, + grid_range = model.grid_range, + sp_trainable = model.sp_trainable, + sb_trainable = model.sb_trainable, + state_id = model.state_id, + auto_save = model.auto_save, + ckpt_path = model.ckpt_path, + round = model.round + ) + + for i in range (model.depth): + dic[f'symbolic.funs_name.{i}'] = model.symbolic_fun[i].funs_name + + with open(f'{path}_config.yml', 'w') as outfile: + yaml.dump(dic, outfile, default_flow_style=False) + + torch.save(model.state_dict(), f'{path}_state') + torch.save(model.cache_data, f'{path}_cache_data') + + @staticmethod + def loadckpt(path='model'): + with open(f'{path}_config.yml', 'r') as stream: + config = yaml.safe_load(stream) + + state = torch.load(f'{path}_state') + + model_load = MultKAN(width=config['width'], + grid=config['grid'], + k=config['k'], + mult_arity = config['mult_arity'], + base_fun=config['base_fun_name'], + 
symbolic_enabled=config['symbolic_enabled'], + affine_trainable=config['affine_trainable'], + grid_eps=config['grid_eps'], + grid_range=config['grid_range'], + sp_trainable=config['sp_trainable'], + sb_trainable=config['sb_trainable'], + state_id=config['state_id'], + auto_save=config['auto_save'], + first_init=False, + ckpt_path=config['ckpt_path'], + round = config['round']+1) + + model_load.load_state_dict(state) + model_load.cache_data = torch.load(f'{path}_cache_data') + + depth = len(model_load.width) - 1 + for l in range(depth): + out_dim = model_load.symbolic_fun[l].out_dim + in_dim = model_load.symbolic_fun[l].in_dim + funs_name = config[f'symbolic.funs_name.{l}'] + for j in range(out_dim): + for i in range(in_dim): + fun_name = funs_name[j][i] + model_load.symbolic_fun[l].funs_name[j][i] = fun_name + model_load.symbolic_fun[l].funs[j][i] = SYMBOLIC_LIB[fun_name][0] + model_load.symbolic_fun[l].funs_sympy[j][i] = SYMBOLIC_LIB[fun_name][1] + model_load.symbolic_fun[l].funs_avoid_singularity[j][i] = SYMBOLIC_LIB[fun_name][3] + return model_load + + def rewind(self, model_id): + + self.round += 1 + self.state_id = model_id.split('.')[-1] + + history_path = self.ckpt_path+'/history.txt' + with open(history_path, 'a') as file: + file.write(f'### Round {self.round} ###' + '\n') + + self.saveckpt(path=self.ckpt_path+'/'+f'{self.round}.{self.state_id}') + + print('rewind to model version '+f'{self.round-1}.{self.state_id}'+', renamed as '+f'{self.round}.{self.state_id}') + + return MultKAN.loadckpt(path=self.ckpt_path+'/'+str(model_id)) + + + def checkout(self, model_id): + return MultKAN.loadckpt(path=self.ckpt_path+'/'+str(model_id)) + + @property + def width_in(self): + width = self.width + width_in = [width[l][0]+width[l][1] for l in range(len(width))] + return width_in + + @property + def width_out(self): + width = self.width + if self.mult_homo == True: + width_out = [width[l][0]+self.mult_arity*width[l][1] for l in range(len(width))] + else: + width_out = [width[l][0]+int(np.sum(self.mult_arity[l])) for l in range(len(width))] + return width_out + + @property + def n_sum(self): + width = self.width + n_sum = [width[l][0] for l in range(1,len(width)-1)] + return n_sum + + @property + def n_mult(self): + width = self.width + n_mult = [width[l][1] for l in range(1,len(width)-1)] + return n_mult + + @property + def feature_score(self): + self.attribute() + if self.node_scores == None: + return None + else: + return self.node_scores[0] + + def update_grid_from_samples(self, x): + for l in range(self.depth): + self.get_act(x) + self.act_fun[l].update_grid_from_samples(self.acts[l]) + + def update_grid(self, x): + self.update_grid_from_samples(x) + + def initialize_grid_from_another_model(self, model, x): + model(x) + for l in range(self.depth): + self.act_fun[l].initialize_grid_from_parent(model.act_fun[l], model.acts[l]) + + def forward(self, x, singularity_avoiding=False, y_th=10.): + + assert x.shape[1] == self.width_in[0] + + x = x[:,self.input_id.long()] + + # cache data + self.cache_data = x + + self.acts = [] # shape ([batch, n0], [batch, n1], ..., [batch, n_L]) + self.acts_premult = [] + self.spline_preacts = [] + self.spline_postsplines = [] + self.spline_postacts = [] + self.acts_scale = [] + self.acts_scale_spline = [] + self.subnode_actscale = [] + self.edge_actscale = [] + # self.neurons_scale = [] + + self.acts.append(x) # acts shape: (batch, width[l]) + + for l in range(self.depth): + + x_numerical, preacts, postacts_numerical, postspline = self.act_fun[l](x) + 
#print(preacts, postacts_numerical, postspline) + + if self.symbolic_enabled == True: + x_symbolic, postacts_symbolic = self.symbolic_fun[l](x, singularity_avoiding=singularity_avoiding, y_th=y_th) + else: + x_symbolic = 0. + postacts_symbolic = 0. + + x = x_numerical + x_symbolic + + if self.save_act: + # save subnode_scale + self.subnode_actscale.append(torch.std(x, dim=0).detach()) + + # subnode affine transform + x = self.subnode_scale[l][None,:] * x + self.subnode_bias[l][None,:] + + if self.save_act: + postacts = postacts_numerical + postacts_symbolic + + # self.neurons_scale.append(torch.mean(torch.abs(x), dim=0)) + #grid_reshape = self.act_fun[l].grid.reshape(self.width_out[l + 1], self.width_in[l], -1) + input_range = torch.std(preacts, dim=0) + 0.1 + output_range_spline = torch.std(postacts_numerical, dim=0) # for training, only penalize the spline part + output_range = torch.std(postacts, dim=0) # for visualization, include the contribution from both spline + symbolic + # save edge_scale + self.edge_actscale.append(output_range) + + self.acts_scale.append((output_range / input_range).detach()) + self.acts_scale_spline.append(output_range_spline / input_range) + self.spline_preacts.append(preacts.detach()) + self.spline_postacts.append(postacts.detach()) + self.spline_postsplines.append(postspline.detach()) + + self.acts_premult.append(x.detach()) + + # multiplication + dim_sum = self.width[l+1][0] + dim_mult = self.width[l+1][1] + + if self.mult_homo == True: + for i in range(self.mult_arity-1): + if i == 0: + x_mult = x[:,dim_sum::self.mult_arity] * x[:,dim_sum+1::self.mult_arity] + else: + x_mult = x_mult * x[:,dim_sum+i+1::self.mult_arity] + + else: + for j in range(dim_mult): + acml_id = dim_sum + np.sum(self.mult_arity[l+1][:j]) + for i in range(self.mult_arity[l+1][j]-1): + if i == 0: + x_mult_j = x[:,[acml_id]] * x[:,[acml_id+1]] + else: + x_mult_j = x_mult_j * x[:,[acml_id+i+1]] + + if j == 0: + x_mult = x_mult_j + else: + x_mult = torch.cat([x_mult, x_mult_j], dim=1) + + if self.width[l+1][1] > 0: + x = torch.cat([x[:,:dim_sum], x_mult], dim=1) + + # x = x + self.biases[l].weight + # node affine transform + x = self.node_scale[l][None,:] * x + self.node_bias[l][None,:] + + self.acts.append(x.detach()) + + + return x + + def set_mode(self, l, i, j, mode, mask_n=None): + if mode == "s": + mask_n = 0.; + mask_s = 1. + elif mode == "n": + mask_n = 1.; + mask_s = 0. + elif mode == "sn" or mode == "ns": + if mask_n == None: + mask_n = 1. + else: + mask_n = mask_n + mask_s = 1. + else: + mask_n = 0.; + mask_s = 0. 
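+        # mask_n toggles the numerical (spline) branch and mask_s the symbolic branch
+        # of edge (l, i, j); e.g. mode="s" keeps only the symbolic function active.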
+ + self.act_fun[l].mask.data[i][j] = mask_n + self.symbolic_fun[l].mask.data[j,i] = mask_s + + def fix_symbolic(self, l, i, j, fun_name, fit_params_bool=True, a_range=(-10, 10), b_range=(-10, 10), verbose=True, random=False, log_history=True): + if not fit_params_bool: + self.symbolic_fun[l].fix_symbolic(i, j, fun_name, verbose=verbose, random=random) + r2 = None + else: + x = self.acts[l][:, i] + mask = self.act_fun[l].mask + y = self.spline_postacts[l][:, j, i] + #y = self.postacts[l][:, j, i] + r2 = self.symbolic_fun[l].fix_symbolic(i, j, fun_name, x, y, a_range=a_range, b_range=b_range, verbose=verbose) + if mask[i,j] == 0: + r2 = - 1e8 + self.set_mode(l, i, j, mode="s") + + if log_history: + self.log_history('fix_symbolic') + return r2 + + def unfix_symbolic(self, l, i, j, log_history=True): + self.set_mode(l, i, j, mode="n") + self.symbolic_fun[l].funs_name[j][i] = "0" + if log_history: + self.log_history('unfix_symbolic') + + def unfix_symbolic_all(self): + for l in range(len(self.width) - 1): + for i in range(self.width[l]): + for j in range(self.width[l + 1]): + self.unfix_symbolic(l, i, j) + + def get_range(self, l, i, j, verbose=True): + x = self.spline_preacts[l][:, j, i] + y = self.spline_postacts[l][:, j, i] + x_min = torch.min(x) + x_max = torch.max(x) + y_min = torch.min(y) + y_max = torch.max(y) + if verbose: + print('x range: [' + '%.2f' % x_min, ',', '%.2f' % x_max, ']') + print('y range: [' + '%.2f' % y_min, ',', '%.2f' % y_max, ']') + return x_min, x_max, y_min, y_max + + def plot(self, folder="./figures", beta=3, mask=False, metric='backward', scale=0.5, tick=False, sample=False, in_vars=None, out_vars=None, title=None, varscale=1.0): + + global Symbol + + if not self.save_act: + print('cannot plot since data are not saved. Set save_act=True first.') + + # forward to obtain activations + if self.acts == None: + if self.cache_data == None: + raise Exception('model hasn\'t seen any data yet.') + self.forward(self.cache_data) + + if metric == 'backward': + self.attribute() + + + if not os.path.exists(folder): + os.makedirs(folder) + # matplotlib.use('Agg') + depth = len(self.width) - 1 + for l in range(depth): + w_large = 2.0 + for i in range(self.width_in[l]): + for j in range(self.width_out[l+1]): + rank = torch.argsort(self.acts[l][:, i]) + fig, ax = plt.subplots(figsize=(w_large, w_large)) + + num = rank.shape[0] + + #print(self.width_in[l]) + #print(self.width_out[l+1]) + symbolic_mask = self.symbolic_fun[l].mask[j][i] + numeric_mask = self.act_fun[l].mask[i][j] + if symbolic_mask > 0. and numeric_mask > 0.: + color = 'purple' + alpha_mask = 1 + if symbolic_mask > 0. and numeric_mask == 0.: + color = "red" + alpha_mask = 1 + if symbolic_mask == 0. and numeric_mask > 0.: + color = "black" + alpha_mask = 1 + if symbolic_mask == 0. 
and numeric_mask == 0.: + color = "white" + alpha_mask = 0 + + + if tick == True: + ax.tick_params(axis="y", direction="in", pad=-22, labelsize=50) + ax.tick_params(axis="x", direction="in", pad=-15, labelsize=50) + x_min, x_max, y_min, y_max = self.get_range(l, i, j, verbose=False) + plt.xticks([x_min, x_max], ['%2.f' % x_min, '%2.f' % x_max]) + plt.yticks([y_min, y_max], ['%2.f' % y_min, '%2.f' % y_max]) + else: + plt.xticks([]) + plt.yticks([]) + if alpha_mask == 1: + plt.gca().patch.set_edgecolor('black') + else: + plt.gca().patch.set_edgecolor('white') + plt.gca().patch.set_linewidth(1.5) + # plt.axis('off') + + plt.plot(self.acts[l][:, i][rank].cpu().detach().numpy(), self.spline_postacts[l][:, j, i][rank].cpu().detach().numpy(), color=color, lw=5) + if sample == True: + plt.scatter(self.acts[l][:, i][rank].cpu().detach().numpy(), self.spline_postacts[l][:, j, i][rank].cpu().detach().numpy(), color=color, s=400 * scale ** 2) + plt.gca().spines[:].set_color(color) + + '''lock_id = self.act_fun[l].lock_id[j * self.width[l] + i].long().item() + if lock_id > 0: + im = plt.imread(f'{folder}/lock.png') + newax = fig.add_axes([0.15, 0.7, 0.15, 0.15]) + plt.text(500, 400, lock_id, fontsize=15) + newax.imshow(im) + newax.axis('off')''' + + plt.savefig(f'{folder}/sp_{l}_{i}_{j}.png', bbox_inches="tight", dpi=400) + plt.close() + + def score2alpha(score): + return np.tanh(beta * score) + + + if metric == 'forward_n': + scores = self.acts_scale + elif metric == 'forward_u': + scores = self.edge_actscale + elif metric == 'backward': + scores = self.edge_scores + else: + raise Exception(f'metric = \'{metric}\' not recognized') + + alpha = [score2alpha(score.cpu().detach().numpy()) for score in scores] + + # draw skeleton + width = np.array(self.width) + width_in = np.array(self.width_in) + width_out = np.array(self.width_out) + A = 1 + y0 = 0.3 # height: from input to pre-mult + z0 = 0.1 # height: from pre-mult to post-mult (input of next layer) + + neuron_depth = len(width) + min_spacing = A / np.maximum(np.max(width_out), 5) + + max_neuron = np.max(width_out) + max_num_weights = np.max(width_in[:-1] * width_out[1:]) + y1 = 0.4 / np.maximum(max_num_weights, 5) # size (height/width) of 1D function diagrams + y2 = 0.15 / np.maximum(max_neuron, 5) # size (height/width) of operations (sum and mult) + + fig, ax = plt.subplots(figsize=(10 * scale, 10 * scale * (neuron_depth - 1) * (y0+z0))) + # fig, ax = plt.subplots(figsize=(5,5*(neuron_depth-1)*y0)) + + # -- Transformation functions + DC_to_FC = ax.transData.transform + FC_to_NFC = fig.transFigure.inverted().transform + # -- Take data coordinates and transform them to normalized figure coordinates + DC_to_NFC = lambda x: FC_to_NFC(DC_to_FC(x)) + + # plot scatters and lines + for l in range(neuron_depth): + + n = width_in[l] + + # scatters + for i in range(n): + plt.scatter(1 / (2 * n) + i / n, l * (y0+z0), s=min_spacing ** 2 * 10000 * scale ** 2, color='black') + + # plot connections (input to pre-mult) + for i in range(n): + if l < neuron_depth - 1: + n_next = width_out[l+1] + N = n * n_next + for j in range(n_next): + id_ = i * n_next + j + + symbol_mask = self.symbolic_fun[l].mask[j][i] + numerical_mask = self.act_fun[l].mask[i][j] + if symbol_mask == 1. and numerical_mask > 0.: + color = 'purple' + alpha_mask = 1. + if symbol_mask == 1. and numerical_mask == 0.: + color = "red" + alpha_mask = 1. + if symbol_mask == 0. and numerical_mask == 1.: + color = "black" + alpha_mask = 1. + if symbol_mask == 0. 
and numerical_mask == 0.: + color = "white" + alpha_mask = 0. + + + if mask == True: + plt.plot([1 / (2 * n) + i / n, 1 / (2 * N) + id_ / N], [l * (y0+z0), l * (y0+z0) + y0/2 - y1], color=color, lw=2 * scale, alpha=alpha[l][j][i] * self.mask[l][i].item() * self.mask[l + 1][j].item()) + plt.plot([1 / (2 * N) + id_ / N, 1 / (2 * n_next) + j / n_next], [l * (y0+z0) + y0/2 + y1, l * (y0+z0)+y0], color=color, lw=2 * scale, alpha=alpha[l][j][i] * self.mask[l][i].item() * self.mask[l + 1][j].item()) + else: + plt.plot([1 / (2 * n) + i / n, 1 / (2 * N) + id_ / N], [l * (y0+z0), l * (y0+z0) + y0/2 - y1], color=color, lw=2 * scale, alpha=alpha[l][j][i] * alpha_mask) + plt.plot([1 / (2 * N) + id_ / N, 1 / (2 * n_next) + j / n_next], [l * (y0+z0) + y0/2 + y1, l * (y0+z0)+y0], color=color, lw=2 * scale, alpha=alpha[l][j][i] * alpha_mask) + + + # plot connections (pre-mult to post-mult, post-mult = next-layer input) + if l < neuron_depth - 1: + n_in = width_out[l+1] + n_out = width_in[l+1] + mult_id = 0 + for i in range(n_in): + if i < width[l+1][0]: + j = i + else: + if i == width[l+1][0]: + if isinstance(self.mult_arity,int): + ma = self.mult_arity + else: + ma = self.mult_arity[l+1][mult_id] + current_mult_arity = ma + if current_mult_arity == 0: + mult_id += 1 + if isinstance(self.mult_arity,int): + ma = self.mult_arity + else: + ma = self.mult_arity[l+1][mult_id] + current_mult_arity = ma + j = width[l+1][0] + mult_id + current_mult_arity -= 1 + #j = (i-width[l+1][0])//self.mult_arity + width[l+1][0] + plt.plot([1 / (2 * n_in) + i / n_in, 1 / (2 * n_out) + j / n_out], [l * (y0+z0) + y0, (l+1) * (y0+z0)], color='black', lw=2 * scale) + + + + plt.xlim(0, 1) + plt.ylim(-0.1 * (y0+z0), (neuron_depth - 1 + 0.1) * (y0+z0)) + + + plt.axis('off') + + for l in range(neuron_depth - 1): + # plot splines + n = width_in[l] + for i in range(n): + n_next = width_out[l + 1] + N = n * n_next + for j in range(n_next): + id_ = i * n_next + j + im = plt.imread(f'{folder}/sp_{l}_{i}_{j}.png') + left = DC_to_NFC([1 / (2 * N) + id_ / N - y1, 0])[0] + right = DC_to_NFC([1 / (2 * N) + id_ / N + y1, 0])[0] + bottom = DC_to_NFC([0, l * (y0+z0) + y0/2 - y1])[1] + up = DC_to_NFC([0, l * (y0+z0) + y0/2 + y1])[1] + newax = fig.add_axes([left, bottom, right - left, up - bottom]) + # newax = fig.add_axes([1/(2*N)+id_/N-y1, (l+1/2)*y0-y1, y1, y1], anchor='NE') + if mask == False: + newax.imshow(im, alpha=alpha[l][j][i]) + else: + ### make sure to run model.prune_node() first to compute mask ### + newax.imshow(im, alpha=alpha[l][j][i] * self.mask[l][i].item() * self.mask[l + 1][j].item()) + newax.axis('off') + + + # plot sum symbols + N = n = width_out[l+1] + for j in range(n): + id_ = j + path = os.path.dirname(os.path.abspath(__file__)) + "/assets/img/sum_symbol.png" + im = plt.imread(path) + left = DC_to_NFC([1 / (2 * N) + id_ / N - y2, 0])[0] + right = DC_to_NFC([1 / (2 * N) + id_ / N + y2, 0])[0] + bottom = DC_to_NFC([0, l * (y0+z0) + y0 - y2])[1] + up = DC_to_NFC([0, l * (y0+z0) + y0 + y2])[1] + newax = fig.add_axes([left, bottom, right - left, up - bottom]) + newax.imshow(im) + newax.axis('off') + + # plot mult symbols + N = n = width_in[l+1] + n_sum = width[l+1][0] + n_mult = width[l+1][1] + for j in range(n_mult): + id_ = j + n_sum + path = os.path.dirname(os.path.abspath(__file__)) + "/assets/img/mult_symbol.png" + im = plt.imread(path) + left = DC_to_NFC([1 / (2 * N) + id_ / N - y2, 0])[0] + right = DC_to_NFC([1 / (2 * N) + id_ / N + y2, 0])[0] + bottom = DC_to_NFC([0, (l+1) * (y0+z0) - y2])[1] + up = DC_to_NFC([0, (l+1) 
* (y0+z0) + y2])[1] + newax = fig.add_axes([left, bottom, right - left, up - bottom]) + newax.imshow(im) + newax.axis('off') + + if in_vars != None: + n = self.width_in[0] + for i in range(n): + if isinstance(in_vars[i], sympy.Expr): + plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), -0.1, f'${latex(in_vars[i])}$', fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center') + else: + plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), -0.1, in_vars[i], fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center') + + + + if out_vars != None: + n = self.width_in[-1] + for i in range(n): + if isinstance(out_vars[i], sympy.Expr): + plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), (y0+z0) * (len(self.width) - 1) + 0.15, f'${latex(out_vars[i])}$', fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center') + else: + plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), (y0+z0) * (len(self.width) - 1) + 0.15, out_vars[i], fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center') + + if title != None: + plt.gcf().get_axes()[0].text(0.5, (y0+z0) * (len(self.width) - 1) + 0.3, title, fontsize=40 * scale, horizontalalignment='center', verticalalignment='center') + + + def reg(self, reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff): + + if reg_metric == 'edge_forward_n': + acts_scale = self.acts_scale_spline + + if reg_metric == 'edge_forward_u': + acts_scale = self.edge_actscale + + if reg_metric == 'edge_backward': + acts_scale = self.edge_scores + + if reg_metric == 'node_backward': + acts_scale = self.node_attribute_scores + + reg_ = 0. + for i in range(len(acts_scale)): + vec = acts_scale[i] + + l1 = torch.sum(vec) + p_row = vec / (torch.sum(vec, dim=1, keepdim=True) + 1) + p_col = vec / (torch.sum(vec, dim=0, keepdim=True) + 1) + entropy_row = - torch.mean(torch.sum(p_row * torch.log2(p_row + 1e-4), dim=1)) + entropy_col = - torch.mean(torch.sum(p_col * torch.log2(p_col + 1e-4), dim=0)) + #entropy_row = torch.max(-torch.sum(p_row * torch.log2(p_row + 1e-4), dim=1)) + #entropy_col = torch.max(-torch.sum(p_col * torch.log2(p_col + 1e-4), dim=0)) + reg_ += lamb_l1 * l1 + lamb_entropy * (entropy_row + entropy_col) # both l1 and entropy + '''vec = vec.reshape(-1,) + p = vec / (torch.sum(vec) + 1e-4) + entropy = - torch.sum(p * torch.log2(p + 1e-4)) + reg_ += lamb_l1 * l1 + lamb_entropy * entropy # both l1 and entropy''' + + # regularize coefficient to encourage spline to be zero + for i in range(len(self.act_fun)): + coeff_l1 = torch.sum(torch.mean(torch.abs(self.act_fun[i].coef), dim=1)) + coeff_diff_l1 = torch.sum(torch.mean(torch.abs(torch.diff(self.act_fun[i].coef)), dim=1)) + reg_ += lamb_coef * coeff_l1 + lamb_coefdiff * coeff_diff_l1 + + return reg_ + + def get_reg(self, reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff): + return self.reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff) + + def disable_symbolic_in_fit(self, lamb): + + old_save_act = self.save_act + if lamb == 0.: + self.save_act = False + + # skip symbolic if no symbolic is turned on + depth = len(self.symbolic_fun) + no_symbolic = True + for l in range(depth): + no_symbolic *= torch.sum(torch.abs(self.symbolic_fun[l].mask)) == 0 + + old_symbolic_enabled = self.symbolic_enabled + + if no_symbolic: + self.symbolic_enabled = False + + return old_save_act, old_symbolic_enabled + + def get_params(self): + return self.parameters() + + + def fit(self, dataset, 
opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_entropy=2., lamb_coef=0., lamb_coefdiff=0., update_grid=True, grid_update_num=10, loss_fn=None, lr=1.,start_grid_update_step=-1, stop_grid_update_step=50, batch=-1, + metrics=None, save_fig=False, in_vars=None, out_vars=None, beta=3, save_fig_freq=1, img_folder='./video', singularity_avoiding=False, y_th=1000., reg_metric='edge_backward', display_metrics=None): + + if lamb > 0. and not self.save_act: + print('setting lamb=0. If you want to set lamb > 0, set self.save_act=True') + + old_save_act, old_symbolic_enabled = self.disable_symbolic_in_fit(lamb) + + pbar = tqdm(range(steps), desc='description', ncols=100) + + if loss_fn == None: + loss_fn = loss_fn_eval = lambda x, y: torch.mean((x - y) ** 2) + else: + loss_fn = loss_fn_eval = loss_fn + + grid_update_freq = int(stop_grid_update_step / grid_update_num) + + if opt == "Adam": + optimizer = torch.optim.Adam(self.get_params(), lr=lr) + elif opt == "LBFGS": + optimizer = LBFGS(self.get_params(), lr=lr, history_size=10, line_search_fn="strong_wolfe", tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32) + + results = {} + results['train_loss'] = [] + results['test_loss'] = [] + results['reg'] = [] + if metrics != None: + for i in range(len(metrics)): + results[metrics[i].__name__] = [] + + if batch == -1 or batch > dataset['train_input'].shape[0]: + batch_size = dataset['train_input'].shape[0] + batch_size_test = dataset['test_input'].shape[0] + else: + batch_size = batch + batch_size_test = batch + + global train_loss, reg_ + + def closure(): + global train_loss, reg_ + optimizer.zero_grad() + pred = self.forward(dataset['train_input'][train_id], singularity_avoiding=singularity_avoiding, y_th=y_th) + train_loss = loss_fn(pred, dataset['train_label'][train_id]) + if self.save_act: + if reg_metric == 'edge_backward': + self.attribute() + if reg_metric == 'node_backward': + self.node_attribute() + reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff) + else: + reg_ = torch.tensor(0.) + objective = train_loss + lamb * reg_ + objective.backward() + return objective + + if save_fig: + if not os.path.exists(img_folder): + os.makedirs(img_folder) + + for _ in pbar: + + if _ == steps-1 and old_save_act: + self.save_act = True + + train_id = np.random.choice(dataset['train_input'].shape[0], batch_size, replace=False) + test_id = np.random.choice(dataset['test_input'].shape[0], batch_size_test, replace=False) + + if _ % grid_update_freq == 0 and _ < stop_grid_update_step and update_grid and _ >= start_grid_update_step: + self.update_grid(dataset['train_input'][train_id]) + + if opt == "LBFGS": + optimizer.step(closure) + + if opt == "Adam": + pred = self.forward(dataset['train_input'][train_id], singularity_avoiding=singularity_avoiding, y_th=y_th) + train_loss = loss_fn(pred, dataset['train_label'][train_id]) + if self.save_act: + if reg_metric == 'edge_backward': + self.attribute() + if reg_metric == 'node_backward': + self.node_attribute() + reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff) + else: + reg_ = torch.tensor(0.) 
+ loss = train_loss + lamb * reg_ + optimizer.zero_grad() + loss.backward() + optimizer.step() + + test_loss = loss_fn_eval(self.forward(dataset['test_input'][test_id]), dataset['test_label'][test_id]) + + + if metrics != None: + for i in range(len(metrics)): + results[metrics[i].__name__].append(metrics[i]().item()) + + results['train_loss'].append(torch.sqrt(train_loss).cpu().detach().numpy()) + results['test_loss'].append(torch.sqrt(test_loss).cpu().detach().numpy()) + results['reg'].append(reg_.cpu().detach().numpy()) + + if _ % log == 0: + if display_metrics == None: + pbar.set_description("| train_loss: %.2e | test_loss: %.2e | reg: %.2e | " % (torch.sqrt(train_loss).cpu().detach().numpy(), torch.sqrt(test_loss).cpu().detach().numpy(), reg_.cpu().detach().numpy())) + else: + string = '' + data = () + for metric in display_metrics: + string += f' {metric}: %.2e |' + try: + results[metric] + except: + raise Exception(f'{metric} not recognized') + data += (results[metric][-1],) + pbar.set_description(string % data) + + + if save_fig and _ % save_fig_freq == 0: + self.plot(folder=img_folder, in_vars=in_vars, out_vars=out_vars, title="Step {}".format(_), beta=beta) + plt.savefig(img_folder + '/' + str(_) + '.jpg', bbox_inches='tight', dpi=200) + plt.close() + + self.log_history('fit') + # revert back to original state + self.symbolic_enabled = old_symbolic_enabled + return results + + def prune_node(self, threshold=1e-2, mode="auto", active_neurons_id=None, log_history=True): + + if self.acts == None: + self.get_act() + + mask_up = [torch.ones(self.width_in[0], )] + mask_down = [] + active_neurons_up = [list(range(self.width_in[0]))] + active_neurons_down = [] + num_sums = [] + num_mults = [] + mult_arities = [[]] + + if active_neurons_id != None: + mode = "manual" + + for i in range(len(self.acts_scale) - 1): + + mult_arity = [] + + if mode == "auto": + self.attribute() + overall_important_up = self.node_scores[i+1] > threshold + + elif mode == "manual": + overall_important_up = torch.zeros(self.width_in[i + 1], dtype=torch.bool) + overall_important_up[active_neurons_id[i]] = True + + + num_sum = torch.sum(overall_important_up[:self.width[i+1][0]]) + num_mult = torch.sum(overall_important_up[self.width[i+1][0]:]) + if self.mult_homo == True: + overall_important_down = torch.cat([overall_important_up[:self.width[i+1][0]], (overall_important_up[self.width[i+1][0]:][None,:].expand(self.mult_arity,-1)).T.reshape(-1,)], dim=0) + else: + overall_important_down = overall_important_up[:self.width[i+1][0]] + for j in range(overall_important_up[self.width[i+1][0]:].shape[0]): + active_bool = overall_important_up[self.width[i+1][0]+j] + arity = self.mult_arity[i+1][j] + overall_important_down = torch.cat([overall_important_down, torch.tensor([active_bool]*arity)]) + if active_bool: + mult_arity.append(arity) + + num_sums.append(num_sum.item()) + num_mults.append(num_mult.item()) + + mask_up.append(overall_important_up.float()) + mask_down.append(overall_important_down.float()) + + active_neurons_up.append(torch.where(overall_important_up == True)[0]) + active_neurons_down.append(torch.where(overall_important_down == True)[0]) + + mult_arities.append(mult_arity) + + active_neurons_down.append(list(range(self.width_out[-1]))) + mask_down.append(torch.ones(self.width_out[-1], )) + + if self.mult_homo == False: + mult_arities.append(self.mult_arity[-1]) + + self.mask_up = mask_up + self.mask_down = mask_down + + # update act_fun[l].mask up + for l in range(len(self.acts_scale) - 1): + for i in 
range(self.width_in[l + 1]): + if i not in active_neurons_up[l + 1]: + self.remove_node(l + 1, i, mode='up',log_history=False) + + for i in range(self.width_out[l + 1]): + if i not in active_neurons_down[l]: + self.remove_node(l + 1, i, mode='down',log_history=False) + + model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun_name, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round) + model2.load_state_dict(self.state_dict()) + + width_new = [self.width[0]] + + for i in range(len(self.acts_scale)): + + if i < len(self.acts_scale) - 1: + num_sum = num_sums[i] + num_mult = num_mults[i] + model2.node_bias[i].data = model2.node_bias[i].data[active_neurons_up[i+1]] + model2.node_scale[i].data = model2.node_scale[i].data[active_neurons_up[i+1]] + model2.subnode_bias[i].data = model2.subnode_bias[i].data[active_neurons_down[i]] + model2.subnode_scale[i].data = model2.subnode_scale[i].data[active_neurons_down[i]] + model2.width[i+1] = [num_sum, num_mult] + + model2.act_fun[i].out_dim_sum = num_sum + model2.act_fun[i].out_dim_mult = num_mult + + model2.symbolic_fun[i].out_dim_sum = num_sum + model2.symbolic_fun[i].out_dim_mult = num_mult + + width_new.append([num_sum, num_mult]) + + model2.act_fun[i] = model2.act_fun[i].get_subset(active_neurons_up[i], active_neurons_down[i]) + model2.symbolic_fun[i] = self.symbolic_fun[i].get_subset(active_neurons_up[i], active_neurons_down[i]) + + model2.cache_data = self.cache_data + model2.acts = None + + width_new.append(self.width[-1]) + model2.width = width_new + + if self.mult_homo == False: + model2.mult_arity = mult_arities + + if log_history: + self.log_history('prune_node') + model2.state_id += 1 + + return model2 + + def prune_edge(self, threshold=3e-2, log_history=True): + + if self.acts == None: + self.get_act() + + for i in range(len(self.width)-1): + #self.act_fun[i].mask.data = ((self.acts_scale[i] > threshold).permute(1,0)).float() + old_mask = self.act_fun[i].mask.data + self.act_fun[i].mask.data = ((self.edge_scores[i] > threshold).permute(1,0)*old_mask).float() + + if log_history: + self.log_history('fix_symbolic') + + def prune(self, node_th=1e-2, edge_th=3e-2): + + if self.acts == None: + self.get_act() + + self = self.prune_node(node_th, log_history=False) + #self.prune_node(node_th, log_history=False) + self.forward(self.cache_data) + self.attribute() + self.prune_edge(edge_th, log_history=False) + self.log_history('prune') + return self + + def prune_input(self, threshold=1e-2, active_inputs=None, log_history=True): + + if active_inputs == None: + self.attribute() + input_score = self.node_scores[0] + input_mask = input_score > threshold + print('keep:', input_mask.tolist()) + input_id = torch.where(input_mask==True)[0] + + else: + input_id = torch.tensor(active_inputs, dtype=torch.long) + + model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round) + model2.load_state_dict(self.state_dict()) + + model2.act_fun[0] = model2.act_fun[0].get_subset(input_id, torch.arange(self.width_out[1])) + model2.symbolic_fun[0] = self.symbolic_fun[0].get_subset(input_id, torch.arange(self.width_out[1])) + + model2.cache_data = self.cache_data + model2.acts = None + + model2.width[0] = [len(input_id), 0] + model2.input_id = input_id + + if log_history: + 
self.log_history('prune_input') + model2.state_id += 1 + + return model2 + + def remove_edge(self, l, i, j, log_history=True): + self.act_fun[l].mask[i][j] = 0. + if log_history: + self.log_history('remove_edge') + + def remove_node(self, l ,i, mode='all', log_history=True): + if mode == 'down': + self.act_fun[l - 1].mask[:, i] = 0. + self.symbolic_fun[l - 1].mask[i, :] *= 0. + + elif mode == 'up': + self.act_fun[l].mask[i, :] = 0. + self.symbolic_fun[l].mask[:, i] *= 0. + + else: + self.remove_node(l, i, mode='up') + self.remove_node(l, i, mode='down') + + if log_history: + self.log_history('remove_node') + + + def attribute(self, l=None, i=None, out_score=None, plot=True): + + # output (out_dim, in_dim) + + if l != None: + self.attribute() + out_score = self.node_scores[l] + + if self.acts == None: + self.get_act() + + def score_node2subnode(node_score, width, mult_arity, out_dim): + + assert np.sum(width) == node_score.shape[1] + if isinstance(mult_arity, int): + n_subnode = width[0] + mult_arity * width[1] + else: + n_subnode = width[0] + int(np.sum(mult_arity)) + + #subnode_score_leaf = torch.zeros(out_dim, n_subnode).requires_grad_(True) + #subnode_score = subnode_score_leaf.clone() + #subnode_score[:,:width[0]] = node_score[:,:width[0]] + subnode_score = node_score[:,:width[0]] + if isinstance(mult_arity, int): + #subnode_score[:,width[0]:] = node_score[:,width[0]:][:,:,None].expand(out_dim, node_score[width[0]:].shape[0], mult_arity).reshape(out_dim,-1) + subnode_score = torch.cat([subnode_score, node_score[:,width[0]:][:,:,None].expand(out_dim, node_score[width[0]:].shape[0], mult_arity).reshape(out_dim,-1)], dim=1) + else: + acml = width[0] + for i in range(len(mult_arity)): + #subnode_score[:, acml:acml+mult_arity[i]] = node_score[:, width[0]+i] + subnode_score = torch.cat([subnode_score, node_score[:, width[0]+i]].expand(out_dim, mult_arity[i]), dim=1) + acml += mult_arity[i] + return subnode_score + + + node_scores = [] + subnode_scores = [] + edge_scores = [] + + l_query = l + if l == None: + l_end = self.depth + else: + l_end = l + + # back propagate from the queried layer + out_dim = self.width_in[l_end] + if out_score == None: + node_score = torch.eye(out_dim).requires_grad_(True) + else: + node_score = torch.diag(out_score).requires_grad_(True) + node_scores.append(node_score) + + device = self.act_fun[0].grid.device + + for l in range(l_end,0,-1): + + # node to subnode + if isinstance(self.mult_arity, int): + subnode_score = score_node2subnode(node_score, self.width[l], self.mult_arity, out_dim=out_dim) + else: + mult_arity = self.mult_arity[l] + subnode_score = score_node2subnode(node_score, self.width[l], mult_arity) + + subnode_scores.append(subnode_score) + # subnode to edge + #print(self.edge_actscale[l-1].device, subnode_score.device, self.subnode_actscale[l-1].device) + edge_score = torch.einsum('ij,ki,i->kij', self.edge_actscale[l-1], subnode_score.to(device), 1/(self.subnode_actscale[l-1]+1e-4)) + edge_scores.append(edge_score) + + # edge to node + node_score = torch.sum(edge_score, dim=1) + node_scores.append(node_score) + + self.node_scores_all = list(reversed(node_scores)) + self.edge_scores_all = list(reversed(edge_scores)) + self.subnode_scores_all = list(reversed(subnode_scores)) + + self.node_scores = [torch.mean(l, dim=0) for l in self.node_scores_all] + self.edge_scores = [torch.mean(l, dim=0) for l in self.edge_scores_all] + self.subnode_scores = [torch.mean(l, dim=0) for l in self.subnode_scores_all] + + # return + if l_query != None: + if i == None: 
+ return self.node_scores_all[0] + else: + + # plot + if plot: + in_dim = self.width_in[0] + plt.figure(figsize=(1*in_dim, 3)) + plt.bar(range(in_dim),self.node_scores_all[0][i].detach().numpy()) + plt.xticks(range(in_dim)); + + return self.node_scores_all[0][i] + + def node_attribute(self): + self.node_attribute_scores = [] + for l in range(1, self.depth+1): + node_attr = self.attribute(l) + self.node_attribute_scores.append(node_attr) + + def feature_interaction(self, l, neuron_th = 1e-2, feature_th = 1e-2): + + dic = {} + width = self.width_in[l] + + for i in range(width): + score = self.attribute(l,i,plot=False) + + if torch.max(score) > neuron_th: + features = tuple(torch.where(score > torch.max(score) * feature_th)[0].detach().numpy()) + if features in dic.keys(): + dic[features] += 1 + else: + dic[features] = 1 + + return dic + + def suggest_symbolic(self, l, i, j, a_range=(-10, 10), b_range=(-10, 10), lib=None, topk=5, verbose=True, r2_loss_fun=lambda x: np.log2(1+1e-5-x), c_loss_fun=lambda x: x, weight_simple = 0.8): + + r2s = [] + cs = [] + + if lib == None: + symbolic_lib = SYMBOLIC_LIB + else: + symbolic_lib = {} + for item in lib: + symbolic_lib[item] = SYMBOLIC_LIB[item] + + # getting r2 and complexities + for (name, content) in symbolic_lib.items(): + r2 = self.fix_symbolic(l, i, j, name, a_range=a_range, b_range=b_range, verbose=False, log_history=False) + if r2 == -1e8: # zero function + r2s.append(-1e8) + else: + r2s.append(r2.item()) + self.unfix_symbolic(l, i, j, log_history=False) + c = content[2] + cs.append(c) + + r2s = np.array(r2s) + cs = np.array(cs) + r2_loss = r2_loss_fun(r2s).astype('float') + cs_loss = c_loss_fun(cs) + + loss = weight_simple * cs_loss + (1-weight_simple) * r2_loss + + sorted_ids = np.argsort(loss)[:topk] + r2s = r2s[sorted_ids][:topk] + cs = cs[sorted_ids][:topk] + r2_loss = r2_loss[sorted_ids][:topk] + cs_loss = cs_loss[sorted_ids][:topk] + loss = loss[sorted_ids][:topk] + + topk = np.minimum(topk, len(symbolic_lib)) + + if verbose == True: + # print results in a dataframe + results = {} + results['function'] = [list(symbolic_lib.items())[sorted_ids[i]][0] for i in range(topk)] + results['fitting r2'] = r2s[:topk] + results['r2 loss'] = r2_loss[:topk] + results['complexity'] = cs[:topk] + results['complexity loss'] = cs_loss[:topk] + results['total loss'] = loss[:topk] + + df = pd.DataFrame(results) + print(df) + + '''if verbose == True: + print('function', ',', 'r2', ',', 'c', ',', 'r2 loss', ',', 'c loss', ',', 'total loss') + for i in range(topk): + print(list(symbolic_lib.items())[sorted_ids[i]][0], ',', r2s[i], ',', cs[i], ',', r2_loss[i], ',', cs_loss[i], ',', loss[i])''' + + best_name = list(symbolic_lib.items())[sorted_ids[0]][0] + best_fun = list(symbolic_lib.items())[sorted_ids[0]][1] + best_r2 = r2s[0] + best_c = cs[0] + + '''if best_r2 < 1e-3: + # zero function + zero_id = list(SYMBOLIC_LIB).index('0') + best_r2 = 0.0 + best_name = '0' + best_fun = list(symbolic_lib.items())[zero_id][1] + best_c = 0.0 + print('behave like a zero function')''' + + return best_name, best_fun, best_r2, best_c; + + def auto_symbolic(self, a_range=(-10, 10), b_range=(-10, 10), lib=None, verbose=1): + for l in range(len(self.width_in) - 1): + for i in range(self.width_in[l]): + for j in range(self.width_out[l + 1]): + #if self.symbolic_fun[l].mask[j, i] > 0. and self.act_fun[l].mask[i][j] == 0.: + if self.symbolic_fun[l].mask[j, i] > 0. 
and self.act_fun[l].mask[i][j] == 0.: + print(f'skipping ({l},{i},{j}) since already symbolic') + elif self.symbolic_fun[l].mask[j, i] == 0. and self.act_fun[l].mask[i][j] == 0.: + self.fix_symbolic(l, i, j, '0', verbose=verbose > 1, log_history=False) + print(f'fixing ({l},{i},{j}) with 0') + else: + name, fun, r2, c = self.suggest_symbolic(l, i, j, a_range=a_range, b_range=b_range, lib=lib, verbose=False) + self.fix_symbolic(l, i, j, name, verbose=verbose > 1, log_history=False) + if verbose >= 1: + print(f'fixing ({l},{i},{j}) with {name}, r2={r2}, c={c}') + + self.log_history('auto_symbolic') + + def symbolic_formula(self, compute_digit=5, display_digit=3, var=None, normalizer=None, simplify=False, output_normalizer = None): + + symbolic_acts = [] + symbolic_acts_premult = [] + x = [] + + def ex_round(ex1, n_digit): + ex2 = ex1 + for a in sympy.preorder_traversal(ex1): + if isinstance(a, sympy.Float): + ex2 = ex2.subs(a, round(a, n_digit)) + return ex2 + + # define variables + if var == None: + for ii in range(1, self.width[0][0] + 1): + exec(f"x{ii} = sympy.Symbol('x_{ii}')") + exec(f"x.append(x{ii})") + elif type(var[0]) == Symbol: + x = var + else: + x = [sympy.symbols(var_) for var_ in var] + + x0 = x + + if normalizer != None: + mean = normalizer[0] + std = normalizer[1] + x = [(x[i] - mean[i]) / std[i] for i in range(len(x))] + + symbolic_acts.append(x) + + for l in range(len(self.width_in) - 1): + num_sum = self.width[l + 1][0] + num_mult = self.width[l + 1][1] + y = [] + for j in range(self.width_out[l + 1]): + yj = 0. + for i in range(self.width_in[l]): + a, b, c, d = self.symbolic_fun[l].affine[j, i] + sympy_fun = self.symbolic_fun[l].funs_sympy[j][i] + try: + yj += c * sympy_fun(a * x[i] + b) + d + except: + print('make sure all activations need to be converted to symbolic formulas first!') + return + yj = self.subnode_scale[l][j] * yj + self.subnode_bias[l][j] + if simplify == True: + y.append(sympy.simplify(yj)) + else: + y.append(yj) + + symbolic_acts_premult.append(y) + + mult = [] + for k in range(num_mult): + if isinstance(self.mult_arity, int): + mult_arity = self.mult_arity + else: + mult_arity = self.mult_arity[l+1][k] + for i in range(mult_arity-1): + if i == 0: + mult_k = y[num_sum+2*k] * y[num_sum+2*k+1] + else: + mult_k = mult_k * y[num_sum+2*k+i+1] + mult.append(mult_k) + + y = y[:num_sum] + mult + + for j in range(self.width_in[l+1]): + y[j] = self.node_scale[l][j] * y[j] + self.node_bias[l][j] + + x = y + symbolic_acts.append(x) + + if output_normalizer != None: + output_layer = symbolic_acts[-1] + means = output_normalizer[0] + stds = output_normalizer[1] + + assert len(output_layer) == len(means), 'output_normalizer does not match the output layer' + assert len(output_layer) == len(stds), 'output_normalizer does not match the output layer' + + output_layer = [(output_layer[i] * stds[i] + means[i]) for i in range(len(output_layer))] + symbolic_acts[-1] = output_layer + + + self.symbolic_acts = [[symbolic_acts[l][i] for i in range(len(symbolic_acts[l]))] for l in range(len(symbolic_acts))] + self.symbolic_acts_premult = [[symbolic_acts_premult[l][i] for i in range(len(symbolic_acts_premult[l]))] for l in range(len(symbolic_acts_premult))] + + out_dim = len(symbolic_acts[-1]) + #return [symbolic_acts[-1][i] for i in range(len(symbolic_acts[-1]))], x0 + + if simplify: + return [symbolic_acts[-1][i] for i in range(len(symbolic_acts[-1]))], x0 + else: + return [symbolic_acts[-1][i] for i in range(len(symbolic_acts[-1]))], x0 + + + def expand_depth(self): + + 
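+        # the appended KANLayer has its spline mask zeroed while the appended
+        # Symbolic_KANLayer is fixed to 'x' on the diagonal and '0' off-diagonal,
+        # so the deepened model should initially act as the identity through the new layer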
self.depth += 1 + + # add kanlayer, set mask to zero + dim_out = self.width_in[-1] + layer = KANLayer(dim_out, dim_out, num=self.grid, k=self.k) + layer.mask *= 0. + self.act_fun.append(layer) + + self.width.append([dim_out, 0]) + self.mult_arity.append([]) + + # add symbolic_kanlayer set mask to one. fun = identity on diagonal and zero for off-diagonal + layer = Symbolic_KANLayer(dim_out, dim_out) + layer.mask += 1. + + for j in range(dim_out): + for i in range(dim_out): + if i == j: + layer.fix_symbolic(i,j,'x') + else: + layer.fix_symbolic(i,j,'0') + + self.symbolic_fun.append(layer) + + self.node_bias.append(torch.nn.Parameter(torch.zeros(dim_out,)).requires_grad_(self.affine_trainable)) + self.node_scale.append(torch.nn.Parameter(torch.ones(dim_out,)).requires_grad_(self.affine_trainable)) + self.subnode_bias.append(torch.nn.Parameter(torch.zeros(dim_out,)).requires_grad_(self.affine_trainable)) + self.subnode_scale.append(torch.nn.Parameter(torch.ones(dim_out,)).requires_grad_(self.affine_trainable)) + + def expand_width(self, layer_id, n_added_nodes, sum_bool=True, mult_arity=2): + + def _expand(layer_id, n_added_nodes, sum_bool=True, mult_arity=2, added_dim='out'): + l = layer_id + in_dim = self.symbolic_fun[l].in_dim + out_dim = self.symbolic_fun[l].out_dim + if sum_bool: + + if added_dim == 'out': + new = Symbolic_KANLayer(in_dim, out_dim + n_added_nodes) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim) + out_id = np.arange(out_dim + n_added_nodes) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if j > n_added_nodes-1: + new.funs[j][i] = old.funs[j-n_added_nodes][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j-n_added_nodes][i] + new.funs_sympy[j][i] = old.funs_sympy[j-n_added_nodes][i] + new.funs_name[j][i] = old.funs_name[j-n_added_nodes][i] + new.affine.data[j][i] = old.affine.data[j-n_added_nodes][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim, out_dim + n_added_nodes, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. + + self.node_scale[l].data = torch.cat([torch.ones(n_added_nodes), self.node_scale[l].data]) + self.node_bias[l].data = torch.cat([torch.zeros(n_added_nodes), self.node_bias[l].data]) + self.subnode_scale[l].data = torch.cat([torch.ones(n_added_nodes), self.subnode_scale[l].data]) + self.subnode_bias[l].data = torch.cat([torch.zeros(n_added_nodes), self.subnode_bias[l].data]) + + + + if added_dim == 'in': + new = Symbolic_KANLayer(in_dim + n_added_nodes, out_dim) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim + n_added_nodes) + out_id = np.arange(out_dim) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if i > n_added_nodes-1: + new.funs[j][i] = old.funs[j][i-n_added_nodes] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i-n_added_nodes] + new.funs_sympy[j][i] = old.funs_sympy[j][i-n_added_nodes] + new.funs_name[j][i] = old.funs_name[j][i-n_added_nodes] + new.affine.data[j][i] = old.affine.data[j][i-n_added_nodes] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim + n_added_nodes, out_dim, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. 
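+            # non-sum case: the added nodes are multiplication nodes, so on the output
+            # side the layer grows by sum(mult_arity) subnodes (one per factor), while on
+            # the input side it still grows by n_added_nodes node inputs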
+ + + else: + + if isinstance(mult_arity, int): + mult_arity = [mult_arity] * n_added_nodes + + if added_dim == 'out': + n_added_subnodes = np.sum(mult_arity) + new = Symbolic_KANLayer(in_dim, out_dim + n_added_subnodes) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim) + out_id = np.arange(out_dim + n_added_nodes) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if j < out_dim: + new.funs[j][i] = old.funs[j][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i] + new.funs_sympy[j][i] = old.funs_sympy[j][i] + new.funs_name[j][i] = old.funs_name[j][i] + new.affine.data[j][i] = old.affine.data[j][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim, out_dim + n_added_subnodes, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. + + self.node_scale[l].data = torch.cat([self.node_scale[l].data, torch.ones(n_added_nodes)]) + self.node_bias[l].data = torch.cat([self.node_bias[l].data, torch.zeros(n_added_nodes)]) + self.subnode_scale[l].data = torch.cat([self.subnode_scale[l].data, torch.ones(n_added_subnodes)]) + self.subnode_bias[l].data = torch.cat([self.subnode_bias[l].data, torch.zeros(n_added_subnodes)]) + + if added_dim == 'in': + new = Symbolic_KANLayer(in_dim + n_added_nodes, out_dim) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim + n_added_nodes) + out_id = np.arange(out_dim) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if i < in_dim: + new.funs[j][i] = old.funs[j][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i] + new.funs_sympy[j][i] = old.funs_sympy[j][i] + new.funs_name[j][i] = old.funs_name[j][i] + new.affine.data[j][i] = old.affine.data[j][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim + n_added_nodes, out_dim, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. + + _expand(layer_id-1, n_added_nodes, sum_bool, mult_arity, added_dim='out') + _expand(layer_id, n_added_nodes, sum_bool, mult_arity, added_dim='in') + if sum_bool: + self.width[layer_id][0] += n_added_nodes + else: + if isinstance(mult_arity, int): + mult_arity = [mult_arity] * n_added_nodes + + self.width[layer_id][1] += n_added_nodes + self.mult_arity[layer_id] += mult_arity + + def perturb(self, mag=0.02, mode='all'): + if mode == 'all': + for i in range(self.depth): + self.act_fun[i].mask += self.act_fun[i].mask*0. + mag + + if mode == 'minimal': + for l in range(self.depth): + funs_name = self.symbolic_fun[l].funs_name + for j in range(self.width_out[l+1]): + for i in range(self.width_in[l]): + if funs_name[j][i] != '0': + self.act_fun[l].mask.data[i][j] = mag + + self.log_history('perturb') + + + def module(self, start_layer, chain): + #chain = '[-1]->[-1,-2]->[-1]->[-1]' + groups = chain.split('->') + n_total_layers = len(groups)//2 + #start_layer = 0 + + for l in range(n_total_layers): + current_layer = cl = start_layer + l + id_in = [int(i) for i in groups[2*l][1:-1].split(',')] + id_out = [int(i) for i in groups[2*l+1][1:-1].split(',')] + + in_dim = self.width_in[cl] + out_dim = self.width_out[cl+1] + id_in_other = list(set(range(in_dim)) - set(id_in)) + id_out_other = list(set(range(out_dim)) - set(id_out)) + self.act_fun[cl].mask.data[np.ix_(id_in_other,id_out)] = 0. + self.act_fun[cl].mask.data[np.ix_(id_in,id_out_other)] = 0. + self.symbolic_fun[cl].mask.data[np.ix_(id_out,id_in_other)] = 0. 
+ self.symbolic_fun[cl].mask.data[np.ix_(id_out_other,id_in)] = 0. + + self.log_history('module') + + def tree(self, x=None, in_var=None, style='tree', sym_th=1e-3, sep_th=1e-1, skip_sep_test=False, verbose=False): + if x == None: + x = self.cache_data + plot_tree(self, x, in_var=in_var, style=style, sym_th=sym_th, sep_th=sep_th, skip_sep_test=skip_sep_test, verbose=verbose) + + + def speed(self, compile=False): + self.symbolic_enabled=False + self.save_act=False + self.auto_save=False + if compile == True: + return torch.compile(self) + else: + return self + + def get_act(self, x=None): + if isinstance(x, dict): + x = x['train_input'] + if x == None: + if self.cache_data != None: + x = self.cache_data + else: + raise Exception("missing input data x") + save_act = self.save_act + self.save_act = True + self.forward(x) + self.save_act = save_act + + def get_fun(self, l, i, j): + inputs = self.spline_preacts[l][:,j,i] + outputs = self.spline_postacts[l][:,j,i] + # they are not ordered yet + rank = np.argsort(inputs) + inputs = inputs[rank] + outputs = outputs[rank] + plt.figure(figsize=(3,3)) + plt.plot(inputs, outputs, marker="o") + return inputs, outputs + + + def history(self, k='all'): + + with open(self.ckpt_path+'/history.txt', 'r') as f: + data = f.readlines() + n_line = len(data) + if k == 'all': + k = n_line + + data = data[-k:] + for line in data: + print(line[:-1]) + @property + def n_edge(self): + depth = len(self.act_fun) + complexity = 0 + for l in range(depth): + complexity += torch.sum(self.act_fun[l].mask > 0.) + return complexity.item() + + def evaluate(self, dataset): + evaluation = {} + evaluation['test_loss'] = torch.sqrt(torch.mean((self.forward(dataset['test_input']) - dataset['test_label'])**2)).item() + evaluation['n_edge'] = self.n_edge + evaluation['n_grid'] = self.grid + # add other metrics (maybe accuracy) + return evaluation + + def swap(self, l, i1, i2, log_history=True): + + self.act_fun[l-1].swap(i1,i2,mode='out') + self.symbolic_fun[l-1].swap(i1,i2,mode='out') + self.act_fun[l].swap(i1,i2,mode='in') + self.symbolic_fun[l].swap(i1,i2,mode='in') + + def swap_(data, i1, i2): + data[i1], data[i2] = data[i2], data[i1] + + swap_(self.node_scale[l-1].data, i1, i2) + swap_(self.node_bias[l-1].data, i1, i2) + swap_(self.subnode_scale[l-1].data, i1, i2) + swap_(self.subnode_bias[l-1].data, i1, i2) + + if log_history: + self.log_history('swap') + + @property + def connection_cost(self): + + cc = 0. 
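+        # connection cost: for each layer, edge attribution scores are weighted by the
+        # distance between evenly spaced input/output node coordinates and summed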
+ for t in self.edge_scores: + + def get_coordinate(n): + return torch.linspace(0,1,steps=n+1)[:n] + 1/(2*n) + + in_dim = t.shape[0] + x_in = get_coordinate(in_dim) + + out_dim = t.shape[1] + x_out = get_coordinate(out_dim) + + dist = torch.abs(x_in[:,None] - x_out[None,:]) + cc += torch.sum(dist * t) + + return cc + + def auto_swap_l(self, l): + + num = self.width_in[1] + for i in range(num): + ccs = [] + for j in range(num): + self.swap(l,i,j,log_history=False) + self.get_act() + self.attribute() + cc = self.connection_cost.detach().clone() + ccs.append(cc) + self.swap(l,i,j,log_history=False) + j = torch.argmin(torch.tensor(ccs)) + self.swap(l,i,j,log_history=False) + + def auto_swap(self): + depth = self.depth + for l in range(1, depth): + self.auto_swap_l(l) + + self.log_history('auto_swap') + +KAN = MultKAN diff --git a/kan/.ipynb_checkpoints/__init__-checkpoint.py b/kan/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 00000000..254a757b --- /dev/null +++ b/kan/.ipynb_checkpoints/__init__-checkpoint.py @@ -0,0 +1,3 @@ +from .MultKAN import * +from .utils import * +torch.use_deterministic_algorithms(True) \ No newline at end of file diff --git a/kan/.ipynb_checkpoints/spline-checkpoint.py b/kan/.ipynb_checkpoints/spline-checkpoint.py new file mode 100644 index 00000000..6a14510c --- /dev/null +++ b/kan/.ipynb_checkpoints/spline-checkpoint.py @@ -0,0 +1,182 @@ +import torch + + +def B_batch(x, grid, k=0, extend=True, device='cpu'): + ''' + evaludate x on B-spline bases + + Args: + ----- + x : 2D torch.tensor + inputs, shape (number of splines, number of samples) + grid : 2D torch.tensor + grids, shape (number of splines, number of grid points) + k : int + the piecewise polynomial order of splines. + extend : bool + If True, k points are extended on both ends. If False, no extension (zero boundary condition). Default: True + device : str + devicde + + Returns: + -------- + spline values : 3D torch.tensor + shape (number of splines, number of B-spline bases (coeffcients), number of samples). The numbef of B-spline bases = number of grid points + k - 1. 
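+        The bases are evaluated with the Cox-de Boor recursion; inputs outside the
+        grid range contribute zero, and degenerate grids are guarded by nan_to_num.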
+ + Example + ------- + >>> num_spline = 5 + >>> num_sample = 100 + >>> num_grid_interval = 10 + >>> k = 3 + >>> x = torch.normal(0,1,size=(num_spline, num_sample)) + >>> grids = torch.einsum('i,j->ij', torch.ones(num_spline,), torch.linspace(-1,1,steps=num_grid_interval+1)) + >>> B_batch(x, grids, k=k).shape + torch.Size([5, 13, 100]) + ''' + + '''# x shape: (size, x); grid shape: (size, grid) + def extend_grid(grid, k_extend=0): + # pad k to left and right + # grid shape: (batch, grid) + h = (grid[:, [-1]] - grid[:, [0]]) / (grid.shape[1] - 1) + + for i in range(k_extend): + grid = torch.cat([grid[:, [0]] - h, grid], dim=1) + grid = torch.cat([grid, grid[:, [-1]] + h], dim=1) + grid = grid.to(device) + return grid + + if extend == True: + grid = extend_grid(grid, k_extend=k) + + grid = grid.unsqueeze(dim=2).to(device) + x = x.unsqueeze(dim=1).to(device) + + if k == 0: + value = (x >= grid[:, :-1]) * (x < grid[:, 1:]) + else: + B_km1 = B_batch(x[:, 0], grid=grid[:, :, 0], k=k - 1, extend=False, device=device) + value = (x - grid[:, :-(k + 1)]) / (grid[:, k:-1] - grid[:, :-(k + 1)]) * B_km1[:, :-1] + ( + grid[:, k + 1:] - x) / (grid[:, k + 1:] - grid[:, 1:(-k)]) * B_km1[:, 1:]''' + + x = x.unsqueeze(dim=2) + grid = grid.unsqueeze(dim=0) + + if k == 0: + value = (x >= grid[:, :, :-1]) * (x < grid[:, :, 1:]) + else: + B_km1 = B_batch(x[:,:,0], grid=grid[0], k=k - 1) + + value = (x - grid[:, :, :-(k + 1)]) / (grid[:, :, k:-1] - grid[:, :, :-(k + 1)]) * B_km1[:, :, :-1] + ( + grid[:, :, k + 1:] - x) / (grid[:, :, k + 1:] - grid[:, :, 1:(-k)]) * B_km1[:, :, 1:] + + # in case grid is degenerate + value = torch.nan_to_num(value) + return value + + + +def coef2curve(x_eval, grid, coef, k, device="cpu"): + ''' + converting B-spline coefficients to B-spline curves. Evaluate x on B-spline curves (summing up B_batch results over B-spline basis). + + Args: + ----- + x_eval : 2D torch.tensor) + shape (number of splines, number of samples) + grid : 2D torch.tensor) + shape (number of splines, number of grid points) + coef : 2D torch.tensor) + shape (number of splines, number of coef params). number of coef params = number of grid intervals + k + k : int + the piecewise polynomial order of splines. + device : str + devicde + + Returns: + -------- + y_eval : 2D torch.tensor + shape (number of splines, number of samples) + + Example + ------- + >>> num_spline = 5 + >>> num_sample = 100 + >>> num_grid_interval = 10 + >>> k = 3 + >>> x_eval = torch.normal(0,1,size=(num_spline, num_sample)) + >>> grids = torch.einsum('i,j->ij', torch.ones(num_spline,), torch.linspace(-1,1,steps=num_grid_interval+1)) + >>> coef = torch.normal(0,1,size=(num_spline, num_grid_interval+k)) + >>> coef2curve(x_eval, grids, coef, k=k).shape + torch.Size([5, 100]) + ''' + # x_eval: (size, batch), grid: (size, grid), coef: (size, coef) + # coef: (size, coef), B_batch: (size, coef, batch), summer over coef + + b_splines = B_batch(x_eval, grid, k=k) # (batch, in_dim, n_coef) + y_eval = torch.einsum('ijk,jlk->ijl', b_splines, coef.to(b_splines.device)) + + return y_eval + + +def curve2coef(x_eval, y_eval, grid, k): + ''' + converting B-spline curves to B-spline coefficients using least squares. + + Args: + ----- + x_eval : 2D torch.tensor + shape (number of splines, number of samples) + y_eval : 2D torch.tensor + shape (number of splines, number of samples) + grid : 2D torch.tensor + shape (number of splines, number of grid points) + k : int + the piecewise polynomial order of splines. 
+ device : str + devicde + + Example + ------- + >>> num_spline = 5 + >>> num_sample = 100 + >>> num_grid_interval = 10 + >>> k = 3 + >>> x_eval = torch.normal(0,1,size=(num_spline, num_sample)) + >>> y_eval = torch.normal(0,1,size=(num_spline, num_sample)) + >>> grids = torch.einsum('i,j->ij', torch.ones(num_spline,), torch.linspace(-1,1,steps=num_grid_interval+1)) + torch.Size([5, 13]) + ''' + ''' + # x_eval: (size, batch); y_eval: (size, batch); grid: (size, grid); k: scalar + mat = B_batch(x_eval, grid, k, device=device).permute(0, 2, 1) + # coef = torch.linalg.lstsq(mat, y_eval.unsqueeze(dim=2)).solution[:, :, 0] + coef = torch.linalg.lstsq(mat.to(device), y_eval.unsqueeze(dim=2).to(device), + driver='gelsy' if device == 'cpu' else 'gels').solution[:, :, 0]''' + batch = x_eval.shape[0] + in_dim = x_eval.shape[1] + out_dim = y_eval.shape[2] + n_coef = grid.shape[1] - k - 1 + #mat = B_batch(x_eval, grid, k, device=device).permute(0, 2, 1) + mat = B_batch(x_eval, grid, k) # (batch, in_dim, G+k) + mat = mat.permute(1,0,2)[:,None,:,:].expand(in_dim, out_dim, batch, n_coef) # (in_dim, out_dim, batch, n_coef) + # coef shape: (in_dim, outdim, G+k) + y_eval = y_eval.permute(1,2,0).unsqueeze(dim=3) # y_eval: (in_dim, out_dim, batch, 1) + #print(mat) + device = mat.device + coef = torch.linalg.lstsq(mat, y_eval, + driver='gelsy' if device == 'cpu' else 'gels').solution[:,:,:,0] + return coef + + +def extend_grid(grid, k_extend=0): + # pad k to left and right + # grid shape: (batch, grid) + h = (grid[:, [-1]] - grid[:, [0]]) / (grid.shape[1] - 1) + + for i in range(k_extend): + grid = torch.cat([grid[:, [0]] - h, grid], dim=1) + grid = torch.cat([grid, grid[:, [-1]] + h], dim=1) + + return grid \ No newline at end of file diff --git a/kan/.ipynb_checkpoints/utils-checkpoint.py b/kan/.ipynb_checkpoints/utils-checkpoint.py new file mode 100644 index 00000000..8d8bd9c4 --- /dev/null +++ b/kan/.ipynb_checkpoints/utils-checkpoint.py @@ -0,0 +1,350 @@ +import numpy as np +import torch +from sklearn.linear_model import LinearRegression +import sympy +import yaml +from sympy.utilities.lambdify import lambdify + +# sigmoid = sympy.Function('sigmoid') +# name: (torch implementation, sympy implementation) + +# singularity protection functions +f_inv = lambda x, y_th: ((x_th := 1/y_th), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x) * (torch.abs(x) >= x_th)) +f_inv2 = lambda x, y_th: ((x_th := 1/y_th**(1/2)), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**2) * (torch.abs(x) >= x_th)) +f_inv3 = lambda x, y_th: ((x_th := 1/y_th**(1/3)), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**3) * (torch.abs(x) >= x_th)) +f_inv4 = lambda x, y_th: ((x_th := 1/y_th**(1/4)), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**4) * (torch.abs(x) >= x_th)) +f_inv5 = lambda x, y_th: ((x_th := 1/y_th**(1/5)), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**5) * (torch.abs(x) >= x_th)) +f_sqrt = lambda x, y_th: ((x_th := 1/y_th**2), x_th/y_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(torch.sqrt(torch.abs(x))*torch.sign(x)) * (torch.abs(x) >= x_th)) +f_power1d5 = lambda x, y_th: torch.abs(x)**1.5 +f_invsqrt = lambda x, y_th: ((x_th := 1/y_th**2), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/torch.sqrt(torch.abs(x))) * (torch.abs(x) >= x_th)) +f_log = lambda x, y_th: ((x_th := torch.e**(-y_th)), - y_th * (torch.abs(x) < x_th) + torch.nan_to_num(torch.log(torch.abs(x))) * (torch.abs(x) >= x_th)) +f_tan = lambda x, y_th: ((clip := x % torch.pi), (delta := 
torch.pi/2-torch.arctan(y_th)), - y_th/delta * (clip - torch.pi/2) * (torch.abs(clip - torch.pi/2) < delta) + torch.nan_to_num(torch.tan(clip)) * (torch.abs(clip - torch.pi/2) >= delta)) +f_arctanh = lambda x, y_th: ((delta := 1-torch.tanh(y_th) + 1e-4), y_th * torch.sign(x) * (torch.abs(x) > 1 - delta) + torch.nan_to_num(torch.arctanh(x)) * (torch.abs(x) <= 1 - delta)) +f_arcsin = lambda x, y_th: ((), torch.pi/2 * torch.sign(x) * (torch.abs(x) > 1) + torch.nan_to_num(torch.arcsin(x)) * (torch.abs(x) <= 1)) +f_arccos = lambda x, y_th: ((), torch.pi/2 * (1-torch.sign(x)) * (torch.abs(x) > 1) + torch.nan_to_num(torch.arccos(x)) * (torch.abs(x) <= 1)) +f_exp = lambda x, y_th: ((x_th := torch.log(y_th)), y_th * (x > x_th) + torch.exp(x) * (x <= x_th)) + +SYMBOLIC_LIB = {'x': (lambda x: x, lambda x: x, 1, lambda x, y_th: ((), x)), + 'x^2': (lambda x: x**2, lambda x: x**2, 2, lambda x, y_th: ((), x**2)), + 'x^3': (lambda x: x**3, lambda x: x**3, 3, lambda x, y_th: ((), x**3)), + 'x^4': (lambda x: x**4, lambda x: x**4, 3, lambda x, y_th: ((), x**4)), + 'x^5': (lambda x: x**5, lambda x: x**5, 3, lambda x, y_th: ((), x**5)), + '1/x': (lambda x: 1/x, lambda x: 1/x, 2, f_inv), + '1/x^2': (lambda x: 1/x**2, lambda x: 1/x**2, 2, f_inv2), + '1/x^3': (lambda x: 1/x**3, lambda x: 1/x**3, 3, f_inv3), + '1/x^4': (lambda x: 1/x**4, lambda x: 1/x**4, 4, f_inv4), + '1/x^5': (lambda x: 1/x**5, lambda x: 1/x**5, 5, f_inv5), + 'sqrt': (lambda x: torch.sqrt(x), lambda x: sympy.sqrt(x), 2, f_sqrt), + 'x^0.5': (lambda x: torch.sqrt(x), lambda x: sympy.sqrt(x), 2, f_sqrt), + 'x^1.5': (lambda x: torch.sqrt(x)**3, lambda x: sympy.sqrt(x)**3, 4, f_power1d5), + '1/sqrt(x)': (lambda x: 1/torch.sqrt(x), lambda x: 1/sympy.sqrt(x), 2, f_invsqrt), + '1/x^0.5': (lambda x: 1/torch.sqrt(x), lambda x: 1/sympy.sqrt(x), 2, f_invsqrt), + 'exp': (lambda x: torch.exp(x), lambda x: sympy.exp(x), 2, f_exp), + 'log': (lambda x: torch.log(x), lambda x: sympy.log(x), 2, f_log), + 'abs': (lambda x: torch.abs(x), lambda x: sympy.Abs(x), 3, lambda x, y_th: ((), torch.abs(x))), + 'sin': (lambda x: torch.sin(x), lambda x: sympy.sin(x), 2, lambda x, y_th: ((), torch.sin(x))), + 'cos': (lambda x: torch.cos(x), lambda x: sympy.cos(x), 2, lambda x, y_th: ((), torch.cos(x))), + 'tan': (lambda x: torch.tan(x), lambda x: sympy.tan(x), 3, f_tan), + 'tanh': (lambda x: torch.tanh(x), lambda x: sympy.tanh(x), 3, lambda x, y_th: ((), torch.tanh(x))), + 'sgn': (lambda x: torch.sign(x), lambda x: sympy.sign(x), 3, lambda x, y_th: ((), torch.sign(x))), + 'arcsin': (lambda x: torch.arcsin(x), lambda x: sympy.asin(x), 4, f_arcsin), + 'arccos': (lambda x: torch.arccos(x), lambda x: sympy.acos(x), 4, f_arccos), + 'arctan': (lambda x: torch.arctan(x), lambda x: sympy.atan(x), 4, lambda x, y_th: ((), torch.arctan(x))), + 'arctanh': (lambda x: torch.arctanh(x), lambda x: sympy.atanh(x), 4, f_arctanh), + '0': (lambda x: x*0, lambda x: x*0, 0, lambda x, y_th: ((), x*0)), + 'gaussian': (lambda x: torch.exp(-x**2), lambda x: sympy.exp(-x**2), 3, lambda x, y_th: ((), torch.exp(-x**2))), + #'cosh': (lambda x: torch.cosh(x), lambda x: sympy.cosh(x), 5), + #'sigmoid': (lambda x: torch.sigmoid(x), sympy.Function('sigmoid'), 4), + #'relu': (lambda x: torch.relu(x), relu), +} + +def create_dataset(f, + n_var=2, + f_mode = 'col', + ranges = [-1,1], + train_num=1000, + test_num=1000, + normalize_input=False, + normalize_label=False, + device='cpu', + seed=0): + ''' + create dataset + + Args: + ----- + f : function + the symbolic formula used to create the synthetic dataset + 
ranges : list or np.array; shape (2,) or (n_var, 2) + the range of input variables. Default: [-1,1]. + train_num : int + the number of training samples. Default: 1000. + test_num : int + the number of test samples. Default: 1000. + normalize_input : bool + If True, apply normalization to inputs. Default: False. + normalize_label : bool + If True, apply normalization to labels. Default: False. + device : str + device. Default: 'cpu'. + seed : int + random seed. Default: 0. + + Returns: + -------- + dataset : dic + Train/test inputs/labels are dataset['train_input'], dataset['train_label'], + dataset['test_input'], dataset['test_label'] + + Example + ------- + >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2) + >>> dataset = create_dataset(f, n_var=2, train_num=100) + >>> dataset['train_input'].shape + torch.Size([100, 2]) + ''' + + np.random.seed(seed) + torch.manual_seed(seed) + + if len(np.array(ranges).shape) == 1: + ranges = np.array(ranges * n_var).reshape(n_var,2) + else: + ranges = np.array(ranges) + + + train_input = torch.zeros(train_num, n_var) + test_input = torch.zeros(test_num, n_var) + for i in range(n_var): + train_input[:,i] = torch.rand(train_num,)*(ranges[i,1]-ranges[i,0])+ranges[i,0] + test_input[:,i] = torch.rand(test_num,)*(ranges[i,1]-ranges[i,0])+ranges[i,0] + + if f_mode == 'col': + train_label = f(train_input) + test_label = f(test_input) + elif f_mode == 'row': + train_label = f(train_input.T) + test_label = f(test_input.T) + else: + print(f'f_mode {f_mode} not recognized') + + # if has only 1 dimension + if len(train_label.shape) == 1: + train_label = train_label.unsqueeze(dim=1) + test_label = test_label.unsqueeze(dim=1) + + def normalize(data, mean, std): + return (data-mean)/std + + if normalize_input == True: + mean_input = torch.mean(train_input, dim=0, keepdim=True) + std_input = torch.std(train_input, dim=0, keepdim=True) + train_input = normalize(train_input, mean_input, std_input) + test_input = normalize(test_input, mean_input, std_input) + + if normalize_label == True: + mean_label = torch.mean(train_label, dim=0, keepdim=True) + std_label = torch.std(train_label, dim=0, keepdim=True) + train_label = normalize(train_label, mean_label, std_label) + test_label = normalize(test_label, mean_label, std_label) + + dataset = {} + dataset['train_input'] = train_input.to(device) + dataset['test_input'] = test_input.to(device) + + dataset['train_label'] = train_label.to(device) + dataset['test_label'] = test_label.to(device) + + return dataset + + + +def fit_params(x, y, fun, a_range=(-10,10), b_range=(-10,10), grid_number=101, iteration=3, verbose=True, device='cpu'): + ''' + fit a, b, c, d such that + + .. math:: + |y-(cf(ax+b)+d)|^2 + + is minimized. Both x and y are 1D array. Sweep a and b, find the best fitted model. 
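+    The sweep over (a, b) is repeated `iteration` times, zooming the search ranges in
+    around the best grid point each round; c and d are then read off a linear regression
+    of y against f(a*x + b).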
+ + Args: + ----- + x : 1D array + x values + y : 1D array + y values + fun : function + symbolic function + a_range : tuple + sweeping range of a + b_range : tuple + sweeping range of b + grid_num : int + number of steps along a and b + iteration : int + number of zooming in + verbose : bool + print extra information if True + device : str + device + + Returns: + -------- + a_best : float + best fitted a + b_best : float + best fitted b + c_best : float + best fitted c + d_best : float + best fitted d + r2_best : float + best r2 (coefficient of determination) + + Example + ------- + >>> num = 100 + >>> x = torch.linspace(-1,1,steps=num) + >>> noises = torch.normal(0,1,(num,)) * 0.02 + >>> y = 5.0*torch.sin(3.0*x + 2.0) + 0.7 + noises + >>> fit_params(x, y, torch.sin) + r2 is 0.9999727010726929 + (tensor([2.9982, 1.9996, 5.0053, 0.7011]), tensor(1.0000)) + ''' + # fit a, b, c, d such that y=c*fun(a*x+b)+d; both x and y are 1D array. + # sweep a and b, choose the best fitted model + for _ in range(iteration): + a_ = torch.linspace(a_range[0], a_range[1], steps=grid_number, device=device) + b_ = torch.linspace(b_range[0], b_range[1], steps=grid_number, device=device) + a_grid, b_grid = torch.meshgrid(a_, b_, indexing='ij') + post_fun = fun(a_grid[None,:,:] * x[:,None,None] + b_grid[None,:,:]) + x_mean = torch.mean(post_fun, dim=[0], keepdim=True) + y_mean = torch.mean(y, dim=[0], keepdim=True) + numerator = torch.sum((post_fun - x_mean)*(y-y_mean)[:,None,None], dim=0)**2 + denominator = torch.sum((post_fun - x_mean)**2, dim=0)*torch.sum((y - y_mean)[:,None,None]**2, dim=0) + r2 = numerator/(denominator+1e-4) + r2 = torch.nan_to_num(r2) + + + best_id = torch.argmax(r2) + a_id, b_id = torch.div(best_id, grid_number, rounding_mode='floor'), best_id % grid_number + + + if a_id == 0 or a_id == grid_number - 1 or b_id == 0 or b_id == grid_number - 1: + if _ == 0 and verbose==True: + print('Best value at boundary.') + if a_id == 0: + a_range = [a_[0], a_[1]] + if a_id == grid_number - 1: + a_range = [a_[-2], a_[-1]] + if b_id == 0: + b_range = [b_[0], b_[1]] + if b_id == grid_number - 1: + b_range = [b_[-2], b_[-1]] + + else: + a_range = [a_[a_id-1], a_[a_id+1]] + b_range = [b_[b_id-1], b_[b_id+1]] + + a_best = a_[a_id] + b_best = b_[b_id] + post_fun = fun(a_best * x + b_best) + r2_best = r2[a_id, b_id] + + if verbose == True: + print(f"r2 is {r2_best}") + if r2_best < 0.9: + print(f'r2 is not very high, please double check if you are choosing the correct symbolic function.') + + post_fun = torch.nan_to_num(post_fun) + reg = LinearRegression().fit(post_fun[:,None].detach().cpu().numpy(), y.detach().cpu().numpy()) + c_best = torch.from_numpy(reg.coef_)[0].to(device) + d_best = torch.from_numpy(np.array(reg.intercept_)).to(device) + return torch.stack([a_best, b_best, c_best, d_best]), r2_best + + +def sparse_mask(in_dim, out_dim): + + in_coord = torch.arange(in_dim) * 1/in_dim + 1/(2*in_dim) + out_coord = torch.arange(out_dim) * 1/out_dim + 1/(2*out_dim) + + dist_mat = torch.abs(out_coord[:,None] - in_coord[None,:]) + in_nearest = torch.argmin(dist_mat, dim=0) + in_connection = torch.stack([torch.arange(in_dim), in_nearest]).permute(1,0) + out_nearest = torch.argmin(dist_mat, dim=1) + out_connection = torch.stack([out_nearest, torch.arange(out_dim)]).permute(1,0) + all_connection = torch.cat([in_connection, out_connection], dim=0) + mask = torch.zeros(in_dim, out_dim) + mask[all_connection[:,0], all_connection[:,1]] = 1. 
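+    # each input is wired to its nearest output and each output to its nearest input,
+    # so the resulting 0/1 mask is sparse but leaves no node disconnected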
+ + return mask + + +def add_symbolic(name, fun, c=1, fun_singularity=None): + ''' + add a symbolic function to library + + Args: + ----- + name : str + name of the function + fun : fun + torch function or lambda function + + Returns: + -------- + None + + Example + ------- + >>> print(SYMBOLIC_LIB['Bessel']) + KeyError: 'Bessel' + >>> add_symbolic('Bessel', torch.special.bessel_j0) + >>> print(SYMBOLIC_LIB['Bessel']) + (, Bessel) + ''' + exec(f"globals()['{name}'] = sympy.Function('{name}')") + if fun_singularity==None: + fun_singularity = fun + SYMBOLIC_LIB[name] = (fun, globals()[name], c, fun_singularity) + + +def ex_round(ex1, n_digit): + ex2 = ex1 + for a in sympy.preorder_traversal(ex1): + if isinstance(a, sympy.Float): + ex2 = ex2.subs(a, round(a, n_digit)) + return ex2 + + +def augment_input(orig_vars, aux_vars, x): + + # if x is a tensor + if isinstance(x, torch.Tensor): + + for aux_var in aux_vars: + func = lambdify(orig_vars, aux_var,'numpy') # returns a numpy-ready function + aux_value = torch.from_numpy(func(*[x[:,[i]].numpy() for i in range(len(orig_vars))])) + x = torch.cat([x, aux_value], dim=1) + + # if x is a dataset + elif isinstance(x, dict): + x['train_input'] = augment_input(orig_vars, aux_vars, x['train_input']) + x['test_input'] = augment_input(orig_vars, aux_vars, x['test_input']) + + return x + + +def batch_jacobian(func, x, create_graph=False): + # x in shape (Batch, Length) + def _func_sum(x): + return func(x).sum(dim=0) + return torch.autograd.functional.jacobian(_func_sum, x, create_graph=create_graph)[0] + +def batch_hessian(model, x, create_graph=False): + # x in shape (Batch, Length) + jac = lambda x: batch_jacobian(model, x, create_graph=True) + def _jac_sum(x): + return jac(x).sum(dim=0) + return torch.autograd.functional.jacobian(_jac_sum, x, create_graph=create_graph).permute(1,0,2) \ No newline at end of file diff --git a/kan/KANLayer.py b/kan/KANLayer.py index a07378de..60c7b0eb 100644 --- a/kan/KANLayer.py +++ b/kan/KANLayer.py @@ -120,29 +120,28 @@ def __init__(self, in_dim=3, out_dim=2, num=5, k=3, noise_scale=0.1, scale_base= grid = extend_grid(grid, k_extend=k) self.grid = torch.nn.Parameter(grid).requires_grad_(False) noises = (torch.rand(self.num+1, self.in_dim, self.out_dim) - 1 / 2) * noise_scale / num - noises = noises.to(device) # shape: (size, coef) - self.coef = torch.nn.Parameter(curve2coef(self.grid[:,k:-k].permute(1,0), noises, self.grid, k, device)) + self.coef = torch.nn.Parameter(curve2coef(self.grid[:,k:-k].permute(1,0), noises, self.grid, k)) #if isinstance(scale_base, float): if sparse_init: mask = sparse_mask(in_dim, out_dim) else: mask = 1. 
- self.scale_base = torch.nn.Parameter(torch.ones(in_dim, out_dim, device=device) * scale_base * mask).requires_grad_(sb_trainable) # make scale trainable + self.scale_base = torch.nn.Parameter(torch.ones(in_dim, out_dim) * scale_base * mask).requires_grad_(sb_trainable) # make scale trainable #else: #self.scale_base = torch.nn.Parameter(scale_base.to(device)).requires_grad_(sb_trainable) - self.scale_sp = torch.nn.Parameter(torch.ones(in_dim, out_dim, device=device) * scale_sp * mask).requires_grad_(sp_trainable) # make scale trainable + self.scale_sp = torch.nn.Parameter(torch.ones(in_dim, out_dim) * scale_sp * mask).requires_grad_(sp_trainable) # make scale trainable self.base_fun = base_fun - self.mask = torch.nn.Parameter(torch.ones(in_dim, out_dim, device=device)).requires_grad_(False) + self.mask = torch.nn.Parameter(torch.ones(in_dim, out_dim)).requires_grad_(False) self.grid_eps = grid_eps ### remove weight_sharing & lock parts #self.weight_sharing = torch.arange(out_dim*in_dim).reshape(out_dim, in_dim) #self.lock_counter = 0 #self.lock_id = torch.zeros(out_dim*in_dim).reshape(out_dim, in_dim) - self.device = device + def forward(self, x): ''' @@ -181,7 +180,7 @@ def forward(self, x): preacts = x[:,None,:].clone().expand(batch, self.out_dim, self.in_dim) base = self.base_fun(x) # (batch, in_dim) - y = coef2curve(x_eval=x, grid=self.grid, coef=self.coef, k=self.k, device=self.device) # y shape: (batch, in_dim, out_dim) + y = coef2curve(x_eval=x, grid=self.grid, coef=self.coef, k=self.k) # y shape: (batch, in_dim, out_dim) postspline = y.clone().permute(0,2,1) # postspline shape: (batch, out_dim, in_dim) @@ -219,16 +218,16 @@ def update_grid_from_samples(self, x): batch = x.shape[0] #x = torch.einsum('ij,k->ikj', x, torch.ones(self.out_dim, ).to(self.device)).reshape(batch, self.size).permute(1, 0) x_pos = torch.sort(x, dim=0)[0] - y_eval = coef2curve(x_pos, self.grid, self.coef, self.k, device=self.device) + y_eval = coef2curve(x_pos, self.grid, self.coef, self.k) num_interval = self.grid.shape[1] - 1 - 2*self.k ids = [int(batch / num_interval * i) for i in range(num_interval)] + [-1] grid_adaptive = x_pos[ids, :].permute(1,0) margin = 0.01 h = (grid_adaptive[:,[-1]] - grid_adaptive[:,[0]])/num_interval - grid_uniform = grid_adaptive[:,[0]] + h * torch.arange(num_interval+1,).to(self.device)[None, :] + grid_uniform = grid_adaptive[:,[0]] + h * torch.arange(num_interval+1,)[None, :].to(x.device) grid = self.grid_eps * grid_uniform + (1 - self.grid_eps) * grid_adaptive self.grid.data = extend_grid(grid, k_extend=self.k) - self.coef.data = curve2coef(x_pos, y_eval, self.grid, self.k, device=self.device) + self.coef.data = curve2coef(x_pos, y_eval, self.grid, self.k) def initialize_grid_from_parent(self, parent, x): ''' @@ -264,20 +263,13 @@ def initialize_grid_from_parent(self, parent, x): x_eval = x pgrid = parent.grid # (in_dim, G+2*k+1) pk = parent.k - y_eval = coef2curve(x_eval, pgrid, parent.coef, pk, device=self.device) - '''print(x_pos.shape) - sp2 = KANLayer(in_dim=1, out_dim=self.in_dim, k=1, num=x_pos.shape[1] - 2*self.k - 1, scale_base=0., device=self.device) + y_eval = coef2curve(x_eval, pgrid, parent.coef, pk) - print(sp2.grid[:,sp2.k:-sp2.k].shape, x_pos[:,self.k:-self.k].shape, sp2.grid.shape) - sp2.coef.data = curve2coef(sp2.grid[:,sp2.k:-sp2.k], x_pos[:,self.k:-self.k], sp2.grid, k=1, device=self.device) - y_eval = coef2curve(x_eval, parent.grid, parent.coef, parent.k, device=self.device) - percentile = torch.linspace(-1, 1, self.num + 1).to(self.device) - 
self.grid.data = sp2(percentile.unsqueeze(dim=1))[0].permute(1, 0)''' h = (pgrid[:,[-pk]] - pgrid[:,[pk]])/self.num grid = pgrid[:,[pk]] + torch.arange(self.num+1,) * h grid = extend_grid(grid, k_extend=self.k) self.grid.data = grid - self.coef.data = curve2coef(x_eval, y_eval, self.grid, self.k, self.device) + self.coef.data = curve2coef(x_eval, y_eval, self.grid, self.k) def get_subset(self, in_id, out_id): ''' @@ -301,7 +293,7 @@ def get_subset(self, in_id, out_id): >>> kanlayer_small.in_dim, kanlayer_small.out_dim (2, 3) ''' - spb = KANLayer(len(in_id), len(out_id), self.num, self.k, base_fun=self.base_fun, device=self.device) + spb = KANLayer(len(in_id), len(out_id), self.num, self.k, base_fun=self.base_fun) spb.grid.data = self.grid[in_id] spb.coef.data = self.coef[in_id][:,out_id] spb.scale_base.data = self.scale_base[in_id][:,out_id] diff --git a/kan/LBFGS.py b/kan/LBFGS.py index a699b8c2..212477f2 100644 --- a/kan/LBFGS.py +++ b/kan/LBFGS.py @@ -267,7 +267,8 @@ def _gather_flat_grad(self): else: view = p.grad.view(-1) views.append(view) - return torch.cat(views, 0) + device = views[0].device + return torch.cat(views, dim=0) def _add_grad(self, step_size, update): offset = 0 diff --git a/kan/MultKAN.py b/kan/MultKAN.py index 044c264f..39dd62b5 100644 --- a/kan/MultKAN.py +++ b/kan/MultKAN.py @@ -24,7 +24,7 @@ class MultKAN(nn.Module): # include mult_ops = [] - def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, scale_base_mu=0.0, scale_base_sigma=1.0, base_fun='silu', symbolic_enabled=True, affine_trainable=False, grid_eps=1.0, grid_range=[-1, 1], sp_trainable=True, sb_trainable=True, device='cpu', seed=1, save_act=True, sparse_init=False, auto_save=True, first_init=True, ckpt_path='./model', state_id=0, round=0): + def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, scale_base_mu=0.0, scale_base_sigma=1.0, base_fun='silu', symbolic_enabled=True, affine_trainable=False, grid_eps=1.0, grid_range=[-1, 1], sp_trainable=True, sb_trainable=True, seed=1, save_act=True, sparse_init=False, auto_save=True, first_init=True, ckpt_path='./model', state_id=0, round=0): super(MultKAN, self).__init__() @@ -69,7 +69,7 @@ def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, sca # splines scale_base = scale_base_mu * 1 / np.sqrt(width_in[l]) + \ scale_base_sigma * (torch.randn(width_in[l], width_out[l + 1]) * 2 - 1) * 1/np.sqrt(width_in[l]) - sp_batch = KANLayer(in_dim=width_in[l], out_dim=width_out[l+1], num=grid, k=k, noise_scale=noise_scale, scale_base=scale_base, scale_sp=1., base_fun=base_fun, grid_eps=grid_eps, grid_range=grid_range, sp_trainable=sp_trainable, sb_trainable=sb_trainable, device=device, sparse_init=sparse_init) + sp_batch = KANLayer(in_dim=width_in[l], out_dim=width_out[l+1], num=grid, k=k, noise_scale=noise_scale, scale_base=scale_base, scale_sp=1., base_fun=base_fun, grid_eps=grid_eps, grid_range=grid_range, sp_trainable=sp_trainable, sb_trainable=sb_trainable, sparse_init=sparse_init) self.act_fun.append(sp_batch) self.node_bias = [] @@ -82,10 +82,10 @@ def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, sca exec('self.node_bias_0' + " = torch.nn.Parameter(torch.zeros(3,1)).requires_grad_(False)") for l in range(self.depth): - exec(f'self.node_bias_{l} = torch.nn.Parameter(torch.zeros(width_in[l+1],)).requires_grad_(affine_trainable).to(device)') - exec(f'self.node_scale_{l} = torch.nn.Parameter(torch.ones(width_in[l+1],)).requires_grad_(affine_trainable).to(device)') - 
exec(f'self.subnode_bias_{l} = torch.nn.Parameter(torch.zeros(width_out[l+1],)).requires_grad_(affine_trainable).to(device)') - exec(f'self.subnode_scale_{l} = torch.nn.Parameter(torch.ones(width_out[l+1],)).requires_grad_(affine_trainable).to(device)') + exec(f'self.node_bias_{l} = torch.nn.Parameter(torch.zeros(width_in[l+1],)).requires_grad_(affine_trainable)') + exec(f'self.node_scale_{l} = torch.nn.Parameter(torch.ones(width_in[l+1],)).requires_grad_(affine_trainable)') + exec(f'self.subnode_bias_{l} = torch.nn.Parameter(torch.zeros(width_out[l+1],)).requires_grad_(affine_trainable)') + exec(f'self.subnode_scale_{l} = torch.nn.Parameter(torch.ones(width_out[l+1],)).requires_grad_(affine_trainable)') exec(f'self.node_bias.append(self.node_bias_{l})') exec(f'self.node_scale.append(self.node_scale_{l})') exec(f'self.subnode_bias.append(self.subnode_bias_{l})') @@ -101,7 +101,7 @@ def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, sca ### initializing the symbolic front ### self.symbolic_fun = [] for l in range(self.depth): - sb_batch = Symbolic_KANLayer(in_dim=width_in[l], out_dim=width_out[l+1], device=device) + sb_batch = Symbolic_KANLayer(in_dim=width_in[l], out_dim=width_out[l+1]) self.symbolic_fun.append(sb_batch) self.symbolic_fun = nn.ModuleList(self.symbolic_fun) @@ -110,7 +110,6 @@ def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, sca self.sp_trainable = sp_trainable self.sb_trainable = sb_trainable - self.device = device self.save_act = save_act self.node_scores = None @@ -125,7 +124,6 @@ def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, sca self.ckpt_path = ckpt_path self.round = round - if auto_save: if first_init: if not os.path.exists(ckpt_path): @@ -145,10 +143,10 @@ def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=1.0, sca self.input_id = torch.arange(self.width_in[0],) def initialize_from_another_model(self, another_model, x): - another_model(x.to(another_model.device)) # get activations + another_model(x) # get activations batch = x.shape[0] - self.initialize_grid_from_another_model(another_model, x.to(another_model.device)) + self.initialize_grid_from_another_model(another_model, x) for l in range(self.depth): spb = self.act_fun[l] @@ -157,7 +155,7 @@ def initialize_from_another_model(self, another_model, x): # spb = spb_parent preacts = another_model.spline_preacts[l] postsplines = another_model.spline_postsplines[l] - self.act_fun[l].coef.data = curve2coef(preacts[:,0,:], postsplines.permute(0,2,1), spb.grid, k=spb.k, device=self.device) + self.act_fun[l].coef.data = curve2coef(preacts[:,0,:], postsplines.permute(0,2,1), spb.grid, k=spb.k) self.act_fun[l].scale_base.data = another_model.act_fun[l].scale_base.data self.act_fun[l].scale_sp.data = another_model.act_fun[l].scale_sp.data self.act_fun[l].mask.data = another_model.act_fun[l].mask.data @@ -172,7 +170,7 @@ def initialize_from_another_model(self, another_model, x): for l in range(self.depth): self.symbolic_fun[l] = another_model.symbolic_fun[l] - return self + return self.to(device) def log_history(self, method_name): @@ -204,7 +202,6 @@ def refine(self, new_grid): grid_range=self.grid_range, sp_trainable=self.sp_trainable, sb_trainable=self.sb_trainable, - device=self.device, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, @@ -237,7 +234,6 @@ def saveckpt(self, path='model'): grid_range = model.grid_range, sp_trainable = model.sp_trainable, sb_trainable = model.sb_trainable, - device = 
model.device, state_id = model.state_id, auto_save = model.auto_save, ckpt_path = model.ckpt_path, @@ -271,7 +267,6 @@ def loadckpt(path='model'): grid_range=config['grid_range'], sp_trainable=config['sp_trainable'], sb_trainable=config['sb_trainable'], - device=config['device'], state_id=config['state_id'], auto_save=config['auto_save'], first_init=False, @@ -859,7 +854,7 @@ def get_params(self): def fit(self, dataset, opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_entropy=2., lamb_coef=0., lamb_coefdiff=0., update_grid=True, grid_update_num=10, loss_fn=None, lr=1.,start_grid_update_step=-1, stop_grid_update_step=50, batch=-1, - metrics=None, save_fig=False, in_vars=None, out_vars=None, beta=3, save_fig_freq=1, img_folder='./video', device='cpu', singularity_avoiding=False, y_th=1000., reg_metric='edge_backward', display_metrics=None): + metrics=None, save_fig=False, in_vars=None, out_vars=None, beta=3, save_fig_freq=1, img_folder='./video', singularity_avoiding=False, y_th=1000., reg_metric='edge_backward', display_metrics=None): if lamb > 0. and not self.save_act: print('setting lamb=0. If you want to set lamb > 0, set self.save_act=True') @@ -900,8 +895,8 @@ def fit(self, dataset, opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_ def closure(): global train_loss, reg_ optimizer.zero_grad() - pred = self.forward(dataset['train_input'][train_id].to(self.device), singularity_avoiding=singularity_avoiding, y_th=y_th) - train_loss = loss_fn(pred, dataset['train_label'][train_id].to(self.device)) + pred = self.forward(dataset['train_input'][train_id], singularity_avoiding=singularity_avoiding, y_th=y_th) + train_loss = loss_fn(pred, dataset['train_label'][train_id]) if self.save_act: if reg_metric == 'edge_backward': self.attribute() @@ -927,17 +922,19 @@ def closure(): test_id = np.random.choice(dataset['test_input'].shape[0], batch_size_test, replace=False) if _ % grid_update_freq == 0 and _ < stop_grid_update_step and update_grid and _ >= start_grid_update_step: - self.update_grid(dataset['train_input'][train_id].to(device)) + self.update_grid(dataset['train_input'][train_id]) if opt == "LBFGS": optimizer.step(closure) if opt == "Adam": - pred = self.forward(dataset['train_input'][train_id].to(self.device), singularity_avoiding=singularity_avoiding, y_th=y_th) - train_loss = loss_fn(pred, dataset['train_label'][train_id].to(self.device)) + pred = self.forward(dataset['train_input'][train_id], singularity_avoiding=singularity_avoiding, y_th=y_th) + train_loss = loss_fn(pred, dataset['train_label'][train_id]) if self.save_act: - if reg_metric == 'fa': + if reg_metric == 'edge_backward': self.attribute() + if reg_metric == 'node_backward': + self.node_attribute() reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff) else: reg_ = torch.tensor(0.) 
@@ -946,7 +943,7 @@ def closure(): loss.backward() optimizer.step() - test_loss = loss_fn_eval(self.forward(dataset['test_input'][test_id].to(self.device)), dataset['test_label'][test_id].to(self.device)) + test_loss = loss_fn_eval(self.forward(dataset['test_input'][test_id]), dataset['test_label'][test_id]) if metrics != None: @@ -1055,7 +1052,7 @@ def prune_node(self, threshold=1e-2, mode="auto", active_neurons_id=None, log_hi if i not in active_neurons_down[l]: self.remove_node(l + 1, i, mode='down',log_history=False) - model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun_name, device=self.device, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round) + model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun_name, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round) model2.load_state_dict(self.state_dict()) width_new = [self.width[0]] @@ -1135,7 +1132,7 @@ def prune_input(self, threshold=1e-2, active_inputs=None, log_history=True): else: input_id = torch.tensor(active_inputs, dtype=torch.long) - model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun, device=self.device, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round) + model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round) model2.load_state_dict(self.state_dict()) model2.act_fun[0] = model2.act_fun[0].get_subset(input_id, torch.arange(self.width_out[1])) @@ -1227,6 +1224,8 @@ def score_node2subnode(node_score, width, mult_arity, out_dim): else: node_score = torch.diag(out_score).requires_grad_(True) node_scores.append(node_score) + + device = self.act_fun[0].grid.device for l in range(l_end,0,-1): @@ -1239,7 +1238,8 @@ def score_node2subnode(node_score, width, mult_arity, out_dim): subnode_scores.append(subnode_score) # subnode to edge - edge_score = torch.einsum('ij,ki,i->kij', self.edge_actscale[l-1], subnode_score, 1/(self.subnode_actscale[l-1]+1e-4)) + #print(self.edge_actscale[l-1].device, subnode_score.device, self.subnode_actscale[l-1].device) + edge_score = torch.einsum('ij,ki,i->kij', self.edge_actscale[l-1], subnode_score.to(device), 1/(self.subnode_actscale[l-1]+1e-4)) edge_scores.append(edge_score) # edge to node @@ -1508,10 +1508,10 @@ def expand_depth(self): self.symbolic_fun.append(layer) - self.node_bias.append(torch.nn.Parameter(torch.zeros(dim_out,)).requires_grad_(self.affine_trainable).to(self.device)) - self.node_scale.append(torch.nn.Parameter(torch.ones(dim_out,)).requires_grad_(self.affine_trainable).to(self.device)) - self.subnode_bias.append(torch.nn.Parameter(torch.zeros(dim_out,)).requires_grad_(self.affine_trainable).to(self.device)) - self.subnode_scale.append(torch.nn.Parameter(torch.ones(dim_out,)).requires_grad_(self.affine_trainable).to(self.device)) + self.node_bias.append(torch.nn.Parameter(torch.zeros(dim_out,)).requires_grad_(self.affine_trainable)) + self.node_scale.append(torch.nn.Parameter(torch.ones(dim_out,)).requires_grad_(self.affine_trainable)) + self.subnode_bias.append(torch.nn.Parameter(torch.zeros(dim_out,)).requires_grad_(self.affine_trainable)) + 
self.subnode_scale.append(torch.nn.Parameter(torch.ones(dim_out,)).requires_grad_(self.affine_trainable)) def expand_width(self, layer_id, n_added_nodes, sum_bool=True, mult_arity=2): diff --git a/kan/spline.py b/kan/spline.py index 4cfbc01f..6a14510c 100644 --- a/kan/spline.py +++ b/kan/spline.py @@ -60,13 +60,13 @@ def extend_grid(grid, k_extend=0): value = (x - grid[:, :-(k + 1)]) / (grid[:, k:-1] - grid[:, :-(k + 1)]) * B_km1[:, :-1] + ( grid[:, k + 1:] - x) / (grid[:, k + 1:] - grid[:, 1:(-k)]) * B_km1[:, 1:]''' - x = x.unsqueeze(dim=2).to(device) - grid = grid.unsqueeze(dim=0).to(device) + x = x.unsqueeze(dim=2) + grid = grid.unsqueeze(dim=0) if k == 0: value = (x >= grid[:, :, :-1]) * (x < grid[:, :, 1:]) else: - B_km1 = B_batch(x[:,:,0], grid=grid[0], k=k - 1, device=device) + B_km1 = B_batch(x[:,:,0], grid=grid[0], k=k - 1) value = (x - grid[:, :, :-(k + 1)]) / (grid[:, :, k:-1] - grid[:, :, :-(k + 1)]) * B_km1[:, :, :-1] + ( grid[:, :, k + 1:] - x) / (grid[:, :, k + 1:] - grid[:, :, 1:(-k)]) * B_km1[:, :, 1:] @@ -113,18 +113,14 @@ def coef2curve(x_eval, grid, coef, k, device="cpu"): ''' # x_eval: (size, batch), grid: (size, grid), coef: (size, coef) # coef: (size, coef), B_batch: (size, coef, batch), summer over coef - '''if coef.dtype != x_eval.dtype: - coef = coef.to(x_eval.dtype) - y_eval = torch.einsum('ij,ijk->ik', coef, B_batch(x_eval, grid, k, device=device))''' - b_splines = B_batch(x_eval, grid, k=k).to(device) # (batch, in_dim, n_coef) - # coef (in_dim, out_dim, n_coef) - #print(b_splines.shape, coef.shape) - y_eval = torch.einsum('ijk,jlk->ijl', b_splines, coef) + b_splines = B_batch(x_eval, grid, k=k) # (batch, in_dim, n_coef) + y_eval = torch.einsum('ijk,jlk->ijl', b_splines, coef.to(b_splines.device)) + return y_eval -def curve2coef(x_eval, y_eval, grid, k, device="cpu"): +def curve2coef(x_eval, y_eval, grid, k): ''' converting B-spline curves to B-spline coefficients using least squares. @@ -163,14 +159,15 @@ def curve2coef(x_eval, y_eval, grid, k, device="cpu"): out_dim = y_eval.shape[2] n_coef = grid.shape[1] - k - 1 #mat = B_batch(x_eval, grid, k, device=device).permute(0, 2, 1) - mat = B_batch(x_eval, grid, k, device=device) # (batch, in_dim, G+k) + mat = B_batch(x_eval, grid, k) # (batch, in_dim, G+k) mat = mat.permute(1,0,2)[:,None,:,:].expand(in_dim, out_dim, batch, n_coef) # (in_dim, out_dim, batch, n_coef) # coef shape: (in_dim, outdim, G+k) y_eval = y_eval.permute(1,2,0).unsqueeze(dim=3) # y_eval: (in_dim, out_dim, batch, 1) #print(mat) - coef = torch.linalg.lstsq(mat.to(device), y_eval.to(device), + device = mat.device + coef = torch.linalg.lstsq(mat, y_eval, driver='gelsy' if device == 'cpu' else 'gels').solution[:,:,:,0] - return coef.to(device) + return coef def extend_grid(grid, k_extend=0): diff --git a/pykan.egg-info/PKG-INFO b/pykan.egg-info/PKG-INFO index da283df5..e7307237 100644 --- a/pykan.egg-info/PKG-INFO +++ b/pykan.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: pykan -Version: 0.0.1 +Version: 0.2.1 Summary: Kolmogorov Arnold Networks Author: Ziming Liu Author-email: zmliu@mit.edu @@ -13,48 +13,175 @@ License-File: LICENSE kan_plot -# Kolmogorov-Arnold Newtworks (KANs) +# !! Major Updates on July 13, 2024 -This the github repo for the paper "KAN: Kolmogorov-Arnold Networks" [link]. The documentation can be found here [link]. +* `model.train()` has been changed to `model.fit()` +* Some other small features are changed (e.g., create_dataset has been moved to kan.utils). 
I have updated the notebooks in `./tutorials` and checked that they are runnable on CPUs, so please refer to those tutorials for updated/new functionalities. Documentation hasn't been updated yet but will be updated soon. -Kolmogorov-Arnold Networks (KANs) are promising alternatives of Multi-Layer Perceptrons (MLPs). KANs have strong mathematical foundations just like MLPs: MLPs are based on the [universal approximation theorem](https://en.wikipedia.org/wiki/Universal_approximation_theorem), while KANs are based on [Kolmogorov-Arnold representation theorem](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Arnold_representation_theorem). KANs and MLPs are dual: KANs have activation functions on edges, while MLPs have activation functions on nodes. This simple change makes KANs better (sometimes much better!) than MLPs in terms of both model accuracy and interpretability. +For PyPI users, this is the most recent version 0.2.1. + +New functionalities include (documentation later): +* Including multiplications in KANs. [Tutorial](https://github.com/KindXiaoming/pykan/blob/master/tutorials/Interp_1_Hello%2C%20MultKAN.ipynb) +* The speed mode. Speed up your KAN using `model = model.speed()` if you never use the symbolic functionalities. [Tutorial](https://github.com/KindXiaoming/pykan/blob/master/tutorials/Example_2_speed_up.ipynb) +* Compiling symbolic formulas into KANs. [Tutorial](https://github.com/KindXiaoming/pykan/blob/master/tutorials/Interp_3_KAN_Compiler.ipynb) +* Feature attribution and pruning inputs. [Tutorial](https://github.com/KindXiaoming/pykan/blob/master/tutorials/Interp_4_feature_attribution.ipynb) + +# Kolmogorov-Arnold Networks (KANs) + +This is the GitHub repo for the paper ["KAN: Kolmogorov-Arnold Networks"](https://arxiv.org/abs/2404.19756). Find the documentation [here](https://kindxiaoming.github.io/pykan/). Here's the [author's note](https://github.com/KindXiaoming/pykan?tab=readme-ov-file#authors-note) responding to the current hype around KANs. + +Kolmogorov-Arnold Networks (KANs) are promising alternatives to Multi-Layer Perceptrons (MLPs). KANs have strong mathematical foundations just like MLPs: MLPs are based on the universal approximation theorem, while KANs are based on the Kolmogorov-Arnold representation theorem. KANs and MLPs are dual: KANs have activation functions on edges, while MLPs have activation functions on nodes. This simple change makes KANs better (sometimes much better!) than MLPs in terms of both model **accuracy** and **interpretability**. A quick intro to KANs is available [here](https://kindxiaoming.github.io/pykan/intro.html). mlp_kan_compare +## Accuracy +**KANs have faster scaling than MLPs. KANs have better accuracy than MLPs with fewer parameters.** + +Please set `torch.set_default_dtype(torch.float64)` if you want high precision. + +**Example 1: fitting symbolic formulas** +Screenshot 2024-04-30 at 10 55 30 + +**Example 2: fitting special functions** +Screenshot 2024-04-30 at 11 07 20 + +**Example 3: PDE solving** +Screenshot 2024-04-30 at 10 57 25 + +**Example 4: avoid catastrophic forgetting** +Screenshot 2024-04-30 at 11 04 36 + +## Interpretability +**KANs can be intuitively visualized. KANs offer interpretability and interactivity that MLPs cannot provide. 
We can use KANs to potentially discover new scientific laws.** + +**Example 1: Symbolic formulas** +Screenshot 2024-04-30 at 11 04 56 + +**Example 2: Discovering mathematical laws of knots** +Screenshot 2024-04-30 at 11 05 25 + +**Example 3: Discovering physical laws of Anderson localization** +Screenshot 2024-04-30 at 11 05 53 + +**Example 4: Training of a three-layer KAN** + +![kan_training_low_res](https://github.com/KindXiaoming/pykan/assets/23551623/e9f215c7-a393-46b9-8528-c906878f015e) + + + ## Installation -There are two ways to install pykan, through pypi or github. +Pykan can be installed via PyPI or directly from GitHub. -**Installation via github** +**Pre-requisites:** -```python -git clone https://github.com/KindXiaoming/pykan.git +``` +Python 3.9.7 or higher +pip +``` + +**For developers** + +``` +git clone https://github.com/KindXiaoming/pykan.git cd pykan pip install -e . ``` -**Installation via pypi (soon)** +**Installation via github** -```python +``` +pip install git+https://github.com/KindXiaoming/pykan.git +``` + +**Installation via PyPI:** +``` pip install pykan ``` +Requirements -To install requirements: +```python +# python==3.9.7 +matplotlib==3.6.2 +numpy==1.24.4 +scikit_learn==1.1.3 +setuptools==65.5.0 +sympy==1.11.1 +torch==2.2.2 +tqdm==4.66.2 +``` + +After activating the virtual environment, you can install specific package requirements as follows: ```python pip install -r requirements.txt ``` +**Optional: Conda Environment Setup** +For those who prefer using Conda: +``` +conda create --name pykan-env python=3.9.7 +conda activate pykan-env +pip install git+https://github.com/KindXiaoming/pykan.git # For GitHub installation +# or +pip install pykan # For PyPI installation +``` + +## Computation requirements + +Examples in [tutorials](tutorials) are runnable on a single CPU, typically in less than 10 minutes. All examples in the paper are runnable on a single CPU in less than one day. Training KANs for PDE is the most expensive and may take hours to days on a single CPU. We use CPUs to train our models because we carried out parameter sweeps (both for MLPs and KANs) to obtain Pareto Frontiers. There are thousands of small models, which is why we use CPUs rather than GPUs. Admittedly, our problem scales are smaller than typical machine learning tasks, but they are typical for science-related tasks. If the scale of your task is large, it is advisable to use GPUs. + ## Documentation -The documenation can be found here []. +The documentation can be found [here](https://kindxiaoming.github.io/pykan/). ## Tutorials **Quickstart** -Get started with [hellokan.ipynb](./hellokan.ipynb) notebook +Get started with the [hellokan.ipynb](./hellokan.ipynb) notebook. **More demos** -Jupyter Notebooks in [docs/Examples](./docs/Examples) and [docs/API_demo](./docs/API\_demo) are ready to play. You may also find these examples in documentation. +More notebook tutorials can be found in [tutorials](tutorials). + +## Advice on hyperparameter tuning +Much of the intuition about MLPs and other networks may not directly transfer to KANs. So how can I tune the hyperparameters effectively? Here is my general advice based on my experience playing with the problems reported in the paper. Since these problems are relatively small-scale and science-oriented, it is likely that my advice is not suitable for your case. But I want to at least share my experience so that users have better clues about where to start and what to expect from tuning hyperparameters. 
+ +* Start from a simple setup (small KAN shape, small grid size, small data, no regularization `lamb=0`). This is very different from the MLP literature, where people by default use widths of order `O(10^2)` or higher. For example, if you have a task with 5 inputs and 1 output, I would try something as simple as `KAN(width=[5,1,1], grid=3, k=3)`. If it doesn't work, I would first gradually increase width. If that still doesn't work, I would consider increasing depth. You don't need to be this extreme if you have a better understanding of the complexity of your task. + +* Once an acceptable performance is achieved, you could then try refining your KAN (more accurate or more interpretable). + +* If you care about accuracy, try the grid extension technique. An example is [here](https://kindxiaoming.github.io/pykan/Examples/Example_1_function_fitting.html). But watch out for overfitting; see below. + +* If you care about interpretability, try sparsifying the network with, e.g., `model.fit(lamb=0.01)`. It would also be advisable to try increasing lamb gradually. After training with sparsification, plot the model; if you see some neurons that are obviously useless, you may call `pruned_model = model.prune()` to get the pruned model. You can then further train (either to encourage accuracy or to encourage sparsity), or do symbolic regression. + +* I also want to emphasize that accuracy and interpretability (and also parameter efficiency) are not necessarily contradictory, e.g., Figure 2.3 in [our paper](https://arxiv.org/pdf/2404.19756). They can be positively correlated in some cases, but in other cases may display some tradeoff. So it would be good not to be greedy and to aim for one goal at a time. However, if you have a strong reason to believe that pruning (interpretability) can also help accuracy, you may want to plan ahead, so that even if your end goal is accuracy, you push interpretability first. + +* Once you get a reasonably good result, try increasing the data size and do a final run, which should give you even better results! + +Disclaimer: "Try the simplest thing first" is the mindset of physicists, which could be personal/biased, but I find this mindset quite effective and it keeps things well-controlled for me. Also, the reason why I tend to choose a small dataset at first is to get faster feedback in the debugging stage (my initial implementation is slow, after all!). The hidden assumption is that a small dataset behaves qualitatively similarly to a large dataset, which is not necessarily true in general, but is usually true in the small-scale problems that I have tried. To know if your data is sufficient, see the next paragraph. + +Another thing to keep in mind is to constantly check whether your model is in the underfitting or overfitting regime. If there is a large gap between train/test losses, you probably want to increase the data or reduce the model (`grid` is more important than `width`, so first try decreasing `grid`, then `width`). This is also the reason why I'd love to start from simple models, to make sure that the model is first in the underfitting regime and then gradually expand toward the "Goldilocks zone". 
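+
+To make the advice above concrete, here is a minimal sketch of the start-simple workflow, not a definitive recipe: it assumes the 0.2.x API described in this README (where `model.train()` has become `model.fit()`); the toy target function, the step counts, and the grid-extension call `model.refine(10)` (inferred from the `refine` method added in this patch) are illustrative assumptions, so adapt them to your task and check the tutorials for runnable versions.
+
+```python
+from kan import KAN, create_dataset
+import torch
+
+torch.set_default_dtype(torch.float64)  # optional, for high precision
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+# toy task with 5 inputs and 1 output (illustrative target function)
+f = lambda x: torch.sin(torch.pi * x[:, [0]]) * x[:, [1]] + x[:, [2]] * x[:, [3]] ** 2 + torch.exp(x[:, [4]])
+dataset = create_dataset(f, n_var=5, device=device)
+
+# 1. start from the simplest plausible shape, with no regularization
+model = KAN(width=[5, 1, 1], grid=3, k=3, seed=0).to(device)
+model.fit(dataset, opt="LBFGS", steps=50)
+
+# 2. for interpretability: sparsify with a small lamb, inspect, then prune
+model.fit(dataset, opt="LBFGS", steps=50, lamb=0.01)
+model.plot()
+model = model.prune()
+
+# 3. for accuracy: grid extension (refine to a finer grid), then retrain
+model = model.refine(10)
+model.fit(dataset, opt="LBFGS", steps=50)
+```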
+ +## Citation +```python +@article{liu2024kan, + title={KAN: Kolmogorov-Arnold Networks}, + author={Liu, Ziming and Wang, Yixuan and Vaidya, Sachin and Ruehle, Fabian and Halverson, James and Solja{\v{c}}i{\'c}, Marin and Hou, Thomas Y and Tegmark, Max}, + journal={arXiv preprint arXiv:2404.19756}, + year={2024} +} +``` + +## Contact +If you have any questions, please contact zmliu@mit.edu + +## Author's note +I would like to thank everyone who's interested in KANs. When I designed KANs and wrote the code, I had math & physics examples (which are quite small scale!) in mind, so I did not consider much optimization for efficiency or reusability. I'm honored to receive this unwarranted attention, which is way beyond my expectation. So I accept any criticism from people complaining about the efficiency and reusability of the code; my apologies. My only hope is that you find `model.plot()` fun to play with :). + +For users who are interested in scientific discoveries and scientific computing (the originally intended users), I'm happy to hear about your applications and collaborate. This repo will remain mostly for this purpose, probably without significant updates for efficiency. In fact, there are already implementations like [efficientkan](https://github.com/Blealtan/efficient-kan) or [fourierkan](https://github.com/GistNoesis/FourierKAN/) that look promising for improving efficiency. + +For users who are machine-learning focused, I have to be honest that KANs are likely not a simple plug-in that can be used out-of-the-box (yet). Hyperparameters need tuning, and more tricks specific to your applications should be introduced. For example, [GraphKAN](https://github.com/WillHua127/GraphKAN-Graph-Kolmogorov-Arnold-Networks) suggests that KANs may be better used in latent space (with embedding and unembedding linear layers after inputs and before outputs). [KANRL](https://github.com/riiswa/kanrl) suggests that some trainable parameters may be better fixed in reinforcement learning to increase training stability. +The most common question I've been asked lately is whether KANs will be next-gen LLMs. I don't have good intuition about this. KANs are designed for applications where one cares about high accuracy and/or interpretability. We do care about LLM interpretability for sure, but interpretability can mean wildly different things for LLMs and for science. Do we care about high accuracy for LLMs? I don't know; scaling laws seem to imply so, but probably not too high precision. Accuracy can also mean different things for LLMs and for science. This subtlety makes it hard to directly transfer conclusions from our paper to LLMs, or to machine learning tasks in general. However, I would be very happy if you have enjoyed the high-level idea (learnable activation functions on edges, or interacting with AI for scientific discoveries), which is not necessarily *the future*, but can hopefully inspire and impact *many possible futures*. As a physicist, the message I want to convey is less "KANs are great" and more "try thinking of current architectures critically and seeking fundamentally different alternatives that can do fun and/or useful stuff". +I would like to welcome people to be critical of KANs, but also to be critical of the critiques. Practice is the only criterion for testing understanding (实践是检验真理的唯一标准). We don't know many things beforehand until they are really tried and shown to be succeeding or failing. 
As much as I'm willing to see success mode of KANs, I'm equally curious about failure modes of KANs, to better understand the boundaries. KANs and MLPs cannot replace each other (as far as I can tell); they each have advantages in some settings and limitations in others. I would be intrigued by a theoretical framework that encompasses both and could even suggest new alternatives (physicists love unified theories, sorry :). diff --git a/pykan.egg-info/SOURCES.txt b/pykan.egg-info/SOURCES.txt index f4f9203b..ccb3b065 100644 --- a/pykan.egg-info/SOURCES.txt +++ b/pykan.egg-info/SOURCES.txt @@ -4,8 +4,15 @@ setup.py kan/KAN.py kan/KANLayer.py kan/LBFGS.py +kan/MLP.py +kan/MultKAN.py kan/Symbolic_KANLayer.py kan/__init__.py +kan/ckpt.py +kan/compiler.py +kan/experiment.py +kan/feynman.py +kan/hypothesis.py kan/spline.py kan/utils.py kan/assets/img/mult_symbol.png diff --git a/tutorials/.ipynb_checkpoints/API_10_device-checkpoint.ipynb b/tutorials/.ipynb_checkpoints/API_10_device-checkpoint.ipynb new file mode 100644 index 00000000..99b43ede --- /dev/null +++ b/tutorials/.ipynb_checkpoints/API_10_device-checkpoint.ipynb @@ -0,0 +1,173 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "134e7f9d", + "metadata": {}, + "source": [ + "# Demo 10: Device\n", + "\n", + "All other demos have by default used device = 'cpu'. In case we want to use cuda, we should pass the device argument to model and dataset." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "7a4ac1e1-84ba-4bc3-91b6-a776a5e7711c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cpu\n" + ] + } + ], + "source": [ + "from kan import KAN, create_dataset\n", + "import torch\n", + "\n", + "torch.use_deterministic_algorithms(False)\n", + "\n", + "#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", + "device = 'cpu'\n", + "print(device)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2075ef56", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "checkpoint directory created: ./model\n", + "saving model version 0.0\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "| train_loss: 6.83e-01 | test_loss: 7.21e-01 | reg: 1.04e+03 | : 100%|█| 50/50 [00:19<00:00, 2.62it\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "saving model version 0.1\n" + ] + } + ], + "source": [ + "model = KAN(width=[4,100,100,100,1], grid=3, k=3, seed=0).to(device)\n", + "f = lambda x: torch.exp((torch.sin(torch.pi*(x[:,[0]]**2+x[:,[1]]**2))+torch.sin(torch.pi*(x[:,[2]]**2+x[:,[3]]**2)))/2)\n", + "dataset = create_dataset(f, n_var=4, train_num=1000, device=device)\n", + "\n", + "# train the model\n", + "#model.train(dataset, opt=\"LBFGS\", steps=20, lamb=1e-3, lamb_entropy=2.);\n", + "model.fit(dataset, opt=\"Adam\", lr=1e-3, steps=50, lamb=1e-3, lamb_entropy=5., update_grid=False);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f182cc1-51bf-4151-a253-a52fe854919e", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "f6f8125e-d26d-4c97-9e5f-988099bb4737", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cuda\n" + ] + } + ], + "source": [ + "device = 'cuda'\n", + "print(device)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "95017dfa-3a2a-43e0-8b68-fb220ca5abc9", + "metadata": {}, + "outputs": [ + 
{ + "name": "stdout", + "output_type": "stream", + "text": [ + "checkpoint directory created: ./model\n", + "saving model version 0.0\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "| train_loss: 6.83e-01 | test_loss: 7.21e-01 | reg: 1.04e+03 | : 100%|█| 50/50 [00:01<00:00, 26.90it\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "saving model version 0.1\n" + ] + } + ], + "source": [ + "model = KAN(width=[4,100,100,100,1], grid=3, k=3, seed=0).to(device)\n", + "f = lambda x: torch.exp((torch.sin(torch.pi*(x[:,[0]]**2+x[:,[1]]**2))+torch.sin(torch.pi*(x[:,[2]]**2+x[:,[3]]**2)))/2)\n", + "dataset = create_dataset(f, n_var=4, train_num=1000, device=device)\n", + "\n", + "# train the model\n", + "#model.train(dataset, opt=\"LBFGS\", steps=20, lamb=1e-3, lamb_entropy=2.);\n", + "model.fit(dataset, opt=\"Adam\", lr=1e-3, steps=50, lamb=1e-3, lamb_entropy=5., update_grid=False);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8230d562-2635-4adc-b566-06ac679b166a", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tutorials/.ipynb_checkpoints/Unchecked_API_10_device-checkpoint.ipynb b/tutorials/.ipynb_checkpoints/Unchecked_API_10_device-checkpoint.ipynb deleted file mode 100644 index cbac3cf4..00000000 --- a/tutorials/.ipynb_checkpoints/Unchecked_API_10_device-checkpoint.ipynb +++ /dev/null @@ -1,110 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "134e7f9d", - "metadata": {}, - "source": [ - "# Demo 10: Device\n", - "\n", - "All other demos have by default used device = 'cpu'. In case we want to use cuda, we should pass the device argument to model and dataset." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "7a4ac1e1-84ba-4bc3-91b6-a776a5e7711c", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cuda\n" - ] - } - ], - "source": [ - "from kan import KAN, create_dataset\n", - "import torch\n", - "\n", - "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", - "print(device)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "2075ef56", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "train loss: 5.78e-03 | test loss: 5.89e-03 | reg: 7.32e+00 : 100%|██| 50/50 [00:26<00:00, 1.85it/s]\n" - ] - } - ], - "source": [ - "model = KAN(width=[4,2,1,1], grid=3, k=3, seed=0, device=device)\n", - "f = lambda x: torch.exp((torch.sin(torch.pi*(x[:,[0]]**2+x[:,[1]]**2))+torch.sin(torch.pi*(x[:,[2]]**2+x[:,[3]]**2)))/2)\n", - "dataset = create_dataset(f, n_var=4, train_num=3000, device=device)\n", - "\n", - "# train the model\n", - "#model.train(dataset, opt=\"LBFGS\", steps=20, lamb=1e-3, lamb_entropy=2.);\n", - "model.train(dataset, opt=\"LBFGS\", steps=50, lamb=5e-5, lamb_entropy=2.);" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "3acdcdee-71ca-42a1-98aa-7f7df4a29077", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZcAAAHiCAYAAAAkiYF/AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABGtElEQVR4nO3deXyU9Z0H8M/zTDLJ5E4mCQRC7oRw5/ACL7QIiiBVtEVxPVrXVldp66utblvRaNd17bqK22O3ddeKIGolIpcJCmKpBwgBDCJJICQkkGMmySRzZc7f/sEOReTI8cw8c3zer5cvXy/JzHzlN08+z+/3/A5JCCFARESkIFntAoiIKPwwXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUlyU2gUQhQIhBHp6emCxWJCQkAC9Xg9JktQuiyhosedCdB4mkwkrVqxAcXExMjIykJ+fj4yMDBQXF2PFihUwmUxql0gUlCSeREl0drW1tVi8eDFsNhuAk70XH1+vJS4uDmvXrsW8efNUqZEoWDFciM6itrYWN954I4QQ8Hq95/w5WZYhSRI2bdrEgCE6DcOF6AwmkwnZ2dmw2+3nDRYfWZah0+nQ3t6OlJQU/xdIFAL4zIXoDK+++ipsNtuQggUAvF4vbDYbVq5c6efKiEIHey5EpxFCoLi4GM3NzRjOpSFJEgoKCtDU1MRZZERguBB9jdFoREZGxqher9frFayIKDRxWIzoNBaLZVSvN5vNClVCFNoYLkSnSUhIGNXrExMTFaqEKLQxXIhOo9frUVhYOOznJpIkobCwEGlpaX6qjCi0MFyITiNJEh5++OERvXbZsmV8mE/0//hAn+gMXOdCNHrsuRCdISUlBWvXroUkSZDl818ivhX61dXVDBai0zBciM5i3rx52LRpE3Q6HSRJ+sZwl++/6XQ6bN68GXPnzlWpUqLgxHAhOod58+ahvb0dL774IgoKCr72ZwUFBXjxxRdx/PhxBgvRWfCZC9EQCCHw0Ucf4frrr0dNTQ2uvvpqPrwnOg/2XIiGQJIkpKSkfO3fRHRuDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSnCSEEGoXQRQIQgi0tbWN+PVerxcOhwMxMTGQ5ZHfl02YMAGSJI349UShIErtAogCxeVyYe3atSgoKBjxewghRhQMXq8XjY2NiIqKwsMPPwytVjviGohCAcOFIsrMmTNx2WWXBfxza2pq8Jvf/Ab/+7//G/DPJlIDn7kQ+ZkQAj/+8Y+RlpaG9PR0tcshCgj2XIj8rK6uDq2trVi7di2ftVDEYM+FyI+EEHjggQeg1+tx/fXXq10OUcCw50LkR/X19aivr8crr7wyqhlmRKGG33YiPxFC4Pvf/z70ej1uu+02tcshCij2XIj85LPPPsOBAwewatUqaDQatcshCij2XIj8wOv14p577kF2djZuvvlmtcshCjj2XIj8YNWqVTh27Bg++OADPmuhiMRvPZHCLBYLHnnkEcyaNQuzZs1SuxwiVTBciBQkhMCyZcvgcDiwcuVKrmuhiMVwIVLQrl278MYbb+DRRx/FuHHj1C6HSDUMFyKF2O12LFmyBLm5uXjsscfYa6GIxgf6RAoQQuChhx6CwWDA559/jqgoXloU2XgFEI2SEALV1dV4/fXX8dRTT6G0tFTtkohUx2ExolFqbm7GfffdhyuuuAKPPPIIh8OIwHAhGpX+/n7ccMMNSE5Oxl/+8heuxCf6fxwWIxohh8OBm2++GQaDATt27EBKSoraJREFDYYL0Qi43W7ce++92LVrF9asWYMpU6aoXRJRUGG
4EA2T2+3GD3/4Q7z77rtYsWIFFixYwOcsRGfgMxeiYXA6nbj//vuxZs0aPPPMM/j+97/PYCE6C/ZciIZoYGAA99xzD7Zs2YJnn30WDz30EIOF6BwYLkQXIITA0aNHcfvtt6OhoQF//OMfcfvttzNYiM6D4UJ0Hl6vFxs2bMA//dM/QaPRYP369bjyyisZLEQXwGcuRGchhEBPTw+WLVuGpUuXYuLEidixYweDhWiIGC5EZ3C5XKiursbll1+O119/HY899hg2bdqEnJwcBgvREHFYjOj/eTwe7Nq1C08//TT++te/4uKLL8bq1atRUVHBUCEaJoYLRTQhBFwuF3bu3IkVK1bg/fffx7hx4/C73/0OS5YsQUxMjNolEoUkhgtFJCEEurq6UFNTg1deeQV1dXXIzs7GU089hbvuugspKSnsrRCNAsOFIorL5cLatWuxbt06fPTRR+jv70dZWRl++9vfYtGiRUhOTmaoECmA4UIRxeFw4N5770Vubi6++93v4tZbb0V5eTmio6MZKkQKkoQQQu0iiALB6XTiv//7v9Hb24upU6ciPj4+4IHS0NCAH/7wh9BqtQH9XKJAY7hQxBBCoLGxUdUeihACJSUl7CVR2GO4EA2REAJCCEiSxHAgugAuoiQaov379yM+Ph779+9XuxSioMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiGQAiBvr6+r/2biM6N4UJ0HiaTCStWrEBxcTGuvfZaOBwOXHvttSguLsaKFStgMpnULpEoKEmCt2BEZ1VbW4vFixfDZrMBwNd6K5IkAQDi4uKwdu1azJs3T5UaiYIVw4XoLGpra3HjjTdCCAGv13vOn5NlGZIkYdOmTQwYotMwXIjOYDKZkJ2dDbvdft5g8ZFlGTqdDu3t7UhJSfF/gUQhgM9ciM7w6quvwmazDSlYAMDr9cJms2HlypV+rowodLDnQnQaIQSKi4vR3Nw8rBlhkiShoKAATU1Np57HEEUyhgvRaYxGIzIyMkb1er1er2BFRKGJw2JEp7FYLKN6vdlsVqgSotDGcCE6TUJCwqhen5iYqFAlRKGN4UJ0Gp1Oh7S0tBG9trCwcMSvJQo3DBciAB6PB2vWrMHMmTPh8XhG9B4zZ85EX1+fwpURhSaGC0W8HTt2YM6cOfjRj36Eyy67DDt27EB8fDxkeWiXhyRJiI2NRU5ODp577jls2rQJdrvdz1UTBTfOFqOI1djYiKqqKrz//vu46KKL8PTTT6OyshLA8Ffob968GbNnz8ZHH32EDz/8ENHR0Zg7dy4uu+wyaDSaQP0vEQUNhgtFHKPRiOeeew6vvfYaxo8fj+XLl2PhwoXfWJ8y1L3FqqurMXfu3FN/NjAwgJqaGuzevRvp6em48cYbMXnyZK5/oYjCcKGI4XA48Mc//hEvvPACZFnGI488gvvuuw9arfacrzGZTFi5ciVeeuklHDly5NR/LywsxLJly3D33XcjOTn5rK89ceIENm7ciKamJhQWFmLhwoUYP3684v9fRMGI4UJhTwiBd955B7/+9a/R2dmJe+65Bz/96U+HNbNLCIHe3l6YzWYkJiYiLS1tSD0RIQQOHTqEjRs3wmAwoLKyEtdff/05A4koXDBcKKzt2rULy5cvR11dHa6//no88cQTKCwsDHgdXq8XO3fuRG1tLZxOJ66++mrMnj0bMTExAa+FKBAYLhSWWlpa8NRTT2Hjxo2YPn06qqqqcPnll6tdFgYHB7Ft2zbs2LEDOp0O8+bNw8UXXzzkmWlEoYLhQmHFZDLhP/7jP/A///M/yMjIwC9/+UssXrw46H559/X14b333sPevXuRlZWFBQsWoKSkRO2yiBTDcKGw4HQ68corr+D555+H0+nEj370I/zwhz+ETqdTu7Tzamtrw/r169HS0oKJEydi4cKFGDNmjNplEY0aw4VCmhAC7733HqqqqtDa2oqlS5fi0UcfRWZmptqlDZkQAgcOHMCmTZvQ29uLSy+9FHPnzuU+ZRTSGC4Usvbt24fly5fjs88+wzXXXIOqqiqUlpaqXdaIeTwefPzxx/jggw/g9XpxzTXX4KqrrkJ0dLTapRENG8OFQk57ezueeeYZvP322ygtLUVVVRWuueYatctSjM1mwwcffIBPPvkEiYmJuOGGG1BeXs5FmBRSGC4UMsxmM1566SX813/9F5KSkvDYY4/h9ttvR1RUlNql+YXRaMTmzZtRX1+P7OxsLFy4EAUFBWqXRTQkDBcKem63G6tXr8a//du/wWKx4MEHH8RDDz006rNXQsXRo0exYcMGtLW1YerUqbjxxhuRnp6udllE58VwoaAlhMDWrVtRVVWFhoYGfOc738EvfvELjBs3Tu3SAk4IgX379mHz5s0YGBjArFmzcN111yEuLk7t0ojOiuFCQengwYNYvnw5/vrXv+Lyyy9HVVUVpk+frnZZqnO5XPjb3/6GrVu3QpZlzJkzB7NmzQrboUEKXQwXCipdXV149tln8frrr6OgoABPPPEE5s2bx4fZZ7BYLNiyZQt27tyJ1NRUzJ8/H9OmTePfEwUNhgsFBZvNht///vf47W9/i5iYGPz85z/HXXfdxWm4F9DV1YVNmzbhq6++Ql5eHhYuXIicnBy1yyJiuJC6vF4v3nrrLTzzzDPo7e3Ffffdh5/85CfcNXiYmpqasGHDBnR0dKCsrAzz589Hamqq2mVRBGO4kGp27NiBJ554AgcOHMCiRYvw+OOP8657FLxeL/bs2YOamhrYbDZcccUV+Na3voXY2Fi1S6MIxHChgGtqakJVVRW2bNmCyspKPP3007jooovULitsOBwOfPTRR9i+fTu0Wi3mzp2LSy+9lMctU0AxXChgenp68Nxzz2HlypUYP348Hn/8cdx00018CO0n/f39qK2txe7du5GRkYEFCxagtLSUf98UEAwX8jvf8cIvvvgiJEka0vHCpJzTj1suKirCggULeNwy+R3DhfxGieOFSRk8bpkCjeFCfnHm8cLLly9HUVGR2mVFPI/Hg507d2LLli08bpn8iuFCimppacHTTz+NDRs2BNXxwvR1Zx63fP311+Oiiy4KuhM7KXQxXEgRJpMJL7zwAl5++WWkp6fjl7/8JW699Vb+sgpyPG6Z/IXhQqPidDrx5z//Gc8//zwcDgeWLVuGBx54IOiPF6avO3bsGDZs2ICWlhaUlpZiwYIFPG6ZRoXhQiMSDscL09fxuGVSEsOFhm3fvn
144okn8Omnn+Kaa67Bk08+iUmTJqldFinE7Xbjk08+4XHLNCoMFxqy048XnjhxIqqqqnDttdeqXRb5ie+45Y8//hhJSUk8bpmGheFCF2SxWPDSSy/hD3/4AxITE/HYY4/hjjvu4BkiEcJoNGLTpk04cOAAsrOzcdNNNyE/P1/tsijIMVzonE4/XthsNuPBBx/Eww8/HDHHC9PXNTc3Y8OGDWhvb+dxy3RBDBf6BiEEtm3bhieffBINDQ247bbb8Itf/IJbhhCEENi7dy/ee+89DAwM4PLLL8ecOXN43DJ9A8OFvubgwYN44okn8NFHH2HWrFmoqqrCjBkz1C6LgozL5cKOHTuwbds2HrdMZ8VwIQB/P154zZo1yMvLw5NPPsnjhemCzGYz3n//fXz22WdIS0vjcct0CsMlwp15vPBPf/pT3H333dyxmIalq6sLGzduxKFDh3jcMgFguEQsr9eLv/zlL/iXf/mXU8cL//jHP0ZKSorapVEIa2xsxMaNG9HR0YHy8nLccMMNPG45QjFcItDf/vY3PPHEE6ivr8eiRYvwq1/9Crm5uWqXRWHC6/Vi9+7dqKmpgd1ux5VXXolrr72Wxy1HGIZLBGlqasJTTz2F2tpaVFZW4qmnnsLFF1+sdlkUps523PJll13GzUwjBMMlAvT09OA3v/kNXn31VYwbNw6PP/44Fi1axIeuFBD9/f2oqanBnj17eNxyBGG4hDGHw4E//elPeOGFFyBJEn7yk5/gvvvu48FQpIoTJ05gw4YNOHz4MIqKirBw4UKMGzdO7bLITxguYUgIgXXr1uHXv/41Tpw4gXvuuQc/+9nPeLwwqY7HLUcOhkuY+fzzz7F8+XLs2bOHxwtT0DrzuOXZs2dj9uzZnAIfRhguYaK1tRVPP/001q9fj2nTpqGqqgpXXHGF2mURndfpxy3HxcXh+uuvR2VlJR/6hwGGS4gzmUx48cUX8fLLL0Ov1/N4YQpJfX192Lx5M/bt24esrCwsXLgQxcXFapdFo8BwCVFOpxOvvvoq/v3f/x0OhwMPP/wwHnzwQR4vTCGNxy2HD4ZLiBFCoKamBlVVVWhpacEdd9yBRx99lBcghQ0hBOrr67F582b09fWdOm6ZRz2EFoZLCDn9eOHZs2ejqqqKxwtT2DrzuOVrr70WV155JY9bDhEMlxDg9XqxbNkyvPXWWzxemCLOmcct33777SgoKFC7LLoAhkuACCHQ3t4+4tdbrVZoNJpR78+UnZ3NldGkCiEETCbTiF9vt9vR3NyMgoKCUT1bTElJ4TUQADzZJ0BcLhfWr1+PvLy8c/6M0+lER0cHUlJS/LKo7OjRo7j//vu5loBU4fF4sH//fuj1+nP+jNPphNlsRmJi4lm/p8nJyejp6RlxDUajEVdeeSUPNQsA/g0H0CWXXHLWjSJ9q5YfeeQRHDp0CKmpqXjsscdw2223KXqHtWvXLsXei2gk8vLyznqDJYTA0aNHUVtbi76+PqSmpuK2225DTk6OotfA0aNHFXsvOj8uhggC7e3tWLRoEbq6urB8+XIUFxfjRz/6Ed5//31w1JIiQUdHB15++WVIkoQFCxZACIE//vGPoxpGI3UxXFTm8XiwZMkSSJKEzZs345577sFrr72G6dOn4wc/+AGsVqvaJRL5ldvtxssvv4z4+Hg8+OCDmDlzJh588EHIsoxXXnmFN1ghiuGispUrV6KpqQmvvPIKxo4dC0mSoNVqsXLlSgwODuLxxx/nxUVhSwiBbdu2wWw24/vf//6pB/Xx8fFYsmQJOjo60NjYqHKVNBIMFxU5nU5UVVVh1qxZmDlz5tf+LDMzE3fffTfWrFmDgYEBlSok8i+3241t27Zh6tSp31gIPHnyZKSlpeHtt9/mDVYIYrio6LXXXoPNZsN//ud/fuOhpSRJ+NWvfgVJkvDss8+qVCGRf3300UfweDxYvHjxWa+BxYsXw2Qy4fjx4ypVSCPFcFGJ1+vFs88+i/LycmRnZ5/1Z+Lj43HDDTfgtddeg8vlCnCFRP7l9Xrx4YcforCwEPHx8Wf9maKiIsTExGDdunXsvYQYhotKdu/ejf7+fjz77LPnnGopSRKqqqrgdDrx3nvvBbhCIv9qbm6G0+nEt7/97XNeA7IsY/bs2WhtbYXD4QhwhTQaDBcVCCHw5JNPIjk5GWVlZef92ezsbGRnZ+Ppp5/mnRuFDSEENm3ahLi4uAtuuuo7l+izzz4LRGmkEIaLCux2O/bs2YP777//ggvEJEnCo48+itbW1lGtTCYKJi6XC+3t7bjqqqsueA3ExMQgKysL27dv5w1WCGG4qOCdd96BEAL/+I//OKSfX7RoEWRZxu9//3s/V0YUGPv27QMAzJo164I/K0kSbrjhBlitVvT39/u5MlIKwyXAhBB48cUXMWHChCHvHxYTE4OLL74Yr776Ku/cKOQJIbB161akpKQMeSPWkpISSJKE7du3+7c4UgzDJcBsNhtaW1vx0EMPDXnPJEmS8Itf/AJms5l7I1HIc7lc6O3txezZs4d8DciyjIKCAnz++ee8wQoRDJcAW7duHQDgtttuG9brLrnkEkRHR+P555/3Q1VEgVNfXw8AqKioGPJrJEnCvHnz4HQ6+ewxRDBcAux3v/sdsrKyzjmv/1w0Gg3mzJmDDRs2wOv1+qk6Iv/bvn07EhIShn02UW5uLmRZxrZt2/xUGSmJ4RJALpcLR44cwfe+970RbSP+85//HIODgzh48KAfqiPyP6/Xi87OTlx66aXDvgYkSUJxcTH27dvHobEQwHAJIIfDgdtvvx133HHHiF4/adIk6HQ6PPfccwpXRhQYTqcTpaWluOyyy4b9WkmSMHfuXLhcLhgMBj9UR0piuARQQkICXnjhBaSnp4/o9bIs46abbsLWrVvh8XgUro7I/2JjY3HvvfeO+KTV7OxsaDQabN26VeHKSGkMlwCTJGlUJ+s98sgjcLlc2Llzp4JVEQWOLMsjvgZkWUZpaSm++OKLYT17FELwhizAGC4hJi8vD0lJSXjmmWc47kwRae7cuXC73ejo6Bjya3p6evCHP/wBg4ODfqyMTsdwCTGSJOGee+7B7t27h7yRnxCCuypT2Bg7diyio6NRW1s75Busv/71rzh27BhiYmL8XB35MFxC0AMPPACv14vq6uoh/XxraysWLVrE88gpLMiyjIqKCjQ0NAxpaEwIgbq6OuTn549qSJqGh+ESgtLS0pCfn4/nnntuSHduv/nNb7B//34kJiYGoDoi/5szZw68Xu+QpuWbTCY4HA7MmTMnAJWRD8MlBEmShOXLl+PEiRNobW0978+63W6sX78e8+bNg0ajCVCFRP6VnJyMpKQkbNq06YI3WB988MGp7WMocBguIWrevHmIjY3F8uXLz3tx1dbWwuFw4PHHHw9gdUT+JUkS5s+fj56envMO93o8HtTV1WHSpEmQZf66CyT+bYeoqKgofO9738OWLVswMDBw1p8RQmD58uXIyclBXl5eYAsk8rOysjJERUXh3XffPecN1hdffAG3240FCxbweUuAMVxC2E9/+
lPIsnzO3sunn36KtrY2/Ou//isvLAo7Go0GV155JQ4ePAiLxfKNP/d6vVi/fj0yMzOh1+tVqDCyMVxCWHx8PO699168+eab33j24nQ68YMf/AC5ubn41re+pVKFRP41Z84caDQavPHGG1+7wRJC4LPPPoPFYsGSJUt4c6UChksIkyQJv/rVr5CcnIylS5fCarUCODnO/M///M8wGAz485//zLFmClvR0dFYuHAhGhsbsWfPHgghIIRAd3c3NmzYgNLSUmRnZ6tdZkSKUrsAGh2dTofXXnsNt9xyC2655Rbcf//92Lp1K9auXYuf/exnmDx5stolEvmNJEm47LLL8NVXX+Evf/kLenp6kJSUhNraWiQkJGDp0qXstaiE4RIGLr74YqxcuRK//OUv8ZOf/ASJiYl4/PHH8eCDD/LCorAnyzLuuusuvPPOO9ixYwe8Xi8mTJiAJUuWDPvMGFKOJLhBVUA4nU688sorKCoq8ttnWK1WtLW1IT09HXq9/hvB0tTUhO9973vQarV+q4HoXNxuNz799FNkZGT45f2FEDCZTHC73UhLSzvruq7u7m7MmjULUVG8r/Y3hkuACCFw+PBhVXsSQggUFRWxN0OqEEIExTksGRkZvAYCgOESInxbhms0Gl4YFLE8Hg9sNhvi4uK440SQ4zSiEFFfX49x48ahvr5e7VKIVNPZ2YmnnnoKnZ2dapdCF8BwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXEKAEAJ9fX3wer3o6+uDEELtkogCTgiB3t5e9Pf3o7e3l9dBkGO4BDGTyYQVK1aguLgYs2fPRk9PD2bPno3i4mKsWLECJpNJ7RKJ/O7062D69On405/+hOnTp/M6CHKSYPwHpdraWixevBg2mw0AvnaXJkkSACAuLg5r167FvHnzVKmRyN94HYQuhksQqq2txY033gghBLxe7zl/TpZlSJKETZs28cKisMPrILQxXIKMyWRCdnY27Hb7eS8oH1mWodPp0N7ejpSUFP8XSBQAvA5CH5+5BJlXX30VNpttSBcUAHi9XthsNqxcudLPlREFDq+D0MeeSxARQqC4uBjNzc3DmgkjSRIKCgrQ1NR0ahyaKFTxOggPDJcgYjQakZGRMarX6/V6BSsiCjxeB+GBw2JBxGKxjOr1ZrNZoUqI1MPrIDwwXIJIQkLCqF6fmJioUCVE6uF1EB4YLkFEr9ejsLBw2OPFkiShsLAQaWlpfqqMKHB4HYQHhksQkSQJDzzwwIi2tVi2bBkfYlJYkCQJDz/88Ihey+sgeDBcgkhnZye0Wi20Wu2QLxBZlhEXF4e77rrLz9URBc7dd9+NuLg4yPLQfkVJkoTY2Fj8wz/8g58ro6FiuASJPXv24M0330R6ejrWrFkDWZYveGH5ViZXV1dz4RiFlZSUFKxduxaSJF3wRst3HTz//PPo7u6G0+kMUJV0PpyKrLLBwUHU1taiubkZlZWVuPzyy6HRaM67p5JPfHw8qqurMXfu3ECXTRQQ7733HhYvXgy73Q5Jks65t1h1dTWuuOIKtLS0wO12IycnhzdcKmO4qOjEiRPYvHkzXC4X5s2bh4KCgq/9uclkwsqVK/HSSy/hyJEjp/57VlYWrrrqKvzhD39AampqoMsmCpienh589dVX2LVrF37/+99/7TooLCzEsmXLcPfddyM5ORkA4PF4cOzYMZhMJmRkZGDcuHFDHlojZTFcVCCEwO7du/Hxxx8jKysL8+fPP+/0Sd85FmazGYmJifB6vVi1ahVuuOEGlJaWBrByosBqaGhAbGwscnNzv3EdpKWlnXPIzGg0or29HbGxscjPz0dMTEyAKyeGS4DZ7XbU1NSgpaUFF198MWbNmjWiO6vq6mrY7XYsXbrUD1USqW9gYAAtLS0oKipCXFzcsF9vt9tx9OhRuFwu5OTksJcfYOwvBtDx48exatUqdHV14eabb8YVV1wx4i57ZWUluru70d7ernCVRMHBYDAgLi5uRMECADqdDqWlpUhOTkZLSwva2tqGvBEmjV6U2gVEAiEEdu3ahU8//RTjxo3D/PnzR70KOTc3F3q9Hnv27EF2drZClRIFB7vdDqvVitzc3FG9jyzLyMvLQ2JiItrb22G1WpGXl4fY2FiFKqVzYc/Fz2w2G6qrq/HJJ5/gkksuwa233jrqYPGprKxEc3Mz+vr6FHk/omBhMBig1WqRlJSkyPvp9XqUlJRACIGGhgb09vYq8r50bgwXP2pra8OqVatgNBqxePHiET9fOZfS0lLExcWhrq5OsfckUpvL5UJ/fz/S09MVXW2v0+kwceJEpKSkoLW1FceOHeMwmR8xXPxACIFPP/0Ub7/9NtLS0nDnnXciJydH8c/RaDQoKyvDwYMHYbfbFX9/IjUYjUbIsuyXPcJkWUZubi5yc3PR19eHhoYGDA4OKv45xHBRnMViwdtvv42dO3di1qxZWLx4MeLj4/32edOnTwcAfPHFF377DKJA8Xq96O3tRVpaml/Xp6SlpWHixIkATk537unp8dtnRSqGi4JaW1uxatUq9PX14dZbb8Wll17q9030dDodJk+ejH379sHtdvv1s4j8rbe3F16vF+np6X7/rNjYWEycOBGpqak4duwYWltbOUymIIaLArxeLz7++GNUV1cjMzMTd955Z0BncFVUVMBms6GhoSFgn0mkNCEEjEYjkpOTER0dHZDPlGUZOTk5yMvLg8lkwqFDhzjErBCGyyj5hsE+//xzXHHFFbj55ptHPC9/pFJTU1FQUIA9e/YE9HOJlDQwMACn0zmqI45HKjU1FaWlpZBlGQ0NDTAajQGvIdwwXEbh6NGjeO2119Df34/bbrsNF198sWpnSVRWVqKnpwetra2qfD7RaBkMBiQkJECn06ny+TExMSgpKYFer0dbWxtaWlrg8XhUqSUccBHlCPiGwXbv3o38/HzMmzdPtQvCJzs7G5mZmdizZ8+oF54RBZrNZoPNZkNeXp6qdciyjAkTJiAhIQHHjh2DzWZDfn6+6td3KGLPZZgGBgbw1ltvoa6uDldddRUWLVoUNF+8yspKtLa2cuYLhRyDwYCYmBjFFk2Olm+YTKPRoKGhAQaDQe2SQg7DZRiOHDmC1atXw2q14jvf+Q4qKyuD6kjVkpISJCQk8NkLhRSn03lq0WQw8Q2Tpaeno729HUePHuUw2TAwXIbA4/Fg+/btWL9+PcaPH4+lS5ciKytL7bK+QZZllJeX49ChQ7Ba
rWqXQzQkRqMRGo0mKHctliQJ2dnZyM/Ph9lsxqFDh04d4Efnx3C5gP7+frz55pvYv38/Zs+ejZtuuimoN72bNm0aZFnG/v371S6F6II8Hg96e3uRnp4e1Id6paSkoLS0FFFRUWhsbER3d7faJQW94G3NINDU1IRVq1ZhcHAQS5YsQXl5udolXVBMTAymTp2K/fv3c1ElBb3e3l4IIaDX69Uu5YK0Wi1KSkqQkZGB48ePo7m5mcNk58FwOQu3241t27Zh48aNyM3NxdKlSzFmzBi1yxqy8vJyOBwOHDx4UO1SiM7Jt2gyNTUVUVGhMXFVkiSMHz8eBQUFsFgsHII+D4bLGUwmE958800cOHAA1157LRYsWBByR6QmJyejqKgIdXV14EGjFKz6+/vhcrmC7kH+UCQnJ6O0tBTR0dFoamriMNlZMFxO09DQgNWrV8PpdOL222/HjBkz1C5pxCorK9HX14ejR4+qXQrRWRkMBiQmJgb1M8zz0Wq1KC4uRmZmJo4fP44jR45wKPo0DBecHAb74IMPsHnzZuTn52Pp0qWqbEGhpKysLGRlZXFaMgUli8UCu90ekr2W00mShHHjxqGwsBA2mw2HDh2CxWJRu6ygEPHh0tvbizVr1uCrr77CnDlzMH/+fGi1WrXLUkRlZSXa29vZZaegYzQaERsbi8TERLVLUURSUhJKS0sRExODw4cPo6urS+2SVBfR4fLVV1/h9ddfh9frxe23345p06apXZKiCgsLkZSUxN4LBRWHw4GBgYGQHx04U3R0NIqKijBmzBicOHEChw8fjuhhsogMF5fLhS1btqCmpgbFxcW44447Qr57fjayLKOiogKNjY3sqlPQMBqNiIqKQkpKitqlKE6SJGRlZaGoqAh2uz2ih8kiLlx6enqwZs0aNDQ0YO7cuZg3b17Azo5Qw5QpUxAdHY29e/eqXQoR3G73qUWTwbR1ktISExNPDZM1NTWhs7Mz4mZuRlS4fPnll3j99dcBAHfccQemTJmickX+p9VqMW3aNNTX18PpdKpdDkW4np4eSJKEtLQ0tUvxO98wWVZWFjo6OnDkyBG4XC61ywqYiAgXl8uFmpoabNmyBaWlpbjjjjtCYkWwUsrKyuByufDll1+qXQpFMCEEenp6QmrR5GhJkoSxY8eiuLgYg4ODOHToEMxms9plBUTYh4vRaMTq1atx+PBh3HDDDbjuuusi5ovtk5iYiJKSEuzduzfiuuYUPPr6+uB2u8Py+eaFJCQkoLS0FDqdDocPH0ZHR0fYX4thHS5ffPEFXn/9dURFRWHp0qUoLS1VuyTVVFRUoL+/H4cPH1a7FIpQRqMRSUlJIbfjhVKioqJQVFSEcePGobOzE4cPHw7rYbKwDBen04nNmzdj69atmDJlCpYsWRKU23kH0pgxY5Cdnc1pyaQKs9mMwcHBsJt+PBJjxoxBcXExHA4HDh06hIGBAbVL8ouwC5fu7m6sXr0aR48exY033ohvfetbETcMdi4VFRXo6OhAR0eH2qVQhDEYDNDpdIiPj1e7lKDgGyaLi4vDkSNHcOLEibAbJgurcNm3bx/eeOMNaLVaLF26FCUlJWqXFFQKCgqQkpLC3gsF1ODgICwWC3stZ4iKikJhYSHGjx+P7u5uNDU1hdWMzrAIF4fDgY0bN+LDDz/EtGnTsGTJkrBcoDVakiShoqIChw8fRn9/v9rlUIQwGAyIjo5GcnKy2qUEpczMTBQXF8PlcuHQoUNhc22GfLh0dXVh9erVOHbsGBYuXIhrrrkGGo1G7bKC1pQpUxATE8NFlRQQLpcLJpMp7BdNjlZ8fDxKS0uRkJCA5uZmHD9+POSHyUI6XOrq6vDGG28gNjYWS5cuRVFRkdolBb2oqCjMmDEDBw4cgMPhULscCnORtGhytDQaDQoKCjB+/HgYDAY0NjaG9DBZSIbL4OAg1q9fj48++gjl5eX47ne/yy73MMyYMQNerxf19fVql0JhzOv1oqenB2lpaRxNGIbMzEyUlJTA7Xbj0KFDMJlMapc0IiEXLh0dHVi1ahWOHz+ORYsW4aqrruIXd5h8XfC9e/fC6/WqXQ6Fqb6+Png8nohcNDlacXFxKC0tRWJiIo4ePYr29vaQGyYLmXARQmD37t146623kJCQgDvvvBMFBQVqlxWyKioqYLFY0NjYqHYpFKaMRiOSk5PD5nykQNNoNMjPz0d2djaMRiMaGxtDaig7JMLFbrfj3XffxY4dO1BRUYHvfOc7YXPIkFrS09ORm5vLacnkFwMDA3A4HJx+rICMjAyUlJTA4/Hg0KFD6OvrU7ukIQn6cDl+/DhWrVqFzs5O3Hzzzbjyyishy0FfdkiorKxEd3c32tvb1S6FwozBYEBcXBzi4uLULiUsxMXFYeLEiUhOTkZLSwva2tqCfkg7aJeuCyHw+eef45NPPsG4ceMwf/58JCQkqF1WWMnNzYVer8eePXuQnZ2tdjkUJux2O6xWK3Jzc9UuJaxoNBrk5eUhISEB7e3tsFqtyM/PD9q92oKyC2Cz2fDOO+/g448/xiWXXIJbb72VweInlZWVaG5uDpmuNgU/g8EArVaLpKQktUsJS+np6Zg4cSK8Xm9QD5MFXbi0tbVh1apVMBgMuOWWWzBr1iwOg/nRxIkTERcXh7q6OrVLoTDgcrnQ39/PRZN+ptPpUFpaipSUFLS0tODYsWNBN0wWNL+1hRD47LPPsHbtWqSlpeHOO+9ktzoAoqKiUFZWhoMHD8Jut6tdDoU4o9EIWZa5aDIAZFlGbm4ucnJy0NfXh4aGBgwODqpd1ilBES5WqxVr167FZ599hssuuwyLFy/m7qkBNH36dAAnz78hGimv14ve3l6kpaVxtCGA9Ho9Jk6cCABoaGhAb2+vyhWdJAkFVuYIIUZ1dGdraytOnDiBSZMmjWrDycTExIjtio+2DRobG2GxWFBRUTGqOiK5DUKdEGJUh1dZLBZ0dHQgPz9/VMdcREdHR+x3aDRt4PV60dXVBYvFgoKCglEtLleiDRQJF4/Hg7q6OkiShKSkpGHftQgh4Ha7ER0dPezPFkLA4XDAbrejoqIiYlfr+9oAAJKSkob99+B2u6HRaEb0hfJdEFarNaLbINR5vV4YjcZRzT7yeDwjan8hxKl9tNLT0yO25+P1emEwGCDL8oh+HwInr+WRhPvpbZCRkTHqNlBsKnJGRgY2bdqEm2++GePGjVPqbS+ou7sb7777LubMmROwzwxWGRkZ2LhxI2655ZaAtoHRaMSGDRswe/bsgH0m+YdOp4PRaMT48eMDurLearWivb0dOTk5AfvMYKXT6dDa2orCwsKArhOyWCxoa2tDXl6eIu+n2O2BVquFx+PBgQMHlHrLIfnyyy9hs9m4xQT+3gaBfnZy4MAB9Pf3sw3CgCzLMJvNAd8ssaenB16vl71enFzP4vF4YDQaA/q5vjZQqteoWLhIkoSMjAw0NzcHbIM1IQSampqg1+sjdoz2TJmZmQFvg8bGRs4OCiPR0dHo7e0N6HfIbDZzNf9pdDod+vv7A9oGAwMDiraBogOb06d
Px+DgIDwej5Jve05erxc2mw1Tp04NyOeFghkzZsDhcMDtdgfk87xeL6xWK9sgjKSkpAT0HBEhBDweD29QTpOeng6PxxPQcPF4PNDr9Yq9p6Lh4tul+Pjx40q+7Tl1dnYCAIqLiwPyeaHA1wZtbW0B+byOjg4AODUVkkKf75d8oNZMWK1WAOCK/tP4zqeyWCwB+TxfGyh5Lpai4RIbG4uoqCjs27dPybc9p3379kGj0bA7fZqYmBhER0cHrA327t3LNggzvmdnPT09Afk8o9EISZL4vOU0vpmbBoMhIJ9nMBgUbwNFw0WSJEyYMAFtbW1+784JIdDS0oLx48fzectpJElCXl5eQA4XEkKgtbUV2dnZbIMwE6gxfyEELBYL10edwbesw2KxBKQNzGYzEhISFG0DxSeTl5eXw+12+30rkcHBQbhcLpSXl/v1c0JReXk5PB6P37vUdrsdLpdr1AsvKbhIknRqzN/f+1W53W4IIXha5VlkZGScWgPoT75nO0qfvaN4uPjWV3z11VdKv/XX+E5QnDBhgl8/JxSNHTsWAPw+LdzXxtyuP/z4nn8MDAz49XN8O/pyWPWbfFtg+Xt40rddjNI7zyseLhqNBklJSdi/f7/funNCCOzduxcJCQkcpz0L38aB9fX1fm2D/fv3j2g3AAp+sixDo9HAYDD49TvU09MDrVbLIbFziImJQU9Pj1/bwGg0+mXLHcXDRZIklJWVYWBgwG9Tkr1eL0wmE6ZPn84v5VlIkoSKigpYrVa/dak9Hg/6+/tRVlbGNghDkiQhLS3NrzPGfNsGcXv+s/OtHXS5XH4bnvRt+eKPNvDLBj6TJk0CALS0tPjj7XHs2DEAwJQpU/zy/uGgpKQEwN+HD5V29OhRAMDkyZP98v6kPt+aB980VaX5htxGs1ltuEtNTQUA9Pf3++X9fZvd+mONkV/CJTY2FrGxsdi1a5fi3Tnf8cdarZbjtOeh1WoRHx+P3bt3+6UNdu3adaqdKTxFR0dDlmV0dXX55TvU3d0NjUbDYdXzkGUZUVFRfmuDrq4uaDSaUe1ifS5+CRdJkjB16lQYDAbFu3NCCHR0dGDy5MnsSp+Hb3iyt7dX8eFJ386tHJYMb5IkISUlxS89FyEEBgcHuXXTBfiGxhwOh1+eu9hsNqSlpfmlDfy2r3V5efmpdRBK8q2hqaysVPR9w5HvEDClh8aOHj0KIQTKysoUfV8KPpmZmQCUHxrzDcdwCvKF+YYnld5M1NcGSk9B9vFbuMTFxSE2NhaffvqpYokrhMAnn3yCmJgYxafNhaOYmBgkJiZi586dirbBp59+Cp1Ox2HJCOAbGuvs7FT0O9TZ2YmoqCgOiQ2BRqNBdHS0okNjvjbwvbc/+C1cfDOWDAaDYjOWPB4Purq6OENpiCRJwiWXXAKTyQSHw6HIe7pcLhiNRlx00UVsgwjgW1Bps9kU+8Xm9XrhcDiQkZHB79AQSJKEMWPGwOFwKDbELYSAzWbz60w9vx73NmPGDADKLeb78ssvAYCr8ofBN3PPd0rlaO3fvx/A34fcKPz5hq58Cx5Hy7cokLsgD51v1phSZ7z4Fk76a0gM8HO4aLVapKenKzJrTAiBnTt3Ii0tbVTHsEaaqKgojB8/HnV1dYq0we7du5GZmem3rjQFH41Gg5iYGEWGZYQQMBgM0Ol0EXuU8UjIsoy4uDh0d3cr0gadnZ2IiYnx67CkX1tXkiRcffXVsNvto07c3t5e2Gw2XHXVVexKD4OvDVwuF06cODGq9+ru7sbg4CBmz57NNoggkiQhKysLbrd71MOrdrsdHo8H48aN43doGCRJwvjx40+dYTUavvOe/L3pr99vHbKzsxEdHY0PP/xwxIkrhMCHH36I6OhonrE9ApmZmdDpdNi2bduo2mDbtm3QarWn9o+jyJGYmAhZlnHixIlRfYeOHz9+6i6chicuLg4ajWZUO54LIdDe3g5ZlpGYmKhwhV/n93CRJAmXXnopTpw4MeLEtdvtaG9vx0UXXcSu9AhIkoQrrrgCRqNxxCt9rVYrOjs7MXPmTN5xRiDfeguLxTLih8oulwt2ux1jx47ld2gEfD1I327kI+F2u2GxWJCZmen3NgjIb+qysjLIsjyi3osQAtu3b4csy9zafRQmTZqEqKgofPDBByNqg61bt0Kj0fBBfgTzPfwdSe/F12vx7VlGI+Nb8DiSM7NObwPf+iV/Cki4aDQaVFZW4vDhw8NejGWz2dDU1ISysjK/bFEQKTQaDWbNmoW2trZhL8ayWCxobm7GRRddxHUJEcw3LdlkMg17eYHL5YLZbEZmZiZHH0ZBlmWMGTMGZrMZTqdzWK91u90wmUwBmwIekFb2DY1FRUWhpqZmyIkrhEBtbS00Gg2HYxRQVlYGrVaLzZs3D6sN3nvvPURHR+OSSy5hG0QwSZJODWkNZ9zft1OHLMt+nfoaKXwB3draOuw2OL0N/S1gtxBRUVG4+uqr0d7ePuQtYdra2nDs2DFceeWV7LUoQKPR4LrrrkN3dzeampqG9MVsbW3F8ePHMXv2bLYBnRr3N5vNQxqFEEJgYGAAdrsd48ePZ69FAbIsIzs7GzabbciHuZnNZlgsloAeSR7Qlp46dSrS09Px3nvvXfAY5MHBQWzatAl6vR7Tpk3jHbNCiouLkZWVhS1btlxwgsXg4CA2b96M9PR0Hm9AAE6Gi16vR0xMDFpbWy/4cN/tdqOtrQ06nY5b6ysoNTUVOp0Ora2tFxyidLvdaGlpQWxsrN82qTybgIaLJElYtGgRPB4P1q1bd86/FLfbjXfffRcejweLFi3i3Y6CJEnCTTfdBEmSUF1dfc5xW7fbjXfeeQcejwff/va3Ge50iiRJyMvLg9frRUtLyzl3Pvd4PKfO/cnLy+N3SEGSJCE/Px9CCDQ3N5+zDbxeL44cOQIAKCgoCGgbBPy3dkJCAhYsWACDwYB169bBarWeGp4RQsBqteLdd99FZ2cn5s+f7/e52JFIp9Nh0aJF6O3tRXV1NSwWyzfaYN26dejq6sKCBQu4SSh9g1arRU5ODqxW66m759O/Q06nE83NzXA4HMjLy+OQqh9ER0cjPz8fNpsNzc3NcLlcX2sDl8uFI0eOwG63Iy8vL+C7akhCgd3oPB4Puru7kZWVNaSfF0LgyJEjqK2thVarxdSpU6HX69Hb24sDBw7A4XDguuuuQ3Fx8ZCT9sSJExgzZkzEzmbybeo51AWOQgi0tLRg8+bNiIqKwrRp06DX69HX14f6+no4HA7MnTt3WG1w/PhxjB07NmLbINR5vV7Y7XbEx8cP6eeFEDCZTGhvb0dUVNSprZkGBwdPnfuek5ODxMTEIX+HrFZrRG8NM5I26O/vx7Fjx6DRaE4NWfrawOv1IicnB8nJyUNuA4vFgri4uFG3gSrhApz8S+nt7cUnn3yC48ePw+VyndoHa9asWcM+RIjhMrxwAU62QV9fHz7++GO0t7d/ow2Gu2MqwyW0DfcXG/D3Q786OztPjULIsoyEhASMHTsWWq12WN8hhsvI26CjowNWqxVer/dUG2RlZS
EmJmZYbRB04bJ///5TO3cOl9VqxeDgIGJjY4f1l3q6vr4+zJgxI2J/sY2mDXzbbw8ODiImJgbx8fEjGpuN9DYIdV6vFz09PSPeGNblcsHj8SAqKmrEw2AOhwN6vT6iw8VoNI7o+HAhBNxuN9xu96k2GMl1PDg4iPT09OAIF98dsNpSU1Mj9qEh24BGSwih2Lk/ozHcO+1wEk5toEi4jJbL5YLFYhlxz4dGz+FwwG63c7oojZhvx15OAFGP2+2G0+kMio1Bg6LvuXv3bqxevRqDg4NqlxKxtm/fjrVr155zSiPRhZhMJjQ3Nw97WxJSjm+BtFInVo5GUITL9OnT4fV6FTuxkoavvLwcAwMDOHz4sNqlUIgyGAxISkqCVqtVu5SI5Htek56eHhTPPYMiXOLj41FaWoq9e/cGReJGoszMTEyYMAF79uxRuxQKQWazGQ6Hg3uHqai3txcejydo2iAowgUAKisrYbFY0NTUpHYpEauyshKdnZ2jPrGSIo/BYEBcXNyIZ3vS6HV3dyMlJSVoeo5BEy56vR65ubm8c1ZRXl4eUlNT2QY0LHa7HRaLBenp6WqXErH6+/vhcDgCck7LUAVNuAAn75y7u7vR3t6udikRSZKkU+fuDPfMF4pcRqMR0dHRSE5OVruUiNXd3Y34+Pig6jkGVbjk5uZCr9fzzllFkyZNgk6nw969e9UuhUKAy+WCyWQa9m4OpBybzXbq6OJgElThApzsvTQ3NwfFgsBIFBUVhRkzZuDAgQOcGk4X1NPTw6OLVdbd3Y2YmJig6zkGXbiUlpYiLi4OdXV1apcSsWbMmAEhBOrr69UuhYKYb7sYvV4fFFNfI5HT6Qzo0cXDEXThotFoUFZWhoMHD17wQDHyj7i4OEyaNAn79u3j1HA6p76+Pni9Xuj1erVLiVgGgwGyLAdlGwRduAAnF1UCwBdffKFyJZGroqICFosFjY2NapdCQUgIAYPBgOTk5KCZ+hppPB4Penp6FNlk0h+CryKcPMxq8uTJ2Ldv3wWP8CT/0Ov1yMvL4+QKOiuz2Qyn08npxyryndcSLIsmzxSU4QKcvHO22WxoaGhQu5SIVVlZCYPBgLa2NrVLoSBjMBgQHx8fFBskRiJfzzE1NTXgJ0wOVdCGS2pqKgoKCnjnrKKcnBykp6ezDehrbDYbrFYrey0q6u/vh9PpDLrpx6cL2nABTt459/T0oLW1Ve1SIlZlZSWOHj2K3t5etUuhIGE0GqHVapGUlKR2KRGru7sbCQkJ0Ol0apdyTkEdLtnZ2cjMzOSds4omTpyI+Ph4Tg0nACenvvb39wfl1NdIYbVaYbVag7rXAgR5uAAn75xbW1thNBrVLiUi+aaGf/XVV5waTujp6YEsyzzYT0XBumjyTEEfLiUlJUhISOCds4p8U8P379+vciWkJt/U10g+415tDocDJpMp6HstQAiEiyzLKC8vx6FDh2C1WtUuJyLFxsZiypQp2L9/P6eGR7De3l4IIYJywV6kMBgMiIqKContdoI+XABg2rRpkGWZd84q8k0NP3TokNqlkAqEEDAajUhJSQnaqa/hLtgXTZ4p+CsEEBMTg6lTp/LOWUUpKSkoLCzk5IoI1d/fD5fLxenHKjIajRBCBO2iyTOFRLgAJ894dzgc+PLLL9UuJWJVVlait7cXLS0tapdCAWY0GoN+6ms48y2aTEtLQ1RUlNrlDEnIhEtycjKKiopQV1cHIYTa5USk8ePHY8yYMey9RBir1QqbzRYyd8zhqK+vDy6XKyQe5PuETLgAJ++cTSYTmpub1S4lYlVWVuLYsWMwGAxql0IBYjAYEBMTg8TERLVLiVjd3d1ISkpCbGys2qUMWUiFS1ZWFrKysjgtWUXFxcVITExkG0QIp9OJgYEB9lpUZLFYYLfbQ6rXAoRYuAAn75zb29vR1dWldikR6fSp4RaLRe1yyM98U19TUlLULiVidXV1QafThVzPMeTCpaioCMnJybxzVtHUqVOh0Wg4NTzMeTwe9PX1cdGkigYHBzEwMBByvRYgBMNFkiSUl5ejoaEBZrNZ7XIiUkxMDKZNm4YvvvgCLpdL7XLIT3p6erhoUmUGgwHR0dEhud1OyIULAEyZMgVarRb79u1Tu5SI5ZsafvDgQbVLIT8QQqCnpwepqakhM/U13LjdbvT29iI9PT0kNwkNyXDRarWYNm0a6uvr4XQ61S4nIiUlJaG4uJhTw8OUyWTiokmV+TbrDdU2CMlwAU7eObtcLi6qVBGnhocvg8GAxMTEkJr6Gk68Xm/ILZo8U8iGS0JCAkpKSlBXVwev16t2ORFp7NixGDduHBdVhhmLxYLBwUFOP1ZRX18f3G53SD7I9wnZcAFObqY4MDCAI0eOqF1KxKqsrMTx48c5NTyMGAwGxMbGIiEhQe1SIlZ3dzeSk5MRExOjdikjFtLhMmbMGGRnZ/POWUWFhYVITk5mG4QJh8MBs9nMXouKBgYGMDg4GNK9FiDEwwU4eefc0dGBjo4OtUuJSJIkoaKiAo2NjZwaHgZ8U1+5aFI93d3diIuLC/meY8iHS35+PlJTU3nnrCLf1PC9e/eqXQqNgtvtPrVoMhSnvoYDu90Os9kc8r0WIAzCxXfnfPjwYfT396tdTkSKjo7G9OnTOTU8xPX09ECSJC6aVFF3d3fY9BxDPlwAYPLkyYiJieGds4rKysrgdrtx4MABtUuhEfB6vacWTWo0GrXLiUgulwt9fX3IzMwMi55jWIRLVFQUZsyYgQMHDsDhcKhdTkRKSEjAxIkTsXfvXk4ND0EmkwlutztkF+yFA6PRGFY9x7AIFwCYMWMGvF4v6uvr1S4lYvmmhh8+fFjtUmiYDAYDkpKSQnrqayjzLZrU6/Vh03MMm3CJj49HaWkp75xVlJmZiQkTJnByRYgxm81wOBycfqyi3t5eeDyesHiQ7xM24QKcvHO2WCxobGxUu5SIVVFRgc7OTpw4cULtUmiIDAYDdDod4uPj1S4lYnV3dyMlJQVarVbtUhQTVuGSnp6O3Nxc3jmriFPDQ8vg4CAsFgt7LSrq7++Hw+EIq14LEGbhApxcVNnd3Y329na1S4lIp08NN5lMapdDF+BbNJmcnKx2KRGru7sb8fHxYddzDLtwyc3NhV6v552ziiZPnozY2FhODQ9yLpcLJpMpZM8LCQc2mw0WiyXsei1AGIYLcLL30tzcjL6+PrVLiUi+qeFffvklBgcH1S6HzsG3aDItLU3tUiJWd3c3tFptWPYcwzJcSktLERcXh7q6OrVLiVhlZWWcGh7EfIsm09LSwmbqa6hxOp0wmUxhs2jyTKF5Cs0FaDQalJWVYdeuXZg5c+aprmdCQgL3TQqQuLg4lJaWYt++fSgvL4fJZGIbqMh3bLGvDQDA4/Fw0WQAndkGg4ODkGU5bBZNniksey4AkJOTg+3bt6OkpAQZGRnIz89HRkYGiouLsWLFCj5sDoCioiJs3rwZBQUFbAOVmEwmrFixAsXFxV9rg4qKClRXV8Nms6ldYtg7VxvMnDkT69atw8DAgNol+ocIQzU1NSI+Pl4A+
MY/kiQJSZJEfHy8qKmpUbvUsMU2UJ+vDXx/32drC7aBf0VyG4RduNTU1AiNRiNkWT5rQ/r+kWVZaDSasGxUtbEN1Mc2UF+kt4EkhBDK94fUYTKZkJ2dDbvdPqQtYGRZhk6nQ3t7e1hscR0M2AbqYxuoj20QZs9cXn31VdhstiHvLeb1emGz2bBy5Uo/VxY52AbqYxuoj20AhE3PRQiB4uJiNDc3Yzj/S5IkoaCgAE1NTZzBNEpsA/WxDdTHNjgpbMLFaDSOan8ko9EYtlMCA4VtoD62gfrYBieFzbCYxWIZ1evNZrNClUQutoH62AbqYxucFDbh4lsYNlKJiYkKVRK52AbqYxuoj21wUtiEi16vR2Fh4bDHKiVJQmFhIfdXUgDbQH1sA/WxDU4Km3CRJAkPP/zwiF67bNmysHiApja2gfrYBupjG5wUNg/0Ac4tDwZsA/WxDdTHNgijngsApKSkYO3atZAkCbJ8/v81WZYhSRKqq6vDpjGDAdtAfWwD9bENEN57i51tP5/T97Wqra1Vu9SwxTZQH9tAfZHcBmEZLkII0dfXJ1asWCEKCwu/1qCFhYVixYoVwmQyqV1i2GMbqI9toL5IbYOweuZyNkII9Pb2wmw2IzExEWlpaWHzwCxUsA3UxzZQX6S1QdiHCxERBV5YPdAnIqLgwHAhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLF/R96o8qByf2ilAAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "model.plot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8230d562-2635-4adc-b566-06ac679b166a", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/tutorials/API_10_device.ipynb b/tutorials/API_10_device.ipynb new file mode 100644 index 00000000..99b43ede --- /dev/null +++ b/tutorials/API_10_device.ipynb @@ -0,0 +1,173 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "134e7f9d", + "metadata": {}, + "source": [ + "# Demo 10: Device\n", + "\n", + "All other demos have by default used device = 'cpu'. In case we want to use cuda, we should pass the device argument to model and dataset." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "7a4ac1e1-84ba-4bc3-91b6-a776a5e7711c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cpu\n" + ] + } + ], + "source": [ + "from kan import KAN, create_dataset\n", + "import torch\n", + "\n", + "torch.use_deterministic_algorithms(False)\n", + "\n", + "#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", + "device = 'cpu'\n", + "print(device)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2075ef56", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "checkpoint directory created: ./model\n", + "saving model version 0.0\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "| train_loss: 6.83e-01 | test_loss: 7.21e-01 | reg: 1.04e+03 | : 100%|█| 50/50 [00:19<00:00, 2.62it\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "saving model version 0.1\n" + ] + } + ], + "source": [ + "model = KAN(width=[4,100,100,100,1], grid=3, k=3, seed=0).to(device)\n", + "f = lambda x: torch.exp((torch.sin(torch.pi*(x[:,[0]]**2+x[:,[1]]**2))+torch.sin(torch.pi*(x[:,[2]]**2+x[:,[3]]**2)))/2)\n", + "dataset = create_dataset(f, n_var=4, train_num=1000, device=device)\n", + "\n", + "# train the model\n", + "#model.train(dataset, opt=\"LBFGS\", steps=20, lamb=1e-3, lamb_entropy=2.);\n", + "model.fit(dataset, opt=\"Adam\", lr=1e-3, steps=50, lamb=1e-3, lamb_entropy=5., update_grid=False);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f182cc1-51bf-4151-a253-a52fe854919e", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "f6f8125e-d26d-4c97-9e5f-988099bb4737", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cuda\n" + ] + } + ], + "source": [ + "device = 'cuda'\n", + "print(device)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "95017dfa-3a2a-43e0-8b68-fb220ca5abc9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "checkpoint directory created: ./model\n", + "saving model version 0.0\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "| train_loss: 6.83e-01 | test_loss: 7.21e-01 | reg: 
1.04e+03 | : 100%|█| 50/50 [00:01<00:00, 26.90it\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "saving model version 0.1\n" + ] + } + ], + "source": [ + "model = KAN(width=[4,100,100,100,1], grid=3, k=3, seed=0).to(device)\n", + "f = lambda x: torch.exp((torch.sin(torch.pi*(x[:,[0]]**2+x[:,[1]]**2))+torch.sin(torch.pi*(x[:,[2]]**2+x[:,[3]]**2)))/2)\n", + "dataset = create_dataset(f, n_var=4, train_num=1000, device=device)\n", + "\n", + "# train the model\n", + "#model.train(dataset, opt=\"LBFGS\", steps=20, lamb=1e-3, lamb_entropy=2.);\n", + "model.fit(dataset, opt=\"Adam\", lr=1e-3, steps=50, lamb=1e-3, lamb_entropy=5., update_grid=False);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8230d562-2635-4adc-b566-06ac679b166a", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tutorials/Unchecked_API_10_device.ipynb b/tutorials/Unchecked_API_10_device.ipynb deleted file mode 100644 index cbac3cf4..00000000 --- a/tutorials/Unchecked_API_10_device.ipynb +++ /dev/null @@ -1,110 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "134e7f9d", - "metadata": {}, - "source": [ - "# Demo 10: Device\n", - "\n", - "All other demos have by default used device = 'cpu'. In case we want to use cuda, we should pass the device argument to model and dataset." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "7a4ac1e1-84ba-4bc3-91b6-a776a5e7711c", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cuda\n" - ] - } - ], - "source": [ - "from kan import KAN, create_dataset\n", - "import torch\n", - "\n", - "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", - "print(device)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "2075ef56", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "train loss: 5.78e-03 | test loss: 5.89e-03 | reg: 7.32e+00 : 100%|██| 50/50 [00:26<00:00, 1.85it/s]\n" - ] - } - ], - "source": [ - "model = KAN(width=[4,2,1,1], grid=3, k=3, seed=0, device=device)\n", - "f = lambda x: torch.exp((torch.sin(torch.pi*(x[:,[0]]**2+x[:,[1]]**2))+torch.sin(torch.pi*(x[:,[2]]**2+x[:,[3]]**2)))/2)\n", - "dataset = create_dataset(f, n_var=4, train_num=3000, device=device)\n", - "\n", - "# train the model\n", - "#model.train(dataset, opt=\"LBFGS\", steps=20, lamb=1e-3, lamb_entropy=2.);\n", - "model.train(dataset, opt=\"LBFGS\", steps=50, lamb=5e-5, lamb_entropy=2.);" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "3acdcdee-71ca-42a1-98aa-7f7df4a29077", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAZcAAAHiCAYAAAAkiYF/AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABGtElEQVR4nO3deXyU9Z0H8M/zTDLJ5E4mCQRC7oRw5/ACL7QIiiBVtEVxPVrXVldp66utblvRaNd17bqK22O3ddeKIGolIpcJCmKpBwgBDCJJICQkkGMmySRzZc7f/sEOReTI8cw8c3zer5cvXy/JzHzlN08+z+/3/A5JCCFARESkIFntAoiIKPwwXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUlyU2gUQhQIhBHp6emCxWJCQkAC9Xg9JktQuiyhosedCdB4mkwkrVqxAcXExMjIykJ+fj4yMDBQXF2PFihUwmUxql0gUlCSeREl0drW1tVi8eDFsNhuAk70XH1+vJS4uDmvXrsW8efNUqZEoWDFciM6itrYWN954I4QQ8Hq95/w5WZYhSRI2bdrEgCE6DcOF6AwmkwnZ2dmw2+3nDRYfWZah0+nQ3t6OlJQU/xdIFAL4zIXoDK+++ipsNtuQggUAvF4vbDYbVq5c6efKiEIHey5EpxFCoLi4GM3NzRjOpSFJEgoKCtDU1MRZZERguBB9jdFoREZGxqher9frFayIKDRxWIzoNBaLZVSvN5vNClVCFNoYLkSnSUhIGNXrExMTFaqEKLQxXIhOo9frUVhYOOznJpIkobCwEGlpaX6qjCi0MFyITiNJEh5++OERvXbZsmV8mE/0//hAn+gMXOdCNHrsuRCdISUlBWvXroUkSZDl818ivhX61dXVDBai0zBciM5i3rx52LRpE3Q6HSRJ+sZwl++/6XQ6bN68GXPnzlWpUqLgxHAhOod58+ahvb0dL774IgoKCr72ZwUFBXjxxRdx/PhxBgvRWfCZC9EQCCHw0Ucf4frrr0dNTQ2uvvpqPrwnOg/2XIiGQJIkpKSkfO3fRHRuDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSnCSEEGoXQRQIQgi0tbWN+PVerxcOhwMxMTGQ5ZHfl02YMAGSJI349UShIErtAogCxeVyYe3atSgoKBjxewghRhQMXq8XjY2NiIqKwsMPPwytVjviGohCAcOFIsrMmTNx2WWXBfxza2pq8Jvf/Ab/+7//G/DPJlIDn7kQ+ZkQAj/+8Y+RlpaG9PR0tcshCgj2XIj8rK6uDq2trVi7di2ftVDEYM+FyI+EEHjggQeg1+tx/fXXq10OUcCw50LkR/X19aivr8crr7wyqhlmRKGG33YiPxFC4Pvf/z70ej1uu+02tcshCij2XIj85LPPPsOBAwewatUqaDQatcshCij2XIj8wOv14p577kF2djZuvvlmtcshCjj2XIj8YNWqVTh27Bg++OADPmuhiMRvPZHCLBYLHnnkEcyaNQuzZs1SuxwiVTBciBQkhMCyZcvgcDiwcuVKrmuhiMVwIVLQrl278MYbb+DRRx/FuHHj1C6HSDUMFyKF2O12LFmyBLm5uXjsscfYa6GIxgf6RAoQQuChhx6CwWDA559/jqgoXloU2XgFEI2SEALV1dV4/fXX8dRTT6G0tFTtkohUx2ExolFqbm7GfffdhyuuuAKPPPIIh8OIwHAhGpX+/n7ccMMNSE5Oxl/+8heuxCf6fxwWIxohh8OBm2++GQaDATt27EBKSoraJREFDYYL0Qi43W7ce++92LVrF9asWYMpU6aoXRJRUGG4EA2T2+3GD3/4Q7z77rtYsWIFFixYwOcsRGfgMxeiYXA6nbj//vuxZs0aPPPMM/j+97/PYCE6C/ZciIZoYGAA99xzD7Zs2YJnn30WDz30EIOF6BwYLkQXIITA0aNHcfvtt6OhoQF//OMfcfvttzNYiM6D4UJ0Hl6vFxs2bMA//dM/QaPRYP369bjyyisZLEQXwGcuRGchhEBPTw+WLVuGpUuXYuLEidixYweDhWiIGC5EZ3C5XKiursbll1+O119/HY899hg2bdqEnJwcBgvREHFYjOj/eTwe7Nq1C08//TT++te/4uKLL8bq1atRUVHBUCEaJoYLRTQhBFwuF3bu3IkVK1bg/fffx7hx4/C73/0OS5YsQUxMjNolEoUkhgtFJCEEurq6UFNTg1deeQV1dXXIzs7GU089hbvuugspKSnsrRCNAsOFIorL5cLatWuxbt06fPTRR+jv70dZWRl++9vfYtGiRUhOTmaoECmA4UIRxeFw4N5770Vubi6++93v4tZbb0V5eTmio6MZKkQKkoQQQu0iiALB6XTiv//7v9Hb24upU6ciPj4+4IHS0NCAH/7wh9BqtQH9XKJAY7hQxBBCoLGxUdUeihACJSUl7CVR2GO4EA2REAJCCEiSxHAgugAuoiQaov379yM+Ph779+9XuxSioMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiGQAiBvr6+r/2biM6N4UJ0HiaTCStWrEBxcTGuvfZaOBwOXHvttSguLsaKFStgMpnULpEoKEmCt2BEZ1VbW4vFixfDZrMBwNd6K5IkAQDi4uKwdu1azJs3T5UaiYIVw4XoLGpra3HjjTdCCAGv13vOn5NlGZIkYdOmTQwYotMwXIjOYDKZkJ2dDbvdft5g8ZFlGTqdDu3t7UhJSfF/gUQhgM9ciM7w6quvwmazDSlYAMDr9cJms2HlypV+rowodLDnQnQaIQSKi4vR3Nw8rBlhkiShoKAATU1Np57HEEUyhgvRaYxGIzIyMkb1er1er2BFRKGJw2JEp7FYLKN6vd
lsVqgSotDGcCE6TUJCwqhen5iYqFAlRKGN4UJ0Gp1Oh7S0tBG9trCwcMSvJQo3DBciAB6PB2vWrMHMmTPh8XhG9B4zZ85EX1+fwpURhSaGC0W8HTt2YM6cOfjRj36Eyy67DDt27EB8fDxkeWiXhyRJiI2NRU5ODp577jls2rQJdrvdz1UTBTfOFqOI1djYiKqqKrz//vu46KKL8PTTT6OyshLA8Ffob968GbNnz8ZHH32EDz/8ENHR0Zg7dy4uu+wyaDSaQP0vEQUNhgtFHKPRiOeeew6vvfYaxo8fj+XLl2PhwoXfWJ8y1L3FqqurMXfu3FN/NjAwgJqaGuzevRvp6em48cYbMXnyZK5/oYjCcKGI4XA48Mc//hEvvPACZFnGI488gvvuuw9arfacrzGZTFi5ciVeeuklHDly5NR/LywsxLJly3D33XcjOTn5rK89ceIENm7ciKamJhQWFmLhwoUYP3684v9fRMGI4UJhTwiBd955B7/+9a/R2dmJe+65Bz/96U+HNbNLCIHe3l6YzWYkJiYiLS1tSD0RIQQOHTqEjRs3wmAwoLKyEtdff/05A4koXDBcKKzt2rULy5cvR11dHa6//no88cQTKCwsDHgdXq8XO3fuRG1tLZxOJ66++mrMnj0bMTExAa+FKBAYLhSWWlpa8NRTT2Hjxo2YPn06qqqqcPnll6tdFgYHB7Ft2zbs2LEDOp0O8+bNw8UXXzzkmWlEoYLhQmHFZDLhP/7jP/A///M/yMjIwC9/+UssXrw46H559/X14b333sPevXuRlZWFBQsWoKSkRO2yiBTDcKGw4HQ68corr+D555+H0+nEj370I/zwhz+ETqdTu7Tzamtrw/r169HS0oKJEydi4cKFGDNmjNplEY0aw4VCmhAC7733HqqqqtDa2oqlS5fi0UcfRWZmptqlDZkQAgcOHMCmTZvQ29uLSy+9FHPnzuU+ZRTSGC4Usvbt24fly5fjs88+wzXXXIOqqiqUlpaqXdaIeTwefPzxx/jggw/g9XpxzTXX4KqrrkJ0dLTapRENG8OFQk57ezueeeYZvP322ygtLUVVVRWuueYatctSjM1mwwcffIBPPvkEiYmJuOGGG1BeXs5FmBRSGC4UMsxmM1566SX813/9F5KSkvDYY4/h9ttvR1RUlNql+YXRaMTmzZtRX1+P7OxsLFy4EAUFBWqXRTQkDBcKem63G6tXr8a//du/wWKx4MEHH8RDDz006rNXQsXRo0exYcMGtLW1YerUqbjxxhuRnp6udllE58VwoaAlhMDWrVtRVVWFhoYGfOc738EvfvELjBs3Tu3SAk4IgX379mHz5s0YGBjArFmzcN111yEuLk7t0ojOiuFCQengwYNYvnw5/vrXv+Lyyy9HVVUVpk+frnZZqnO5XPjb3/6GrVu3QpZlzJkzB7NmzQrboUEKXQwXCipdXV149tln8frrr6OgoABPPPEE5s2bx4fZZ7BYLNiyZQt27tyJ1NRUzJ8/H9OmTePfEwUNhgsFBZvNht///vf47W9/i5iYGPz85z/HXXfdxWm4F9DV1YVNmzbhq6++Ql5eHhYuXIicnBy1yyJiuJC6vF4v3nrrLTzzzDPo7e3Ffffdh5/85CfcNXiYmpqasGHDBnR0dKCsrAzz589Hamqq2mVRBGO4kGp27NiBJ554AgcOHMCiRYvw+OOP8657FLxeL/bs2YOamhrYbDZcccUV+Na3voXY2Fi1S6MIxHChgGtqakJVVRW2bNmCyspKPP3007jooovULitsOBwOfPTRR9i+fTu0Wi3mzp2LSy+9lMctU0AxXChgenp68Nxzz2HlypUYP348Hn/8cdx00018CO0n/f39qK2txe7du5GRkYEFCxagtLSUf98UEAwX8jvf8cIvvvgiJEka0vHCpJzTj1suKirCggULeNwy+R3DhfxGieOFSRk8bpkCjeFCfnHm8cLLly9HUVGR2mVFPI/Hg507d2LLli08bpn8iuFCimppacHTTz+NDRs2BNXxwvR1Zx63fP311+Oiiy4KuhM7KXQxXEgRJpMJL7zwAl5++WWkp6fjl7/8JW699Vb+sgpyPG6Z/IXhQqPidDrx5z//Gc8//zwcDgeWLVuGBx54IOiPF6avO3bsGDZs2ICWlhaUlpZiwYIFPG6ZRoXhQiMSDscL09fxuGVSEsOFhm3fvn144okn8Omnn+Kaa67Bk08+iUmTJqldFinE7Xbjk08+4XHLNCoMFxqy048XnjhxIqqqqnDttdeqXRb5ie+45Y8//hhJSUk8bpmGheFCF2SxWPDSSy/hD3/4AxITE/HYY4/hjjvu4BkiEcJoNGLTpk04cOAAsrOzcdNNNyE/P1/tsijIMVzonE4/XthsNuPBBx/Eww8/HDHHC9PXNTc3Y8OGDWhvb+dxy3RBDBf6BiEEtm3bhieffBINDQ247bbb8Itf/IJbhhCEENi7dy/ee+89DAwM4PLLL8ecOXN43DJ9A8OFvubgwYN44okn8NFHH2HWrFmoqqrCjBkz1C6LgozL5cKOHTuwbds2HrdMZ8VwIQB/P154zZo1yMvLw5NPPsnjhemCzGYz3n//fXz22WdIS0vjcct0CsMlwp15vPBPf/pT3H333dyxmIalq6sLGzduxKFDh3jcMgFguEQsr9eLv/zlL/iXf/mXU8cL//jHP0ZKSorapVEIa2xsxMaNG9HR0YHy8nLccMMNPG45QjFcItDf/vY3PPHEE6ivr8eiRYvwq1/9Crm5uWqXRWHC6/Vi9+7dqKmpgd1ux5VXXolrr72Wxy1HGIZLBGlqasJTTz2F2tpaVFZW4qmnnsLFF1+sdlkUps523PJll13GzUwjBMMlAvT09OA3v/kNXn31VYwbNw6PP/44Fi1axIeuFBD9/f2oqanBnj17eNxyBGG4hDGHw4E//elPeOGFFyBJEn7yk5/gvvvu48FQpIoTJ05gw4YNOHz4MIqKirBw4UKMGzdO7bLITxguYUgIgXXr1uHXv/41Tpw4gXvuuQc/+9nPeLwwqY7HLUcOhkuY+fzzz7F8+XLs2bOHxwtT0DrzuOXZs2dj9uzZnAIfRhguYaK1tRVPP/001q9fj2nTpqGqqgpXXHGF2mURndfpxy3HxcXh+uuvR2VlJR/6hwGGS4gzmUx48cUX8fLLL0Ov1/N4YQpJfX192Lx5M/bt24esrCwsXLgQxcXFapdFo8BwCVFOpxOvvvoq/v3f/x0OhwMPP/wwHnzwQR4vTCGNxy2HD4ZLiBFCoKamBlVVVWhpacEdd9yBRx99lBcghQ0hBOrr67F582b09fWdOm6ZRz2EFoZLCDn9eOHZs2ejqqqKxwtT2DrzuOVrr70WV155JY9bDhEMlxDg9XqxbNkyvPXWWzxemCLOmcct33777SgoKFC7LLoAhkuACCHQ3t4+4tdbrVZoNJpR78+UnZ3NldGkCiEETCbTiF9vt9vR3NyMgoKCUT1bTElJ4TUQADzZJ0BcLhfWr1+PvLy8c/6M0+lER0cHUlJS/LKo7OjRo7j//vu5loBU4fF4sH//f
uj1+nP+jNPphNlsRmJi4lm/p8nJyejp6RlxDUajEVdeeSUPNQsA/g0H0CWXXHLWjSJ9q5YfeeQRHDp0CKmpqXjsscdw2223KXqHtWvXLsXei2gk8vLyznqDJYTA0aNHUVtbi76+PqSmpuK2225DTk6OotfA0aNHFXsvOj8uhggC7e3tWLRoEbq6urB8+XIUFxfjRz/6Ed5//31w1JIiQUdHB15++WVIkoQFCxZACIE//vGPoxpGI3UxXFTm8XiwZMkSSJKEzZs345577sFrr72G6dOn4wc/+AGsVqvaJRL5ldvtxssvv4z4+Hg8+OCDmDlzJh588EHIsoxXXnmFN1ghiuGispUrV6KpqQmvvPIKxo4dC0mSoNVqsXLlSgwODuLxxx/nxUVhSwiBbdu2wWw24/vf//6pB/Xx8fFYsmQJOjo60NjYqHKVNBIMFxU5nU5UVVVh1qxZmDlz5tf+LDMzE3fffTfWrFmDgYEBlSok8i+3241t27Zh6tSp31gIPHnyZKSlpeHtt9/mDVYIYrio6LXXXoPNZsN//ud/fuOhpSRJ+NWvfgVJkvDss8+qVCGRf3300UfweDxYvHjxWa+BxYsXw2Qy4fjx4ypVSCPFcFGJ1+vFs88+i/LycmRnZ5/1Z+Lj43HDDTfgtddeg8vlCnCFRP7l9Xrx4YcforCwEPHx8Wf9maKiIsTExGDdunXsvYQYhotKdu/ejf7+fjz77LPnnGopSRKqqqrgdDrx3nvvBbhCIv9qbm6G0+nEt7/97XNeA7IsY/bs2WhtbYXD4QhwhTQaDBcVCCHw5JNPIjk5GWVlZef92ezsbGRnZ+Ppp5/mnRuFDSEENm3ahLi4uAtuuuo7l+izzz4LRGmkEIaLCux2O/bs2YP777//ggvEJEnCo48+itbW1lGtTCYKJi6XC+3t7bjqqqsueA3ExMQgKysL27dv5w1WCGG4qOCdd96BEAL/+I//OKSfX7RoEWRZxu9//3s/V0YUGPv27QMAzJo164I/K0kSbrjhBlitVvT39/u5MlIKwyXAhBB48cUXMWHChCHvHxYTE4OLL74Yr776Ku/cKOQJIbB161akpKQMeSPWkpISSJKE7du3+7c4UgzDJcBsNhtaW1vx0EMPDXnPJEmS8Itf/AJms5l7I1HIc7lc6O3txezZs4d8DciyjIKCAnz++ee8wQoRDJcAW7duHQDgtttuG9brLrnkEkRHR+P555/3Q1VEgVNfXw8AqKioGPJrJEnCvHnz4HQ6+ewxRDBcAux3v/sdsrKyzjmv/1w0Gg3mzJmDDRs2wOv1+qk6Iv/bvn07EhIShn02UW5uLmRZxrZt2/xUGSmJ4RJALpcLR44cwfe+970RbSP+85//HIODgzh48KAfqiPyP6/Xi87OTlx66aXDvgYkSUJxcTH27dvHobEQwHAJIIfDgdtvvx133HHHiF4/adIk6HQ6PPfccwpXRhQYTqcTpaWluOyyy4b9WkmSMHfuXLhcLhgMBj9UR0piuARQQkICXnjhBaSnp4/o9bIs46abbsLWrVvh8XgUro7I/2JjY3HvvfeO+KTV7OxsaDQabN26VeHKSGkMlwCTJGlUJ+s98sgjcLlc2Llzp4JVEQWOLMsjvgZkWUZpaSm++OKLYT17FELwhizAGC4hJi8vD0lJSXjmmWc47kwRae7cuXC73ejo6Bjya3p6evCHP/wBg4ODfqyMTsdwCTGSJOGee+7B7t27h7yRnxCCuypT2Bg7diyio6NRW1s75Busv/71rzh27BhiYmL8XB35MFxC0AMPPACv14vq6uoh/XxraysWLVrE88gpLMiyjIqKCjQ0NAxpaEwIgbq6OuTn549qSJqGh+ESgtLS0pCfn4/nnntuSHduv/nNb7B//34kJiYGoDoi/5szZw68Xu+QpuWbTCY4HA7MmTMnAJWRD8MlBEmShOXLl+PEiRNobW0978+63W6sX78e8+bNg0ajCVCFRP6VnJyMpKQkbNq06YI3WB988MGp7WMocBguIWrevHmIjY3F8uXLz3tx1dbWwuFw4PHHHw9gdUT+JUkS5s+fj56envMO93o8HtTV1WHSpEmQZf66CyT+bYeoqKgofO9738OWLVswMDBw1p8RQmD58uXIyclBXl5eYAsk8rOysjJERUXh3XffPecN1hdffAG3240FCxbweUuAMVxC2E9/+lPIsnzO3sunn36KtrY2/Ou//isvLAo7Go0GV155JQ4ePAiLxfKNP/d6vVi/fj0yMzOh1+tVqDCyMVxCWHx8PO699168+eab33j24nQ68YMf/AC5ubn41re+pVKFRP41Z84caDQavPHGG1+7wRJC4LPPPoPFYsGSJUt4c6UChksIkyQJv/rVr5CcnIylS5fCarUCODnO/M///M8wGAz485//zLFmClvR0dFYuHAhGhsbsWfPHgghIIRAd3c3NmzYgNLSUmRnZ6tdZkSKUrsAGh2dTofXXnsNt9xyC2655Rbcf//92Lp1K9auXYuf/exnmDx5stolEvmNJEm47LLL8NVXX+Evf/kLenp6kJSUhNraWiQkJGDp0qXstaiE4RIGLr74YqxcuRK//OUv8ZOf/ASJiYl4/PHH8eCDD/LCorAnyzLuuusuvPPOO9ixYwe8Xi8mTJiAJUuWDPvMGFKOJLhBVUA4nU688sorKCoq8ttnWK1WtLW1IT09HXq9/hvB0tTUhO9973vQarV+q4HoXNxuNz799FNkZGT45f2FEDCZTHC73UhLSzvruq7u7m7MmjULUVG8r/Y3hkuACCFw+PBhVXsSQggUFRWxN0OqEEIExTksGRkZvAYCgOESInxbhms0Gl4YFLE8Hg9sNhvi4uK440SQ4zSiEFFfX49x48ahvr5e7VKIVNPZ2YmnnnoKnZ2dapdCF8BwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXEKAEAJ9fX3wer3o6+uDEELtkogCTgiB3t5e9Pf3o7e3l9dBkGO4BDGTyYQVK1aguLgYs2fPRk9PD2bPno3i4mKsWLECJpNJ7RKJ/O7062D69On405/+hOnTp/M6CHKSYPwHpdraWixevBg2mw0AvnaXJkkSACAuLg5r167FvHnzVKmRyN94HYQuhksQqq2txY033gghBLxe7zl/TpZlSJKETZs28cKisMPrILQxXIKMyWRCdnY27Hb7eS8oH1mWodPp0N7ejpSUFP8XSBQAvA5CH5+5BJlXX30VNpttSBcU
AHi9XthsNqxcudLPlREFDq+D0MeeSxARQqC4uBjNzc3DmgkjSRIKCgrQ1NR0ahyaKFTxOggPDJcgYjQakZGRMarX6/V6BSsiCjxeB+GBw2JBxGKxjOr1ZrNZoUqI1MPrIDwwXIJIQkLCqF6fmJioUCVE6uF1EB4YLkFEr9ejsLBw2OPFkiShsLAQaWlpfqqMKHB4HYQHhksQkSQJDzzwwIi2tVi2bBkfYlJYkCQJDz/88Ihey+sgeDBcgkhnZye0Wi20Wu2QLxBZlhEXF4e77rrLz9URBc7dd9+NuLg4yPLQfkVJkoTY2Fj8wz/8g58ro6FiuASJPXv24M0330R6ejrWrFkDWZYveGH5ViZXV1dz4RiFlZSUFKxduxaSJF3wRst3HTz//PPo7u6G0+kMUJV0PpyKrLLBwUHU1taiubkZlZWVuPzyy6HRaM67p5JPfHw8qqurMXfu3ECXTRQQ7733HhYvXgy73Q5Jks65t1h1dTWuuOIKtLS0wO12IycnhzdcKmO4qOjEiRPYvHkzXC4X5s2bh4KCgq/9uclkwsqVK/HSSy/hyJEjp/57VlYWrrrqKvzhD39AampqoMsmCpienh589dVX2LVrF37/+99/7TooLCzEsmXLcPfddyM5ORkA4PF4cOzYMZhMJmRkZGDcuHFDHlojZTFcVCCEwO7du/Hxxx8jKysL8+fPP+/0Sd85FmazGYmJifB6vVi1ahVuuOEGlJaWBrByosBqaGhAbGwscnNzv3EdpKWlnXPIzGg0or29HbGxscjPz0dMTEyAKyeGS4DZ7XbU1NSgpaUFF198MWbNmjWiO6vq6mrY7XYsXbrUD1USqW9gYAAtLS0oKipCXFzcsF9vt9tx9OhRuFwu5OTksJcfYOwvBtDx48exatUqdHV14eabb8YVV1wx4i57ZWUluru70d7ernCVRMHBYDAgLi5uRMECADqdDqWlpUhOTkZLSwva2tqGvBEmjV6U2gVEAiEEdu3ahU8//RTjxo3D/PnzR70KOTc3F3q9Hnv27EF2drZClRIFB7vdDqvVitzc3FG9jyzLyMvLQ2JiItrb22G1WpGXl4fY2FiFKqVzYc/Fz2w2G6qrq/HJJ5/gkksuwa233jrqYPGprKxEc3Mz+vr6FHk/omBhMBig1WqRlJSkyPvp9XqUlJRACIGGhgb09vYq8r50bgwXP2pra8OqVatgNBqxePHiET9fOZfS0lLExcWhrq5OsfckUpvL5UJ/fz/S09MVXW2v0+kwceJEpKSkoLW1FceOHeMwmR8xXPxACIFPP/0Ub7/9NtLS0nDnnXciJydH8c/RaDQoKyvDwYMHYbfbFX9/IjUYjUbIsuyXPcJkWUZubi5yc3PR19eHhoYGDA4OKv45xHBRnMViwdtvv42dO3di1qxZWLx4MeLj4/32edOnTwcAfPHFF377DKJA8Xq96O3tRVpaml/Xp6SlpWHixIkATk537unp8dtnRSqGi4JaW1uxatUq9PX14dZbb8Wll17q9030dDodJk+ejH379sHtdvv1s4j8rbe3F16vF+np6X7/rNjYWEycOBGpqak4duwYWltbOUymIIaLArxeLz7++GNUV1cjMzMTd955Z0BncFVUVMBms6GhoSFgn0mkNCEEjEYjkpOTER0dHZDPlGUZOTk5yMvLg8lkwqFDhzjErBCGyyj5hsE+//xzXHHFFbj55ptHPC9/pFJTU1FQUIA9e/YE9HOJlDQwMACn0zmqI45HKjU1FaWlpZBlGQ0NDTAajQGvIdwwXEbh6NGjeO2119Df34/bbrsNF198sWpnSVRWVqKnpwetra2qfD7RaBkMBiQkJECn06ny+TExMSgpKYFer0dbWxtaWlrg8XhUqSUccBHlCPiGwXbv3o38/HzMmzdPtQvCJzs7G5mZmdizZ8+oF54RBZrNZoPNZkNeXp6qdciyjAkTJiAhIQHHjh2DzWZDfn6+6td3KGLPZZgGBgbw1ltvoa6uDldddRUWLVoUNF+8yspKtLa2cuYLhRyDwYCYmBjFFk2Olm+YTKPRoKGhAQaDQe2SQg7DZRiOHDmC1atXw2q14jvf+Q4qKyuD6kjVkpISJCQk8NkLhRSn03lq0WQw8Q2Tpaeno729HUePHuUw2TAwXIbA4/Fg+/btWL9+PcaPH4+lS5ciKytL7bK+QZZllJeX49ChQ7BarWqXQzQkRqMRGo0mKHctliQJ2dnZyM/Ph9lsxqFDh04d4Efnx3C5gP7+frz55pvYv38/Zs+ejZtuuimoN72bNm0aZFnG/v371S6F6II8Hg96e3uRnp4e1Id6paSkoLS0FFFRUWhsbER3d7faJQW94G3NINDU1IRVq1ZhcHAQS5YsQXl5udolXVBMTAymTp2K/fv3c1ElBb3e3l4IIaDX69Uu5YK0Wi1KSkqQkZGB48ePo7m5mcNk58FwOQu3241t27Zh48aNyM3NxdKlSzFmzBi1yxqy8vJyOBwOHDx4UO1SiM7Jt2gyNTUVUVGhMXFVkiSMHz8eBQUFsFgsHII+D4bLGUwmE958800cOHAA1157LRYsWBByR6QmJyejqKgIdXV14EGjFKz6+/vhcrmC7kH+UCQnJ6O0tBTR0dFoamriMNlZMFxO09DQgNWrV8PpdOL222/HjBkz1C5pxCorK9HX14ejR4+qXQrRWRkMBiQmJgb1M8zz0Wq1KC4uRmZmJo4fP44jR45wKPo0DBecHAb74IMPsHnzZuTn52Pp0qWqbEGhpKysLGRlZXFaMgUli8UCu90ekr2W00mShHHjxqGwsBA2mw2HDh2CxWJRu6ygEPHh0tvbizVr1uCrr77CnDlzMH/+fGi1WrXLUkRlZSXa29vZZaegYzQaERsbi8TERLVLUURSUhJKS0sRExODw4cPo6urS+2SVBfR4fLVV1/h9ddfh9frxe23345p06apXZKiCgsLkZSUxN4LBRWHw4GBgYGQHx04U3R0NIqKijBmzBicOHEChw8fjuhhsogMF5fLhS1btqCmpgbFxcW44447Qr57fjayLKOiogKNjY3sqlPQMBqNiIqKQkpKitqlKE6SJGRlZaGoqAh2uz2ih8kiLlx6enqwZs0aNDQ0YO7cuZg3b17Azo5Qw5QpUxAdHY29e/eqXQoR3G73qUWTwbR1ktISExNPDZM1NTWhs7Mz4mZuRlS4fPnll3j99dcBAHfccQemTJmickX+p9VqMW3aNNTX18PpdKpdDkW4np4eSJKEtLQ0tUvxO98wWVZWFjo6OnDkyBG4XC61ywqYiAgXl8uFmpoabNmyBaWlpbjjjjtCYkWwUsrKyuByufDll1+qXQpFMCEEenp6QmrR5GhJkoSxY8eiuLgYg4ODOHToEMxms9plBUTYh4vRaMTq1atx+PBh3HDDDbjuuusi5ovtk5iYiJKSEuzduzfiuuYUPPr6+uB2u8Py+eaFJCQkoLS0FDqdDocPH0ZHR0fYX4thHS5ffPEFXn/9dURFRWHp0qUoLS1VuyTVVFRUoL+/H4cPH1a7FIpQRqMRSUlJIbfjhVKioqJQVFS
EcePGobOzE4cPHw7rYbKwDBen04nNmzdj69atmDJlCpYsWRKU23kH0pgxY5Cdnc1pyaQKs9mMwcHBsJt+PBJjxoxBcXExHA4HDh06hIGBAbVL8ouwC5fu7m6sXr0aR48exY033ohvfetbETcMdi4VFRXo6OhAR0eH2qVQhDEYDNDpdIiPj1e7lKDgGyaLi4vDkSNHcOLEibAbJgurcNm3bx/eeOMNaLVaLF26FCUlJWqXFFQKCgqQkpLC3gsF1ODgICwWC3stZ4iKikJhYSHGjx+P7u5uNDU1hdWMzrAIF4fDgY0bN+LDDz/EtGnTsGTJkrBcoDVakiShoqIChw8fRn9/v9rlUIQwGAyIjo5GcnKy2qUEpczMTBQXF8PlcuHQoUNhc22GfLh0dXVh9erVOHbsGBYuXIhrrrkGGo1G7bKC1pQpUxATE8NFlRQQLpcLJpMp7BdNjlZ8fDxKS0uRkJCA5uZmHD9+POSHyUI6XOrq6vDGG28gNjYWS5cuRVFRkdolBb2oqCjMmDEDBw4cgMPhULscCnORtGhytDQaDQoKCjB+/HgYDAY0NjaG9DBZSIbL4OAg1q9fj48++gjl5eX47ne/yy73MMyYMQNerxf19fVql0JhzOv1oqenB2lpaRxNGIbMzEyUlJTA7Xbj0KFDMJlMapc0IiEXLh0dHVi1ahWOHz+ORYsW4aqrruIXd5h8XfC9e/fC6/WqXQ6Fqb6+Png8nohcNDlacXFxKC0tRWJiIo4ePYr29vaQGyYLmXARQmD37t146623kJCQgDvvvBMFBQVqlxWyKioqYLFY0NjYqHYpFKaMRiOSk5PD5nykQNNoNMjPz0d2djaMRiMaGxtDaig7JMLFbrfj3XffxY4dO1BRUYHvfOc7YXPIkFrS09ORm5vLacnkFwMDA3A4HJx+rICMjAyUlJTA4/Hg0KFD6OvrU7ukIQn6cDl+/DhWrVqFzs5O3Hzzzbjyyishy0FfdkiorKxEd3c32tvb1S6FwozBYEBcXBzi4uLULiUsxMXFYeLEiUhOTkZLSwva2tqCfkg7aJeuCyHw+eef45NPPsG4ceMwf/58JCQkqF1WWMnNzYVer8eePXuQnZ2tdjkUJux2O6xWK3Jzc9UuJaxoNBrk5eUhISEB7e3tsFqtyM/PD9q92oKyC2Cz2fDOO+/g448/xiWXXIJbb72VweInlZWVaG5uDpmuNgU/g8EArVaLpKQktUsJS+np6Zg4cSK8Xm9QD5MFXbi0tbVh1apVMBgMuOWWWzBr1iwOg/nRxIkTERcXh7q6OrVLoTDgcrnQ39/PRZN+ptPpUFpaipSUFLS0tODYsWNBN0wWNL+1hRD47LPPsHbtWqSlpeHOO+9ktzoAoqKiUFZWhoMHD8Jut6tdDoU4o9EIWZa5aDIAZFlGbm4ucnJy0NfXh4aGBgwODqpd1ilBES5WqxVr167FZ599hssuuwyLFy/m7qkBNH36dAAnz78hGimv14ve3l6kpaVxtCGA9Ho9Jk6cCABoaGhAb2+vyhWdJAkFVuYIIUZ1dGdraytOnDiBSZMmjWrDycTExIjtio+2DRobG2GxWFBRUTGqOiK5DUKdEGJUh1dZLBZ0dHQgPz9/VMdcREdHR+x3aDRt4PV60dXVBYvFgoKCglEtLleiDRQJF4/Hg7q6OkiShKSkpGHftQgh4Ha7ER0dPezPFkLA4XDAbrejoqIiYlfr+9oAAJKSkob99+B2u6HRaEb0hfJdEFarNaLbINR5vV4YjcZRzT7yeDwjan8hxKl9tNLT0yO25+P1emEwGCDL8oh+HwInr+WRhPvpbZCRkTHqNlBsKnJGRgY2bdqEm2++GePGjVPqbS+ou7sb7777LubMmROwzwxWGRkZ2LhxI2655ZaAtoHRaMSGDRswe/bsgH0m+YdOp4PRaMT48eMDurLearWivb0dOTk5AfvMYKXT6dDa2orCwsKArhOyWCxoa2tDXl6eIu+n2O2BVquFx+PBgQMHlHrLIfnyyy9hs9m4xQT+3gaBfnZy4MAB9Pf3sw3CgCzLMJvNAd8ssaenB16vl71enFzP4vF4YDQaA/q5vjZQqteoWLhIkoSMjAw0NzcHbIM1IQSampqg1+sjdoz2TJmZmQFvg8bGRs4OCiPR0dHo7e0N6HfIbDZzNf9pdDod+vv7A9oGAwMDiraBogOb06dPx+DgIDwej5Jve05erxc2mw1Tp04NyOeFghkzZsDhcMDtdgfk87xeL6xWK9sgjKSkpAT0HBEhBDweD29QTpOeng6PxxPQcPF4PNDr9Yq9p6Lh4tul+Pjx40q+7Tl1dnYCAIqLiwPyeaHA1wZtbW0B+byOjg4AODUVkkKf75d8oNZMWK1WAOCK/tP4zqeyWCwB+TxfGyh5Lpai4RIbG4uoqCjs27dPybc9p3379kGj0bA7fZqYmBhER0cHrA327t3LNggzvmdnPT09Afk8o9EISZL4vOU0vpmbBoMhIJ9nMBgUbwNFw0WSJEyYMAFtbW1+784JIdDS0oLx48fzectpJElCXl5eQA4XEkKgtbUV2dnZbIMwE6gxfyEELBYL10edwbesw2KxBKQNzGYzEhISFG0DxSeTl5eXw+12+30rkcHBQbhcLpSXl/v1c0JReXk5PB6P37vUdrsdLpdr1AsvKbhIknRqzN/f+1W53W4IIXha5VlkZGScWgPoT75nO0qfvaN4uPjWV3z11VdKv/XX+E5QnDBhgl8/JxSNHTsWAPw+LdzXxtyuP/z4nn8MDAz49XN8O/pyWPWbfFtg+Xt40rddjNI7zyseLhqNBklJSdi/f7/funNCCOzduxcJCQkcpz0L38aB9fX1fm2D/fv3j2g3AAp+sixDo9HAYDD49TvU09MDrVbLIbFziImJQU9Pj1/bwGg0+mXLHcXDRZIklJWVYWBgwG9Tkr1eL0wmE6ZPn84v5VlIkoSKigpYrVa/dak9Hg/6+/tRVlbGNghDkiQhLS3NrzPGfNsGcXv+s/OtHXS5XH4bnvRt+eKPNvDLBj6TJk0CALS0tPjj7XHs2DEAwJQpU/zy/uGgpKQEwN+HD5V29OhRAMDkyZP98v6kPt+aB980VaX5htxGs1ltuEtNTQUA9Pf3++X9fZvd+mONkV/CJTY2FrGxsdi1a5fi3Tnf8cdarZbjtOeh1WoRHx+P3bt3+6UNdu3adaqdKTxFR0dDlmV0dXX55TvU3d0NjUbDYdXzkGUZUVFRfmuDrq4uaDSaUe1ifS5+CRdJkjB16lQYDAbFu3NCCHR0dGDy5MnsSp+Hb3iyt7dX8eFJ386tHJYMb5IkISUlxS89FyEEBgcHuXXTBfiGxhwOh1+eu9hsNqSlpfmlDfy2r3V5efmpdRBK8q2hqaysVPR9w5HvEDClh8aOHj0KIQTKysoUfV8KPpmZmQCUHxrzDcdwCvKF+YYnld5M1NcGSk9B9vFbuMTFxSE2NhaffvqpYokrhMAnn3yCmJgYxafNhaOYmBgkJiZi586dirbBp59+Cp1Ox2HJCOAbGuvs7FT0O9TZ2YmoqCgOiQ2BRq
NBdHS0okNjvjbwvbc/+C1cfDOWDAaDYjOWPB4Purq6OENpiCRJwiWXXAKTyQSHw6HIe7pcLhiNRlx00UVsgwjgW1Bps9kU+8Xm9XrhcDiQkZHB79AQSJKEMWPGwOFwKDbELYSAzWbz60w9vx73NmPGDADKLeb78ssvAYCr8ofBN3PPd0rlaO3fvx/A34fcKPz5hq58Cx5Hy7cokLsgD51v1phSZ7z4Fk76a0gM8HO4aLVapKenKzJrTAiBnTt3Ii0tbVTHsEaaqKgojB8/HnV1dYq0we7du5GZmem3rjQFH41Gg5iYGEWGZYQQMBgM0Ol0EXuU8UjIsoy4uDh0d3cr0gadnZ2IiYnx67CkX1tXkiRcffXVsNvto07c3t5e2Gw2XHXVVexKD4OvDVwuF06cODGq9+ru7sbg4CBmz57NNoggkiQhKysLbrd71MOrdrsdHo8H48aN43doGCRJwvjx40+dYTUavvOe/L3pr99vHbKzsxEdHY0PP/xwxIkrhMCHH36I6OhonrE9ApmZmdDpdNi2bduo2mDbtm3QarWn9o+jyJGYmAhZlnHixIlRfYeOHz9+6i6chicuLg4ajWZUO54LIdDe3g5ZlpGYmKhwhV/n93CRJAmXXnopTpw4MeLEtdvtaG9vx0UXXcSu9AhIkoQrrrgCRqNxxCt9rVYrOjs7MXPmTN5xRiDfeguLxTLih8oulwt2ux1jx47ld2gEfD1I327kI+F2u2GxWJCZmen3NgjIb+qysjLIsjyi3osQAtu3b4csy9zafRQmTZqEqKgofPDBByNqg61bt0Kj0fBBfgTzPfwdSe/F12vx7VlGI+Nb8DiSM7NObwPf+iV/Cki4aDQaVFZW4vDhw8NejGWz2dDU1ISysjK/bFEQKTQaDWbNmoW2trZhL8ayWCxobm7GRRddxHUJEcw3LdlkMg17eYHL5YLZbEZmZiZHH0ZBlmWMGTMGZrMZTqdzWK91u90wmUwBmwIekFb2DY1FRUWhpqZmyIkrhEBtbS00Gg2HYxRQVlYGrVaLzZs3D6sN3nvvPURHR+OSSy5hG0QwSZJODWkNZ9zft1OHLMt+nfoaKXwB3draOuw2OL0N/S1gtxBRUVG4+uqr0d7ePuQtYdra2nDs2DFceeWV7LUoQKPR4LrrrkN3dzeampqG9MVsbW3F8ePHMXv2bLYBnRr3N5vNQxqFEEJgYGAAdrsd48ePZ69FAbIsIzs7GzabbciHuZnNZlgsloAeSR7Qlp46dSrS09Px3nvvXfAY5MHBQWzatAl6vR7Tpk3jHbNCiouLkZWVhS1btlxwgsXg4CA2b96M9PR0Hm9AAE6Gi16vR0xMDFpbWy/4cN/tdqOtrQ06nY5b6ysoNTUVOp0Ora2tFxyidLvdaGlpQWxsrN82qTybgIaLJElYtGgRPB4P1q1bd86/FLfbjXfffRcejweLFi3i3Y6CJEnCTTfdBEmSUF1dfc5xW7fbjXfeeQcejwff/va3Ge50iiRJyMvLg9frRUtLyzl3Pvd4PKfO/cnLy+N3SEGSJCE/Px9CCDQ3N5+zDbxeL44cOQIAKCgoCGgbBPy3dkJCAhYsWACDwYB169bBarWeGp4RQsBqteLdd99FZ2cn5s+f7/e52JFIp9Nh0aJF6O3tRXV1NSwWyzfaYN26dejq6sKCBQu4SSh9g1arRU5ODqxW66m759O/Q06nE83NzXA4HMjLy+OQqh9ER0cjPz8fNpsNzc3NcLlcX2sDl8uFI0eOwG63Iy8vL+C7akhCgd3oPB4Puru7kZWVNaSfF0LgyJEjqK2thVarxdSpU6HX69Hb24sDBw7A4XDguuuuQ3Fx8ZCT9sSJExgzZkzEzmbybeo51AWOQgi0tLRg8+bNiIqKwrRp06DX69HX14f6+no4HA7MnTt3WG1w/PhxjB07NmLbINR5vV7Y7XbEx8cP6eeFEDCZTGhvb0dUVNSprZkGBwdPnfuek5ODxMTEIX+HrFZrRG8NM5I26O/vx7Fjx6DRaE4NWfrawOv1IicnB8nJyUNuA4vFgri4uFG3gSrhApz8S+nt7cUnn3yC48ePw+VyndoHa9asWcM+RIjhMrxwAU62QV9fHz7++GO0t7d/ow2Gu2MqwyW0DfcXG/D3Q786OztPjULIsoyEhASMHTsWWq12WN8hhsvI26CjowNWqxVer/dUG2RlZSEmJmZYbRB04bJ///5TO3cOl9VqxeDgIGJjY4f1l3q6vr4+zJgxI2J/sY2mDXzbbw8ODiImJgbx8fEjGpuN9DYIdV6vFz09PSPeGNblcsHj8SAqKmrEw2AOhwN6vT6iw8VoNI7o+HAhBNxuN9xu96k2GMl1PDg4iPT09OAIF98dsNpSU1Mj9qEh24BGSwih2Lk/ozHcO+1wEk5toEi4jJbL5YLFYhlxz4dGz+FwwG63c7oojZhvx15OAFGP2+2G0+kMio1Bg6LvuXv3bqxevRqDg4NqlxKxtm/fjrVr155zSiPRhZhMJjQ3Nw97WxJSjm+BtFInVo5GUITL9OnT4fV6FTuxkoavvLwcAwMDOHz4sNqlUIgyGAxISkqCVqtVu5SI5Htek56eHhTPPYMiXOLj41FaWoq9e/cGReJGoszMTEyYMAF79uxRuxQKQWazGQ6Hg3uHqai3txcejydo2iAowgUAKisrYbFY0NTUpHYpEauyshKdnZ2jPrGSIo/BYEBcXNyIZ3vS6HV3dyMlJSVoeo5BEy56vR65ubm8c1ZRXl4eUlNT2QY0LHa7HRaLBenp6WqXErH6+/vhcDgCck7LUAVNuAAn75y7u7vR3t6udikRSZKkU+fuDPfMF4pcRqMR0dHRSE5OVruUiNXd3Y34+Pig6jkGVbjk5uZCr9fzzllFkyZNgk6nw969e9UuhUKAy+WCyWQa9m4OpBybzXbq6OJgElThApzsvTQ3NwfFgsBIFBUVhRkzZuDAgQOcGk4X1NPTw6OLVdbd3Y2YmJig6zkGXbiUlpYiLi4OdXV1apcSsWbMmAEhBOrr69UuhYKYb7sYvV4fFFNfI5HT6Qzo0cXDEXThotFoUFZWhoMHD17wQDHyj7i4OEyaNAn79u3j1HA6p76+Pni9Xuj1erVLiVgGgwGyLAdlGwRduAAnF1UCwBdffKFyJZGroqICFosFjY2NapdCQUgIAYPBgOTk5KCZ+hppPB4Penp6FNlk0h+CryKcPMxq8uTJ2Ldv3wWP8CT/0Ov1yMvL4+QKOiuz2Qyn08npxyryndcSLIsmzxSU4QKcvHO22WxoaGhQu5SIVVlZCYPBgLa2NrVLoSBjMBgQHx8fFBskRiJfzzE1NTXgJ0wOVdCGS2pqKgoKCnjnrKKcnBykp6ezDehrbDYbrFYrey0q6u/vh9PpDLrpx6cL2nABTt459/T0oLW1Ve1SIlZlZSWOHj2K3t5etUuhIGE0GqHVapGUlKR2KRGru7sbCQkJ0Ol0apdyTkEdLtnZ2cjMzOSds4omTpyI+Ph4Tg0nACenvvb39wfl1NdIYbVaYbVag7rXAgR5uAAn75xbW1thNBrVLiUi+
aaGf/XVV5waTujp6YEsyzzYT0XBumjyTEEfLiUlJUhISOCds4p8U8P379+vciWkJt/U10g+415tDocDJpMp6HstQAiEiyzLKC8vx6FDh2C1WtUuJyLFxsZiypQp2L9/P6eGR7De3l4IIYJywV6kMBgMiIqKContdoI+XABg2rRpkGWZd84q8k0NP3TokNqlkAqEEDAajUhJSQnaqa/hLtgXTZ4p+CsEEBMTg6lTp/LOWUUpKSkoLCzk5IoI1d/fD5fLxenHKjIajRBCBO2iyTOFRLgAJ894dzgc+PLLL9UuJWJVVlait7cXLS0tapdCAWY0GoN+6ms48y2aTEtLQ1RUlNrlDEnIhEtycjKKiopQV1cHIYTa5USk8ePHY8yYMey9RBir1QqbzRYyd8zhqK+vDy6XKyQe5PuETLgAJ++cTSYTmpub1S4lYlVWVuLYsWMwGAxql0IBYjAYEBMTg8TERLVLiVjd3d1ISkpCbGys2qUMWUiFS1ZWFrKysjgtWUXFxcVITExkG0QIp9OJgYEB9lpUZLFYYLfbQ6rXAoRYuAAn75zb29vR1dWldikR6fSp4RaLRe1yyM98U19TUlLULiVidXV1QafThVzPMeTCpaioCMnJybxzVtHUqVOh0Wg4NTzMeTwe9PX1cdGkigYHBzEwMBByvRYgBMNFkiSUl5ejoaEBZrNZ7XIiUkxMDKZNm4YvvvgCLpdL7XLIT3p6erhoUmUGgwHR0dEhud1OyIULAEyZMgVarRb79u1Tu5SI5ZsafvDgQbVLIT8QQqCnpwepqakhM/U13LjdbvT29iI9PT0kNwkNyXDRarWYNm0a6uvr4XQ61S4nIiUlJaG4uJhTw8OUyWTiokmV+TbrDdU2CMlwAU7eObtcLi6qVBGnhocvg8GAxMTEkJr6Gk68Xm/ILZo8U8iGS0JCAkpKSlBXVwev16t2ORFp7NixGDduHBdVhhmLxYLBwUFOP1ZRX18f3G53SD7I9wnZcAFObqY4MDCAI0eOqF1KxKqsrMTx48c5NTyMGAwGxMbGIiEhQe1SIlZ3dzeSk5MRExOjdikjFtLhMmbMGGRnZ/POWUWFhYVITk5mG4QJh8MBs9nMXouKBgYGMDg4GNK9FiDEwwU4eefc0dGBjo4OtUuJSJIkoaKiAo2NjZwaHgZ8U1+5aFI93d3diIuLC/meY8iHS35+PlJTU3nnrCLf1PC9e/eqXQqNgtvtPrVoMhSnvoYDu90Os9kc8r0WIAzCxXfnfPjwYfT396tdTkSKjo7G9OnTOTU8xPX09ECSJC6aVFF3d3fY9BxDPlwAYPLkyYiJieGds4rKysrgdrtx4MABtUuhEfB6vacWTWo0GrXLiUgulwt9fX3IzMwMi55jWIRLVFQUZsyYgQMHDsDhcKhdTkRKSEjAxIkTsXfvXk4ND0EmkwlutztkF+yFA6PRGFY9x7AIFwCYMWMGvF4v6uvr1S4lYvmmhh8+fFjtUmiYDAYDkpKSQnrqayjzLZrU6/Vh03MMm3CJj49HaWkp75xVlJmZiQkTJnByRYgxm81wOBycfqyi3t5eeDyesHiQ7xM24QKcvHO2WCxobGxUu5SIVVFRgc7OTpw4cULtUmiIDAYDdDod4uPj1S4lYnV3dyMlJQVarVbtUhQTVuGSnp6O3Nxc3jmriFPDQ8vg4CAsFgt7LSrq7++Hw+EIq14LEGbhApxcVNnd3Y329na1S4lIp08NN5lMapdDF+BbNJmcnKx2KRGru7sb8fHxYddzDLtwyc3NhV6v552ziiZPnozY2FhODQ9yLpcLJpMpZM8LCQc2mw0WiyXsei1AGIYLcLL30tzcjL6+PrVLiUi+qeFffvklBgcH1S6HzsG3aDItLU3tUiJWd3c3tFptWPYcwzJcSktLERcXh7q6OrVLiVhlZWWcGh7EfIsm09LSwmbqa6hxOp0wmUxhs2jyTKF5Cs0FaDQalJWVYdeuXZg5c+aprmdCQgL3TQqQuLg4lJaWYt++fSgvL4fJZGIbqMh3bLGvDQDA4/Fw0WQAndkGg4ODkGU5bBZNniksey4AkJOTg+3bt6OkpAQZGRnIz89HRkYGiouLsWLFCj5sDoCioiJs3rwZBQUFbAOVmEwmrFixAsXFxV9rg4qKClRXV8Nms6ldYtg7VxvMnDkT69atw8DAgNol+ocIQzU1NSI+Pl4A+MY/kiQJSZJEfHy8qKmpUbvUsMU2UJ+vDXx/32drC7aBf0VyG4RduNTU1AiNRiNkWT5rQ/r+kWVZaDSasGxUtbEN1Mc2UF+kt4EkhBDK94fUYTKZkJ2dDbvdPqQtYGRZhk6nQ3t7e1hscR0M2AbqYxuoj20QZs9cXn31VdhstiHvLeb1emGz2bBy5Uo/VxY52AbqYxuoj20AhE3PRQiB4uJiNDc3Yzj/S5IkoaCgAE1NTZzBNEpsA/WxDdTHNjgpbMLFaDSOan8ko9EYtlMCA4VtoD62gfrYBieFzbCYxWIZ1evNZrNClUQutoH62AbqYxucFDbh4lsYNlKJiYkKVRK52AbqYxuoj21wUtiEi16vR2Fh4bDHKiVJQmFhIfdXUgDbQH1sA/WxDU4Km3CRJAkPP/zwiF67bNmysHiApja2gfrYBupjG5wUNg/0Ac4tDwZsA/WxDdTHNgijngsApKSkYO3atZAkCbJ8/v81WZYhSRKqq6vDpjGDAdtAfWwD9bENEN57i51tP5/T97Wqra1Vu9SwxTZQH9tAfZHcBmEZLkII0dfXJ1asWCEKCwu/1qCFhYVixYoVwmQyqV1i2GMbqI9toL5IbYOweuZyNkII9Pb2wmw2IzExEWlpaWHzwCxUsA3UxzZQX6S1QdiHCxERBV5YPdAnIqLgwHAhIiLFMVyIiEhxDBciIlIcw4WIiBTHcCEiIsUxXIiISHEMFyIiUhzDhYiIFMdwISIixTFciIhIcQwXIiJSHMOFiIgUx3AhIiLF/R96o8qByf2ilAAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "model.plot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8230d562-2635-4adc-b566-06ac679b166a", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/tutorials/model/0.0_cache_data b/tutorials/model/0.0_cache_data deleted file mode 100644 index d8d7888835e4ac5c9e1d383a812d9714be3e6404..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 840 zcmWIWW@cev;NW1u09*_b3}lpI(4BI|uKjt+had7(h4x zrfW0>NoSG<+u}L9 zo}HfCv^V$v`@Va>Z|=Qwr_k0XmZH>ZD&o>mX_PL*<=`FKC6!gyf)r|g7uU)dr?a-I z%I)MmwKZPNeRVE(o!d3VV&Qtkb*WA$b7E)7vo^e~iA(j>|-Oc_onxAAa02P5psGD^^y zkqV3fMiIqL?uyhfs2PuyDY1F(x#y3QRWS z5lp^>nT#<71oLwqrf_giz!`^=FDb9D(dx8|>O3y6DkvtHiYVzkOv98|o2Ch-+cY*+ z6U-na8xJ!%m=#8H*1F5#UIobv_aRaP4~2vjXNzA8vuz`7@ix_3;0SFF4|6%V-=2a( z-su+!dYQxPh6h5-7}SK;K@ozc@L-pqO)$?Ehuxb`fDiJpfP-TDpe}&kC9qJ5qoIW0 zN<%mY!Icr5lLr?E?y%lgJG_-pu0-i!5kXaiP#%I>Oi-0PRB=$PK+z-wHA<8LY7t7$ z!xD^&!!ZC7gF1rp^1yRYABGCVV5t(73Cj>_Bo7S)rNTj2jzSO!!U}@>1rHB#;8O&F z*2;qLuo9XDk02E`(c8aLoj_ zfrpJ8YzpgcAPAe4sBCzQptgiij}z2Zg4)Kzb`E}NAB?A%<<(`iRUW5wGX!`$lspqW zfp{q0o+P~3CTKx71?uTjgtwE2T^u}ZPmSa`%F8`9Zg@sXG{duqSjfX}iD-*m3(wgG zVCq_Uo={)lVGjp;BfX@x0WVt>OuY_XB)xtKGgY>@CU}{o-af+H&%-Mmyc+2xt(Cld zO-ZET0O|KZiHN;CWJ@7l9wyY+d3b|^H|u@ymOT;mzogpDJMf|izX~iYN7C)6mV&o^ za5S)FEp^v<(NYMq#Cb=@&thnAN+=V6C|TD`X#ahwixp#V@GZpG?^8Z+&x{|D-CRU2QqWK6JiOobiY4{>GxG*@M>Q z!l_U0&P&dHKJNujnt$@a3u6AD$Nb~QxB8P0kMUa#t^NXIx_G^Rx%kn6oq5>z{7e6G z72DVPw=8*4b@UnXDd$jQjAJ3lrkNs~RlRdg;{_B6@{u_GjzlzFlf8hR9RL=O( zIQ#eQ+b5#J;xR{csHEnjHn{vxx5II7{p#_!4wSRFM8D?~smYt)LWj%;@Bsa!TcXS>)hO~YQg;br!Wcvjdp ze71O`&qh`|CtEx<{*gAsRT z6@8nTDKF0y=lu05Q`A3?!Tqg%$Mtms>MJfc>+ir&f3vIw{gnOv`0xcGXS~6Geq%=a zr0>u9|ME>y4rZ@fcVKVN<}YKf=C7{qZEU~i^Jn&d*}kcl_EGSD#0!tzrL?h|dp7@> zdp&>rh_3%%qHw%s6@Bn3nqVnhqTk1AN)x<~(Q*|p-Ns!F8fJEH|0COf=zF*S`0?zc zJwz||KV8|smAdZVKWZu`7!S1k8kN&(aC!K_&s&@S)KqByik7n8?O$=uC8%%z@!+QVo;y4|5(3x@pG^ANRO5aqVJ`BG~M5leKcKT{ZC+9-EI^Y zTy7SdyMnPn)A^m<%*T%78<~FJm$SQyMd99UA2RkDU_MV3b4{~bEmY;{-0kb;iF1c! zvZW8UFY!EZWbm5eCnx%A8im+d+678y8OwNsXcH*@Y zLe97(;mC`7m_}DK%fD_C@4Y-#Y|*Y}tM~3^&b`+p?0c8B&52tGuf4I z&9say+{)ay&~IsYX{=D0{&%4xE{Pe_u}XB7R9dS>&Kc&qH$ix< zaWQ+_jy25nWpmq-H`E9#xt!cfZJ#nR)kg8$!BXL$%kE+BUbv9mocla;XeTE&mOjDE zTA$2JduIf5wI!2H`l^!IxFApb`l$tMefHMCKJnzvlUFXFwFQ^x`IAUZK=~sbe^?9d z=w1RDH2Cg6{mi2K@zEzP(JiD=CS*a2PhT>;?mCa7%Co|O-#e6eT-Jij?imc5QE3C6 zWj16RjAnDDS+6shO<6|TU@+@w+GwK9StbLmGwbx(*vq8&b>4L3AufeB6?QiC=0twn zp}}7$uFm%;;X3)L1%6S|dAZf--A3opNVrhGK%uf+67$se}O1>>ts>Y#HDdJ`Z z9j=l;(Bs3a&TEfM6=6*N9IcdgACX(pyS7Iri^whc_$%|j6rGk!)*^C5 zzPVAVE_SLSQbfL`QL5&mCu7N0M21ASwY^gnks#4+ecGvt$Pf9(KZ4Jn}UJVLu@$nO-wj=vY3aN%me+56E ki6a%JVdN9(@M~x4>`3xITJX?JABT^Wsp04cwi7awG|U{ zW^!9|L9nn8{y&?gU@s^~M9)i}H#}iUoS%=aF#CVlbY}CDmnF@Q9Ui$Nc~RVQ%gCe! 
zQ7ab3MMowth+n=eDKf>)GEs1L4(lLaf4O|-yoC!A+x)ADrC;2#dCAlN^-s%0v#>Vu zIm*PgVQuB({Dc*W=2P@tk`voan=DLeXgk?_Q)2tc=9AlOO6;(C{`_TeaSP@rFIpDA z+;VDC^n#=X(Y}l3CtD_33<*yBBstMCII-h!&EyUiNfm=*^W^LOvupm>x^@(X%5{xe zmAqitKNs`!8T>z1_J62<+@j>as`|ISM?t^NYE0UFV5#8ghZ;DuIhomeT2A>IBsF=yu|^t{J7j+wCrV<%?7>|^$< zwwVNl*=;fT%@-8g(HS(GUQ4>kDU$V*rBvqH3-y_oL}C7RZ+%`hq&r<8mnWUjC_V(| z*z-^kZV$(=^+Ma&G|qXn1hWqYvbntpl7Gn{?FEyl{X0t(W}cJ{~^|i$=AOBpHu|vdh+4tXaB2^80Qe z>n-Y~+G&?5cid_!eKZ|wzYQeq-Pr(Jz0qgWaP-}NpK9v{VCDEIES&HIS$82Rr{Pc0 zb7mS@N8S@JInT$2YYkLn=)pPGdqwk_k$muQC)SpSP|c%Ca{0y*<2u!VZ|-}r7;DDD zm%~)rl2%bUVlJ9`&!Mo|E7X45aMayzfW{T0Fl=}e9WU_2%4T-V zQ-MC4&bicqbY%ln>fL9hW1sfJT(4l%{9Fh^$t|#Owc_+UJIVasRG0Zrt((uFNHL~zrEKPY?C40Z@GXNw;s$o)2rnr3z6 z`gw0b@uU|=J124zE3w+YhU$uaxWC4S%O>Q(Iom0yZL){x3p%P@84lIni}3oE;h4O% zicD|%ac$6Fr0%B$P3}O}bw4fY`n;4({+mT*mNWWxeoh(R+hgjgN)Vpe8+Og<#A!o1 zVeppO=revGTLw--tE*S#XA#M>Ud_Ut0~g(kG^p@jju}fPbMY$|u3giKa`Uc}uCgzf z^rPt9;2|7+Jr;vMwB>yN)tGVr6nX8kMBP}m!Ps+vsGa$PB%}mgZ12?pJC}Q+b67H2 z+#Jc;BV$Q7I>~VJ-ee9v=FMTZ-axUHA88k_k}lQE;#lD>6}umX{uZumT;u{FCT9-) z(2pHE%|mt5Z{X3VGjDn6%B@?9K~XXZ%tyqtTdF&zw+rX|$8H z2u`!b^5z*_GQo|Duk~cVt}fV^@RgLaE(B8J15owu3|W43bb6yAO!+9z4C!n&ihuUBt21el>k#it}lE6F8&DQZ1S#ROX~&RJxR>aFY0b=FS0 zSG17p{J$pMj+4|NE`r8cju1BGI+#6Jgo@&Il)n2oJu57O)2}SJl=i>}w;5cQ{Tv2e z8Oz0To7J9J0$bjTY#ly`E$;b|@ec=5-WY%Z3m3C+><`1i?cFhb^whL@XRU0&JGF!7TSh+~_3%YkIq5owcXg|*mBea_Qoe)C*d)ph#}j4qZ~^!5)OaooxfoW>&&bmZb%{2 zyUoIbBSv!0$K_b6*a=<ePWSl!MVQx#d{dpDCz#Wm8UG<&QzEQL@( zgK@LUL9;534e%wERokN4{<@*;<6wUH`#f$s2LF zM^2yl7v)SoB?_M1!8m7+WVx&xs=Xf>;%aK4z;2H`4r@f~-LIsyCM(RpXUVqP!r3sc zE0@lI0%^{Mnk#&-P|*pD?l?)PpNtSfKr^Y*JKplJ?zLhSj@9lJ?#M z($(dO&$r4p=~TGnpB)G1X3XHcai>AOc7S9uosf2YX^sieyQSJITPdfH7IG89!C~!R zp!4kuD!ZMqWBEun?iN_>vqhD?ArN=059PO;>^b4{RyxDbyW9Mfpz_+V-8PY;arPkc(w_AeXc_EPG)UR*po z6dPw=6Z3D+!i=qB(dG|a*m&il)5oWjm-+)WhOOY*ZA&ov z&2)CIJq%61Zid#$e~{OqnH&nUFs(KXeQj=l)tFSaOhUF$pMs`7m&tH*JE%&mSW&c& zxBO<#E~mWt=C}6jerE#}Zj9%$Ku0$J_y%HWI|vUt8wA}OxMUWINk8>OVXDeddg4C4 zoHZQv3#Ws2(jpAZ{78j)k0E*X6jZPEhI@9&?0I7pd2Ss>4u>y8jzuo{U7yI+KaN8E zQib^J@*a8~9L3rGW4PRV2`XG2MdhT+lsqbvECL=a(!R2EM|vmTyUHa3Xk?7k-73+=dKQZ+b81z@&(DYm@t zijCe%&<1UlTw>N@{J=T5Yg8e4J#|F0QyyF%I)txVx^qC~By0?JBt6WfeL@gc)XMLv zsI#Q_?UYy;Wr>CBoxy9!I1Ef`f;hQt3O`puaY9$jb@C?XnlSSEy)&BDrig+n{$lg= zBn-1w!?7RQ@r7q9)J0wvH5u#4I_W5#TkFcg{vW)Z2ZVuF>o9IDy+?v$Tj}PP)@=At zMuyl6VEU@Pqzc~#rY}4}+sB=i83G%BA1OBOU(FSXF>Kst5877&RY%tQuND z)$6~OO6GUPriw@4c+4DyxNOn%Wevpqk;JJ7Y`}V~B+WiQOdfwaXms90+Ocay!H>!K z^)AU2vWPWvqOjO^zi84&N@VGSuHILvC@>ZW-CM@$H{U}`Q!YguY0G)%C!sL-wD*pW zOR%A&ADc#;m&~pAfO+*mbb9?g*_U2`^gs2WaryyTL%lg^qJ~q3&BEv(|DxpOBiOpy z55v)pbw%lhwOvG3Ti$`X#@D1g5`j5)E`jdW-zw`*eirM`eNV-I7eLtauPJ2px1e5B zYe+ZVC9{t4C@9y^@t;>=!?HjsRE4C-PP=RY$7-IESI!v2eG~jOZxELWbq-6G>?0sUxy%WU**NMbxjZ&9mTrd zH590vifN-dve5OC3P!}CS7>{z;gOhm)g6s%4oCrW`(g6)C-6Yk4ukchv9>UbGK*a? 
zIa!0AsnfV}&LE7qrAF%xCMi&D&gbeP*f}DdG{$5+w!#}NdJX~48FAn~u8REDbZ4^~ z0e$s zQc-LeR4sSl2qUszr;!|zznb&kM|0tww{%aon`N3^RQ<#SLYtOy$h9BgfmtZl z1_=<+SLEEM@5%MaPRdA5Ve5;nAZ(VT*qJ*aZw+8HCvketZ>0RQ8)>(80QL4l$x3^k z3Kadh_J=sgeSI7B{kMw2PZwgqt|*LNB68|y-JxRPA9U&FC{FFT6xC^h=zVXX{?EUl`q)*mA;p?a zFRy^-d>bD0Y&s`@Br3dQhNZnbzynTwVUMmaL``Toh(*)^lrm$S*y9MOEm;c zjbV$K%Rx8eo|Hbb59WQB4Y~9aWCeWz!eIwWdOb|`2ewOjz3NE_KTD=+l5VOMoV{=w z*M=-a-Hb9-`8U(K`u%(e@3t4JtU9vs@mFNH-jkIVzM$I37}%tm%c*0&1m`)IA#?mb zYTqUuvj(nV=UI>FyUuxF18=-(oGB8skKpL>u(% z>_*0Wt~`6q5cK>d5rT|qZ1hW~{+B)3Ypx&WKKTYTA2X!78&#zHsV`Rf>F7DO#hTZ{ zIr~EhHZ6P)rB#_wH$a76ce}CB?@L;I+L4_P48TE4JW;=;C%QcB!5VK3SnCU=*rX&# z@34+-n!BQJ^;%YZc@#5Pe=9l3Hq-tlfN@o}L3?W+<@w8Vc#)aZ+}j@uQipMK zH!WINFM-9!Z8^7K3zc;4fQ51P5c;z-n)Z`ab0LPc-?SB-54?p^-$=CoxR&j^WP<0; z({S)BJM?r7fwbBf%vwAaon>rkp<2wPr7Jl{-G#H1yFfkff$Bn1UyLi%k^ZSAsMDSq zKHOi6sTKF3@L33zbQr(|Nk&Qy>%fiYBQZAn0XS?r3QkAoaKp1esxcdlS@vD{dRZvm zel4K-mqcz^{{@wm{0T`nBC%!9QnuG8LGnx&*8Ug{(KbKP^E@y18#IiI_XJ_uY$d0C z<$&t5rzxdi20#1x9>kWXWAhKAW!x+nYSkU6_CX}7jm?H$R(({VaWQ|s5<1wdhIb{r6im}0F;IFnO3+JBfY-56= ziSF!tc|IL%=*ye36kKz_6-xtOP(sQTGW2ht^n)A7v}lW{%kLxAW-X`c>m{IbU#|)| zQwh3Pv4+?q`JSnSn<QS-A3B*3v^mJhIiF?qH^EQ(&O_H{J|`qvwEF`2MsYee7zOtPfo^y zgG9pPD9L)cFB*I`n3ec7spnKm9#5?>;~ApgkwkPFN|HelS*SFF{jT7srJxHN;$5@+DS>QZ!<+KtGx%g-%Uj8ru$T~{WXMIx?oLC z65rmtkoCQW(7x5(S(WoAcy#EE=Z1AbyR=ZWpKihSPhBWR(Fp?<_uuO)#_qQSOusA7b(;O4 zj_3%=X<^X%U^A2)h{CeHS>W(~06!Qk+pXu(TswIcxm71~QvZ%vyv-MP6nS#frgRoE zQ#ql&Omb^gpyhyYEE@JV={ssLVE+`(|6IoEQ5tdaykxHQnTh&-3#lb=3MGdu;j-5i zpq-j7J-Fu18KK*tVY@vSiYH-QqJ zv~B{;vurAgzY0aueK{a19*v#mlK-P*T-z^{g_Mbss(m@F|8qHRIc(3V?Ypy~L4(7; zT#Pq=HDmklu%r@OXJ|)_swts=GN8T_+p{^IuPbI(-!=JWfcta}HC~ zodIm28%u$6vZ&>5GWq03VV&&v**{+cAzeD4Q9W8puN;dxThCDGljWqhY>Tdw{*>|K zJF1=(0GXAG$o)zQZF%d@Zpt>SbGl27E8dCZ5|5_MxzN(b35DoahAQ8YXuoh28lQfI zeK|u=(fY0QvL%Kqd>ko%Tpt$Nri+)1T{vlj16n<4hSI2gRI%Q){)a1*Ku)TAB?Ma zV)ysmIC|+vuw9#s!n!{Uu|q8&_QeFM*&ffOANo=JLO*%@ui=129#|aV&OV)b^2;YG z&iy<9{03?`jXGfVf%({fgcA>X8;?%L#HWY(m71E zmK4_QIe174*RAi%=gQY$>1Ua6VFvQ0e$)8jUr`t}VkW0vUCZ@XV^MSdo|JkGLAd`W$w8ze=gV1wC02n)86YbaiOqm<@k+W5AR{4HUbw^7{ zG4Qsic|V7pPmIJJbCc1y;Gw8nJX6f{PXTpyi0FL(GqgepjZyi z?sVk#R~K;g)LE!L9w!xa3&ik!vd=MU4MdmaP~-QLsrAqZEW9z7`!`I$k}to5+>`!P zee9K}T-X`5ybEO8?D1$?Fr92)MxuN1uVC-)gT+h6V@BCKs5-m?t)pVu*jGhu@h*v&{=@`@`J18h*Fl{6_dGP&Re%udh_>Z3@U+_; ztmvJ91#N$)lzBt3Xv;?udi4_(>f>VhjxMGsl3CX8}}bK zhxPr+#nO95^8d<(4RYTT8i#r(xc8(OZ9MAjD2d@XkGI3R-(Gg%9b zZd1Us?HFh&I|8A9_@UU`F6F}QDB z54*nGMz*VFv%TMau%6Kut=IMEdsBSSbg~%EZ5DXb>Ty`wz9+U{nvRWk;uy01WsLfY zggv2H-YJRuUzm$^kzbS2E(46886@{UlR3dTg+dg8TsvqYt#6o#MjI64H-&Si&k)L( zkiyN^M__8sDKL&5MCuh2As}%Xmp^RJ$&N>%;BXmaYA1th;S5xL=!nnrRak8P3;0+n zv3{lv3fG4jHU%s~Ppdh+N$JYL&k@7hWRsOo3%Rcc%zGjCtFaZ}bnGm3{;Lfp2hPGc zy%K$UoB`9tIMVyRqWW#w(74PXRb92jXExUOm{zgj^-OL$ypxLFs1Ra5;kOgQS@)on zYV{r2{)CLhJ9j1XF`lUIQ7qOB*I>|IC#H3sG27J|OP@Z64?(lhP;vvR{XR-nzyC9S z&nbOO7(V;Hj{M;`IcLn}j9$mU^u7c5Ug?US;rpP-y#zKDuf+tjNUnj?KgUn zuwt30$$Leqa(gJ+or1UK0&D*GmV|y~-l;e2S+{!w#f>hZ*xh%@xo=xmHwTIu-2+ir zo~W|?W+^AD2B7bP3d;Op9fxgjWNn3(>N2OHL!TOOH^gwr-<9BheK`2yH&C|YPYSgi ziw!5t*m9^H-%L&5Cw}hS_|sriPdE$um50RG8-vOA$pW@1nu)nFCnA)^F zYjEGNF=&-B4TCpXKEroFB zKf$`DKx`-o15>nC=ARzWK1Ub4;M0?B_jhB5eX~*dIt}W!AQx6B;I?-RDgus!)8bFC z|NB5LwI2sUdB#QkkRrRfsv$6nr~g&FbpU4IvxNG3d8o4DK)q8@l*W-H}jk`Q{*1lrG29ojY-rDjYKh zkEGlUPOPx)M8@A!Q8=^D+c2e?PN%83)l=4>Y<1*a=RbwAj|U++dIcX#PDK5$edSo; zE7*kXv3S-n$Z+X{70&6b~pQ$R1KAczI47DE1DB-)o5c0?sFF0GV<3uYAf1|+AuESB7 z9}a;(t1)5Ac+$-Y1I4USqW+kxsDITey?xal3-e}kMVonS86U@uolC%?!_Tl~|1=yv zZ5EgODEF!F_CogkZd|-%A*lv@L-lj4QKz;w1ZEBA^{Z;+ziF7<&fPog%z{sqQ!)cpzVGQJU=|AtmMBy@$#-1b885eD8B;pvy<_$ 
zlLvZ?8ppMM^VodXFbvo`37;v|81_d#*>7U9Hgsj3Lof^d_fSlV6%T(oMdlIiQ<5CN zdv^CZ)o$p)h7E%-LwN@*EPVb1W zKEpU?TnQrJ6BYz%ep7V$v^^c^c7!bUwFL z)nf)xxb1Q__TMk2O$y>?EAnMKX@qAVR$%qDNcOnhmyNt**=Do55A(u-Ya;v%&8gIR2xe1CK<W&31=oDVHx?7;%K(O>P@{!|eqcVENyGxXF@G@BKVZTR3J*=N?jy6E{) z)+@Zb0_v_G4BBy?qT(z;!N9$c^LQXB#Aa&tm3@R6A0g9X6PTCuW$aM zfHr0v{q`49oBD&MzyTWrPD%GZjzSF?ME{3{q|Qqdb7N+ZW~?`)ZE?V+FKt*m{S;|Z z7J$#>Uf6UmlWISm0&z9H(WcIptrum8otO3InusZ!8)-u+t&XT|bdqd-NW-jg2W4m{ zYsFeqP-v`B3E2v7=gVfCK=#m7G!k-U!oYV%iJYruX|?sW$1TM_JyoD6enZt zjzhuk)q2fF#Wm}YCLV&YR^J+|3X0jiFIV}vz2szcA=7vN?u}fTfMji)z@}I(cErw zoi2;YJKsv`X}_yiFvS?0SHUKL7%58JysJAfw#KmN{JaEY0MVX}{6#r2r zdCK}ZO^89P%Up}l(~)!hJIQ=YEht|t2G1X54mYbk#LP*?+WCD@yML#YQ@%+Iu1>+0 z@fvc_mC+!@N>1K!hIF3}V|Pt3hNU!<^Zcjep^xU`vZdI#y*-EbT!SeItI>F-qf|ZY zB~?7L!#ulR$UpBS33=PRbO#4ZIWZSR#R*F&*tHW}{j{h~`yLuQw1wMRz=pTZ+;S?O zo$Fsx*14ZS;kJTJS8Tyw8iATQGJkK~k>h(MV)eWz^8d4nGG5NdV*7!RbNf>;P0l3y z{SjyiU&FO=9l>ej92R!0RvD+fCOsx&QP?4h{^mKD+`pARge}D#=SQ)}C4o()BgKG^ zZmiGiC_d5I;})GC56bT-o|qfj5YOA2eZfb|y_qLouyIc}c;^*j2bu=Hz#F5Q=oPn(T4 z0jXH*=1(~Tf0T6pa7jfhJ^WJ+&DbpWQMJK5zb~r49?hK#ebCdb1M9x*AO(->gu0a@ zrG_0l;KQ+TvR1SJLhikX+aKi^{Ky}~l;cY{v&R9j@7Nyd#*~1*oy;4I^y8q0R80Ah z$c3ejl+f=oWh!jBmkzLSq62L@Z;8*{<=C;SH#>ImX6rYJq<6C6gofLaaJ>q0HXFey zJ(3*)(|F6;u9!3bSE<=F9rNrz!I-U{nEFjN*bSP-v9sbiIbtfS{(4GwYt^U;b723b zFRAswL#q8|GT6`eCg1p#7<=DI*72ocPQ`ojTv-bx9_8e8^EwIow_|UnTA;)D9gtf( z0ECcGLxX+{#&=u7{`TX+XSWm9JKO$Nih@HvmyPPA2@d0@KPB5GQZHr2q@1^R_ zKT4Xvwn*nP{8*7_1F_aiS=~QW3?Dp#V{4z&;sYscv96h7+Bk8;ku}hm8N|D;HIe?@ z1JK(4DfXW>fWzDFp`c@3Sgos9xlaEJlqb(XL0A)5f0hrHJKAEte;?K_FO(vp{rH4? zDCWPnWecyZ)VMVQHGa3n#%*7dUy=t4cUMsEmbMt&GnT{V_2S$g{9*0FG3??qhRgPL z<**DnegLN+uGcb-nw^BtN3_G)?zu3sbb9S?LgsM`um z&pb)Bg^Ogacp<00-9b5~ZzN+uTXgu$m;FB-2DWF;k|898&31Li)NuzW)=eI}<-dpl z0pl@r*g{O7n81NwzJ@)EMq{a&1zU&dNmsv7rS@}yUD5;&iqdd+Wu)lb<8hA)P((rK?)+S~;#`}V=buY0o1fLNK&TO;$_ z9#Yv4H=!YY6xDB$eWS}iK;z!^q|ZAl-iscNCoH<5)fp9DXqe7c0fn%`VL6}M3!G{B z4c&X_#hR+0$k#OnliRIe?G=EG?q-~>d?DA@N4EbZP<(3`7o5ljSO3r9SzZ>J#+r)Gex(W4A@mN}()C zj?E6*NQ>)zId5z;1P(t9DQhR9@ZcA3Q~m&Pm;YbnBgbft`#Nw4oFRo*21RvDIcKjwG$x#s>~}>%>eGQ>2<(q>e}}Q#D_hoiE6}|u zjH9HU3=3AW;>cjp=y?s)NzGF3^fI5`LjqWr|A}h-^wlh=N-rc_xJoU1X0Y{<7U{vo z&MZX!DOKc6gJR1(GQ2DywQIH{l>cJLdlQM_OWUya%_wd#nt`kLcG9dU5vv#dBy$Fb zM9)t)LHnDXxGbm)%64z3XAWviEuD=0!#%j-ovl1(rlDzGTXAt;ExVpCr<;*h9Q&25 z3AOlKQaFFYsi$7hrAKqoBO(Apa($mQa zLa%i}KWjOjZQVo8Wmsa^G<6kLZd<}RJ<3I=I5~d3qEWh+(hjSRog!oY6v*3D0F~l8 z6a*JpzqLS$9XK7W?2<8|@Ec0#u^60XZbiHKgJ@^zg=UolS<@vMimQTIw_!LcT8@yx zTh=Z9S^zb@W^zru%$K^n6ho^gqSql=zftlXH7*(eQIRsQ=C1>=j6>s9dm5fH3B$ds zA*|^nRWCU!+HaEI+fIV21v%h(K7-cxQ}CdHmKGy7^-@FN~)K0QTVmU5VcIr5jp)>e>h#r_wC4GUERs( zd_%16`-jwcA%bk(Y&d4OB_5n9Yf5WBp`5TE$@f_Zc78HR)~7FGt56>{VPDoPtA?9q zqtND{GwTC~Ng)ve<_3L7g+Dza|4}p0IDVUzBF!md%xfI`xb41^l% z(-3!d6CAjV!K(yRp8i7$YjPpanIk~h=&Gvv*p)rr z&BB}xQJA-XJeE#~0aeu@@_a*(yQ~2oe(=Or#brv^-Ib3oi@^L}Qc$t4R?7K133Ll( zJ8&^b)^(gDHEld)o>?As-B zbgl|jyZ6zc{vViFSYB$4!Ro9sVC8(`72q-dP&8tUD0NB4BPkbPx@PSY`l}D((I1|(}KBTfgJx* zOwdaOk6qEQpf6gi=qAU_lQ>t_Mdt5Y#)2WkyZX#;kUP>6gFKVb6~3eNrmvyYatZo- zrf|mKG5Gw^Y|e{M=Gv9Fpy9@B&@SyP8edcx!qm?}9iO4{*ZELH<1#c?IkEBM21)NT zO^i#Ibz>*wdEr2%7+x--)tqkFve}hf!~RAan8aY0KOZXzu)yF8n%?^TTAV>djIrTG=0C zf1LnPGXG%I=Nr^5{pEP@50E@A2zT_Jh3)@d%Y`?Z$=2>OGES@~bEyO#Bo5}lq(^YQ z6|g$JKWBQILE*R_6#FiO7Yl7rU;du#PyGbC9xK53<{Ie^#&PP670~+R2MDV$lF-}N zkhN$r+jqC&3Wteg*macxtphOntEK_ERC0 zg*}3ncTRB5zC+@LmfLT$AhpYUkUEjwmF*>_BFr^c|U zjm&dTOhQu4VRQPLY9?F(`%c|Z6W0RGzu4jFYi&?BrT{8_({fnZTME2eN1CqPQ9pIB zSi8OhctrMoo>YP;q836pVLdyTTN-SUL)dX1H^1 
z;YvtoUJAOK&SFlV1kMcp0H!USsCJt->1<`Zzq}f9xApYkF~SXsj}Tj&bxmeln!0Uz5j4IuhB;Gx1WrHYms;DV0((4-5p}T z`vl*H#^8gqp(xy&dolX?48Hw&3_B&sns0}9pu6nH2S3PhsWZM@9e<3X7LDhOtWxmU z;(;gpRG1Yu4ploveqQZ_ii$?ExcV(r^oWG$t(CN+DuPXw2PCiCept3pi}t=N$>?%Z zvXXVDx~9FVlGf$uvU@(;?X|_w1IyWYshl4n_YdvpLa{;}gAJF@%G}O6O#gHfh1C5- z%35F6m-QA4hQ1)z>qe-ObAlS%OhEUYd#P-LL-@OkeLsBpNFG0(g3x%kUON4HEDrZ}XYEo) zDYGC60wcR|A~)eE?n~FCfP+=VnX9I@>v;&A@R4#Q#TBS!W=18w-8P49mU+8i4f4eE4x(& zp*C_Q*IKP$ubC^bc6K_q{OAo%<_}3x+nw~E>aa@DsLl&t=l zeExQj{lD(G>*qSEz9#1Z|7|ZO{2mYSlY3xXtS`FMx8s0l0nM}JcHh5K*3`+*P;EG` zkM#C%htY$jh z>VlO89XR=35XR3+!iJqsK2nl#E(81+8~ozz^;u z$U2|}Vud_^Yb>Xsa>se7{mB~?GCysVi0FCbcgS@<0XE*_xTKe!T5N}qt{{q)Uv=i> z+kb*>`C&3U-4DY|0t@*+f@jRnRP@aza($YJevb#Tw&ol0#9tn$NvW2Uh4zqeQb*aD z^H>}DyOeQjC%D~Sz_p(q1dH>2cu>ykxiokY7Jp*{#%(gE89IZaLPOazX(8%DlBNp0bnbbJ3ir8#A;FGwuT;U}6{}f!^fcATc@`CGLb%l113eop zNLW9KPyaFuE9^T^asL6(aAF<5e76esZJvpbW&KZm-U_a$h{KYfrgPSenJkyGHze1L40%bG{mGpd~H&60njK;fIi zSSjaC)GJ^8hmRY!d?uPF?<1R^R%52~SkSdZp#IAfP!KeYiH$@qvXgZf9W0_IJ~*!fLHK9?-!s=g5k8SK*HacGEtj+I{e0T}D?@&i zYe<|}!{`PNc3jFti|K8~Tb@hc% zc^!m*-Ef5RznA*=hW_Ng+|FA4PpSWWJ^#Jfzt`OUzZ47pk7EDxJJC#T-`@2<&vtA( z?EjW)_n)8spQF=13pdDHi~2tfa@)55XSoN#!RKG!|M^)nx!petBM9x}zkkmE*SY@( D_|{h_ diff --git a/tutorials/model/0.1_config.yml b/tutorials/model/0.1_config.yml deleted file mode 100644 index 1cab8a4e..00000000 --- a/tutorials/model/0.1_config.yml +++ /dev/null @@ -1,51 +0,0 @@ -affine_trainable: false -auto_save: true -base_fun_name: silu -ckpt_path: ./model -device: cpu -grid: 3 -grid_eps: 1.0 -grid_range: -- -1 -- 1 -k: 3 -mult_arity: 2 -round: 0 -sb_trainable: true -sp_trainable: true -state_id: 1 -symbolic.funs_name.0: -- - '0' - - '0' - - '0' - - '0' -- - '0' - - '0' - - '0' - - '0' -- - '0' - - '0' - - '0' - - '0' -- - '0' - - '0' - - '0' - - '0' -- - '0' - - '0' - - '0' - - '0' -symbolic.funs_name.1: -- - '0' - - '0' - - '0' - - '0' - - '0' -symbolic_enabled: false -width: -- - 4 - - 0 -- - 5 - - 0 -- - 1 - - 0 diff --git a/tutorials/model/0.1_state b/tutorials/model/0.1_state deleted file mode 100644 index 3bc98435ce7c3533f5e3ffa0f576cc3f2fb46b66..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 107384 zcmbrl349dA);@gL0|W>VAgm61*pp0kO|zz1jjS;Q2>Sp50tCoFz_1q~gb)IRu!VgH zTVxGPcTKamh!PPYA}T84$R;RAM8v3w--Geq|LeW?{eR#4e);{LoT}>TQ&p#`&N)?c zI7sBe6MbSH0(e8iIU^SO^=TsKQ3|7^l39I444r+ ze#ZFN9+So;R&Y^^C|8N#n2KOMZF=nZ(PJl#nK?SdRdQV7^cmyg+K!%;I4OQ+h0!y{ zkDWCsK6Z5C_-Qkz&lo*Btb(glaBxgHyfzax$4ngWDqZ|ag^r_54U`939d)bYOi7x*rSEU&LfAnSMxH0kL z(V5C|Xe+LCTrsauSCzOwc?A5$qv}f@)rxskPjm%FxoX5ze%Y~EWB==rYW|H&n5$MX zm!QA6)PBjOPBE9diLQE4uKF=m{tK6Xbf&@I*w|gK6tl7Y#irp)HjRqeG){CiiE=gl z7vYAwn*EIhb2SHx5{a(hmn`shi2yB)a-Wx%&MJr*YHAPjL1B8>jZJ0deK}+4?8CB8yp-=~H%@ z>-BzV)%)Eq)u+TVS5z^>fr+l@DA%BvK+O98>00r;j~z2}yle13*_K3=&=A)U^e`~d z74sL{RM*fxWnT7hSTV!liLMb*u8}d-{sY6A39eCpXBX-kUChq;Pj+L9*^Mn`H!jf? 
z8|51RF9V)BX66*vguk;2b4@H}757h8lZshQE@m|)(G?%%n))|Zp~bT>?eDDYuIXSE zmgq`&$*RoDX>b)!!;E59GZS5jQLb74!s^dynEiKF%=HH5Aq`tJshC%Zm-8?OlknF( z%q`|PFVQtW%C+Ec^AOthzs$qJzw=dGi_k?V74a9_RM+A@C0@=$axudtiLR6=*V4bu z!#@~e9#a3#j=Po>vrGFYyXD2~(u>)xNOWaHxmNzmi2t02RexvYaIG$8mHAIrYl>NA z6|-8K=vo)$S|3yWrJ|WLb?o%`N#p+75`XgB@OOTyYa{q!#bp=sE1l}vgsp**ZZ77x zCDFAt%C#+~#=r9$GhxD{Y2#hn|IV?!YX>++Cc4~zaqLrinQLdCfR|jCxpIoR?n-p+ zj&kk!_g;p!{j-;Q|Hd;c#I>(@)VVKtmgrL^)%9la>f2w;??9sKV3h07zxOh5m{?Sv%Cyse(8eQH$&5Icv zgHm!GU*J0Nr{S7Ce#T4~MAykaC1(Cv@Ykkyom${J9rtJHOFsWp1+Fs-TxW~R|GuMN zDu~j1p(ZuK4iG-1dXcJoC#rbMaRC>1|`Ko<5Kk+vR277k|`$QRGXx z{aYzS50(;FaA6(d~=7i@M;6vhG*Oi4{tf8clg5ij^QH~g@lJK`sL65 zkMD5elmA)&!~WO(|JS+y|C@g)|MZ3q+yAWp_WyPN$Bv(L=Eo-k&ZG!_rbgxZXZCI> zbtWO|+UcyRJ5NupI`Z`B)!(1WeZ1l%e?G3ux98%zY+pXLOH|~HE@f-I(dERK^SU^q zM3*&2a+mLdmUihme_5BgpR%)qoa}7K1N+&EGwf&Y{Tg=mttDY+xB7>jePey-*{BAg zXWQ%xIs4PGx6eKaI(zo$W_Ru<%C5`@?2->f7;DrzY>c=(O;Ft@BH{bv<)uTt3$9 zOZ@kHzC3mP=elLRx^@2Ft=oTSfWt@1om!mlI+ay@|0%yIH&4x3R{r$$1+7lgt6ULZ z7z*{6#YMtF?h9?|pdND73<*agOOEMkWiFFRG(+-C%D#u~@?RHo{xgGS{ zYKLuelEe4zKE<|uu_E3HP((&ag+5rVi;MZXdq54Na86qzpn13tl$0;KOkZK)lpBO~j) zB7N2?GGu=vnV>h9?mMld&)rU1PcUOLk{$Piy=neavz>6_C- zih>?8;An42k9*6^n|)HEI=@{l-HuEd zd2x;OwOcE#5gR21WXrtnTV=@kt+JqChqNxcr8{<~Ec|qrbZfh1_`NAZ zPJxy>D6RU3q;2vc>GnG;ZDoMXhoyG)up~Mn>DN4|#UGW{!(&n_rAyl)UHTsBlExcS zn`cOhm(q7xO4}^2beA-xR@Ic$(Uc;_l(sph^sO?bv&wNvHIGYYt>e#yLZZ*&eBN_DIpgBk9yJN%f9N z=ai$8>gP#kdG!6m15(`FFSTX+CGF3Z&g@*Nwa=B-fxW1+N79eGr0t7cQahR>ZB@5R z=bCL&4BaZlm27F71Pom(ZSSv=B4DL-dNL%9OqW*Obg8XaE-n8wsl7;*+Nh;cypn>x zCQIKpLejuDB<)R<+B%@YOsVx6C$0Bhm*PaEw1)MO^qfhpeq%|yZIE{ZNtNnIt8<_f zL8T?#{KXLa9vPy^T|+GY!Vrx2}h8vs2J7c3PFrNbY9zW)Y*Cr4$hxpjZ`C6mffrBC^sIZ3Xb-4n@1-R_OKtMKpe2 z5$}JX(1%wPQRfTAq8kdyuM|=G5%%Om#aE8FJI#+f^ZmGpuE52)3cPS+0CyHv@wbmIm7-Fg1+-TC$IJ$OXlp1kn$p4|Mc7gyhm;K6EN zu6ORoubuD5-P8K>n8lIY%#Q@0C>~gGAfIt?Ab*w_%@=+e&94p`%(Ioje5W~tj~_jh zXWSji%bCOY#_}WhwwWV%r=}zMyC03@aRsBeTsfNiJIC^GA~%$doxXES-gj6@y@-E76r<_UTdclMscwVLxejh)Z)w#?@RC+G9b0Sma* zU?C5%1Q(qa^Xy}bdEw2)Jfvwd7g@>N&$fgY)Lg=~MoYM@{}LWD0QtlvJkycF#po0s zd7&5qk5YI-m8HC(YASaRPUU&~QhDYcq+^$HD`6RT&tJx!JJY!Ba2mG)mvims<=k06 zom2;5 z@1=3yTWMT#r*RRJ#-0AlxbJ={*S-RFrE*_pDtGou<)lJBPnO~x;P6szdvz)H2~Y#+ zgDKpd4Y|LP%&iW|-1+@tE(#WNZ7SyJ=0YxJEaa5EfK$u)TwI#Vt&?*xwl}!w_y)HQ z&E{hAEKZ-!OKMm%Z-yp6njOJFOf!x~tI=7O6L|{Z8ZhafUt*l;L+tiDT7QHyl z?8(KwZkz^oBWGtTVSh=*+d29l4dJVoh*vMR4R5ZUxyPt1zyuZ^!BP zw%nT3mTOhpa8bWC__g9%Y)fvv-js_Uns9o*A*Z=EPA6XB;+F=THrM6W??Idbt3xKx zNvo@3{;F{8V}EY#E626A?<;iQr_h2g6^i;)p=OsA z@!KVZj=rae@%f5adr}dlPAKBJp@>zwLal($auu;OMAj6>W-G<0` z6SiuqzIvb)_w+z7*?# zF%6~dLSy(h&0wpWOPkhQYP&#Xu=HiLkzz-vv?_*4S^)I7OY07k?)MZaIyj^|u%om( zg-d6UCdGrU@aK9;_vK#F=Z%ogF|SEqx_ zKN%-O%1)Far^Qo&z80~-;mmZBw0XnrL|_R%w%(A!1lS)DHqB-W1-9} zku0Bz6sfmcDl;pm%GQHZWt=Bfo}ao*n&+2Ep{pP^aj$0O%&4HhnBi&YxjQe`GjHtI)Ms`0SBkvrLA%hOefV~H0LF6H+tv@6amL8Uo z3lGbHormFTg5EqVLw-Lj6LuYuk>&H?Pvyyky?N4?lP3#4%aeuo@+8$cDl@AdlbKVG z$%K!9@w&|XL6_o)Av3=uSo#a8u?T zgs%y|F(d>2qu&YXOFAK~ttX_|b3#(1lhTSkDcx}=r8eWFbk05{eFsiSO*<`pM}Ui` zrFQnT6phbFQTL4076K2>NZX4u(sv8#oU`!lfD#tQVM)4vR@&|$Ul-}0Kt~||2I{o{ zN}ZNuL!0lT9|wW9@QM4QpHJYojsyln7NG~ERd2r(r*oue2OoX?CitKmrRcX_3b|Hl zXI4w?@k*)j45`grfpYkuZ(^Q5OqHVlA}L1Am15!?Nlj+pj4(xt2{=QXjFr@Wl%&}s zB-I@zsd0o9?`x7W!=<>a3 z>^Eo$e9b+X@GEBxT&1_!4@SXa7viNt-C4Qx(xgK0`_+QDlXn$#mT*%YZumY@pwI_ zlr7wPAFyrZ;?NHGhC5)#c5%^m54RlqV9)n)YgjH9QMvGC_H%3He%P=>+zLO;#YcIt ze;#hdm|RRb4*h=u{X4<6H&1b@b(&kTr#Y=X%e7hOxcK-y7ric^-3y$)dJneleXa#x z;#!{%xR`bsHtY)gmjX_Y3b<(gAs011;@VFiabf?Ay9+)8@6Wk!%om(W-r(*9H@R)l zSKK$?EA;;>ZY%5KRN@vFZ{Fh0PPe%=`VLPDFXV###Kj-?U^DM=VxVun3tM)ClXE+# 
zeE4c9kX5GC$<|d;B=$(m2a>$6duj17FJ0U1B}4b_v~M_5=CzGyzG<=c%)UXZ&%716 z;>?yWm!6?XAr1;^;-DRM9W>z&PQFh#O~80NNltrramwhx>3LaB>1P#+!!@1tu7@VK zFhp38q~D<k?o(tYPB|X?KN!cdp90NMkAoF#UX$H*)+zx|cab_%C(x8aXbuv%ui!Gt3>Mm|q!P zhp!S`8NQ33LIJfD@|D5fuLJ)j2xUzbDuPcF(gAC!l|t#=;al}psLpVO4o50CvN1CTlC(!j-3bos&&?5L&18^?=VW&dh?^kH%ncYdgo0XXvOdoGNzW z)DE~5&Z#TP%5~$^2Jq|3sWYCpM){N;oZjn+y1h7sgTf!D!4aIE_U7~k(5oM(HLr2P zd6vEg_3O)NWgkv+`g0mEfRh%9^EBxB*KwXix(?_f;MKvLD#hSz0lEpa-f&I^uzx5g z?=Viokrtyl*`1sQje`F^7CeA9v7D}hCV}1s&W(rv9fx{I*8qKcB6QCr=(#DJqCq33 zaXJOuh)3UCoRX(QFX8O+>nw~rfz!j8oU)K_g!~C$Ln5c`z_Hn!%D%y=1L)Sd#k2@@ zlaQZ*dN=W`^?dX4CA*efQ4%!B~piUj+O{BMjF2wT~(71WU*o^mckPcjc zcYti<2ZLT+1f30B5S$L8tSxBC#hlh5J@l`#`KT9(bk0Kb59xRm_vKI%3A zkBTUd0OfeL4Cw}-F3>nUKLI4+Suju%?^Hl}3d)KwmT2_TLceFDeimpVp0`F{M*+FO zImjRq^;!e57-s^;RscD+gzR$g>^jn2kme}=9_d?1pF{c{(xoBC^N@Wn;1cHJJj&8Q zOJkm{fnLNsMq|E0CUdF@`aO^Z%t!t*=qWtUo`5;TT)zuCVH|jk!I-cv!jRU1i=#2; zqcBgPyGL-6!#VXvJ`(9BBQYPtFpq=K#$d=K8uLF8V;KOMU_E!h`n6&G@|WuwYrZVj zd`fqWyBlPzp&rs1pjSI%OyQiWb>z55!yGsuLm=Odz1R-#v_{<)7;`Y%YK-R%(T7G@ zgS9yI2;@{)g;Tcx*w0FM=8t#ELmp)y%hI4F(C^z^%}2I7^OzpY=2qXH)oE5%U$w zgzi+KL*v4rSD|C61j?Q>3U~(p>}!U5KwvE+u?=(*(3X*}DkEo8l--6t{}i}nC!qtC zq1TT<-|q+`N(v=yE$l#mPBcp=i@`>Gpi?H!hXL=Rthzx?X3%vzsA5n|Z`hbei z`>LcKuSps<5WWHKdLKqfab~O(gT_eV9V=-B(%(G2F1UU{Xwb!4?n znvf;ahOd=*>N@HBaIF;AH%RUDCh7cWlXOqqBHdSjx3)`b<~B+5c1Y1M2Rw76wsEJl zLU&8;tzA+a--GkX9w~m?2Y(Is=9{|IU==h0i55&-lKTun53U| zNi__#3Az&W+HpzMPe?LA&!3W_#c4@SVAWYk=PdX%XQk-$7JN6Hn<~Byxd8pn;SBSR zq+{o$P~Vl}mkZ#T4}b516a(JFd%#fO{6$IqK9Dr_68tF8vX^ll2U=Z`6k32e09|@j z(ru&<055$+JFw!Kq%|K(Dg}BE`HH~)k0c!e4kP_OXbMmP=^3DDsJ9I1&Y;hLP?Y@w z^Z;G}>w)s%_Xt=9JOIj}?M9$)quuUEe-4BL1CSpNS{n2=`n(hAA?Wh}paJsc`|$gK z-WZ<+yo0eH0@?xl-o?2Ra#{Dbr2KP|-i6$H1JOt~fGqPN(>ll>K7}+80Xf$^2_GL| zm;+{_+$+f+$nr?a%fmbXmkz`K1V$W$Z+%G8RnUV#0p_~peo2Y@By9)w?#7sMAmbhI zxwjYl$U&R2ht@;ZS$HoK@>~T!K10&hG)YNIC8Y{UMGH_i2VM5a|k%eAuh;*r#Sm@B!Qy(*QQSJ}0u_oYoL_ z8g?VP38(zVu;-oOGj!qP-v|D|K-g*6vb3?BO2ZyjNP(?his$LDDa&EkSK|3<*zt9+ zWgAeQ4Ig9^?B90Wac+eT15Mn9wt;|Mu$gYyN6>`buvzu^{MG_{!+uCUaaA;X*C_zjG7Kx zAj%fha)?T`9JY9r<<@YB%7G3s09aeyAv#oZhy~!cB)~yMRUG1Xe+S)0A1VVafpTRW z^cDJ6@^|=2zrmk)2A}ahr!#lp``mzBAe*97kSFBk54n|wEO%oL4lc&M`~vubqha?k zSI$=OZ(+OXh(gXZV4*^yI`WxVVC_XB;3H>z`49-wwD^zG^uo3lV(pd z7d@VR(%0tV$>9gyJZYxSIoWf5=t*_>r4#SW|Jjq5%qoIJt<(Mo}C(_TYCrUkrVpq1t0X${T68Y_3~YGcLdYbhYj_l_bcgQObOjL z_%}~x_SYWY-QynTeXqw?XOG9~nC79YE|1t2>(R~*_gHIQ_t-M)dYm5xc&ykzj)~DW zoc*S@BVX-2I`bfmYBaI?hNs%y@o9GFA1CapdEOo@zqE_5Z`$+U|HJNorwWUCQj1-` zUXQ5`8;eeC#^SF8GiPuc7X5J>7BZt93;Ni>(i`_?s|tHFEu}AG%?GgUy#_Gf#Nq66 ztdnUuW7ySN<5}~M$FnEXC$R9Plh~GflUT*?KSlWr%?D>jWOx-$*6;z(Z?suHQ)}5WkT3?*Z0(y;O zMJLBG(PA`P*K!1NJ{`>TaZxPzZWJ?Zy_xN9Pxkz`uB^4%l_fmWSW#>TrY#R+f!?-k zox2UY^KKJ1;(TKkGNB>cdagdZwxb41+*zGPzEhe71e9U{eM+#H1H`O;&+GwL?%CaU zFW9v^OYOF>1$N*0C_BBgH%wgb7e*gE4JF!egk}#uO6#^Bvuw>g?hU~n_s7jWR>EYD zHFu83_v0K-!Qg$KkoOOItmlV4Az$Wu+?NYH^!TyIFS(XpRL7Bf4*=*GI= zsAjq?yM-Ro!=dMY8?HxYch!r&j?(>(4bq(vL-f2&ak^jnWId+;WIe<-T`yQPT`vls zsb}7qp(p6G_3I0g^nge6^cc?~y~zln&u2o9uC!P$+?cH2IiIW-^j@N$st7&!evMA2*67xVES)XQ(v#+9=`oM9bpLxG+Gpse`ef+wTDsot$L0Fu2Fvwnqtf(PzDz$qF;$N) zu~dJz&QiTgLW!-1L+Sjpq-i~p) zb#9D)r;k(5+dNu#RvfKsdq(QM4@c+^+YZs6_8Ow+w~N+&F$49aX0PklI}Om&nh(%- zhxgZ?ukNptD?(Qjy6UC7gzI<4chZYacF?an6#Z%~MIScQuGc&qrnmj3jh^>b8$I@y z2D;jgcJv>*xn-2kF+gwe;41)YO~A*3@ItE9>X>mC+LxmDZp7 zmDac4A-z$D-#l?mo_V^of9kpR^&`)@nh!m{Ub^QAaev{7+V;8Ue$~%B)gFH0=|opO zVXs{A%vy5N)2Z=EPtMw-p1lJOd+H3y_4xAKo?F*8d%iun$x~v&a?iZDX&yafqG$K8 zXiuvT`g=N$3-KiR8vTjBT0KwZ`g)$Abpf8e4g5USJ}TwO@AvFjgw1y>A~gS4-f8`q zd*zN}kuwJ!3n@L|n3dl37`fXYrW#g=wp|IOYP~|8XEH-=hw6nneMw;fm9~b7mD|Ds 
zX1T-MBd&#ICVdu`xy=`5ZND8>xbZ<)Lcb?r_Y=z6LpGJQyFaa9_fsp`BL)T9gEs}) z)zS6s2`yUK10w8pt17qOf3t_(HuhC}K<8+C#NokqOOLiY+l{dY+2`8t*I8)4K5w2q zuV|q?_~9aZ(1jFx;TOy7^J}KtZSz;!um6^5kKMP%-g@^syJpX|=XKHTRR}`F{KpiWGl-u@04S) z3(B*t1(jKFZWU$*2eA9O)!Ea%b=kUsjhMEkG0Qm7iWRJD!&0V%viu`q>|ifD%dF;L z?&c2Wyw;wDJZaA&_jF<*Uvy`!NB3Z{4STY{x_w#XZ~a)o^w(Kr&4JAJ)j+0ik7j98 zhq2tdBUul}D0aQ>ICe)J$5Qshvg;crFuz`tSVY)l=Jz}Fa>J?278B2?)J&$mnaB#m zX0w2`vzc%D8%+CTHcRl%W#Y@ZEM&u6=DWU-C3ITEp6*-Bn13?6b1IodJWpn&r!HYs zBZWO3o5JotPhov;rn10J%b3l%jQJ`pXZfElXPvsFvvu9l*?nyV`mmCvtY69UZ?0k) zay1M1Wi`t$zlKr$wXAj8T9&kREsMOjmZh{<$J{@xWAU%9XX=9W?8eshEWv*RYyEr! z(`X~3TN_wP(~a!Zrj4xc-i_?WIndB-mJ*-M5_V;?+*+HMIb;(PZ*F4qzuLs&rf+69 z`fX;36Sp$!>Ne(=y^V$3-o~1A-OeH>Z)a3z2g{Axfxhlw2fx_CqSw3Gb9E=%J$fgL z-@B9DZ<52*_#Bp~=CI&tIn3IW!*cZ;*6DT*D_WPsqTB3ZK4llnuepoav|a3BiQO!B z&u-S_&TbZ1W)I7~x10ID*v+zA?_qvR_ppq)dsxExJy=RP*VvyVk5?_+A}K9=}mAG>yMAKO|smlX!(vdAa<*!kCTncwtW7W7^& z>j5;`pUa+GxvbutT&4}kWw~YW{D*xk=AC`)>6v}3sKGvV@WEb|vSlyJ*t?hYsI-^a zzTCrNh8H8^$KA|1WH&qI-NkYn?q;nkV+>W*g~dy9>6npaa`kM#^^f@b_(Oe%b9TVBl7k^zjyUhqnHSdd@BE z(!I?rZ_{QL^Y&&I6tS5-|6&t6ICv9_K9SAbrLtMJwUIr<-1ca<0Y1oDb}bugXLhh!q#4Bm+6xLzjwN!S#c`9@6 zSjrMY7csxf^O@FuKJ(i>kBR+rSwz!0Eci|mb8bvzerIN~g12U{`vvjL9Y2MMEbNoT z6Io31cvkc%7VBUf3#vI5YjZS<959mEGDkA!qhaj&PeWPcuo$K#$FR7!qFF)n{!IJ1 z5Ay~0X4aUVENy64tXYlumTQPX;LP%|KXLuGo%tSzvV``nu!mc)w870;8f(VV>NI8f zZ#QAt;~KHF0}Yt1etkx(>$0RWb(uI?gQ-E)S^kIBnD25GmJnZsMYQ&3F)J&u%$y4B z{>E}Fct;tQJ@99H_KctGnF}At9w1KJixN)RX}Q;4&`P%l z`FZRK^^V%rG5hT_W3S!H-EOx9ZnH;*341~7DRwI-)?WC{0K5C^?)HQ=s=Y9;nO);m z!YKJrC?!7)p{I3@QsIoFl^;6HKlSQ7ylu~-p#R~eU27!87!y0;rLxKA9(zW&LmQB~( zHhlzV`ur}P_0VSh^@YQodQR&xe`4LJ$$E(~@%o30T>9C^Gxew%uV_B;L3PmlCFq2KGJM?cZun)+0~ z{Pr(;$=u)cnT^O;Sh<8zv0h2zv%pfu;;%{@*GiW)dOav>bQoXWXzHkFRP*yU;t%;7 zpS)Gg=+!pJuwM%@I*hJu{2Eu=$e&r;sQje1abtUZV{T*vqeJC}MoQf#MwimfjGH3Z zNDgXYTp!xP*qqhEm~gU%G3rW7Bfb^R`2Ua(XBi;GKO&@y`95o^jb0Y9&{L4+f*a=6V-So+8bQ9H!9p|Z^WgvH!jU; zZ`gXYH}14(Z}``3Z`}VyHT)7(BVYljlN)!^6~pgmW)%LxjOWAcMqGTDk@01yVI_wc z)`AeDsB(z0^;TOWBDt|(+);-G@A>WrYl0GTYv-|y^r!{@3TVFlUb9>y^3;2CK@98~V zt#?;1y6MwNy{2EE^N}9$@QUt^%hv_7^tj%p9vrkm&kkLz+lJ29eVr%iksXKW0e-b~ zG5QaW_KVjeE=}}^iETXMr=}k2_<5KZS>7({jk0U+Ew)<$EA66Zrd_yq+J!i0*Pi%c z|N1klYF|dfMl$Qu@ys^I#R}g{WA2h(Cg!}wEcOBP$qj~kT2^rV8>W@~o)ujzqXb^7 zrrc=MSlOK#tR$^#sRXQOtK5hSSNu2jQRv6l6&gQ8v9`u2emP?lU!$o?VhdzOSQ-oH{C#r zF9*%&;h=;n4$2*hyPNj7Cn|~Gf==Nc?l$h)N_y$@s$L2X^U_7;rK`QY)NiquDyDg9 z$!f2tyv8e5@AT57JzjB9$M5SWz2e=oUb^E08jgW6_Is=-Uw!Ljk zXP35S;b(R;Bu6nLY^r%(Rn5FosyWu7nfdQ^H4B>dGR59rrtM5GllZHqujo}%YZhUO zPhT}{170MA-H892PI;NFf)3lNTOe(BoQY@MD z$sb;N1oV20e&0e&!Dr0)7joVB6wtH#Uaxb-8;-w(S$bXub&X4j^ zGsHuD+sI3PL0*dR^HRnyxQ985yT)YPi&g{u2={~%zrUZt{T=S^tns*8!`)lojrdK= zgWrB`;Xbb%VkBBP=pb{@e20Vfc6Ctp3J0~@DK4!Zt{g91NwP-c-slqju= zY(G`ZtD++2MWxi*D&2ks>%yi|*M_Qit+`5Suu3mlsra2;B_6KQ!EP!I=z(?ALlskB zRcU{OD)#qL>DmC54h>YP#$c5u4^f2`ql$^6R9ZSp6=JL^j*SD~ajIB0K@|l+or$Wr z3S5Xon?U7BD%GB(ihR(IK^sk0Mc8DOCQebsa$p_s9&i&VnxfKkphP^@GU!v_DF4pP_#_s2pa}h12?UN)wW(uOZO#}~j2*3N zX(LswHgGCNwN4IEX*Tf1V2pK;YBd|Aib>Hb^^a1uf_|zM-A~oBd#l!(UaAP{rE0x< zs#bHzXkJ%UYt{*TJF22}M^)R~LDedBzMm1DwT4z#wVKscYfv@S>Re5Aud4!? 
[... base85 literal data omitted ...]

diff --git a/tutorials/model/0.2_cache_data b/tutorials/model/0.2_cache_data
deleted file mode 100644
index 53ca4ae178ba52dbcc252ee02d1198d19bf48a85..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 17022
[... base85 literal data omitted ...]

diff --git a/tutorials/model/0.2_config.yml b/tutorials/model/0.2_config.yml
deleted file mode 100644
index 20146554..00000000
--- a/tutorials/model/0.2_config.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-affine_trainable: false
-auto_save: true
-base_fun_name: silu
-ckpt_path: ./model
-device: cpu
-grid: 3
-grid_eps: 1.0
-grid_range:
-- -1
-- 1
-k: 3
-mult_arity: 2
-round: 0
-sb_trainable: true
-sp_trainable: true
-state_id: 2
-symbolic.funs_name.0:
-- - '0'
-  - '0'
-  - '0'
-  - '0'
-- - '0'
-  - '0'
-  - '0'
-  - '0'
-- - '0'
-  - '0'
-  - '0'
-  - '0'
-- - '0'
-  - '0'
-  - '0'
-  - '0'
-- - '0'
-  - '0'
-  - '0'
-  - '0'
-symbolic.funs_name.1:
-- - '0'
-  - '0'
-  - '0'
-  - '0'
-  - '0'
-symbolic_enabled: true
-width:
-- - 4
-  - 0
-- - 5
-  - 0
-- - 1
-  - 0
diff --git a/tutorials/model/0.2_state b/tutorials/model/0.2_state
deleted file mode 100644
index 3bc98435ce7c3533f5e3ffa0f576cc3f2fb46b66..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 107384
[... base85 literal data omitted ...]
zJ$5mBm1|=?XR+E-|HA$Q#{&2EdT-Ba%*}CQz6F0QNPmX+Bjir}Ye6;gcVM3aV;=%s zuRDv#w*Xr*lq=P;C~99#k^+Y z8;iaQ-2u8M*bj^$SAW{?iEkeISzv0TcaLqqL~aFd0p`Ta0=jlJtu94rMkxewcQ3T1h0aP zU_H1EtOI+{e=V$Wt8V3f4!i@t0{h`t?vKF+une4$sat)p?);~etRc3eh%JTpf|d7} zk(}z`-|#bTZ>+B&KZfkb*B{^G=o**$S`3B(wci3i4frh&^8~DZHCJ!K9{|l|HP{WB zz-DkH{sS|4`y!u=jk&hHGdWm$OHHN^F^9vfi>J#z5M6UjKbD-Hei6J0ya&E!pOMp5 z8)|ZT!mJg)&(7;7vYsd0fc`Sv1wKLkrogz&gFS-yJ;|?eYEGDU`#G}q-_PlIt9fVd z*u${>6J~DgLC9;6p8`ASU$I{xkK_M;*8CO#a#+^NA$NGdNWMD-GzS`wORVf#7uG&Z z-7b5^VNCuV1*T%iNR#}uz#LBeDE9kq#^D=2(sVK2F50`SQRqiL>&}`vtNE;Z7J08m z*4)M!+jq#UlZ|pOe<=T#pZe7P)!xDGFpd=Cb_Zd<9ot-hY=45a4>jJ|@KTsPVr9D# z#&7?D{3e`)B|i_S!P_UyiT1GFm%P;Pt$zew3wmG^GymScd>0p0iv=-W5 z+QT1$X<#^@Pd%G5O91=S+gFdk>bnuBANuw_d;T`u12hBe-@C!hpav`iY8M9bGe_RB zG6~+vv9DuH?KLNgWeM0Q2u%3#;CVAOyw$^`Yw|_0vMTS+M3q?RTK-I90!z%kdemf-68Z zP!6^AgFK*i%AvMG5~yAmmS4G4liGwcwjAUvpmC@MoiEGL6&poXt%_0qrJw}lf)Kuu zuzc;v2FyfO|ARmmdI#u*to*7`@td$kc)d^#U1ye|yC7F~uoZOT_j2NQh^xX@2NbIs zi@j)|aw)d`;Wl(#e@20FM#;Gmt^qM@F0aRG-w{X7#7#riYpVLtYqJaI z0p(QRs=FR80UC$K6`^feocYG*Q14vqTH1K%b8(C6DU<5{!&99-w+FfNEX|yKokE)uVClM$QGA z3qkYjM=yfaM&nVQC`jTDV>2KNdk!`Slp&XcevQm4y5a-CAzpdZ#?n_gHua(QRoIh? zK^Bx>z7%lL{peMo9>^95rpFe7)!x7sR{zSaam(ieVXz}kKK$}2HifJ_4zNJF*H$_) zs#kIPQ;hyRJ2vHUVAaqb$APZ;^rt))IZB&y{EBh08TpZYa6Rnl*yVGH4P#dwe%JwC z8+>Zx2kK9du6X^4Z5-j>!1GDZ{CRDpXL2b%Dm_SBY+=|zcaX)kKMK`_E;$q= zFR~>@@tu-^*32cZ6;rQ^zX(}%)PNYe;#+`Xl;4j{$72_AHnuM088GjgIqQ*kg3+|q zd*&fpgKH@r9&oRcp zBjH?_b$9QE&*!}g-gj`g$8l-noX7P(>*C%3bDnfgA!ZO5#l6n8K@(X(9BrKM$a^Gh zBj~^8o7{;(6FN6&iuMVbiqG;*<=#R2;e$c5jdmL!B|gaen)8Atb`iOl`xI;Iev)sZ zr*j{41btnM-6dxsvg+hM$ec)@kD}xjO&e{` zpj{c*1D%}aZ-83N+1SRzy#Hzc1fK*BNwYl=ALH;ocdy@t{fT+F2mKN-6r7r7dlY;H z{$GIhPB!h7L;IQcVKuk*U}~}0rqYjU*bmzYFgfjUFg4l|SfAfq1K9JPeF<_7u~*=$ zWPI1adTp75UA!CHL2xI}rEf`MR z-{DO_^Bo7QiCGSG%s$w_GY)tPNYVBUYCN1a{jlpivJU$#4ZIHpzk}}{I0w7(SHP#h ztfggqj`my~F&i7w$KKh<%!N4#y)4Fj5Um0`$Ru50HI6@zR~u z#?|@bV)9QQ-=3g?y`a5*cFe?v#=W_*?@)WzaV8*tFyADvBfbxES%Yzg#Y|{!g9$XE zGbZ;5`oDu`g!JiVk!M(g@!yYpEB$=TvxS~;1u-`^@ZLD~r_q1FZfUa+TW@sD#bw9~ zkgop^YXiP-`^!+tot39bQazzX{4OU&oU+ZgYc@Bnm^pFaPE(EW@{ zv9Ewe@C+DFp25UTL%tAn@|k|}*n_aC&7Sa`uwqqv7+w7s`gjlh2;>&H6^s^*CQVT?*lXk zYQGiU10D#NU#s_DGzW)(!!xKP<{acRz&vmQ$OC7Bo%lWkXCco7+AGSr4N$MfU=9E` zfxo+_ePBKRqE z#mocLrSFQ_K7f5;l^5Nc6V1U-^vSrC$Nmi*3e@ja%=c7S>kQi)km=K{fLSk>dR?t0 zb01xRoDW!6)#!xdCVW3K2v=jjmpF3T)tQ{^hv*z^v$0)O zhc09Gu5o7JQ#;0Fjs(x4YoEQqoKwGL9=!eBiLMy!jm5}1{=}?>_Zt)=GjUU3_OPvj z_X8t9w@>HQ0J>^99(;nX{iowa>!`kB*r-!|n;+o5a1mf`4fA0%&+JL>T+;&%5VI$o zf|Z-Tz3ZVaj&Ieby`$rRbu#K-HtE{`I-Z*V$E|n&wH6+VU97nc!_2uE15~5h^@O8v zCD1r@tgZw)clgoOziLdvEkL^3wFAzh-glC>!R!%le41y@u_g`@Kx5ee&j1eKm@u2* z-9WFA>azry{o=(>MV=161`&{2oj$MTBCCJ3`xexKB|x>tm>ac=!Vkgfdk9zt<^$E) z3;6NNrt`Y$P!9F2_o9@i1%DXGzZ}#9eHKpNr;^Xf_+@kPJqmQLErc10cZz;|fNRNLcascGzr{gWLde3XrsBh3OPHUwL;eXejDKYj_pJEF!%@TS@2{q z8cYHjw_#6Y!+Je4u*T)5O)k)04ag3{`RR3$FAOFU*B{$ma24<&OYb1I7kUWRT9kqm z@<>=dKbV26KDCD|dS`(C^63WFUXtA~cGctJS8nYAwbh;s0p*RNYpl}65jc!ZbFFqY z=sqA_HL7;COQKi6xfx$ITn37O`f=%7G0GW(1K5>M{Zs;jJquP2*2C*lwa8bJ(H+>q zro1_zRC*viCbh51_|=DE?FjmUHE!ilZgCbU14Y13K0mBFRFi7c9CgC#V@DTH192wbNfN93`*8mW8akW&rK0F+jDdFUhJ$m;}^Ld#DZQ zI5`-M0y;i#06NYT(+jAtCUTU5Sztf(6FJtdB<44cL-n~9dtX@JFMAoBg{*U_GlKIP z@@{N8Zq@%3pksF;7zA`&X^iTt3`_%E*aZD)K2@XENO79eN?7ZtzP&$u8mk4G=Wd&W z9wJ7Z0w$xEXKV(x*!=nFbyt0gRh(Ep^&1tB$Q(!ihtEY%0n-g~C|-6apgd<@e9O8K z6Rux3>6mfrzFq(4wKdnAH@3$o4PzMx{c62tf+fhoAkQV?6xa=(0^C=6&n|i(8){hq z$H63W7vyulAh{v^+2cVm9;*|7Sipq(292{X_c&MPo7!!}ToEu`!-H(We3Rn7)Z9e- z9Aeht8$_O)KpjYeMc`@dEx-YrIIq&iJ%o0TpuYlI!JXiJuo1ioR)Z%&6np}DgZ|{| 
zN4sp~ec&*B8XSVh0~b_*hrkOUAD?O%1lMGI+u?e!94N2az5#{=xwaw>R-d}xHsi4W z0uDv~2mB%SvBdoe{WS$3@6FRx`bq-wnb+WEn7UGa@+oE=Hs;Vw0;)5JuJ!pD zK7e}qAZrh4?p^?D&pKF*`EFQq`~$WuSab9^wnF4F$Q57+(41(FwI>*ZnF+N1)y$3h zQy*K<$;W@5z_k?2TpQKb25Y}+KPaE%Z^0nY3+S~x1Z#cUk?TPVvi2HtW4;FSf%f`a zAP;DrwYFQ4%fL3EbuIx90nLSCb$lt`bmTD5_+_hsnR~C#99Z=ffoh<>cYsQ;1ZX^} zRW{YLP%=5Au;#rEsFsOvIcNf7fa<9LT_7AwA3r)r`_T2ex)Ii~qI#7}*RUE#3g2CD z6|6orN7+Eztoj&G`yh@v0*UlvlAXtToUa1V9*FbE0*wjq&_|dh?JKzZ1>^ z{ek)|YDkYs>k_38jkOmx0|&rNVk}S}exR`#YSXz`In=Hi);w|EG?z7SO^eONe=u@8 z@ecA7d~Mi!z%K2}kyZO3us>)*F9N+Ah=+$^tBKJ!avL0>jq1?xCff#Ju%Cgx8F?Ad zx%oEs=REkh20p(5Tb})B-mAdg*p|XAU^@ca!?Zsb^o!veZmi~SBHtJ7 zLyX2FY#QOc-Z<19&>sCK{(a$%JTtpAV9L)$UQ50R*9F%TzmZ%e@IuCaJKTaErr)cH z8;zdi8sU9-KlGKv%*8(y{Q~q_YSescZZvLQpG;rCYmsr-JJX2OUfmO$pSJDTHAlVC z!^k=KEzsW4J-|flyRaMNA;$>M`^g8rXd=+pAXnkAI{m3a|@*2`> zYLTn3MS%9R>e1Rn(e-E1Bj}yz8oTB@2TTO@=t+Fq@3pY@gRYr;a3!`V+!cfALp3O` z+IWA+F7{4XV^llo2Hm005bafOHT`IhYCkJ)lGqrWBE}Ez0IIbe7~0Dg0QEq}S_$oB zS39x#=+5C{v*;RQ7}=qXd=a2Jl*`BmD^`Au-9e5Zmnt`XC{B6($ldvK@u|*EV&xB@ zr;xokWUpWJC_eQkUs!rZc8C$OGBB_SWX3^FmN-8)uLg7HEr<{tD9>p$McGaxD+!1M?A6@xXkK{lm#zl6BQ`<05Jr-7C{i|JfJ@V^Md(B&S^zq1A<7pXNx2%||Kzg4A@RuVG!KQq7!G1UiRKNCOE$9S_ zNijb8r7J!PlvgZu~(fQ&Oe7Zlm3+VVwVpr|s zkT=5RpdP5kqCmO^jYaFFaf`deKy$4>hq~I)HGUm~EwJ)v9J0s2jtsQEF7Zipt(oRj zIn~ys76XUT9au3D*)y!R-ku}QK~~!k2;(=f;=1#yj|yx#pd4uYf+MUcGCU8ksnA-!7h-^W}sYxXB&YXD8^!oW@44gvng)?h_ER( zBwr?0{VG>V?UCiP$g+tYwatgSV-yEm5Wwcc*Zo(H9sv%%XdvxZjAX%&+zoHM*u)-U zX*SsO7e@CB=$gwY(D5oclus_PRR)8xZRp9S)@1Hc19rYlm2aUo|L~?5ZzC zYzX%HUX6_I%d{8!V(H_)3%L_HMrhb;uSH#OX6d93(J$VEW?Nf#?tIXck|7=z6Hq?c1| z6+aP_0O_hEGOrI6V4sg-sSG@WVs<4gB*p#y!U1LyQwVMj6 zf$CHrvw#KDfP8lW`PH}DTm@zT<(Ug_gyq`_YmCdlRxeTa&)v^uSP4HW=a!*9oTn)?k>acyueFM*oyypYbbw7I{_wA?h z-Bs?V9qt=a8vjtheY?37RzII0s~@q}TXVAsC{ANkf0FCr5QsEzO^>egXcWB)sEziw zh1Etr>5}EwIz>PVn=d2l7;ums_O9$H*fgZu@voOVhF`hmZ^x$?_2I$}HvM^hVGB@G zCo%fdaby$=!uWc{)BPyF`t@T|OgV^>#|Ni?3nZ&XaTK`7s@;;K6FnDJTT8nb90mrP zWI=ZC4_h=pZ3`eL@#Vl}ip5?8JJ^iCpQmF}F6C6e4oD#@r`m_``9T)wL|6L?WVKOl z^{+hs47;=|&DiBrzAE%6vhw(Va+Jei;3CVWToD1iM0Sk{t_9nG#;^=HKy@fr4d`w! 
zR*mJb@>T-riq8d8fpS%V@j$f+Gmtyr{%{D6((e%DI-vOZK>nF<5m0XVTfjqL5>Vgu zuyPpWE>MQNHe=g~UIH#b9|^7ks(%LP0VaSx;6k8z*+%Rf_(2dxw(u@k?=LFu^f>=N z0j%@#FgObIT5&Qw2pj?q09Dv-BsPXD-*k`%c#q#42I}zN5H}9vbeJQ(@2~gV^;wLl zYPyNn8D4WNb)_CaKLy`z+TeF@qfHh%?Y#F)C&KrE0QiX*&6&ol_f*dX1HeXX>VGz< zM{a^O-YK9F+zpzrYu;76YSw$g%Yo*2C|m*P*PCC>O9{}NE8i+)jqxqeg6;=PkTqAO zUR+{} zf7P#=%ke9Zi{CHCA44`kxztvDCGjbzk9M_cgB+4wvB(B{9SDN}FvQE3g{(H-Ul1KQ z*p*lH#N@-KT&iaiy2fwiN7firPl`SwZ~$n1Ct?pFKMQArJfJb^wYCLW_4JpmA$@!+ zwguaxK*#*S_&Uj{_o_73dgN;CDb8Q_Vw(au7unN!?%%V3IiQ|dTt7UH-Rpxknzu&u zBIF!+4mdAPJM`V44|)^mLQcWMKoVJV@>hTAF=2P=kDj8oGt^|JUviY&4Kn&fbf6cQW>;mOL^B`F^g_4CEk^``KJ@hMKZ^c@Seb%1Ix=o)7dxn2HvW)D!Kmpc;| zB}RRRfY&!}gRd67oHkL|2V9_bF7c8rP(Kb(Unw!|yjqBnO|?X27x;5w#W^4pPsghN z?!G&)tG)X6>Sw%))i~r6qzl@2>OY%)0?4X;2fD_hIa19@V1bu|Tq(|YLpBg&~7)TSI{0j~yp`YS^Zfe6?H)L#242Ufhk zx1@bok8D5@*rPGM--aOfK<*-5`&WM*^v!!`-uEp10X|QPTpc$fx5DbH80dPT3s(Dn z)PvzA3R6}oU`rC%Ax;0iIDJG^0jsdaeL*Lg?obo8I3rEluBj|fUnlp_> zKFzfso7T|<(ye^RVPqkSZeZENibYN#`+)(Hos6tJQDhrD<(NZSulW4v)^Sf9)w=p2 zf9v^c4sZSBt8drwTj_k~t%GyRY2^Ao=>1NQxs~?`_vE{Fd|ynzU8Q?8+YEZ~-Mb#x zTd`e^zLWFQVSMjzBHz))AFjrBCBMDHTKg-5rk2a2$Mp|0QH zfAE?3y&22>*p;_0HL$ME34FJZbF0lI-`&*B@nCg6Eg+6s%m93!0G$`JzyuKHdBS#V zPa+=!&PHAdbpCk}ow0hqFSHrkW9SjUoY=3y6X0j`Z>f{o%^;v!_5G+@@O7cz1FwUx z13%@nC&7cr%#-~9(3ki6qukTMUxE71tck1r*p7|9^jQL%fCC^-9s}}_OW;lTK2PUV zd+&aE6ucALYaoT}2g`}wN$f|+_kkng>0^O8^^Ub{e5|WpckDAjuYXf%vzGUtb?jV( z-X8GYL)NnqXCQOT+-dLVy%2jDtTC`BqUU7x(OJyl%f#qC^@kh0`)BVv0Ux8k3BtVh zb|8Mn=CY>V+}6W=Fq$z$^BqNJbkTnR|D7g#IeuqGW=Sf5wKp9L&PQEDAyUxmv_xk z(vYrYJn=g2QsmwUFC(red=g;Hb`H#5Hof2~c;9%sW*_lo;t+9SsS&6)PqVc=$vOB>aw>zTn|2fD_-4gNOHy$W)F*pJ>D zu7TC=62MxTE3i$4Cj-{edk*3>AQSg6ww=g&jnVVI7TB!j{S55Wh&9v_1MCH}8($HS zuC^MF)h4 z`yk$L7wDN|lzOz+Q}|~F)Ax|aA?w-n?Xdda#@<|pyb_tYvP+O1@_a&Gop&nXec?R( z`@{F(e-{h|?fB+mXP)h)=rdvE@1XCu$@NH_^F2E6|LQZ>jzzvXZrmDbngLD&M`8O1 zd2VRnZ6f5Of_!#?JlV)g$bByDu8JG~`)~vCCxc(GGfrm$r1oKcdEd#NKs&u3v=#i7_pWj{Zk@QPVvfsLGw0j@_sx8t<;|E$S!z0j zn%EzmhtN+O@dtA6PCfn;(0{?NXUX*GJVZZp8P_o6g~%cB82bM331pw0AgGEb-oRie0oh z44ckv`Pc>nt;s-eDmi0dIdYVoQMi^dQ=jLjKYi!IZi4&c9}S1|)Atj78@=E5uye3Y z1ik2UBTPP%2fig<-+A>1)5n36ojLVq*i+24Sw>tF=!N}mbmqnTebTpJ<bJZE>JCT0@$~OYtg`WY7!QVl9hIi2JCuH^iG+=GKxX<8MsBM4R zYF=Le2P5AHKMYjcZ}3?7Q&{`w^YmEs|7Ynrk9iHO18OguYS;Kn;PrrYvXju6V>=ku z9ytmw!uL4xK;)O*; zItzIN%sP7W&hcmJf%+MRFNw@PHN%iKNAH4Sa%sQBk#`WMc~_ge0Cw*;S@y=J`R@W8 z|5n$vdjhS$>L8cuQoNow*=$&2*4#CM62O{S^{d=ra2fEC{{YYjJ2|}H#8ErVz1pxI z8mqTQR5x>DUI)vuABX){WX>nv@4@YbUI2bT*SczrG_M1YnIm^dBbQLfT^!Sg!-vu@ z$BNwsrsE$3jwOaTS93yJtGU!T&qe-M=D4cim}AdHdlO3=hdtoDiftv>K^* zeE#j^I0O9%+ORj1?5$8$+*Gt-XZ{CKThYsWFXr^PS&LjwT$nitoy70bu&-?~`L(B6 zf9;zT$Aq8pr2^EL$2pJqR6e=22UjyT*2;cCyRV3+CP#aYxpOt<4V)LLTgSI|98;$^ zXJ6wVieKx>_`UDS`tWJJv03f^Ao9szX@=?7yO+>0pmRLq^4c#!9*)cyjADkPvtHgh zu{Pc@pgnsPvhu259h(#2T+qFDEawaDeH~LehipdHp3t$XeIQxqKCSmiz`jdAbJqC- zc?;-7E{8P-s)70Pe(Om4kmJ#Np2>Q8xsJrvgj@%7p3u2NzUe??x(l#wUVH7=`N+(# z_pCBZto*7g8)(nSud%6L^~D-^`&;>R9L@lWbwCAB{T6fr)e#`4`qMtqSoGYZ1Ktjl zBZOb^F4zV9L9PYh{`9H$RCJx8c%8o&!>TO@XuhTbt*^$Xx|e}+@?{0n&!nxG%s40S->JomtE%{wKM3RFF$>ZKY;xk;6vAIbrSztWW5Gxj2iz0pmR|S zy%eaHtsp;`zUI+uwyr78L-%7}38=@0X`}lNU4KP+zxL~x*`~Zc=}pZYj`hcC(oa;uORP@@YP^E^63TX z<9q?~-$7x}xEq4Td670(5mUr9)gsPSr3J?B&1Z>^U|v7qTIL&K4&^h6eYk(F<9?3k z6{(M~ajzS0qFo;2--jHNXfwFLI9ltoSc5#!gFc=o&tmSw%ISX(Y}J5szjuv#5$#U} zm!or!W$Tc&KVCv#3uaMg3jI*znfSyj;SzWgzAuou7PDW%P1rVK`;7i*Yo9<5qrZkv z$JbHlS{JQv4YI@io1UTeL}pDiPOpyTj4cQIztA5_f8@$e4SK$37s~t_=GqCk1Um(u+?(>;@UIQ1Q&to2YV0((Z z*TPe|*5AN$A+2XN@0Y(CG&L6lO*ZGm*7F%tGv7~j=!bUR=QFRT4YL0))4gM1JCkDS+g70DE8u&z5_h%o#^A!s}3UY&-^BL<#R${h-Br#Dw1FY*4y@qbauYQNnr(WN6 
zo_QTT#(vN>+iK*G0qf&^rlb!3GMGLGNe+QmfuH_Z4>oQ3b*Y*ORxEwNA2NUVwJawd^*-oMjiuJVb?Xy z%g80jT2JMxW8QRK!@l+I-?dkF(=G-t1*%hfDU0~E)WE%#_xe$ZZ8v*-1GZW0&w&Nr zHJN=6o$=Tg@h8bM8(Cw}vDY8Y1{&8@U>ul6j=3=BJ#z)%n$pY-@*4~AcKR&h+`%5S zW61Bazb>c$$MG#D_Zs}S(B>cLUC1$P3(2<%eE@iYXJu{N6Bp5bK03!sn0pOpGWx;6 z^y?>kR?mFZ)@~3Jm)l^qya4ZcNZKHSH^ zXUObBJ-2Xw#;-N)7c?%%g~NGJ-&=L2G9T@XeGvGNSdK;SvzR*ewh%u9`!@P~h<3$vFtDJp2+3+`VBdN{d@Sw zV0s<2XNRHd_*WmyzxOPACVk}+`!Tiyke4BMfe5;eg`V_v7_kQV3|QBphau}Yo{s(m zatXOz(2lNSH;GSqF2h%aF8f$=>-@I_o5rncS?Mjr#qlo&3qc(|9cxkYX>2>O4X14w zUH#>P9I!Y}E!Zc)QS#)%d!TFFe*B%pbC2Ua1FS?(qN|T=pnaq9tpHJ=8s-MmeJn@U zy`{z%!PW#^{CV(vpmnTA*Ze8w%Xs=a+rg&(XJY3y)_bD{>w7AAn@djR7Ha#jC~u_8qeGHz*@-g^!?~ z6m#UqcPp~$cF-Tkr#W~7*8D0*KlCXAwlj%sf%k=T@oUV}@bPE&#I}?8$=IgK2A>D- zX0P_Zw-H|jxF7p394DF^;=T7tRFix~;2`YUGc^tA=g+jYh1h12rv`fg7>Vy1_;rq{ z=Q*DH;y2h1Ca>1?TCgMCUe`W)9_#3wQp`CgtALk5o{RLYixDae0=SuU=vTbR~zj~U3)5jFE|%l zig+J-lsdU4^7i{w^lE%VfaE;v7G4W_BX`lJ9iR4(gRC0HpnLrxXMqU){{SmC0(|&O z(Zkq_kaf;3Lf4$dfX%dx4^pA7kK94 z{U%sBHnmm#0b<)g3~0=HworwCC3-CgAeVp^TTeptjY)MlOe4}x*91O>ZoWN63)(ShKBg0Xq%#~O13KDlxXWHYT zAx?_ABI4`}>1MP(DJm-2revFuPD+NRw)v-`GJCJ}*k8Q``n4Pm4vPlV!)j3$Nq-{5HGi1T_h-h2FK9s`-HFRyxWz6`C*!t03lL+1Z z^eIq|1>YjlIaUwI4k-0KyY#lhi;t~8{4*=BIQ;xyJ+CQN|&_;?=N z4gGd7-;y@wwj5i<5S83Za+y=8@9`t~1Mf+ng0OoZmtb$@gUx>6%V{0?!46q;DE`T+# z0=D)22>Q+NW4IrF1owdL-Sf7Y1kRb`*p|aAbYt{_dD!O%it-v6T095 zxC9<;l*>rhZZR0oK8@vb(yyQjTr<--eG-|2hhRAz15;Uh%Zwp?uW1nZ66M{Ij>*0u z>Os0Lw_taEiRJHW#f&4mm*@J`Kwru4crBc3aCt5F?sIsLjaW6tSMXRGX5G&~UJM7( zj`774&`*I;=rW6^i31FV)>-voc=ctVD zfyiqig1uorOa}Lr=NRX;rD3z(B5aPe6WNAz{%h#=YYygPd*i9Qug4xF-JfTy8UiNCLF`IuOXXkgVf%R-i7P{{l`(($NrLrt%l5i zeGEZAIPd0KLN{JS8~uzGfqpRzg1I=hVkFD{WrJPMsm`mCR zy0Idp<1`oh4Xt$>TH4-r{@Gs%9DhQ5W7vk*W9qMul-WzvS+-39S;STYpM_RqvmN)P z={#!dz`sP<9L#q%W&3HyRz_yQe5dx+kBLF-0p?)0z8ige*Nys!*u`vpJtk3?p`4{Y zLwmoE=`~>!+zgFb?lz)xySjsbyy_ClzccSibY-PUNnc+G2T&^omQP)ZN2Zs=vCeuK~t=C3r~)<$^4Js^#5 zt`Tf&Z+!hjYu#=;{bHncWtuzryMDc3AKS)bd*`r2U-rx-%AU{lbKTVg(pW9%5xy}D zQr4#*v?=Y^eX|bIe37>4LT-y=-?sOfxEp#6xd~1}&mxVLG~y3a-T*bq#`C(n9hg@S z*v@d=E(QIZW9P+KlIoN@AfbLtt{y+27aKO)mnoZ9L>qPcb#EFk zLhnTSm&U;_{liwo-#m*A{{qrK;~2k$w0&sMoWmcz21zS+%Gri3P}V<&X4>kb9?-3m z61L1Xe`EyOjGG|!S6bFyq8uS@TSHnOfqyaMGk0yNFZD^IUKt?4X1#vqRMPIBQoAww z>h038;g`kFxzpb@DL|k+h`;mHNp1l@V;8XbS3-}_U3c3C$~ARl0hVuTgUvr}z1U)C zuhjc;UR+DtXYq4SEXMW_(tcdSUgQdGU_R!x0NrEvYsi(@^l3xC80I(h19D;ZAJHGd z*3-xTx8JU5&b^ z!ugA`^Vx;$LdMh?%eokf5GW`3+h=IVVQkv<&nU5%ktOZ=fN_#ay={c9uQ4*YKfZ9# zlsA_iGG+3vyG^ zk0O2D|L52~PuX@KqTCL?ckoM|VSFFx3grICUy#QleAiOuGiK>$vmT**3eQCoc~6A; z*!ReGDuz_RNz^y=4;i?bqnXfck$aFxW7xtFZMWo7nLwQ&dl-tQNG^yEtG!^UGQ;iE0KSI z0<4B}VI15Bi)g+*$sa^s#qYT@h7$c1|4wYyHPeb+AIHcZh@D#Ydb73QWyElEWL%Vz0{{Dr07kvMs895pp z$CE(+>7Fbiw*X@eW5Kd{&O^?DGPnmf(+BgF9vAKj_lI%r0p~E&sOv)Z;_JS24DY}O z@K{<7?vsV!ajcKW=QPUez(1pGd>2qfd)Gw@%8+w7dCdx4!h^<5)Z=w5~s_zNdj)9HX0_BMF@D$2h(O0zA zW3!LbdsfRG*gaRz!G8jNr@{>MF6`PKf?O-P;lpRt=F5tPeESY7n1*C8$_N)efNXuZ`(!lD{Lodt;a~eL*hF4dhtHU z>BtwsYe7P8kD;H7eH1vpEse5s;a?5;9=LCMz#O+C7a;crZR?Qj$QzK;p%WIt2$%;o z`f{yjz)F}3YhW>qhGyDMKo%fD?}A=*#*w~$*VU+tmDK6y+S_*wWtfbu9UN;jEP)8R zsWaChr18vUHnI$k#hA-Ln`2klRs!pm>P6&wFfW-4wl!`ESweOrjiD?eGf4gP?}a5`T>)%spUt4p zBG9KD*1;GUCWbjo0)6bK6U@W-Wu*2RxNjVn{b!(v-T`UO)M@XctiOJ1AOq&zgS4-J zv`q~9no|V&n7id6m<_hgf^GB}1KMR8Xxp|QThJ#!d&4#z*$nzRzxF>4E(Lw8HxJ9k zy&3GQ2*$O(6SSLGHyEP@w9SPPFbgu^es;{OUaO z!DHDt9+Te!{Z52$z^6E_<+I#x;uG8dXE+yJ2E>xu|MA_oFCvLs`w;h^`vd;rP~_K< zU!WZ}kL%dD2U5NmXHRVv`^R0I8Csw;cq{d$AG%?t**_z)`HA5Y|Ti^ z+uArbZQJVY$M)81FVbEgW4SlYLAyB`$9ip|O+Rh=Y}3n>ha1GmOapy7AfarVZ9cY( 
zC?_0C`e?5~tgWG&pMRF4hTZzY|CGz|dt!|C?jSUc(-P$Fec~bFbN-dAJw7PPoV1r}}hKj;J3*{*+U%2v+u(GomT$WDmLC%PtP@7==wWO{q3^=i_jfw2HpLgAe(dKjy?&NHf#~nc{avI zq;;Lp0oK*9+h!0-;MkOz24$qa-H-+Ai%9LZ*SG(F$}##NBn@eO)IfUyVo)zO$W+c6 zciZ8QTzuQ%XYBjn;b(7t=kVXG-ET^M;^hM|s zkgJfK*Zllq`hSp~Lwf(>G4AIe$HEfu9A&u)K8Jn+=P=JX^O^SypRIK*r}0_iW4~F=4k{eUGWzF74kw`sf2$pnVOwiMrJ=5`Wj>Sma^k zXfCs%yRW`}<~4*qh+Kkw3lu4vw{zhbGD!O=p=Vp`W3&H2n*`<&cP-^CKK@1cxrWY@ z^HQelv9^_ZkH6K(8h8zHE!}ta>3PB9s0-csvu>wceeZ0-7SUH46M24X`rX=e-HbVb z`Yvo<`woV8DBlC##}}}_kN!#Q?MS~j>R24}WOe#-{oP}8Df@nxEK-ws>w z(MF!>z4?Pk?~j^cIy?*BBS}9PLwy>!rq{qFV1CZQQdkGhUk|K?Zpgp@#PAlZg-*B` znyU5tv_bTT;AMEJA?G5mgZ1ERipJMg1LGU31RLQ=m<<)sRs?-shc(a(3mWn<ugn2UMoV=nK*b71@DVF-+88$ScT6}b=+SPt*O2CyIdcoE(N<9!HQ z895f&kJN7oJOyunb73F<2h_(O!0$fv-@;$v5%>$-1MVel_HW#Gk*>A*-wS_)SL>v4 zv>yic@izRD__i~LpMi6B6Wj^f9gp9;{W0KU3z|NLbg>fxON;itnAU?$HN%jhQ~KPU7WW#%+p zANR{G@IJU7+*hvEe)!%+n`WeQS_Rk2xpv)u4Nrjc|2k3_Lt1ZctHERAW$1@_um#M) zHLx#oP3xJ9^mua*RKVlOxca%T+(RBuD}nV$bG41==do)4##=O^jUeDuqJd9DJ-;ql4dO8eJlPV2xvHbVkiVG-zOyvg92tblIN z&-m(XFc0k8oMyvfU~i{5R%6@#8F(GqVI!EItOMuF82Vdw|82{ohw?Dz^ldq8plmGH zV=gp<>tU`1sDbA!`??P91mmuSe$dw(dci&7+@24%bu8xO_=ezBD1$L4fOGAhas4>X z(tYs&`t87;PuqKZ>_naOaR?;j?)mL`a4e2vGd9P>F`K?Gv;@g{CpO0Q@CESPXO3rb zJQ~kF67ZO(ou9AEFjqCsT_yaA3;4f`k6>p`O3YCykMGGy#vOeFopV`<81dKP-`HM2 z=AcUag-COlg8UuQ+;>6012|X2ms9p!&_d3fkJ4+X^EwHZ0Xe15^Ik`t^J8HM-!9}T zm<=AAw&}rc*?QNcjI_;Y(0(a6&N;xC(rfBN$QjTA&f#+CrCl>pzb()Y-N0DV*GyI; zJHa*cUNS~*0_VLQ>ADze9hjTvgt$?+5}R!U7`FiSsclU|*KWKHD8m>qpI$KkU5Qsj z4ieM*+V{|hp$P&u^KrX)r*X}D12RH-PO(47)UM3d?>Ae}-78&4 zA#Z#&u4VODLOn2I;Z9ffZfBN-+Yw7vcH855`x_ft+_Dx8i2Rt6ft8*_r6*&vvS-cK*8`ow&^H%oQ zrfuxK$YfYe{aeI(5dB8*8u$z(*k&P{bL8*XC_CmF(tZPWb8;*J=0Xc}HDnP#DYQ2B zW|)bgv-B8Tc=T1G~XFm}nLRnuiAM3grGK19LIWiA@pgTgV>T#`ADn$Xw7CI!3kewjs6;wuR^! z-cLLL`JX)RH}d}BPkFEJ_xaF;tu&%7%!baL+d^Rs?SGyRPLz75qxhY#f`EU>MUtg3DFF%$K>+hi5>9n~dA6n+q{z0a6UneWef&*<-ZB>O-s_G#G0A$R0lKL`DE@N*4k zV7nGu3w_{UnpX|AE3ltSJ^jTy)%*57HM#)bYlxdcGL}p?^`8F^h23C?Ys#;YU&8kw z>bRDKBC*=JXI(?wyRe;&ekZo8@w<%vZbzRE)Ri7XE`-lhH-#AEx&K{_ekPm;r%`90 zlr!WW#ps_ye;50~=;xqch5jhMlW2PrwtMjVH2P1l{{`;DXDPN4`FtPn>9~@2k+7tRBluCIpP{%UABsv1U<=kJKQVc%*f603uCtYIr4ON`6! 
z!}uND4A;$s`{vA6uJ>QZuaCNus$pQ4=+`3ehgmSL z!o47|zlNSgkC07}aQ>_!7r|oSvk)GK-lO>be)^rpK&-o{yM^-h@G5!%rovb7KN6d< zcY!RnKSG(;`F5f_AN>{BM2u^YV_^>bMxFX5+MkV_49CK)*zKbQINwA!ARULfT#tM@ z7vigFb1)nOGvF-hzK8q(l04!sApef<7^Lm;K&<$y$Ub2S%b|u--7D^pYTn% z9sT3T_wbvByci_7MK1lP)tVjNY*vsH`>dl>Y;V9}J&xh;}(O1(KIhB8$55p_)Cr;P<)V)mIx%jQi zhmHiFUvWJmR(vmge1r2pvh;0ypU8*RT#p9V=0gu{qF(y>Q9f+`E!PwD+0W%e^F{ch zH{-LJT*v$;AC|m9yIMZ9kx%i3eCWL|A3BLsB%dn=^5F~Q`q@9^!zk8i?7Vzfd22qb zA)knYRr63_ptRCdTv~<_ zdI316#517{{dnYY@O5;EC&FpKyoBrPw$MqLvAgDuFV3N#)fnq9IKO@woxK!3)sQFS zL(b{>`6=pce=2+oSj+eUjv?}jxrW5VOD=$eINvNpZcmII(1{n#z-Kq4V?`zxbL{Mi zWDLWMAyD?b7_G{M(j8oT8Ao{!>OP6z6zbR?gZJdZgm-hH%KYr{5$;8gCDx(2&~^s* zvgF;&SUQ^U{}K0`J0Xb`(YD5Yrk~NPaczn>qI16(zDnOU>ba&w%zbnK#=@gU@7(!kj!!V zyVx%v-v_T%+)KoaDZO5L-Fb+z`)ogKUh`br<=DOUo{k>DT=ZvPA&_(W9ql8KJ(Rb= zrLZ&Q4A)+LpMI9K|c(_I`VeTCWF`d{b~0+GQw{BVfZk%)yO5_ zHJzN(_o}qL1mh}o`$}~0HGB@8hI|<|6K4f7uGIT5KjuGuo|}bkJfAbQmyl~4wrpR$ zzw;@Z+X!G!gcf)Nn!#sQ+Z53|@GrxdNT*et+a;DlX=bjDC+%xYwh%NK&AfPr0sj~8%_r%m!_aIb9cI zGw9m|0oFk`3_}leK(?VPOSx+9DCS)<#+P^K-cI;bt&xI~rgQ&Wuk&=bGYmIC>KwULQ%k zH~2O&eo8(0Co$lY(69GN(M#x#i}Sws-N9=E*TIM}rOyrrAvx!U%Nyt0z%$Ghls#vA zo;I&p=nuhh;2tf(57F({ad1tFy}kwdx(yrS@bl#9?_>TEnI*@ZlWV z(TzVjB+OlnYj^De>`yWtu8&>UQpYtq!(K~&(|ig!{R?0Aq-&W>q)+x^<}S*2k{j30 zs0*EWjqX97jyx3k7ghIfI>j=yPw}gnWXyj^P0CS(kFmTDWhbg-EZ>UW3({lXMSFN1h1u zkv^L;pW#&W5s;wAV4hj}VDHAg=&mW_bDzWm$X)At>c_eV_mno*%vi1^`!ap5Hor50 zV<6qfdm`=IkPLHF?|z?roj{TmxS> zh-2i7!2N~KNnrtyU)+P_*hr6YkDUVg9mp>rv3viN-WyQ%IW_&x%Hv4ZJAJlrJnVsV zKH0~f2h!IOjPH429dafd56pG?yzD%*fpL_NLXEmtfH{o+M)_XoM`tYQ>i~0+UC;8_}mC=ONESl2`hCNnXh_=nrAL z8A<>4TiBlS?YsD11on3cdt7-1{x74`U*@;?JLjxnVHx+loX0cvvjU&rQ^y?E*ee~J z6HCO5Z>b+EHTr0FU73Rp`7?EA=R-T!f)2($_;1Q@!p}K&S;yk@`LLC~W)nA}+_{GG z+I$$zb*Y{CoAwrVj;XFGJj0V?KYjPVM%-)Z=Y85gNgmH~uAq(2iE)A46LK*J<}|~c zMcd)?APnF`{4&>}gjf-;1H{kKhBc3gYt9M#CXO1m<@o%L7+mLk?n*Y`^LN+`S#uuo%c8ei0a3cmwGk zV*Kf`<347N(r5k)sCyaxVI<>CpFb^s6Nnd{#zuc>zSp30?73%srj36AcYtyAXYPF6 zAbtoQ0mkU_ZhHLDU&3qs>Dbr<9((R3?c5T-jeZ?<|3osUjwuk!*8|eF zj`uw<4mm{Z#fZ28yT@{DonvRd((gPuj+@Zgi>Xb!Yh~GEiFr?0gY@cgq7fa zFsAX{XP<3I)-+wCm#~p%d^vI|IQBC91RM)}`k@eElcAE`6W6GvPIS&%yR6b&SvRYR&z08)f$d zITfzvm~(uLwYGrrDR2p|tDVcap@MBzJ}mzRuissn4_C3j=CcoKy}2-*c!eV8l5087 zkVox{l!>2VeM{_z0y3V+Uq4-qjl9D?l+Oc?$rivjc7Jps z3*>)2`uX6v_JeC+2Ur59!bYwy9mpeq{L}A;O+&W9*TL(L_j@O!p9Pe?HAUC6oMpKa2%l#_D(8M7Nb#y%g}1ZzN_0=6>v%xgcL*z_yGD0JJ~rVRo#gZJ^X z8nznx1oYX+8OX)32K`mw^RaQB@)N|!Ape4GBiP>e+)u-npgW$aFc~7SU;A@x9pKy) zkVRMy+uE3~{jY1(**2!!jnqGY^JENl+iSNz2J4#tN4HLUfD%}5S%1s6FGCFy7;Kc4 z+G5a0%23L6j@y4=WAkGJtw&!m(7EjCpB?kutyA`2@)DnKKZ$z@o|mIfa}D4+86Sau zI(jQQ=j^1J`y-y0OP|AjJa(S7W1i1_{w{IdOP=67#QW{GE#aH+Y+gHrdnKRkci~z4 zFWl$w{2ZZY{>C|sb9eJrBOKE)%_@BbKhF83-{Q-) zCE}R!JeX{Pxs*9}lOw6S47Tuk=-)YKR?%ls_c)T{D(0G!zQ1)a@+$Z?#O#BQP{%no zeTMlrbMywBMtLgrKKt2W2dw_Fk`um^}YdV(lbntn~bDVSjGC1eH#^~!f*85sf zAcn8w>`|@1?>8E|&#LaPEcQyJ{{H$9a)@#UyZdM@;C z_ClYGj6h#wwqW!1qGjl3qkjmtgCoFPn&BAOO#LU2F&qR(LI<|5A_tJxPe(r&7NQ$_ zAM~kkIIN^RvReN?GYcx|d+*VelqXfw_utdsEV!_mz84<8ft}pr-Dvk+n2Y{-7)$*; ziowK)X1D{8NSZqR~Hwz6;p@o^S@X6OiA5OX=?k%EXN?tMY&Tu-!)7E2``% zZ2uw8izuH-c_F@E#ztR(a_vXx+U}$N9`yaIJW^CcksJ!E;qGead>-Es*x#&#EsLw6 zn>ky@7;E&GJWYPgWqb}kyCaXob_0_9lW$^U4dU6zqltA5wk-X8i*hG0C*gMZ3I5b4 ztV4VqIL4D`cNNm}(|qmM&p9@5WK zd>r`&_yO2Y1!>Nf-8Y{`niu(nnQ$?<#_r8I@LBYa0_&FUNB72u(9LZI&{y~zoCZ6A z{XU6LJCZ$;?z^p&-P4|TS0E>1??Ru3%rF-ALE85T$P#;gG(MAH56b3s5}XX)j*R zJ^#-}ZUT?-F~Gh^Ki5^nHcLCypag572RgxVFNXl@AtGj(c(xk>OQ1-(Iafc}VY0pD@R+1N7ZlaX7ImKVT07*nZVhZdoG z-dc_{ml?=u;59dmiEa+o+fNDoF!Zm%Ot7zExDNdom=6VfPC%}2NUz6!&*n;`W1oY( z4D9C-=)muGr2m73dG&Rz`sXq8^vCCy4x((HS+3=GA-CXvF!C+) 
zt(5!G`-$;5ud{swe@MsS>#0Yfk45f+G}q^$1)uFGcLJ{&rtfn-h8#{tH?a^6lenbt6@73xf6U2)DMFFVUW5lpwByCn=gT}PY2udHheyY?%25Q zr8zME^gXykk<3^6zLuzTMxpNslfeErfcfY*8Jq1UG~`BX zqtT6b%>OVA-S)%OuLJwlF9XiK@$ECW3RTzd4$#F}8=#fcx%9u--h)(eb%Hu8r|N z3dV9ScL(#@7Y+dTvh(eInK|stG3GvJa1u3b!_Rpd%TceI$xe2UW9GOy>{oo{k#@D zUpO~C=#I-{RUhl;CZZX=QIr8Vg5!L=}sz8-tZez0ySFemA;Y=0Xl zuLk>B3EN?_ANA2l*GG&kU0}bqvG2`LY{(MQe$0=#O|OC0xfceZ0CD5GlSQ|^ZF<3W z`r4Pi=B?}ieY&8#A&rxOZPcwVL&EtygS5>yUwvZGPrHBWHGF%J5m?TEeZ*j04Os$n zizusGXFm}bUjl3f`wpOFpQY`MZ=VUo)*)@1f#HUp&Qqmsi~8#0)^8oS-?ldN9(){& zM8q0I7QyrN5^!D3$GZ7QYveZ|<*Rai`N`35O(AR*m2J!D|l-(1aKf1A* zzt^S?+CPLnKmpwQe(us3w!fn_bnu>AQmF7=6lJgX_UkdR3ON~KVtAj>g)C7npbvxl z(-@gbeg52sGaFp|0waw*J(&OYv)v5kOH;QZ{#`%G8y8~Hm` zc)zAK^n8&yKb3RPm-&tC3pj7>M45Y;@N?eh8-W~0j7`KEg?yQ|-djzeyoUN8qPL)r zL(Z(!?=87+i|0}H8G0s6r~Xpfx)*%EZ`rqvyA8L;C#(q~r55>lt+KwfLM< zm;&uE3heJ5aIF>rIj5h`@Ou>QL*7$NKWni8{UUVt*jg~&Xy85KG`C}s^TB*`a5Y$e z5}X1Ppa~rRY;X@QhO?m;#zQ9*z&T&8gk#`B*ow{i9l@BlfORwAA}EmWkC7wcZnzfu z!FhTcrh)ZEm;)2xd@$BjSOCUx-kb;fpAPz&zh%dC1JuYxU;EN`Pw<+wxvxIQ-0P-u z;&`1`^L~pqjyr-D%FfL)a6J}6mbT8h`SqYT<2Mey9eo76LwQ%qem~v0T2J|Am;s*8 zyYz+MT4oIF#qc=#AJ|*hAwSp0eDePtPG(PjjJor&ozoYJ=W*R#(i*yGmpp{;bo{o1 z9QBhbeAWQjL41EFDuZ-w7Qp?KcS3%YSZir_Gtxd!fDzbOVM3d``VNG9UEkko2auL{iWaQ>83mm`}wdMy&Gv?<`|HpC>QWQ z1v$P^*M+?X36xj0O8x4bQ+JoDa`~eNJzb^?wl(xD$4&HgZE> z58F|G4A~1!=$*)Wz_qo%rEn>9z(WmvBz~LG7sCJ;X9YO!9TX*b0hLrm<#8@UGPJg3S(g*+zs>*z6~?re%Qd? zEFzbJF((6kr@!rB%-%-%G-L2)P zn2p{J-C&)*5e!j3woy(vjyyg+egn4A(2U-O90&U8Z=a)J5PcXXpzE6f%gsn*j6oVt z|1y$!PVWt@FHzQ4zap5+wsz)Gz^0FX8Kea4wQV{wrmSzKwJt-p-cL+jdVXot+eTl} zHD{?@f)uA zZMENZ^l`ruo(He@<~|nYz%KavJ*|lQ0>|ooMi%K<%*SzRk{~P8v3K$7Z?oKR%u}^$*kyTL+%M3zXZDMbO7O=YI{l^<~Pg ziTwp+pe~J#9z(QEw$|e;!cUuh+h%p69FX=OgYB&w1j2-f7uD@LK{T+s|Kk_Q%mB4j3y+(H8Z{7vU)8J;9 z3<-GMw~xo51p@agC8Tq)8G2wn*fzi*amq;R=Uaw;Fy|ODPy)wYfL<_$bzLw7TOmLe zv}K?S`V2!lm-JIaw_diz*QUSoSwhyJ4C=~68f^>c!{FFsE~vA=Ym_~|Yj4Nq z`yV5%=e)NY(r3Gev2`J5Am_p)@VwZC+zh_2(vz*n3&=8M|B{2r4?V%=Uj~_Nt)IJ% zYrV2)J-Rt`P;Y+C$Qt$%cpP|a1nMnk(DjRjassxsO`?sMwT3<0S8rRuJ_Mep`=JX0 z6hYq_QXl8SIK5y@{VZFjZwcR~Ts?+m>-^K-IQ~UQ{p?R)`*ba$aw8VH_2$`)p4L;B z;Ab9zHVNfe$S1;<1*LteD+e2Ry(l&GKwSH`z5V+)j5L=1>H$4y!>^1iSw=UXEajp; zef7OnqfS5bNZT~(&BZ?bOOSEk>6(2Z|1Wv~4adJIJ+ zZMu-D4Ly^kPy7>fF^3qMu*J}!1e`>PpCx z5)yQC)~1iP2w4DSL|xp_m4ULkc>EWU2`D=#>z`?$uWMRBs+Wbb=ZPBnFqn(wfUZw7 z`XJJ|x1SX2@Qbt~6YRE2|B$vTfo(kx4Yt<%G^YYY*lI}I+GnY?o{O>osUOmM^||<` zb?9XXl;dn&w_j~hYdy9(WGLG%26bbaqvZnp@AxJe3tRe!pT1da&P#;d+-+;TU<@hO z>&$6TJ9uB`@v4lmn|u1FP8(wx(^%G}?I_!~{zWiO2B|GQm*d+>Jbg>nL9Ml(O9ys! 
z+xpkt&^!9LW>Z(956>sPl&zbLtqmqXRy*i3nzH@c#@x-rF=o&`XO$>-!C3V9V4N|~ zgKZ7$iM}5BZR8f@-pHA-5X`R!X?y!M#wqYAUPqh9JvpzNWpb2X;Jx&zlo#Vyq_Y0;4N4V{jeGm(033@;Op~6 zr0*$hLK;_Ja~T5t&BOP+e7|G@7-wsv-dyY>f{mb057PHQYe@Skz-F)yV-ADwn`tvo zZN68auXWmtuYW&M%tO0=>gLgdG`Aws{*38-*tQp%!F;UO=6glsz&7UQ`xTDAjBZ~M zbfFJH3wj6AvN3$WWLu0uZ0g$7eP6@2!|2^$TjOijrf(U1k3?zzo00YtgLxaPqhV8b ze2!C}9x!k7HJ;_E=#FPBIM!{sShh_%esueEjK;Q4?FBG5$2<#o?>3Fsf_)PBUbUE; z(!Sfk_s7;@i@^6!w;;FKjNb*e*S->t!AC#ac7XXh2JPBv_zZ%1nM;B+|1wfP=f%Bc zJI6Ey>2c!wz{cHnocTVmeHS>seUI0@>v8xtkZ*D#Hed7iGcsr4^H*%-UH)AzEPW2$ zzTNxpQ0M1LyuN-O=`p*R{WPT>Cq2iFMK6Ny@A@A7J?P!=68K)RYu}9C0glK0==s?B zwvTo&cgN4F-z4AS#-wo=!%1$<)8=?T)96X++4pLz!DGj8~$zSsg~ z?ZzumwohdQ|E~|Wv>m?IN0uAXcE+&2sY-4Y^wxUJVRYkY5Bg!(FGIP6JwQTPn{6_b zOZXL_xe?3yApB_$*!7L^9TaRuq-Eomw813X=dgh`eUb(-Hv1e#mLbB|*qI7tD1bQ{ zC&F(;o;`yepaiy0=xZ2#D~!cgS;AL((bvH!?5Q)k>EaP2lD zGr4-aHMAK4maQ|FK)mtTI`P{GYhVoZUC3jwJ2zQ$<&Km$pxejU=%aG=&(?ks{hRPQ z_Q~*3^rNwO z<%D)h=h43OP5Z($v;Na6%{49MhBxzw@U&VKy3g0Cn-X-W0_?@XIDPP6s{zhW^R3#+W zVgCoeF?Jzk-w)t9JUJ;B1`nnET<)33x%-An=y`;?wrrTrdoN1|a-nnATxhyD$LEPz zyNmf>H|vRgFm|qKg%S8(&))eZaeqdC{tw9W3EF9*NylY%70``eD*2kI+R{Rxqc;HhxVmzD>m|pnUC~6{U?!Q>F3|r4krfJ z#?s}i0iP3#?__@WK;DPE7W?gU_DDQ~6BSCy4o8Hbm=by9e(to98g`xE_XY z(&xkE@fUcy(brUbxDQC?z%Qupe?}Pu@$l0t}oC}+e=KElu zrOyLeL;pm4C-6HI&il#K-;fJS$fd+wtz$op@LGOgHe9wj7d9TBi&yQ=-d>#xJwMEa zWB3eQ^Cs%&5});n>1Qj~@_zPH=~LLQA@@IILzyv!p@T!|N^+oGnR$*MW>5V9pBb&8 z^l!%HzTq<_zPDa_4xh2Kxu_B{&dpLjr_SE>GaE7U=U(;w;q-sU-HZOs3VZHB{vRUs z7vc9T^R*lGOVR(t@!)!M+{8P;iTFm4;rQa1N?7CcH`9(mjvyAk>1T^);FHDYqqHfa zcM|i)O8qxz&GkNPhXCVA|4+2%`kTq0eHH%}o1c3rBfne;$zIhE?ONsgboejD=QiZ_ z`28Ar8|5L|9)vy{{Wbbn16&W{%ds5~kHd%HXxJ5|;b(i|#49K-raTsT7(9)w8~GoY zj=miED4c|T2eP%AzE>HyA-B?wy_Rqejz0jq!@nybIs|DR6Y<%P{5e0z>nXoY`wa5) z*m>d&f%dHebSiA$DXu2fj}EO6vB8Vy7@!!ItNKgRiVPvo8WIu7*QI&-llM zN~m3iWUeBwqhCPYL5@?GvzGtk0IvS;CCV(CD!@0-3^~3fO#(+K-=5d z@169?8f6Zn{$Ipl9iz>VAs_N+rtcc-(45DIx$kA|67*U0H~RHTxNz@kSp7~Vj3AFS z#vqp_`i_{puAPuOAl(DGYABEAJ1bnrBl6sOc{OAiUzza~*r#RuSbs-hYCLCX3qMsHt9X&WOT>-IXE2sP5hY8RCjK( zV2lsL_V5|XFVglg{5UqlPtaNWFo!y04P)mXAh2Iz_J#h=V|qWw-q7xSaC`vliav;q zJrcJ0SpRYAE=019>3tRZJ}d?HYGBR+?c)8AoFDxBjQOVba^#-g^Lji-9K%0Gvd7Zj zO)@vn7arFuz;U(UWB>O;CtOdgaoCrj)6Oxcc`d^J0rZc-*uwST8RV1D3h(0M8m&RUg|_6Gu7Trz zpYzyu9Ak_(=DbsI%_%1({I*a(4>+GD^c{5|pT~z7gFof`!EqZl92{z=Qnv%Y|I9h% z81`YU#OF87_XnIyS(ErluD{G(#QCMby2kz=#xLd4^J@CK`-kYm>)$YW26BIne(u0_ zF+Mf!?;gT_1LY%V`x5eW7{c~_tWkE)rj#d#uPJ%C_^Kqy;?-msXe`d`2VZgZ0y~cej!Y z_D=ME>}Bj_bgzMPkOdeG%@x`s6Y3Wr+bR2eH3~h8tru3J`)Ayw%yFi~?mjcW7-|2O zBjnB0``3crj@|{v>VT=({CftQd5=YofR|x%rT!hFKp&;ndVL4-N$L`07I_a$g%`oG z7-K1V7xg9dR~z+1`a?4~7RPS>-C)kPSq!eRxxI(9O^rJH8(B&3P14VTwxfRyn>oeM z3*76aYiXPxLK)l2KF$T`(_lGFhB4Uskk7&(y1o~}BJ@qI^*-8i_20xEpgfnlsmQJ5 z;ahbj`MMF-ir<0b=Zb2uP^mTy&q~%^whr zpUCqB=ibue*|1?^j^Ebhf5>wGyy@ZA_{Wu8dXnu}{c&xKo#&V?yo$i=g7%7xQE%y0F4oA3Sn zxHW8ig*|XcHq1UMA0M-KKAyg7J|4$JpxV{*wy}p*NQgk4(siDOcvgilg{F-@S8T!LN8N;itJUXA7*%h4)6}!*9uF^6k0s z!$tIeOg`RkUOrs%6@FX(Fuse#d8&O%J}jJyj@)%OzLRuhJ{(A2J+$5ZD!!X^NR!9AC2F|gyJpH%v+oRvehgq+4+%PZQ?6LRA@y-kLVdf3_aL~2+FtmFqER|#d&o*_TN<2y8HQDSjIkH{()SW#IZNwph{TJanN-fuNy4Lg_WEWV$N^P zt2h=vo(pIHITzOMQ3#Rx`#d*%U_*2}|w6^m5_kE#rdL?vo z{qMM(9Dc%m&z_aAF6Onki03C__Ix52hOViENprXdx`XG%6Zt;TMqURywl56c)E6dj z?^M{oHLSj#I4ijS`90_Uk^C>-A3`uHlF*h+i6Fe?3zmG96_GH zrrrEXSo9!r|7uu!bd?;dTyy)v_=Bop@A1{J;9Q=^cn(;a(B`Y;vY5DMRzmwv_&-{A zRKiBiac%1>Ve%Ul*0B-}oKX!of2JA+x@i01Y8d=VC9LE=tjOGNc(xMWT2u)Wxp!}# z*B6f6xf+)Ki8(p88YbkcVbrzU55CPDXUSoh@%)_g=+or67xh>2{P?>{Xxph8rt-}5 z_dTj%=q<*-FJ=0x%&dl8h&|ImFcdA z++o%5ZlN0X8&M6*E+^(8Jab-F4euX8-QLwOVvzO!LN$z`|4!!RzHe5;l5bVR%jZ?Y 
zDfIbI{A;Yy$4{t+|6@Ksa1!%Fzwb8j+{|2TAdh9t(FOEz+yeej!uPA;-mh1~-#E5s->ZhcFEdVZJrnjiw;CTm zv6{SbQ#E{rm`{JN8c#dD8Xh6;D|4#x2bquOTC4G2nS(p0Fo$EPo6rARn@|nUoKp?Q zezh9@!T$QywahW&9FNZ)pRa~}E~ti$i>vXBtI791d@f~f&Zve9nZJGU{ruJRKd%}t zJ%IIN4lZe_hF5oCPH4Lq_9G|py1=c~@FH{B^hJD_lO4(7C*(7M{vKn^_hg;6{D=MW zwQ6|b9$tHhS=;7n_zu3~PpgLQX}>qmr`w^wi|;#ovUeA==T2pRu{Yjg{(Fd>If}h7 zz8dDhF!`Lq_=cI!(byB_dj0X_@?Csq;=}rlSy=ZO^J&(Y@oymK`Q+clelN3+Mlinh zKVa{VWlu2f5vTB59J8un#%1J4y!kg$w;%no*SlG-?!&8L6L~Z-?{8kou}9qLyRu)t zUJVnkW9@0T2%o~|7%%y~w|_Ov{U^sOwi08SJ)JxmOA~9-&R&_%vt-j@(eo;jGzvFLN*(1*#3cq3SY_n7lJ z_-8-FvB%ynF{dNglZDqRVeEzELz|7`Xgim7zp8|()ORyaMSLRm#9PeW1olTfg8L`> zo~rz3)}R^aII-etey3)MVy?eefP@y7^3w8^%9}v9DxakK~vcdYt2V z2kgXa`7_7+r^tt#d)N=9eW4Q8Qr^mO(7ZEa;hdEH zFZXJJc{>{00?vc%b6~iOh_hyKRT?Bea=36@3YT7`<`>>p3F=kYx{WC z(m(KgE?X!O z;kA_M{awmDKz!d>lQLt8)#DGQ>_X}{;nVaN@Rj%67ZKwI@_Z-j=@k5Q>7zG!5go!f zpJ07w&1yNsno5k~n^R^udJglp1AhzItDH-j7sehzZvwHLL)_|l|66hf>%m);$%oC_ zL*}H+0oL+KE_R!erk**`o>ytv6E?AD{hB%J&pyEXM!PU~#4bsEWAK|9Pr)n5F+T^k zAV(}~Zbs)Ib4^~nOMF(aM>c=V{=5TU*7E!@$o|6o*5KD(a?=~f7{0`h1+?wLZ}wf~ zhQnODlT-flxd=Wv>`U$n=AImitwz3zoOp)4t^nCS$bAa_oiy>S$UR2eo3u3(qeklV znRpvLy+@gVed6eZsNYYWJPqtd_Ai{V5}%e|@>wOurROV_G3h?Qo`ilWAXWvBve!1C zOFd9enb>-}xVLzf{q`>S?DOtczW4VwY~RTJ%sTW}^FAi>8i!F>)n&Klj> z^nW#V&Rf#6Xs_ems1ZJT4r56$X7dW~#fFAB|03Sxk&EqS=E%dA%|nN|cRxjM8{b1r zeC;V*^NhjcS-r)URe2Qr{rFN&ytwAl%a{YsNbFqRpBdVI-)IgH3+Gk(%@6V063F$V z?+fvPTs`Q){anJWHzI@Jn;J0$i zpjV@QE@6CJ_qFPqXALdib5F_qz!+D9FGb`9@0D;fXa zD374b_2Mw^-UqbH&%VTB9=vlhu=r$&ojV%-H2m*}y&}rX#K*VG=!+aq98b*Np#4pK z-WA>qVo?Dlo^BX@;$V)!w;A-sJelk8Z6J2|{%Lm%?ZiNzoweU#_bzZ6{oezBB7Eja z*N1+K(S3+|AI^J*K?jgatVbrA+<55gF(02$KX%`R)nk{wv}erR8!y`K{i%L?<~wK} z2xtB_={V@lQ07@o8edPP9AjSMl<~{|{6Q_{qM-lG$7B7pTwgjTi=EDT`#SxTJMM`g zQ^VTlEM$*cNABa7UCZ^$8j#Og=DmPUHN20&gOtf_cL(FvwYwj%9{HaqUP-wZz@FJo zza_};0jzg=hVBNS#K!+k%Tf5kI$~J^{pZeHYyS5;X)aiEb?&6+=Q=mjS*3nQ%s)S9 zq2GHb*CWr`<$u<4I+T6a>_d)y$S$Hh5jq~QH<(Z0qhr}O_5E5pAM>Ac{t;W;qnK*O zxDtLD;Qqo+rha#(tnVz-cdq%*&Do4`fK>Y9_pV{;Imf;e+ife?+e;i zfv-UgZTG>u7T}v3M@DwIr?BhbeS@Fy^tU5ATX6BsKdaC98GgUjI)Ya0gKN0}#}+{36%=X~xvl=Gm9x5h!f z*wvKr#odDp_wep|Y!Wk%^<1C9^rk?$U-Nx?7@ft`S>JR%=nzX+@ncW-&!3nN_3L5J zV_Z7xOB0tgx#(`7FLZP!&_b>_XQD?wy5F*9vPSDS)FUqUG&SsNf9Lw2-ee#C5%pV< zWerZhh(7yGni%+JUO{xoL#wg)>M}O&ncx^^8~Vk^E=r{EeO6gdBNl zrlCWB{`n}^sDE~1xV~8v%|F0D0c(-o!}`CEEWNit9rXy;U!3dm2WTJa{M z4L$qQbIOC19ry++Stpfw?irxIh58Io1bT&ewhXNY{V1oiWd|F(vt`*{O!*tI7wiGY zBRdBg1z%+8YtU}+$AQa%{GAFN1U{o&0xizMg`Nmxt3PxHR5E##e*%Vsv%vr`ADjg4 zq&);J0+Q(sK7n^D^Z+yv&U{C90rf&~9+(28D?QqMS*@jEa5`u~ZyIzY5Isa){gr{i zU?><5}cpLfd5u{iv2~P8qklu1Uf0~ z|E7X@n7VWrrzg3&P~vP$X|D;J#2V;yc-WI&rxux$;WM`h*<2hp(L2K?{vV!S;d?Fd zS&wXFb=aio$61H|(;@$N=h9bD#|L*Zdbfk~!X{>^pMm`mnSLz#$_G!e#fD)__8g!w zUz#Z!{E(a_$1FKu@y9A}h`qfE%6$4Uz`l)9kAm|7{aVIkhh^%PKJ|N^=6UcYIFmZD z@UN?nvixJIYtHc9j0K9Z!2I~&LlsAGNl<$S=T4PM+ zLZItl2voV&9h?RtK>2wLRJqF7eZQ19+u@7qTHXwlkR+9>})rs!fniQI=}l zyMYTFPy;+rAWC0?e3g7Hh=634YHgLh1TqcK7}SO5K(&5LR{E9MHkJ3P4HUu?7K427 z3sBHVKGG(1qpq`}Gmp#d9JhEkgd{%oL`2E5@NZFw+id;UBj`pGm<>x^qbq8F4?eC$(;mOC% z?33N01L1ii;X~Kc)&&2zyib}*Ifb2z;rBpJ^)XP(=hpQ3G~N^We}~0;5dZ8jjob#H z@y>$I2MP2W8Z-C4iqU9fEj){U9KJ`{rL3`NUefR!pjfCq5zNHWLSxhe&>G;Pr+5^= z(^wRj1a;}Uw5tzapL#Pe>X&xekW4I?DXT3lQ1^8yCxPT-KLM33kFs9!*95T*EH?dD zR@Oq*F53nwUFj9!n{qf!JwcoNuu$c)Y`5BwjN0|`^Bj5U7=YV{t(&OXG}3@hSk?Uy7j`zt(n*@f%?9O*#(P0um`= z3bnuk=~f_Tu(zJNWMd#fy%<~SL-{ExyA7Zl`~y$}Rb4vrqZx=VJ4w(2;y`jVKGo{3KE+Ko8nXXM)d-^=tefl)VAFS}vE(j^nl z?`~%6749tyZ*RQz(k=)KN4HR9B3KwXuWEVKs2i%6EvQ<&;FhZ8Si5?`yb1gyn{V*{ z{4W>|mR5wz!(|l}B^9ASq#{yU77Ue?l!Zd2q0;h5q^vYpT3!;Am#6OBR$YvTLUdxD 
zoIO0!>z^tgcM_u(daEJl6&NL_N7gJ_+)?kYF@HU=2eQI3IXa856c}~Q&C9EoE}K`q ztfS)nkG*)&a_7iu4iRcq3w3nzyzTqv?!q-8*U5A{H4-*=Z0Fw6 zrOcVaB>{5PijHmTt)!#cy)WmLw+1p-EqO(oT4_hM%FU0Ta%8n(bK2C(I;ypww%p$8 z+o_dzRQs%G=mSTNan|#?T&$@Z6L?E}fbUU?*j%r8G$oSQVd+Ri9?X_|{ z+uu&BQ*Ir7X!Bxiops8s_JPd3YMiRWYMpXq(4WT-VO29iu2y}UR;LtMxbTV1N7gF5 z=5Sw~GUVue&1lo=lpsg%YfYP0r~DZAMsjL`F$uZjdbdrhQ+l*t?sK)0a}STJQ+Bjp z;&bOYz9EmRI=eA+Ose*4ZV_&_z4rKON8Syh6MOU+o)sP3b;Mz%f!R_k-Pk?DSn)Wi b&I`SsqWz!D)b)RKVs|ahW+4A0pOO6^2*p~` diff --git a/tutorials/model/history.txt b/tutorials/model/history.txt deleted file mode 100644 index 987e3a99..00000000 --- a/tutorials/model/history.txt +++ /dev/null @@ -1,4 +0,0 @@ -### Round 0 ### -init => 0.0 -0.0 => fit => 0.1 -0.1 => prune_input => 0.2
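Editorial context, not part of the patch: the deleted tutorials/model files (the 0.0/0.1/0.2 checkpoints and history.txt above) are the auto-save artifacts a pykan tutorial run leaves behind. A minimal sketch of the kind of session that would produce this history, assuming the tutorial-style API (KAN with auto_save, create_dataset, fit, prune_input); the exact parameter names and defaults are assumptions, not taken from this patch:

    # sketch only: assumes pykan's tutorial-style API
    import torch
    from kan import KAN, create_dataset

    # auto_save writes versioned checkpoints and history.txt under ./model
    model = KAN(width=[2, 5, 1], grid=3, k=3, seed=0, auto_save=True)  # "init => 0.0"

    # toy dataset: y = exp(sin(pi*x1) + x2^2)
    f = lambda x: torch.exp(torch.sin(torch.pi * x[:, [0]]) + x[:, [1]] ** 2)
    dataset = create_dataset(f, n_var=2)

    model.fit(dataset, opt="LBFGS", steps=20)  # "0.0 => fit => 0.1"
    model.prune_input()                        # "0.1 => prune_input => 0.2"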