repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/progress_bar.py | from collections import OrderedDict
from numbers import Number
from tqdm import tqdm
from .meters import AverageMeter, RunningAverageMeter, TimeMeter
class ProgressBar:
    """
    Wraps an iterable such as a train_loader. If ``quiet`` is True it behaves
    exactly like the underlying iterator; otherwise it additionally renders a
    tqdm progress bar with formatted statistics.
    """

    def __init__(self, iterable, epoch, quiet=False):
        """
        Args:
            iterable: The iterable to wrap (e.g. a DataLoader).
            epoch: Current epoch number, shown as the bar prefix.
            quiet: If True, no progress bar is created.
        """
        self.epoch = epoch
        self.quiet = quiet
        self.prefix = f"epoch {epoch:02d}"
        self.iterable = iterable if self.quiet else tqdm(iterable, self.prefix, leave=False)

    def __iter__(self):
        return iter(self.iterable)

    def log(self, stats, verbose=False):
        """Update the progress bar postfix with the given statistics."""
        if not self.quiet:
            self.iterable.set_postfix(self.format_stats(stats, verbose), refresh=True)

    def format_stats(self, stats, verbose=False):
        """Convert a stats mapping into an OrderedDict of display strings."""
        postfix = OrderedDict(stats)  # method set_postfix requires an ordered mapping
        for key, value in postfix.items():
            if isinstance(value, Number):
                # Fixed-point for ordinary magnitudes, scientific for tiny ones.
                # abs() so that negative values are not unconditionally pushed
                # into scientific notation (the old `value > 0.001` check was).
                fmt = "{:.6f}" if abs(value) > 0.001 else "{:.3e}"
                postfix[key] = fmt.format(value)
            elif isinstance(value, (AverageMeter, RunningAverageMeter)):
                if verbose:
                    postfix[key] = f"{value.avg:.6f} ({value.val:.6f})"
                else:
                    postfix[key] = f"{value.avg:.6f}"
            elif isinstance(value, TimeMeter):
                postfix[key] = f"{value.elapsed_time:.1f}s"
            elif not isinstance(postfix[key], str):
                postfix[key] = str(value)
        return postfix

    def print(self, stats, verbose=False):
        """Return a single log line of the form 'epoch XX | key value | ...'."""
        postfix = " | ".join(key + " " + value.strip() for key, value in self.format_stats(stats, verbose).items())
        return f"{self.prefix + ' | ' if self.epoch is not None else ''}{postfix}"
| 1,860 | 41.295455 | 145 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/losses.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import L1Loss, MSELoss
class SSIMLoss(nn.Module):
    """
    SSIM loss module: returns 1 - mean(SSIM) computed with a uniform window.
    """

    def __init__(self, win_size: int = 7, k1: float = 0.01, k2: float = 0.03):
        """
        Args:
            win_size: Window size for SSIM calculation.
            k1: k1 parameter for SSIM calculation.
            k2: k2 parameter for SSIM calculation.
        """
        super().__init__()
        self.win_size = win_size
        # Keep the stabilisation constants as plain floats. The previous code
        # pinned them (and cov_norm) to a device chosen at construction time;
        # since they were not registered as buffers they did not follow later
        # .to(device) / DataParallel moves of the module.
        self.k1, self.k2 = k1, k2
        # A registered buffer moves automatically with the module.
        self.register_buffer("w", torch.ones(1, 1, win_size, win_size) / win_size ** 2)
        NP = win_size ** 2
        self.cov_norm = NP / (NP - 1)  # unbiased (co)variance normalisation

    def forward(self, X: torch.Tensor, Y: torch.Tensor, data_range: torch.Tensor) -> torch.Tensor:
        """
        Args:
            X: First image batch of shape (N, 1, H, W).
            Y: Second image batch, same shape as X.
            data_range: Per-sample dynamic range of shape (N,).

        Returns:
            Scalar tensor: 1 - mean SSIM over all windows and samples.
        """
        assert isinstance(self.w, torch.Tensor)
        data_range = data_range[:, None, None, None]
        C1 = (self.k1 * data_range) ** 2
        C2 = (self.k2 * data_range) ** 2
        ux = F.conv2d(X, self.w)  # local means  # type: ignore
        uy = F.conv2d(Y, self.w)
        uxx = F.conv2d(X * X, self.w)
        uyy = F.conv2d(Y * Y, self.w)
        uxy = F.conv2d(X * Y, self.w)
        vx = self.cov_norm * (uxx - ux * ux)  # local variances
        vy = self.cov_norm * (uyy - uy * uy)
        vxy = self.cov_norm * (uxy - ux * uy)  # local covariance
        A1, A2, B1, B2 = (
            2 * ux * uy + C1,
            2 * vxy + C2,
            ux ** 2 + uy ** 2 + C1,
            vx + vy + C2,
        )
        D = B1 * B2
        S = (A1 * A2) / D
        return 1 - S.mean()
| 1,886 | 31.534483 | 98 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/training_functions.py |
import torch
from torch.nn import L1Loss, MSELoss
# Implementation of SSIMLoss
from functions.training.losses import SSIMLoss
# Apply a center crop on the larger image to the size of the smaller.
#from functions.data.transforms import center_crop_to_smallest
# In order to get access to attributes stored in save_checkpoint
from functions.train_utils import save_checkpoint
class Compute_batch_train_loss:
    """Accumulates the configured training loss terms for one batch and logs
    each component to its meter."""

    def __init__(self) -> None:
        # The loss modules are created once and reused for every batch.
        self.loss_fct_lookup = {
            'SSIM' : SSIMLoss(),
            'L1' : L1Loss(reduction='sum'),
            'L2' : MSELoss(reduction='sum'),
        }

    def get_batch_train_loss(self, hp_exp, output, target, max_value, train_meters):
        """Compute the (possibly cumulated) training loss for a batch.

        Args:
            hp_exp: Experiment hyper-parameters; 'loss_functions' selects the terms.
            output: Network output tensor.
            target: Ground-truth tensor.
            max_value: Dynamic range forwarded to the SSIM loss.
            train_meters: Dict of meters, one per configured loss term.

        Returns:
            The sum of all configured loss terms for this batch.
        """
        total = 0
        for name in hp_exp['loss_functions']:
            if name == 'SSIM':
                term = self.loss_fct_lookup['SSIM'](output, target, data_range=max_value)
            elif name in ('L1', 'L1_kspace'):
                # 'L1_kspace' denotes the L1 loss in the frequency domain;
                # computationally it is identical to 'L1' (both normalised
                # by the L1 norm of the target).
                term = self.loss_fct_lookup['L1'](output, target) / torch.sum(torch.abs(target))
            elif name in ('L2', 'L2_kspace'):
                # 'L2_kspace' denotes the L2 loss in the frequency domain;
                # computationally it is identical to 'L2' (both normalised
                # by the squared L2 norm of the target).
                term = self.loss_fct_lookup['L2'](output, target) / torch.sum(torch.abs(target)**2)
            else:
                # Unknown names are silently skipped, mirroring the original
                # implementation (its ValueError was commented out).
                continue
            train_meters["train_" + name].update(term.item())
            total += term
        if len(hp_exp['loss_functions']) > 1:
            train_meters['cumulated_loss'].update(total.item())
        return total
def configure_optimizers(hp_exp, parameters, optimizer=None):
    """Create the optimizer (unless one is passed in) and its LR scheduler.

    Args:
        hp_exp: Hyper-parameter dict. Reads 'optimizer', 'lr', 'lr_scheduler'
            and the scheduler-specific keys ('lr_decay_factor', 'lr_patience',
            'lr_threshold', 'lr_min' or 'lr_milestones').
        parameters: Iterable of parameters to optimize.
        optimizer: Optional existing optimizer (e.g. restored from a
            checkpoint); if given, only the scheduler is created.

    Returns:
        Tuple (optimizer, scheduler).

    Raises:
        ValueError: For an unknown optimizer or scheduler name. Previously
            unknown names fell through and surfaced as UnboundLocalError.
    """
    if not optimizer:
        if hp_exp['optimizer'] == 'Adam':
            optimizer = torch.optim.Adam(
                params=parameters,
                lr=hp_exp['lr'],
                betas=(0.9, 0.999),
                eps=1e-08,
                weight_decay=0.0,
                amsgrad=False,
            )
        elif hp_exp['optimizer'] == 'RMSprop':
            optimizer = torch.optim.RMSprop(
                parameters,
                lr=hp_exp['lr'],
                weight_decay=0.0,
            )
        else:
            raise ValueError(f"Unknown optimizer {hp_exp['optimizer']!r}.")

    # NOTE: the deprecated `verbose` flag was dropped for compatibility with
    # recent PyTorch versions (it was removed from the scheduler API).
    if hp_exp['lr_scheduler'] == 'ReduceLROnPlateau':
        # save_checkpoint.mode tells the scheduler whether the monitored
        # metric is minimized or maximized.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer=optimizer,
            mode=save_checkpoint.mode,
            factor=hp_exp['lr_decay_factor'],
            patience=hp_exp['lr_patience'],
            threshold=hp_exp['lr_threshold'],
            threshold_mode='abs',
            cooldown=0,
            min_lr=hp_exp['lr_min'],
            eps=1e-08,
        )
    elif hp_exp['lr_scheduler'] == 'MultiStepLR':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=optimizer,
            milestones=hp_exp['lr_milestones'],
            gamma=hp_exp['lr_decay_factor'],
            last_epoch=-1,
        )
    else:
        raise ValueError(f"Unknown lr_scheduler {hp_exp['lr_scheduler']!r}.")

    return optimizer, scheduler
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/meters.py | import time
import torch
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # most recent per-item value
        self.avg = 0    # running average over everything seen so far
        self.sum = 0    # total of all raw values
        self.count = 0  # total number of items seen

    def update(self, val, n=1):
        """Record a new value; ``n`` is the number of items it covers."""
        v = val.item() if isinstance(val, torch.Tensor) else val
        self.val = v / n
        self.sum += v
        self.count += n
        self.avg = self.sum / self.count
class RunningAverageMeter(object):
    """Exponential moving average of a metric."""

    def __init__(self, momentum=0.98):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget all previous observations."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Fold a new observation into the moving average."""
        v = val.item() if isinstance(val, torch.Tensor) else val
        # The first observation initialises the average; afterwards EMA.
        if self.val is None:
            self.avg = v
        else:
            self.avg = self.avg * self.momentum + v * (1 - self.momentum)
        self.val = v
class TimeMeter(object):
    """Measures throughput: counts items against elapsed wall-clock time."""

    def __init__(self, init=0):
        self.reset(init)

    def reset(self, init=0):
        """Restart the clock; ``init`` is an offset carried over (e.g. resume)."""
        self.init = init
        self.start = time.time()
        self.n = 0

    def update(self, val=1):
        """Count ``val`` more processed items."""
        self.n += val

    @property
    def avg(self):
        """Items processed per second."""
        return self.n / self.elapsed_time

    @property
    def elapsed_time(self):
        """Seconds since the last reset plus the initial offset."""
        return self.init + (time.time() - self.start)
| 1,318 | 19.936508 | 75 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/debug_helper.py | import torch
import numpy as np
from typing import Dict, Optional, Sequence, Tuple, Union, List
import os
import matplotlib.pyplot as plt
def save_figure(
    x: np.ndarray,
    figname: str,
    hp_exp: Dict,
    save: Optional[bool] = True,
):
    """Save a grayscale image of ``x`` under <log_path>/train_figures/.

    Args:
        x: 2D array (height, width) to visualise.
        figname: File name (without extension) for the saved figure.
        hp_exp: Experiment hyper-parameters; 'log_path' gives the target dir.
        save: If False, do nothing (convenient debug switch).
    """
    if save:
        save_path = hp_exp['log_path'] + 'train_figures/'
        # makedirs(..., exist_ok=True) creates missing parents and does not
        # race/fail when the directory already exists (os.mkdir did both).
        os.makedirs(save_path, exist_ok=True)
        fig = plt.figure(figsize=(7, 7))
        ax = fig.add_subplot(111)
        ax.imshow(x, 'gray')
        ax.axis('off')
        #ax.set_title(title, fontsize=10)
        fig.tight_layout()
        plt.savefig(save_path + figname + ".png")
        plt.close(fig)
def print_tensor_stats(
    x: torch.Tensor,
    name: Optional[str] = "Tensor",
    dim: Optional[Union[int, List]] = None,
    precision: Optional[int] = 6,
):
    """
    Prints mean, std, min, max and norms of a real-valued tensor.

    If ``dim`` is given, the stats are computed separately for every slice
    along that dimension (or along each dimension in the list).

    Args:
        x: Tensor to inspect.
        name: Label printed in front of the stats.
        dim: Single dimension or list of dimensions to split the stats over.
        precision: Number of decimals used when rounding printed values.
    """
    shape = x.shape
    # Normalize a scalar dim to a one-element list. NOTE: the previous check
    # `if dim and ...` silently dropped `dim=0` because 0 is falsy.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    if dim:
        for d in dim:
            #print(f"dimension {d}")
            # Move dimension d to the front so x_reorder[s] is one slice.
            x_reorder = torch.moveaxis(x, d, 0)
            for s in range(shape[d]):
                l1norm = torch.sum(torch.abs(x_reorder[s]))
                l2norm = torch.sum(torch.abs(x_reorder[s]**2))
                rss = torch.sqrt(torch.sum(torch.abs(x_reorder[s]**2)))
                print(f"""{name} shape {x.shape} dim {d} {s+1}/{shape[d]}:
                mean {np.round(x_reorder[s].mean().item(),precision)},
                std {np.round(x_reorder[s].std().item(),precision)},
                min {np.round(x_reorder[s].min().item(),precision)},
                max {np.round(x_reorder[s].max().item(),precision)},
                l1norm {np.round(l1norm.item(),precision)},
                l2norm {np.round(l2norm.item(),precision)},
                rss {np.round(rss.item(),precision)}""")
    else:
        l1norm = torch.sum(torch.abs(x))
        l2norm = torch.sum(torch.abs(x**2))
        rss = torch.sqrt(torch.sum(torch.abs(x**2)))
        print(f"""{name} shape {x.shape}:
        mean {np.round(x.mean().item(),precision)},
        std {np.round(x.std().item(),precision)},
        min {np.round(x.min().item(),precision)},
        max {np.round(x.max().item(),precision)},
        l1norm {np.round(l1norm.item(),precision)},
        l2norm {np.round(l2norm.item(),precision)},
        rss {np.round(rss.item(),precision)}""")
| 2,560 | 34.082192 | 75 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/models/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.

    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """

    def __init__(
        self,
        in_chans: int,
        out_chans: int,
        chans: int = 32,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob

        # Encoder: one ConvBlock per resolution level, channel width doubling.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        width = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(width, width * 2, drop_prob))
            width *= 2

        # Bottleneck block at the lowest resolution.
        self.conv = ConvBlock(width, width * 2, drop_prob)

        # Decoder: a transpose conv for upsampling plus a ConvBlock applied
        # after concatenating the matching skip connection.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
            self.up_conv.append(ConvBlock(width * 2, width, drop_prob))
            width //= 2

        self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(width * 2, width, drop_prob),
                # The final 1x1 convolution is the only conv layer with a bias.
                nn.Conv2d(width, self.out_chans, kernel_size=1, stride=1),
            )
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        skips = []
        output = image

        # Encoder pass: remember each level's activation for the skip path.
        for layer in self.down_sample_layers:
            output = layer(output)
            skips.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)

        output = self.conv(output)

        # Decoder pass: upsample, pad to match the skip, concatenate, convolve.
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            skip = skips.pop()
            output = transpose_conv(output)

            # Reflect-pad on the right/bottom to handle odd input dimensions.
            pad_right = 1 if output.shape[-1] != skip.shape[-1] else 0
            pad_bottom = 1 if output.shape[-2] != skip.shape[-2] else 0
            if pad_right or pad_bottom:
                output = F.pad(output, [0, pad_right, 0, pad_bottom], "reflect")

            output = torch.cat([output, skip], dim=1)
            output = conv(output)

        return output
class ConvBlock(nn.Module):
    """
    A convolutional block of two 3x3 convolutions, each followed by instance
    normalization, LeakyReLU activation and dropout.
    """

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob

        def _stage(cin: int, cout: int):
            # One conv -> norm -> activation -> dropout unit.
            return [
                nn.Conv2d(cin, cout, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(cout),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]

        # Two stages chained: in_chans -> out_chans -> out_chans.
        self.layers = nn.Sequential(*_stage(in_chans, out_chans), *_stage(out_chans, out_chans))

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """
    A block of one 2x2 stride-2 transpose convolution followed by instance
    normalization and LeakyReLU activation; doubles the spatial resolution.
    """

    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans

        upsample = nn.ConvTranspose2d(
            in_chans, out_chans, kernel_size=2, stride=2, bias=False
        )
        self.layers = nn.Sequential(
            upsample,
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H*2, W*2)`.
        """
        return self.layers(image)
| 6,021 | 31.907104 | 113 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/data/mri_dataset.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import pickle
import xml.etree.ElementTree as etree
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import h5py
import numpy as np
import torch
import yaml
def et_query(
    root: etree.Element,
    qlist: Sequence[str],
    namespace: str = "http://www.ismrm.org/ISMRMRD",
) -> str:
    """
    ElementTree query function.

    Queries an xml document via ElementTree, using ``qlist`` as a chain of
    nested (descendant) element names.

    Args:
        root: Root of the xml to search through.
        qlist: A list of strings for nested searches, e.g. ["Encoding",
            "matrixSize"].
        namespace: Optional; xml namespace to prepend to the query.

    Returns:
        The retrieved element's text as a string.

    Raises:
        RuntimeError: If no matching element exists.
    """
    prefix = "ismrmrd_namespace"
    # Build a nested descendant query of the form .//ns:a//ns:b//...
    query = "." + "".join(f"//{prefix}:{element}" for element in qlist)
    value = root.find(query, {prefix: namespace})
    if value is None:
        raise RuntimeError("Element not found")
    return str(value.text)
class SliceDataset(torch.utils.data.Dataset):
    """
    A PyTorch Dataset that provides access to MR image slices.

    Each example is one 2D slice of a k-space volume stored in an HDF5 file;
    optional per-slice coil sensitivity maps can be loaded alongside.
    """

    def __init__(
        self,
        dataset: str,
        path_to_dataset: str,
        path_to_sensmaps: str,
        provide_senmaps: bool,
        #path_to_max_vals: str,
        #use_SENSE_targets: bool,
        challenge: str,
        transform: Optional[Callable] = None,
        use_dataset_cache: bool = True,
        dataset_cache_file: Union[str, Path, os.PathLike] = "dataset_cache.pkl",
        num_cols: Optional[Tuple[int]] = None,
    ):
        """
        Args:
            dataset: Path to a file that contains a list of volumes/slices in the dataset.
            path_to_dataset: Path to a all the volumes/slices in the dataset.
            path_to_sensmaps: Path to a all the sensmaps. One sensmap for each slice
            provide_senmaps: Load sensmaps or not
            challenge: "singlecoil" or "multicoil" depending on which challenge
                to use.
            transform: Optional; A callable object that pre-processes the raw
                data into appropriate form. The transform function should take
                'kspace', 'target', 'attributes', 'filename', and 'slice' as
                inputs. 'target' may be null for test data.
            use_dataset_cache: Whether to cache dataset metadata. This is very
                useful for large datasets like the brain data.
            dataset_cache_file: Optional; A file in which to cache dataset
                information for faster load times.
            num_cols: Optional; If provided, only slices with the desired
                number of columns will be considered.
        """
        if challenge not in ("singlecoil", "multicoil"):
            raise ValueError('challenge should be either "singlecoil" or "multicoil"')

        self.dataset_cache_file = Path(dataset_cache_file)
        self.path_to_sensmaps = path_to_sensmaps
        self.provide_senmaps = provide_senmaps
        #self.path_to_max_vals = path_to_max_vals
        #self.use_SENSE_targets = use_SENSE_targets
        self.transform = transform
        # HDF5 key of the reference reconstruction depends on the track.
        self.recons_key = (
            "reconstruction_esc" if challenge == "singlecoil" else "reconstruction_rss"
        )
        self.examples = []
        # Load the dataset cache if it exists and we want to use it.
        # The dataset cache is a dictionary with one entry for every train, val or test set
        # for which a cache has already been created. One entry contains a list of tuples,
        # where each tuple consists of (filename, slice_ind, meta_data).
        if self.dataset_cache_file.exists() and use_dataset_cache:
            with open(self.dataset_cache_file, "rb") as f:
                dataset_cache = pickle.load(f)
        else:
            dataset_cache = {}
        # Check if the dataset is in the cache.
        # If yes, use that cache as list of data examples with corresponding meta data,
        # if not, then generate the list of data examples and also the meta data.
        if dataset in dataset_cache.keys() and use_dataset_cache:
            logging.info(f"For dataset {dataset} using dataset cache from {self.dataset_cache_file}.")
            self.examples = dataset_cache[dataset]
        else:
            with open(dataset, 'r') as stream:
                # files contains a list of dictionaries. Every dictionary contains an entry fname,
                # which can contain a path prefix like multicoil_val, and optionally a slice number.
                # NOTE(review): entries are accessed below as file['path'], file['slice'] and
                # file['filename'] — confirm the yaml schema matches.
                files = yaml.safe_load(stream)
            # Go through all files and add them to the data examples.
            # If no slice number is given, all slices are added to the dataset.
            #print(files)
            for file in files:
                metadata, num_slices = self._retrieve_metadata(path_to_dataset + file['path'])
                if file['slice'] is not None:
                    self.examples += [
                        (path_to_dataset + file['path'], file['slice'], metadata, file['filename'])
                    ]
                else:
                    self.examples += [
                        (path_to_dataset + file['path'], slice_ind, metadata, file['filename']) for slice_ind in range(num_slices)
                    ]
            if use_dataset_cache:
                dataset_cache[dataset] = self.examples
                logging.info(f"For dataset {dataset} saving dataset cache to {self.dataset_cache_file}.")
                with open(self.dataset_cache_file, "wb") as f:
                    pickle.dump(dataset_cache, f)
        if num_cols:
            # Keep only slices whose encoded k-space has the requested width.
            self.examples = [
                ex
                for ex in self.examples
                if ex[2]["encoding_size"][1] in num_cols  # type: ignore
            ]

    def _retrieve_metadata(self, fname):
        # Parse the embedded ISMRMRD XML header for encoding/recon geometry
        # and read the number of slices from the k-space array.
        with h5py.File(fname, "r") as hf:
            et_root = etree.fromstring(hf["ismrmrd_header"][()])
            enc = ["encoding", "encodedSpace", "matrixSize"]
            enc_size = (
                int(et_query(et_root, enc + ["x"])),
                int(et_query(et_root, enc + ["y"])),
                int(et_query(et_root, enc + ["z"])),
            )
            rec = ["encoding", "reconSpace", "matrixSize"]
            recon_size = (
                int(et_query(et_root, rec + ["x"])),
                int(et_query(et_root, rec + ["y"])),
                int(et_query(et_root, rec + ["z"])),
            )
            lims = ["encoding", "encodingLimits", "kspace_encoding_step_1"]
            enc_limits_center = int(et_query(et_root, lims + ["center"]))
            enc_limits_max = int(et_query(et_root, lims + ["maximum"])) + 1
            # Phase-encode columns outside [padding_left, padding_right) were
            # not acquired (zero-padded).
            padding_left = enc_size[1] // 2 - enc_limits_center
            padding_right = padding_left + enc_limits_max
            num_slices = hf["kspace"].shape[0]
        metadata = {
            "padding_left": padding_left,
            "padding_right": padding_right,
            "encoding_size": enc_size,
            "recon_size": recon_size,
        }
        return metadata, num_slices

    def __len__(self) -> int:
        return len(self.examples)

    def __getitem__(self, i: int):
        filepath, dataslice, metadata, filename = self.examples[i]
        if self.provide_senmaps:
            # One sensitivity-map file per slice, derived from the file name.
            smap_fname = filename + '_smaps_slice' + str(dataslice) + '.h5'
            with h5py.File(self.path_to_sensmaps + smap_fname, "r") as hf:
                sens_maps = hf["sens_maps"][()] #np.array of shape coils,height,width with complex valued entries
        else:
            sens_maps = None
        with h5py.File(filepath, "r") as hf:
            kspace = hf["kspace"][dataslice]
            #mask = np.asarray(hf["mask"]) if "mask" in hf else None
            target = hf[self.recons_key][dataslice] if self.recons_key in hf else None
            attrs = dict(hf.attrs)
            attrs.update(metadata)
        # The transform turns raw (kspace, sens_maps, target, attrs, ...) into
        # the training sample; a transform is assumed to always be provided.
        sample = self.transform(kspace, sens_maps, target, attrs, filename, dataslice)
        return sample
| 8,347 | 36.773756 | 130 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/data/subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import contextlib
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
@contextlib.contextmanager
def temp_seed(rng: "np.random.RandomState", seed: Optional[Union[int, Tuple[int, ...]]]):
    """Context manager that temporarily seeds ``rng``.

    If ``seed`` is None the generator is left untouched. Otherwise the
    generator's state is saved, the seed is applied for the duration of the
    block, and the previous state is restored afterwards — so a seeded mask
    draw does not perturb the surrounding random stream.

    Args:
        rng: A numpy RandomState instance (the old annotation ``np.random``
            named the module, not a type).
        seed: Seed to apply, or None to do nothing.
    """
    if seed is None:
        yield
    else:
        state = rng.get_state()
        rng.seed(seed)
        try:
            yield
        finally:
            rng.set_state(state)
class MaskFunc:
    """
    An object for GRAPPA-style sampling masks.

    This creates a sampling mask that densely samples the center while
    subsampling outer k-space regions based on the undersampling factor.
    It creates one mask for the input and one mask for the target.
    """

    def __init__(self, self_sup: bool, center_fraction: float, acceleration: float, acceleration_total: Optional[float]):
        """
        Args:
            self_sup: If False the target mask is all ones. If True the target mask is also undersampled.
            center_fraction: Fraction of low-frequency columns to be retained both in input and target.
            acceleration: Amount of under-sampling for the input.
            acceleration_total: Required if self_sup=True. Determines how many measurements are
                available for the split into input and target masks.

        Raises:
            ValueError: If self_sup is True but acceleration_total is missing.
        """
        # Validate before assigning any state (was `== None`, now `is None`).
        if self_sup and acceleration_total is None:
            raise ValueError("For self-supervised training or validation acceleration_total has to be defined.")
        self.self_sup = self_sup
        self.center_fraction = center_fraction  # nu in the sampling equations
        self.acceleration = acceleration  # p = 1/acceleration
        self.acceleration_total = acceleration_total  # mu = 1/acceleration_total
        self.rng = np.random.RandomState()  # pylint: disable=no-member

    def __call__(
        self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None
    ) -> torch.Tensor:
        # Subclasses implement the concrete mask sampling strategy.
        raise NotImplementedError
class n2nMaskFunc(MaskFunc):
    """
    n2nMaskFunc creates a sub-sampling mask of a given shape.

    It returns a mask for the training input and a mask for the training target.
    """

    def __call__(
        self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None, fix_selfsup_inputtarget_split: Optional[bool] = True
    ) -> torch.Tensor:
        """
        Create the mask.

        Args:
            shape: The shape of the mask to be created. The shape should have
                at least 3 dimensions. Samples are drawn along the second last
                dimension.
            seed: Seed for the random number generator. Setting the seed
                ensures the same mask is generated each time for the same
                shape. The random state is reset afterwards.
            fix_selfsup_inputtarget_split: Only important for self-sup training.
                If it is False the input/target split is random.

        Returns:
            input_mask: Input mask of the specified shape.
            target_mask: Target mask is all ones in the supervised case, but ones and zeros in the self-supervised case.
            weighted_target_mask: Only important for self-supervised training. Must be used to scale the random non-center
                lines at the output and target before computing the training loss (and validation losses in the k-space).
                If supervised training weighted_target_mask is just ones same as target_mask.
        """
        if len(shape) < 3:
            raise ValueError("Shape should have 3 or more dimensions")

        with temp_seed(self.rng, seed):
            n = shape[-2]
            nu = self.center_fraction
            p = 1 / self.acceleration
            mask_shape = [1 for _ in shape]
            mask_shape[-2] = n
            if self.self_sup:
                mu = 1 / self.acceleration_total
                q = (mu - p + nu - mu * nu) / (1 - p)
                # 1. Determine the set S_low consisting of the indices of the nu*n many center frequencies which are always sampled
                size_low = int(round(n * nu))
                pad = (n - size_low + 1) // 2
                # set of indices of all lines in kspace
                S_all = np.arange(n)
                S_low = S_all[pad : pad + size_low]
                # 1.1 Determine S_mu_high, i.e. S_mu without S_low, so only the random high frequencies
                # set of indices of all high frequencies
                S_high = np.hstack((S_all[:pad], S_all[pad + size_low :]))
                S_mu_size_high = int(round((mu - nu) * n))
                S_p_size_high = int(round((p - nu) * n))
                #### Depending on whether the input/target split is fixed or re-sampled, the order of sampling needs to be adapted
                # This is so that validation during training samples the same input mask as during testing
                # Recall that during testing selfsup=False, hence S_mu_high is not sampled.
                if fix_selfsup_inputtarget_split:
                    # If split is fixed, first sample S_p_high and then additional lines for S_mu_high
                    # such that the set S_p_high is the same as if we would sample for selfsup=False
                    S_p_high = self.rng.choice(S_high, size=S_p_size_high, replace=False, p=None)
                    S_mu_size_high_remaining = S_mu_size_high - S_p_size_high
                    S_high_remaining = np.array(list(set(S_high) - set(S_p_high)))
                    S_q_high = self.rng.choice(S_high_remaining, size=S_mu_size_high_remaining, replace=False, p=None)
                else:
                    # If split is random, first sample S_mu_high such that this set is always fixed.
                    S_mu_high = self.rng.choice(S_high, size=S_mu_size_high, replace=False, p=None)
                    # 2. From S_mu_high sample the set S_p_high of size (p-nu)n.
                    # BUG FIX: this previously used the global np.random.choice,
                    # which escapes the temp_seed(self.rng, seed) context and
                    # breaks reproducibility of seeded masks.
                    S_p_high = self.rng.choice(S_mu_high, size=S_p_size_high, replace=False, p=None)
                    # 3. All other indices in S_mu_high add to the set S_q_high
                    S_q_high = np.array(list(set(S_mu_high) - set(S_p_high)))
                # 4. Determine the size of the overlap between S_p_high and S_q_high, sample this many indices from S_p_high and add them to S_q_high
                overlap_size_high = int(round(((p - nu) / (1 - nu)) * ((q - nu) / (1 - nu)) * (n - n * nu)))
                S_overlap = S_p_high[0:overlap_size_high]
                S_q_high = np.concatenate([S_q_high, S_overlap])
                # 5. Define the final input and target masks by setting entries to zero or to one for S_p=S_low+S_p_high and S_q=S_low+S_q_high
                input_mask = np.zeros(n)
                input_mask[S_low] = 1.0
                input_mask[S_p_high] = 1.0
                input_mask = torch.from_numpy(input_mask.reshape(*mask_shape).astype(np.float32))
                target_mask = np.zeros(n)
                target_mask[S_low] = 1.0
                target_mask[S_q_high] = 1.0
                target_mask = torch.from_numpy(target_mask.reshape(*mask_shape).astype(np.float32))
                # Create a version of the target mask where the random entries are weighted
                weight_on_random_lines = np.sqrt((1 - nu) / (q - nu))
                target_mask_weighted = np.zeros(n)
                target_mask_weighted[S_low] = 1.0
                target_mask_weighted[S_q_high] = weight_on_random_lines
                target_mask_weighted = torch.from_numpy(target_mask_weighted.reshape(*mask_shape).astype(np.float32))
            else:
                # In the supervised case this just creates a random input mask with fixed center lines, same as random_mask
                # The target mask is all ones
                target_mask = torch.ones(mask_shape, dtype=torch.float32)
                target_mask_weighted = target_mask.clone()
                size_low = int(round(n * nu))
                p_size_high = int(round(n * p)) - size_low
                pad = (n - size_low + 1) // 2
                # set of indices of all lines in kspace
                S_all = np.arange(n)
                # set of indices of all high frequencies
                S_high = np.hstack((S_all[:pad], S_all[pad + size_low :]))
                # set of indices of high frequencies in the input
                # recall that even using rng here, there can be no seed depending on hp_exp['use_mask_seed_for_training']
                S_p_high = self.rng.choice(S_high, size=p_size_high, replace=False, p=None)
                input_mask = np.zeros(n)
                input_mask[pad : pad + size_low] = 1.0
                input_mask[S_p_high] = 1.0
                input_mask = torch.from_numpy(input_mask.reshape(*mask_shape).astype(np.float32))

        return input_mask, target_mask, target_mask_weighted
def create_mask_for_mask_type(
    mask_type_str: str,
    self_sup: bool,
    center_fraction: float,
    acceleration: float,
    acceleration_total: Optional[float],
) -> "MaskFunc":
    """
    Creates a mask function of the specified type.

    Args:
        mask_type_str: Name of the mask type; currently only "n2n" is supported.
        self_sup: Whether the target mask is undersampled as well.
        center_fraction: What fraction of the center of k-space to include.
        acceleration: What acceleration to apply to the input.
        acceleration_total: Total acceleration budget (self-supervised mode).

    Returns:
        The mask-generating callable.

    Raises:
        ValueError: If the mask type is not supported. (Was a bare Exception;
            ValueError matches the validation style used by MaskFunc and is
            still caught by callers handling Exception.)
    """
    if mask_type_str == "n2n":
        return n2nMaskFunc(self_sup, center_fraction, acceleration, acceleration_total)
    raise ValueError(f"{mask_type_str} not supported")
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/data/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from packaging import version
from functions.coil_combine import rss, rss_complex
from functions.math import complex_abs, complex_conj, complex_mul
from functions.training.debug_helper import print_tensor_stats, save_figure
if version.parse(torch.__version__) >= version.parse("1.7.0"):
from functions.fftc import fft2c_new as fft2c
from functions.fftc import ifft2c_new as ifft2c
else:
from functions.fftc import fft2c_old as fft2c
from functions.fftc import ifft2c_old as ifft2c
from functions.data.subsample import MaskFunc
def to_tensor(data: np.ndarray) -> torch.Tensor:
    """
    Convert numpy array to PyTorch tensor.

    For complex arrays, the real and imaginary parts are stacked along the
    last dimension.

    Args:
        data: Input numpy array.

    Returns:
        PyTorch version of data.
    """
    if not np.iscomplexobj(data):
        return torch.from_numpy(data)
    # Represent complex values as a trailing dimension of size 2 (re, im).
    return torch.from_numpy(np.stack((data.real, data.imag), axis=-1))
def center_crop(data: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:
    """
    Apply a center crop to the input real image or batch of real images.

    Args:
        data: The input tensor to be center cropped. It should
            have at least 2 dimensions and the cropping is applied along the
            last two dimensions.
        shape: The output shape. The shape should be smaller
            than the corresponding dimensions of data.

    Returns:
        The center cropped image.
    """
    rows, cols = data.shape[-2], data.shape[-1]
    if not (0 < shape[0] <= rows and 0 < shape[1] <= cols):
        raise ValueError("Invalid shapes.")

    # Symmetric margins (extra pixel goes to the high side for odd leftovers).
    row_start = (rows - shape[0]) // 2
    col_start = (cols - shape[1]) // 2
    return data[..., row_start : row_start + shape[0], col_start : col_start + shape[1]]
def complex_center_crop(data: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:
    """
    Apply a center crop to the input image or batch of complex images.

    Args:
        data: The complex input tensor to be center cropped. It should have at
            least 3 dimensions and the cropping is applied along dimensions -3
            and -2 and the last dimension should have a size of 2.
        shape: The output shape. The shape should be smaller than the
            corresponding dimensions of data.

    Returns:
        The center cropped image.
    """
    rows, cols = data.shape[-3], data.shape[-2]
    if not (0 < shape[0] <= rows and 0 < shape[1] <= cols):
        raise ValueError("Invalid shapes.")

    row_start = (rows - shape[0]) // 2
    col_start = (cols - shape[1]) // 2
    # The trailing (real, imag) dimension is kept in full.
    return data[..., row_start : row_start + shape[0], col_start : col_start + shape[1], :]
def center_crop_to_smallest(
    x: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Center-crop both images to their shared smallest size.

    The target height and width are the per-dimension minima over the two
    inputs, so the output size can mix a dimension from each tensor (e.g.
    x's width with y's height).

    Args:
        x: The first image.
        y: The second image.

    Returns:
        Tuple of ``x`` and ``y``, each cropped to the minimal common size.
    """
    target_shape = (
        min(x.shape[-2], y.shape[-2]),  # smallest height
        min(x.shape[-1], y.shape[-1]),  # smallest width
    )
    return center_crop(x, target_shape), center_crop(y, target_shape)
def normalize(
    data: torch.Tensor,
    mean: Union[float, torch.Tensor],
    stddev: Union[float, torch.Tensor],
    eps: Union[float, torch.Tensor] = 0.0,
) -> torch.Tensor:
    """
    Normalize a tensor as ``(data - mean) / (stddev + eps)``.

    Args:
        data: Input tensor to normalize.
        mean: Value subtracted from the data.
        stddev: Scale divisor.
        eps: Stabilizer added to ``stddev`` to avoid division by zero.

    Returns:
        The normalized tensor.
    """
    centered = data - mean
    return centered / (stddev + eps)
def normalize_to_given_mean_std(
    im1: torch.Tensor,
    im2: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Renormalize ``im2`` to match the mean and std of ``im1``.

    ``im2`` is first standardized (zero mean, unit std) and then rescaled
    and shifted to the statistics of ``im1``. ``im1`` is returned unchanged.

    Args:
        im1: Reference tensor whose mean/std are matched.
        im2: Tensor to renormalize; the input tensor itself is not mutated.

    Returns:
        Tuple of (im1, renormalized im2).
    """
    # Fix: the original annotation declared a single Tensor return although
    # a 2-tuple is returned.
    im2 = (im2 - im2.mean()) / im2.std()
    im2 = im2 * im1.std() + im1.mean()
    return im1, im2
def normalize_instance(
    data: torch.Tensor, eps: Union[float, torch.Tensor] = 0.0
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Normalize the given tensor with instance norm.

    Applies the formula (data - mean) / (stddev + eps), where mean and
    stddev are computed from ``data`` itself.

    Args:
        data: Input data to be normalized.
        eps: Added to stddev to prevent dividing by zero.

    Returns:
        Tuple of (normalized tensor, mean, std) so callers can de-normalize.
    """
    # Fix: the original return annotation used malformed single-argument
    # Union[...] entries; the function returns three plain tensors.
    mean = data.mean()
    std = data.std()
    return (data - mean) / (std + eps), mean, std
def normalize_separate_over_ch(
    x: torch.Tensor,
    mean: Union[float, torch.Tensor] = None,
    std: Union[float, torch.Tensor] = None,
    eps: Union[float, torch.Tensor] = 0.0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Normalize each channel of ``x`` separately.

    If ``mean`` and ``std`` are given, they are used directly; otherwise
    per-channel statistics are computed from ``x``. ``x`` must have shape
    (c, h, w); statistics are taken over (h, w), so ``mean`` and ``std``
    have shape (c, 1, 1).

    Args:
        x: Input tensor of shape (c, h, w).
        mean: Optional normalization mean.
        std: Optional normalization std.
        eps: Added to std to prevent dividing by zero.

    Returns:
        Tuple of (normalized tensor, mean, std).

    Raises:
        ValueError: If the last dimension has size 2 (looks like a complex
            dim) or ``x`` is not three-dimensional.
    """
    if x.shape[-1] == 2:
        raise ValueError("Group normalize does not expect complex dim at last position.")
    if len(x.shape) != 3:
        # Fix: typo "Gourp" corrected in the error message.
        raise ValueError("Group normalize expects three dimensions in the input tensor.")

    # Fix: use identity comparison with None instead of `==` — equality on
    # tensors is overloaded and `is None` is the reliable sentinel check.
    if mean is None and std is None:
        mean = x.mean(dim=[1, 2], keepdim=True)
        std = x.std(dim=[1, 2], keepdim=True)

    return (x - mean) / (std + eps), mean, std
def apply_mask(
    data: torch.Tensor,
    mask_func: MaskFunc,
    seed: Optional[Union[int, Tuple[int, ...]]] = None,
    fix_selfsup_inputtarget_split: Optional[bool] = True,
    padding: Optional[Sequence[int]] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Subsample given k-space by multiplying with a mask.

    Args:
        data: The input k-space data. This should have at least 3 dimensions,
            where dimensions -3 and -2 are the spatial dimensions, and the
            final dimension has size 2 (for complex values).
        mask_func: A function that takes a shape (tuple of ints), a random
            number seed and a split-fixing flag, and returns three masks.
        seed: Seed for the random number generator.
        fix_selfsup_inputtarget_split: Only relevant for self-supervised
            training. If False the input/target split is random. Always True
            for validation and testing.
        padding: Optional (left, right) column range outside of which both
            masks are zeroed.

    Returns:
        Tuple of (input k-space, input mask, target k-space, target mask,
        weighted target mask).
    """
    # Fix: the original return annotation declared a 2-tuple although five
    # values are returned.
    shape = np.array(data.shape)
    shape[:-3] = 1  # mask is broadcast over all leading (e.g. coil) dims
    input_mask, target_mask, target_mask_weighted = mask_func(
        shape, seed, fix_selfsup_inputtarget_split
    )

    if padding is not None:
        # Zero out k-space columns outside the sampled region; the right
        # padding value is inclusive on the zeroed side.
        for mask in (input_mask, target_mask):
            mask[:, :, : padding[0]] = 0
            mask[:, :, padding[1] :] = 0

    # The "+ 0.0" removes the sign of the zeros (-0.0 -> 0.0).
    input_data = data * input_mask + 0.0
    target_data = data * target_mask + 0.0

    return input_data, input_mask, target_data, target_mask, target_mask_weighted
class UnetDataTransform:
    """
    Data Transformer for training U-Net models.

    Converts one raw k-space slice into masked input/target pairs (in both
    k-space and image domain) plus the bookkeeping tensors needed for
    training, validation and logging.
    """

    def __init__(
        self,
        which_challenge: str,
        mask_func: Optional[MaskFunc] = None,
        use_seed: bool = True,
        hp_exp: dict = None,
        mode: str = "train",
    ):
        """
        Args:
            which_challenge: Challenge from ("singlecoil", "multicoil").
            mask_func: Optional; A function that can create a mask of
                appropriate shape.
            use_seed: If true, this class computes a pseudo random number
                generator seed from the filename. This ensures that the same
                mask is used for all the slices of a given volume every time.
            hp_exp: Experiment hyper-parameters. Keys read in __call__:
                'provide_senmaps', 'two_channel_imag_real', 'selfsup',
                'use_mask_seed_for_training'.
            mode: Either "train", "val" or "test".
        """
        if which_challenge not in ("singlecoil", "multicoil"):
            raise ValueError("Challenge should either be 'singlecoil' or 'multicoil'")
        self.mask_func = mask_func
        self.which_challenge = which_challenge
        self.use_seed = use_seed
        self.hp_exp = hp_exp
        self.mode = mode

    def __call__(
        self,
        kspace: np.ndarray,
        sens_maps: np.ndarray,
        target: np.ndarray,
        attrs: Dict,
        fname: str,
        slice_num: int,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, str, int, float]:
        """
        Args:
            kspace: Input k-space of shape (num_coils, rows, cols) for
                multi-coil data or (rows, cols) for single coil data.
            sens_maps: Sensitivity maps of shape (coils, height, width) with
                complex valued entries.
            target: Target image (used only for its spatial crop size here).
            attrs: Acquisition related information stored in the HDF5 object.
            fname: File name.
            slice_num: Serial number of the slice.

        Returns:
            tuple containing:
                binary_background_mask: Binary body-support mask from the
                    sensitivity maps (or a dummy tensor if not provided).
                input_image: Normalized zero-filled input image.
                input_kspace: Undersampled input kspace, usable for data
                    consistency steps.
                input_mask: Input mask.
                target_image: Target image for training; 1 or 2 channels
                    depending on hp_exp['two_channel_imag_real'].
                target_kspace: Target kspace for training.
                target_mask: Target mask.
                target_mask_weighted: Weighted target mask from mask_func.
                ground_truth_image: Center-cropped, real ground truth image
                    for computing val/test scores in image domain.
                sens_maps: Sensitivity maps (for expand operations).
                mean: For de-normalization of input_image.
                std: For de-normalization of input_image.
                fname: File name for logging.
                slice_num: Serial number of the slice for logging.
        """
        # Convert sens_maps and kspace to tensors; complex values become a
        # trailing real/imag dimension of size 2.
        if self.hp_exp['provide_senmaps']:
            sens_maps = to_tensor(sens_maps)
            sens_maps_conj = complex_conj(sens_maps)
            # sum_c conj(S_c)*S_c is ~1 inside the coil-sensitivity support
            # and ~0 outside; rounding the real part gives a binary mask.
            binary_background_mask = torch.round(torch.sum(complex_mul(sens_maps_conj,sens_maps),0)[:,:,0:1])
            binary_background_mask = torch.moveaxis( binary_background_mask , -1, 0 )
        else:
            # NOTE(review): sens_maps_conj is NOT defined on this branch but
            # is used unconditionally below, which would raise NameError.
            # Presumably 'provide_senmaps' is always True in practice —
            # confirm with the experiment configs.
            sens_maps = torch.Tensor([0])
            binary_background_mask = torch.Tensor([0])
        kspace = to_tensor(kspace)
        # Crop size is taken from the reference target image.
        crop_size = (target.shape[-2], target.shape[-1])
        #################################
        # Computing the target images that are used for supervised training in the image domain (can be complex or real, cropped or not)
        # and computing the gronud truth images to compute scores in the image domain (always real and center cropped)
        #################################
        # Coil-combined image: IFFT, multiply by conjugate maps, sum coils.
        target_image = ifft2c(kspace)
        target_image = complex_mul(target_image, sens_maps_conj)
        target_image = target_image.sum(dim=0, keepdim=False)
        # Ground truth for metrics: always real-valued and center cropped.
        ground_truth_image = complex_center_crop(target_image, crop_size)
        ground_truth_image = complex_abs(ground_truth_image)
        ground_truth_image = ground_truth_image.unsqueeze(0)
        if self.hp_exp['two_channel_imag_real']:
            # move complex channels to channel dimension
            target_image = torch.moveaxis( target_image , -1, 0 )
        else:
            # absolute value
            target_image = complex_abs(target_image)
            # add channel dimension
            target_image = target_image.unsqueeze(0)
        #################################
        # Computing input kspace and target kspace
        #################################
        # Get masked input and target kspace from the original kspace. self.use_seed during training is determined by hp_exp['use_mask_seed_for_training'], while for validation and testing it is always True.
        if self.hp_exp['selfsup']==True or self.use_seed:
            # during self-supervised training we compute the mask seed even if use_seed is False
            # The reason is that if use_seed is False we only want to have the split into input and target to be random, but the overall mask to be fixed
            seed = tuple(map(ord, fname))
        else:
            seed = None
        if seed:
            # Include the slice index so each slice gets its own fixed seed.
            seed = seed + (slice_num,)
        if self.mode=='train': # the last option only matters for self-supervised training. During training the input/target split is random if self.hp_exp['use_mask_seed_for_training']=False and fixed otherwise. During validation and testing it is always fixed.
            input_kspace, input_mask, target_kspace, target_mask, target_mask_weighted = apply_mask(kspace, self.mask_func, seed, fix_selfsup_inputtarget_split = self.hp_exp['use_mask_seed_for_training'])
        else:
            # during validation and test we want to always use the same seed for the same slice
            input_kspace, input_mask, target_kspace, target_mask, target_mask_weighted = apply_mask(kspace, self.mask_func, seed, True)
        #################################
        # Computing the coarse input image from the undersampled kspace
        #################################
        # inverse Fourier transform to get zero filled solution
        input_image = ifft2c(input_kspace) #shape: coils,height,width,2
        input_image = complex_mul(input_image, sens_maps_conj)
        input_image = input_image.sum(dim=0, keepdim=False) #shape: height,width,2
        if self.hp_exp['two_channel_imag_real']:
            # move complex channels to channel dimension
            input_image = torch.moveaxis( input_image , -1, 0 )
        else:
            # absolute value
            input_image = complex_abs(input_image)
            # add channel dimension
            input_image = input_image.unsqueeze(0)
        # normalize input to have zero mean and std one
        input_image, mean, std = normalize_separate_over_ch(input_image, eps=1e-11)
        return binary_background_mask, input_image, input_kspace, input_mask, target_image, target_kspace, target_mask, target_mask_weighted, ground_truth_image, sens_maps, mean, std, fname, slice_num
| 15,090 | 37.595908 | 260 | py |
tinysegmenter | tinysegmenter-master/setup.py | from distutils.core import setup, Command
import os
import sys
sys.path.append('./tinysegmenter')
sys.path.append('./tests')
def read_file(filename):
    """
    Return the contents of ``filename``, or an empty string if it is absent.

    The path is resolved against the grandparent directory of this file.
    """
    # NOTE(review): dirname(dirname(__file__)) resolves one level *above*
    # this script's directory — confirm that is intended for README.md.
    filepath = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), filename)
    if os.path.exists(filepath):
        # Fix: use a context manager so the file handle is always closed
        # (the original `open(filepath).read()` leaked the handle).
        with open(filepath) as f:
            return f.read()
    return ''
class PyTest(Command):
    """Custom ``setup.py test`` command that runs the project's runtests.py."""

    user_options = []

    def initialize_options(self):
        # This command takes no options.
        pass

    def finalize_options(self):
        # Nothing to validate.
        pass

    def run(self):
        import sys,subprocess
        # Run the test driver in a subprocess and propagate its exit code.
        exit_code = subprocess.call([sys.executable, 'runtests.py'])
        raise SystemExit(exit_code)
# Distribution metadata for the tinysegmenter3 package. The custom `test`
# command (PyTest, defined above) is registered via cmdclass, and the long
# description is read from README.md.
setup(
    name = 'tinysegmenter3',
    packages = ['tinysegmenter'],
    version = '0.1.0',
    description = 'Super compact Japanese tokenizer',
    maintainer = 'Tatsuro Yasukawa',
    maintainer_email = 't.yasukawa01@gmail.com',
    url = 'https://github.com/SamuraiT/tinysegmenter',
    license='New BSD',
    long_description = read_file('README.md'),
    cmdclass = {'test':PyTest},
    classifiers = [
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Environment :: MacOS X",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Text Processing :: Linguistic",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| 1,437 | 27.76 | 71 | py |
tinysegmenter | tinysegmenter-master/runtests.py | #! /usr/bin/env python
sources = """
eNrMvW2b40aSICaffbaPd3t7ez6v7+zzPRDbfQTVLHRXazQvtNgzLak1295RS1a3ZnqfUi2FIsAq
qEiADYBVRWk1z33yn/MX/wP/FcdbviLBYrWkXWt3ugggXyIjIyMjIiMj/ss/++HNO/Hrf/XOO+/M
N7s2b9pkndaXb/6r1/PxO+8Mh8PoPC/zulhE63xxkZZFs46WVR1hoaI8j9Iyi5p8lS9afIIWLqoy
Wm5LeK7KJomghUGx3lR1Cx8Hg0GWLyPuZ16m67zZpIs8Hk8HEfxX5+22LqPvR9j4aBp9Bn9+j72n
bVXH4x/c6mmWVRvsJd6kdZPX0sh5XW030Szil8l53tKbeEjjSFfDsSmWWI3QW/xvdHQ5muinlAYy
GzYAQj6cRBl0PRte5rvrqs7oeZluV+1sNJoAhtr0Kq1nw2evv/jy2cuXzz9/MTQtXeSrzWxYlatd
VG/LCMfQRNcXxeICkNnCv+1FHp0XV3kZNduzpq0RofnNps6bBkAAVOqm6L/h09L6HBVNlCr851fp
apu26dkqt4v4DVxf5HUepatVRFMRpfCkuz4ioPIsSs/TomxaAlgK+g0hEQD0RY1Yz8s2WqzSpslh
+p/dpOvNKp9GR5fRiKYNkHRRZRGQEDXoN1VBO/WIMYIgAXBUTpMUUZy0D+irmpyA6jS0qMoWAXe6
HWG//II7CmA1ywrsCHreRTLPjBqFkLbS/SMsBrIeEHge2zoV7BWlIGtEb+fSyVzGPIL11E66OIbR
5oAN+Gt6ZPK5SK9yNYUw0+clAJkVNaxJGAJAC72tE25vPLiF+odH6+Ft1I/LE8nKIv/h0CL/z55+
+be4BG4jfhowYoepHpt1CL6Dg1xT0zoiJnFMM1BWLVV+PAqNMjjIIxpD3QDYzhjnbb3NO3A3F9V1
JDWi+GxbrIDbTaLNansOk4kgbPL6aFNX3wLOo6rMm7ELiTAjAAXIIR6aztVP4qkuocPX0aoo81XR
tKOxy/sW6ww/AckUZQxktizOhf0Vy4ifEx50Ih1M9ZDkc1bN+de2Bgasv7bXyDt3SVElr/J6XcA6
+FNdtHltlUFYsXukZGkN2KwzsvHUmTokzgnwdxjfjKomzWZVtPFwCqM8Hjtl2+vkGnuMh7+zNqTk
fjMdRvelpbNqlc1ewVR1qmLjMXYU/jIOoAEQHkCEbEaPBgGUJ229WxY1jQah8CanWtF+WJXzdZUV
yx0MZt3E9O8kcmZLlj4SPTTlTpx8o3LMGkKl1FpUk49rwW7VrA9pYeqNkB55B9+WbbGCHj5NV02u
GrQaOzk6np5Gs1kE02aacesSOtQnd3h2S9Oj49OBbPpr4ZGz6OSUXgGXoTaBjel3SHOAWEQikh0h
08DgwukOWj7EUntil/TI1HScpJtNXupKhixyQI1bCfq2UOB+6yLhBXAHv3pgbqyveiA41WYUutK4
W+vAsdj/4V5VlFsXNj03nSYUdZheOgzmoqouE1kSphgvg5l5YcChLydIYaZjWFm015Is+BlAAe+4
JxAsv6irqyLDXS9aVYt0BTihAlpARcbKQg7RTbpYwNYiVNdUqyuoCtsjkiyOBeUvroXCFG5DIMqw
BMsIXUbzOUDVzucxQL/UlNRYU7DeYRMNjAF2cY9nQvFJhKIZcU5VOWH2MO5QVdGg3JWWICBTpQkh
4Xm5rMYoxoQ/f5IvKpKXA2QhsOE2FEPvY3sJL5O5AV1+DaxxI4MHOPXQkQ+PfV7CkhiMzWlQz+Hf
8ojvNI0aS1T69yQoQDnYE6NqyUIProUNrPmdJTfDN1ho+AdmtVHSkUx2onrum1cqPZ56+OE2Zvx1
H24ADA89OBIXN9RIl5FwTSyGf7tTKHh2uKy8Y67NO1GQX/BGMdZ4f1UXjBNWP6qScCgrQDCbtm2q
hF6jn0ijiUKhQIBqR6z6mUTf/zCxl60CJdGrZmxDewCb7sLM4EhJW9WBEhpU5DO4XrBvnrY/FauV
GTWOi7BeLSPde1EuVtsMSUl9bvB7AWIr6zhCkZ+jSKu0FasYLGmRzlF1yAvUNoBwp7QSpt98jH++
wWWcUjPq/aci/X3DrdvKyER3U5WOJrFPjwBgqCGcWR6YpUdwI1pT0LoDzrUjiroLpndXT+p8s0J9
fniEoi1uXKJt44rOM72CmDXSl3s4RhfH+LDY1qRIMt+G7RwHIMolIZc3IGVYQHC1mKBkBEVvyCwW
Fygpjx2JAeGzmCijR0TO5/LW46L2OIiNUge02MPDgTKoQjeiAGp9EnVIGWJVdwdm8wt7GNSKasQe
TgewfTB5a1oBoqYbXoLidH4hlCF0sYYiate/SBtopDZrdaTqjnqYnhqEKpfM51mxAI57C36tYahd
YebtI7FdaaxAHML/0QSXNoWa3u5FzSZfFLDbLFLYHwAVsMZAy0OgmwK1TNASL2FmmmZIBIhPx8n7
Q5/tClgnVi8ssYLi5kilCVBU3TbXwApiWRvccBDSk19MT7vbK4mCnf6w7GmHEVsFmBdLTV+V1LqP
q0d2llZHs8StdtEaIG1tTZrV6oojx2mbnmbrsHkhj94R+hUvdMQZ4JVnqF030RHy+aqhZcW8Lfrm
G6vvb76BCSzPV3mLVjFZx0mkTVHTgSE5Y5nUrxylswGS0AiI1I5PeFOU7AtuSDHcwzXuMsiD02ik
Gho5o0NZTg2Mqshu9M03ThffqDIsjLqSBy7EPqmsYAnp5BFrbfOhC2qdFkD6T6GB4mzb5s/quqrj
4WdGAIjWW9gGXnz+KiLajZB2o22ZgVy9QIPQ2O5JMQWGZCQUMPLQw9IPsJ/FpSxuj8adWbfXP42Y
KwYHCzp5R57y5FHfFBLQhGnl3izyTeshJjDLnvgsCscsWgWlf2UxEQwcZDc5y8+LUpTjW8wmN1BE
F4cZV0VjKgovnNIr4q43Y59UFCfag7Aw1dyvqWoKuDzHjaqG9cnV0WpD8zhBgQ9nEb83LVJ3jP+4
1n9FRvgFhIj5HDSC1XwuzPJrDYqQvimHXUA5+P3hKl2fZekTqPPuzHq0eZAmMcODngKJycuAOY66
p1fauB396QK1kQ0gOmc7TdHyql/UedrmvWvd2P13MGcsANb5cvoN4AAE3CvA3dkuQgUaLbos0GjR
4UMgT1DW62rVXBabJyIpOmPSbI8N19WyRUmYYMqAji5hv7soGpsTsjF15vDwF08/e+auk3tqa3R6
cxp5HGiEZv94RlrqGBoBcQdewEi/A3DcpkRYzWBXJlZYAn4UhhEVeoaarpRqj+d3BMvgLbg2z2kP
PtEwgPQIwMBUZxXqIsCvlxXKDlqnjUBQiJ4vVUnimqlsSkw9PKsoyYPcDZuZCPkwW+dbFLTEgmVA
TLUioLUdVRa0NIBFhDpqFtaulu2ot6aC16YxWDqNQeq2rUBjKBZ04MFykNB5ikccywoYPmsaqdB+
Ik097h+lliUPGKgBrKzuMNBUm0NkVcmYTXMKCDK6qpMYGh5gRrcKb2BlZLuIDgBwqMC9+PinMHsC
1XGIQmHh/YRpRtEFq9F0vEQoJKg3eQ0sZY1Qj5YgwSNEID9tWWimuhNLLElAYIFazBKxUplf95Ek
IR6hq+riHO31ph2nxqghNCDSt5uMWIGuapCBC0GpBoXZ4ghC0Z5fVG0+jV5BtXq7wok4q65ytJdd
YdsulIrfLetqTQgm4wwSA+u9PrmglZdIFtgh7CE4vrRRZ4sB8qERyEIJkE4VrNXIQG6xAE2wQjND
i+0kurzWD75hiPZNNgu5H7AGfKA/AEnsGdy4SVShr1WR739gyH63qSsgmXan4UN+hj0QfF3DmwHk
Hu1dZ+kCW82OFtV6A+s2fpz8IjmGzTWjecVVVNty1RxUdyVGWq1nJMmgKCDqWrKoNjtrIDL2LNlU
m3iET6OORDf80KWJ+/j/T4xYEGUOJLzfyzy8h5iBP+8xjizQcMcGoUXo1bBXqE3n3mqyp2rPyHl3
bi/4oLEAxpE4ewAt2WsUboCSuOeHqmMofkS2DbY9Sf0aGScdzQ+HtiRFNdQhBDfgbjdI70IZvmQG
1Vd5GdNoUWrnI87Yl5kia9/d858rTY3m8zNk6vP5aDwOHjb0lw+fMnSr8OZPHhw9dUj+hNFjhcQU
7y3bNdWsJmQB3tO+QrLVPp4lQUWkq9Peet3jnQ7k6iyElsrg7o0EwArD1N/MRbUCHQxqukKwZgIT
OmYa984YVwc2gMX6IdXdqI023osZq3vN/iY2q+utPR70t9kzQIbtbfDPNUn/CYM57rO6IxDmbPNa
8Uau5XPGy+uEt9nYb1W4vtkiHkQOdmyWDuwQt0Phh4m1LTHYsi1dXo8dCwtOllFsPhNfLRGTlCIA
KkaPqUWrD8lhe6SCw1qQ96bapp4qVfH2bRNqhXZ5XOyTKF9v2h1zA9CkyXy3VIpXaM+1W/VlyQi3
M+yl3jkNt+SP0tO2t2F7+j+8omMpWM+xjRQhjPHp7duts2GSZAt7JU0w/JWpVlvn4O2WnrXVqhXQ
O4dIO7gVWpK2lhj5dEVvdDJUZyN00KK4poOa8TgweQ+82bPXmLeibMoEob2LUBzBrshXma8rNFEO
WgQgfJWS4yLqPcqn8YhkXWcoKFLZgBsTiQzPZTdul/7S1aMfvPlnr//qnXfe2eySOQhpF4laYG/+
69fRO+x9uS2JEKNlATJNs2voPGRTEHRv/pvXf2n5bS7SDZBP/uafv/5/fkGVB+iVRKpc02bVtn0I
f/IatRIsiMPWPp0gEWMFEtPnoCVjQ/O5so/irM+BJUDneAqgHDoBHPWzargulNgQqPL+VY5/YZV9
Ci8Hlieoa1/myuR2ZGretM8/H8D4F1WWs1OSeF4leA4yGGzwnAtgwKUM379/NI1GMMKiHE2iY/4N
g4aHx/wAQx/9o/mQHslsjCa+E51tlGDfuWU2tAWpCihkmG03wzHtZNEQxmhV02527NI4BO32oiqA
Wc9ORssM+htBefxTVqNT35NNU4RNA9jMVJ2XL7N/gPr/UFbJAaNs/OHNSa8dTli/nQ3LSnsNCkJC
znV1u9i2tMg03mYMwGDg2OOV35U9h6sqzWjdFOmKzKrkXBjnab3aiZl1IjOp1t98vgbEF6xpyJyX
yNjtSsllWV2XuMAb46rM3nzk97dOy/ScxCOnlvNxwKrzBh6h3Mc8tM/4W1w2asmOu80myo4ac3Ug
CSksBYb6dG+N9oQGvoiJxinGriyoTKLrZU7HEYBqtBNEzcUWlsh1GegdZnuuPgsECdWea8JpAgCs
qvNzpCmyjKHQzsZiNqizFbWl7SMXsw/OYwOsAYSNuVSep63p2j3MGEqRIfHfXZOsqwxNEN4Rg/lw
omucJgTKMwOJ7ee2Z/B7oNMIIHSi9awuzhHnigojpE3EB0g/aPIl/VStO1Dpmm3+m/fHFpEkxP8N
ikWedI42OuccIjU4RJ3kNzmsKduhUYB06wKDBPmnJr9ChqDZNrhRCxVJfZ5Ay+4BKGb2Kr6a8Gvs
f4Vm5Sv8sqwCOA9aWnXXxLRP1mQm1XHJEWfymXy3BALg2WoIPS2g451Un0XIg4NY/QxxKkDiKMnr
lFAmv+Tb7NNPPrYXsz4BtvpAPv7jOnm5a27tBVjuYZ0Q+XMv8rMo5/xzn78jL+k/ojFfzn62JbHK
wJ5yn05/BPVmdnwq96Q2NOmABID8wz1UHJpKQ997zXSuTFYeCShq6a3IB/TmObYg9nmfDzJ86ZjK
0CRmQ+xr5XigDtUK5pRdVRw+YhtzmCuYo3lbzdHCG487hWDn3fSCDeqOjYXOEjLD58LhhjyuwNNh
yMVrNMtRKLgCeZEOXHALtT2vCVfKdNGd2bviifHDHCxBUz6/CCGKR2ENEYcA/xuHFgw3Y+lLnTEx
7JsdviC/WIcgpBiJrqpEgDqkWM+wVSP7qETKJHMiYG/cepYNOObBDC40ZV0aN230kPpPN5jFqmpy
RYJhIVDBLoIgiiHK+R4VbBArZYacrXFifAL3+oPpUtopDHWYcYi3utTvLrrxvo0bF8KP3bepz+Cu
LeOCsp1+kyYX1z6llw9la8hEWYQpxE3daQk6vGtLUGVIkI475wJQOTC7eOp9XWNbtT/B9bakv8CK
QRkTHuQuu70TQGq5W9TFG1oR1uIIOKRehuO3AxFn8y0gZCtIZyG6bvvuMO71c1x1JMmHb30Uchse
sJG3RUMLqhFL0T/zZKmOhndhGJf57qyCenM68q23G8UvQNzF450AqLknnR/emTpWzklmeqt+uhsx
48Agd4LXHUqrvcP5BG5OqPgw45zLqo6xPcCvYQiH1jALfzCgMeOAQAJeZvQEAAFllchztw2pRfBN
PD82y0zpiQ0aidtinQ8tU8CyuEHoyQjAFeM6f7PN9YnQcDjMSzwFbCyZtFpGpI2Q54hRYB4abYW6
x12kUSoZszEotQEFLr1KCz5bvCrQ55C7diSOb75RkjiuGm6GXX2E52G9WE0KFgces/IuJKCiS0gg
NVdGluh1PXU1KV2Anpi6Avgei48Vly2rLPeFAmOg+JTxG/vahoxh0TsTy+zOE0HGwixvFnWxIbce
Pu98fPe5WGY/21Sg4nbbXNxhFkTWsO19o2y7sc9LBb/o4RUzMURqqso8z5qoahIyEb7FtHqaqplV
VyWX4r0quUwJ1fE5mP2NBU79aOnoLK7u0UrQ7Hurbqy1kMkhB+L4n9KmO6CGVcN8n2LIYuodtcH8
zjoOt+VoOj9aNdRrxYO+z8Dk4iSoaB3gOes0plQsYiBNusznaNqfA2UjX4jRbAMkra+Zz/NyUaFB
bTb86tWnvx4aLmMWdQX7W4StMGtRrmEXaTtCRypoegWydsu2bnG41gB++snRKr/KV+RZUjVNceZz
BAWBffiOu528dibWQeQSzVTLBIEqK8esFmmjpGvsPNP3AuyeECHo5CoOsdr7wgbCU1LuRdewk+b5
GhkuBQtIGUNNC7MIQsMqR+R8i87euBnbvoLW6fegaw8q82saF3OjeJmNe0aAUHtXPfBU58EMCxkc
cVPLDCcxpsYVBTwa4zjKKjrbLpc5ErPNvJ7hyPPsUyEaPUkgZ/ikYw7K7UpMJoI3YtXIt4Z8xWGI
XQs57Ij/8un5Zvd+RIdTbMr94IMPxn3MkqE2kPkMj7+jCzf9cD9aNKd+mnXMFlbuBUbhqrSWOgvf
JpGcpnn0AZ8Q8Wffck9iLlNdAWHJvS7f5sawKgsw9O1BhR7rykiiIhnYzmVpm0K/o1HybVWUsS7h
dsKtY9nxHW4+CF2ohSPXJ3Q1Yk667pAHMhxLM8aVwtl7bBIBxoWzQcwYNiB1sbtv+vv2Lde66zkX
4vyV88A+wB0quB7tV/apBoOrahzvV+p59mloqsZjWwPxNkNv9+Dr8ARkGPTEt1KpGr2g99boBV3X
MFpYd7cM+ApAKX0drilBcrsA9oVKgjk2zznoy2rbXCA7xZaEj3ruD77qFdww980Yk35Vn7unK/um
zFRxj1y0QZMg6miT3myGLbsHTBO3dreJCtVBApfAUnsJqVPXiErwXT5TFAmO0uGaxW+n4f6hcgt3
Gmmoin+8YUM9HPcMu9MQD2sVGLm9ah1BMED7WMLS0hD/fF5s3gmd++6u/s0vquZ7f3aPja5Tx/ce
L3F2TnUYAi+syO3Tk1Xl3Sanp0I/7UmFA8RpSzrVLEV5tXAwiGrZ56DjsRRpJjYrDhqMxzZCbBVC
3EeGXcXILD+3Aew80MBYbYgvKtkReHhql0MWRxcEZxHiBR+ZAimUB1Ir3l+jW1jsVz2V7ZJb1Qqp
EeflRfT8c2CyD8mAmYrPWdWIjI6itLEcdO5GOhswQAdbP4qTLTsoBT35VSmAVv3s14qcGvMGxWot
CDvfOurR5y8DehGb5wSLjKtpN1oMG3sZx8FCwfg0Zlyz6FHXM9Y6bxXsTLRxDpefvGTnP/KwGnba
UGVmpIDFiIf8qtyuVig+DsdhCmx2jVH0jbUp7mKu360Xh6d8zvY5NKNu4XijxWHnYRrkst992Ay0
V1+dDa/PHgTGbM8E6ES2/1q4v4OQpEl6fAcP8m7LemmHtlMzaPnVW2TOq4cfjLZ7uOurZgbR/Saq
Vtkym91v2OXVWVWTwAL0pdQQK35JS8yyRpZmUrYNOy+sK76bqIbhMOEOI0CNFZ0j4xBA3tKPXdPI
RPGCWzdLbC3zWB6tWNhCCgqPsXZvZDMnehx7k+OhzT8G0hTRlaBpq/AQulTCrU8W0FSeX9q6SU2R
P1gMdnfa2nfgyulyzK3mFVsghhoosFj6bo3ha87QjDoOXfhuXI9SUApwJXOtnG4Ud5VeHlhbo3G1
dRSv8JCds86Of/9oZJ2+ozRxq0zGEfQqjNLJgthES/Zy9U/rKMTErbnNL/lSn/Kuj77L68qja28r
C5gvnSIBWnO+91IalGarac9S8SnRk81senO9BCyNJmhVtqm7ozq462XvQDw94ra+OvL6oUvTtaag
VsfCDBlDXEKhEkofJUrwDlY6d9Rs4ufIMrHXrmWjwT/KODTctstfD/Ee9+vXr8leaMxtS9M/k6g9
ZmOp8vEr1h0RB80e13v2sEeQk2sl9uZ6ssxOPb0CNhbbB2nXKNPPrVdUbpM3VCAFcvgjWWm6R1j6
pCrbL4EjfgoS7vNys/W9d8I7uKnPXvI9q8Parvfvi42NB/8uiZYu3m4nUHwfcE3aju3ZEuSloa1D
89C9HHPvMHDKx4Ouulz5XgZ3Zy239norw7gT+vdzA92tYzntfiULVmxWXYcSTcw1JTUD8WzPJARA
FD3PcnJ6xvN+1gooBjXFWMxxfcq5imU+uKi2sO7OgFHBfOJh7hLgJ4FbW9fxej4xHXXiy9fEq2iZ
1hEGZFSRDYCx5TkGCX6F95lN0yqeER4OIxAYyYp3wrN8kdLxRqmCHuQS0QOoEMMUIPxneQtKPMXn
qlM08V2k7N4Ff8+hKkwAcqJ8tTPnQv7BkGsKJoHHus089g/wn38uMh6WpLiHfFkeMcpO43LoXTQa
K/rgN6WotKRnp5nzrrFfqgtS6p2GTiR0XxrvyJ/QJ0UDA5S9VHO9afJtVrG+eIHx5CrV3Hhoe/ui
dWjXI/BL1MK9x6kUnOPNf/v6X8pdKdm33vx3r7/5D++8Y11GGmjZHIMR1E3OBgj+ORAR/AWsLUv/
5iniItDxm23uhn2DCe5+jo6eqEroByOxDuCZLTo6jJ2qYAKTfKnOKf3qdodh4xo3RbeyFUTdi9n6
eJVLJ3ZZV+x23autxnU4y7AJ79Vuo4hC36aE5aKRNOaoURi3RYMxDDBzgUqN3yCXkaXCQPjF9DGM
E9zFkw+grZD1FF7TNvWm+8kOQ4z377GFThfBO4beobZbqUS5KlChMKcCKtKtH/01ehKy1RTR0Sw6
7r7Ge4EzPcqT4jRscbGHWfTdc8bGAhP/EpSR5zQNoED4uAGcnbcX84uibG9FkTVos2IxaMoM/92z
TuFrTIRwtvKjq95IVGj61pHIbt4mkqoBbYXhCuDffaCtVm8HGi7DfvDMFSUfZgNeA+yPOB3/6AFS
mOZ8sUbfFPz3Hh1l49olDzx4AzzcuihtST7UsB7fBAuruCj5Tn7JUp09soYPDUuHjpGSAyP2e7ug
N8x602/PQ5ik6fhmEu16AkEIuqyRxzcnj06hAvx7iJx9t372tb+ia+Ew6Dhf0SXa8SRSv4hI5MEm
ldP9Rl3BUBiDLvLh3w40eNRuKNaeGlWvt/FVggQRS7l9d4WkpGfuITrxC8prt+xeKhG0nyjUIRrj
uYVXwOXq1F88K3vlYKINkLa+y0s+ZnJf9KwkFs8whpYSPdxag3sksagwqoO8hE2SAtCg/4b8tqD4
KLXuJUIZ57kHBr+OKW/a1WE2n90UODjnuadd3nWdkrFu27M5kOCtr3bSPOEc8m6RTUSgbin+OkXN
uaQQPMqvCt7p5th8QUdiHLCICpBm8PEXnNPmcfJBhBGmaFara+hHvuhGkH7oIoItQelAjc6YQCzi
G6IkEY/M/dTRYDAHSTLHaIrx34p/+HPlHj5B2wRsjtjEJPoM7cQ7seM6zY+tWdABiWb6555tRBWJ
XVceLzYi+fJYoREHAzzA2zWJoGCOXuXRExjC++g5JV5NN/kiioebGrdotJDgj+h/p/fzGf4rUppQ
tbDNRl1ybap1ruNK6WkqVkW7o5vc0DWrGhQyqFVX8VpzOUAZWQl87crHuwf7XIVcYDwXppBJl10i
0Gerd8swPk4YDirLY+MPdvs+0FtZwe3dxvRuEhHcgIig21XD89l11jj71jo1KRoad3zTJQqroxuF
HrsiofzWeo2OB0aegWbiqEF0TMO/A/eGbbFGcS5W0dv6/a9MJD7QL7AOKBfKlO+0iAXRZHenNofK
Sj0Mtkkzdsf2sE6oPVwmfNQH5c5X1ZmKD7eqFiHqpSJhqsSlhHiGZUsjx5iYsRfUlavPqGyynONj
unLjKGHHpgQlUGg6niH41SVRqhaESxqkro0cu1Enuzhw50S3ezBebesF8rvlxj/tCd5lg2LK1mZS
iFDk6PUGUc3twQ5L10qQT03Q/IdXamBvKVovERDxs3hRyfzw1OAlFL20hcHNlSEBNgGQfDW/IybG
rAqgEFeNAGszD/a6scurtaPWkqIeFATkZ+8ecfelT0v4zgtfuYT++JVdrGlp/9Mv7D4H9n0r/W18
2APt4U9qsgsxb7zx/oiGa1CL1ykvWA6sgyc171/Kbq1j8PiRlnB5jqKRzXpG8HZEUeUDAQi5hkRV
otiNWNpCAvnujL4u3SbhdW+TXMVuEkvba58IfIR/rUbs8FFUi75ThhtzScvRCQJ9S1wu9ItnZ+IT
3FZvxkb/5lqeVtYxaBUlH6d3Qs9Oo/sN+iB4kdzaufLYcewIutcupCIdqJpdWYPOHtRR2cY7Yra+
0fjczxY4rt3Aqgdz4m1nc5ZPZG8x25l6OJRCSb7sIdD5vK3TRY51LoqMpW7fpU8ACG9KtDseslti
CzMu3rNh8ubX25NsgW473raqdtDeRnQDPgCIpMcOwhWu9wrxSoYnzlfnRLfxAisDvU6i9syapFtx
Le5eSpmgFkZWLTT8eicBUMbaP0WTgPm9DZ69sHDTdq2BoskeFB3QKMGGkjVem3GrY4QziUAPLexY
Cojfs5M9Wb6aFNebcvw1WzrTWm4pJDTVwuD2pD8aMldOdJQcSjEBO5lTs2eDms+53fncS4ogm9Fz
+hrYieQGsawMeCKqiffGbxFDrBUlCftkQ4mezPfU3eTBm//+9b+xoszpGHX/4vW/h9f3BvO5IljS
pR8nv0zeHw3eDF7/J6sWYwdj5Zb5NV2F3gAcb/7l6//ymEPVfVqUEnp7nWcFGksoiW1LxmY6N121
FHZP8KwZjbazKAEuevryVTKwTwXFkwe9yXTXyWZH5oR2iwJpQtHvAnHt0qa1o9ZxfDx/TEqg3LbF
qqcICMK6a1X8I4b3qSpD0zvQbABgbjHEsJM35tv0KlWO4feil3keXbTtZvrw4dkW9s9vySCSVPX5
Q7qRdPyL3/yKpxWzwCA/iocfVdXq8w3ulB8VJf/4CsNg8s8/kE8p/nq+fHZDrz7BG1C+C/HwDyBf
fgwCOpawbB/0/Hd4dR9/YIGUbjsOP8YoAp1Wvswpi+zwxXaNf1629KRFL3q3PWMfFioHhBqGBb++
wgu8IsnBTrhuecQqp9Yn+ZIgQROX/ObTOBplvsq5w6eU7qjby9PtufoUDb9AcQx/fFoRyH/Cc1pG
Gz3CZFH7uJa6Tb2qd2xUI6jRHZXUIukdqIFaIioxv/BgvtvUM9aHhr8nXoe/nnFm3uEXmLoIpxl0
sEueDU7xqDCENDHHK8MqJZeSZ1OMtkp8yGFiTEQWeu9UmebD0U/mUJTajEv3TprOdoS3mVUAXA0B
99ppCNs/vCEDvrWvHQiXpT+VdDcRyieUpu4uQAVbwfLGJePTtFihQ65vgAVeRbxCcjOwj4BmL6ic
Igvc5w4P0K7SMuXAjcPu7W08EOrdUvimgGmBtFn9JJd5NThGfa/ZqYV48nwJY3OuGK0pjhsigWI1
SjXu9QqQ1VLMiE9yYHTPVds56F613i+dXVWqJPQ3XquwX7KfCmatczp+0Rn0yfFpQE2V0rH81bf6
rbF15jsexmbDwBJoIue4gGjUhr8pHncfYXpsXDhdF3s23RbkhmLtk+xxQOb2rOCQ89h8EkUvt+fn
sAtRbpRQe2hgAS6rtlQrfeJZvqwonqT+CBAhURwd8fMM5rsox0qeAqDjarkE5QJDJKL3icy3rUEA
gliM74jMSrrf7BLUnZNPSbr3ZX0nXIGhr2C/Apc1V7k2g9uEi1t2woH859aXOLcJXIiQ7uoArfP6
UJSB085S9NZe5coDx2pGMjKhUj12y2GCLrwKHUUffiiKpuR0tSVBG25sxFybFb4qFqjhfa2uxgrY
R6eJypDkrH5NuljXljRGrlgyVXRg+4RIf/jn5PiXUycFHYfoxbijKBjM1xQR4XuRlNvko6L9vI6A
Kv9BtjR5+bqit3/vvn0KXA7e/mfr7R9eXhTLFt9++KH1+kv9+skT6zUmQIR3D6xXIFfgqyPrFV73
xXfvWe8+Ka7w1UPr1aerqqrVe/vDZxX1ct969ewNvpnNrFcvqpbfvmu//QOPxXnzjF7ZpX7PQ3Pe
UKkndqkvqmsahj2O5w2+KhrnFYDCb5Fr2F9Kel26UPNbvtA/HPwwGGxRbuxMrTSK5e473ZVXSEDw
6c/O+6/UTLhv1ZTBW+xLeSn6/J97zPI/Mr83O6QuhJthxJIKZoVd5ekaWRmoc7AzQmvnzFG9bB69
nse1e9GcNkPFv+ivqUrRoovFnPcguafuCgP30FKzoqMA2geuczRtj1oVpkElqaS0W6juMIi23WSf
xOJurM80g+dybrSQSvv8K8P7uvJuYHX0VnHwxXUzs1CRUALKRTX2ddWeaBeusUGJPX5tm/9ZfeF9
opiBCF69sCpOBFjXkLNPWHMR+Bnpy/EJFjo9BH3qwOIANNrYkxOMnxJ9ggy27t1yY1Ay/DkbAEYh
Yross1XO2wsLrmMnP6M6s2E3PBH9+FIcEsWwKwj7xzzDDy3FWi1imj7ge9SU1SEtrTkStpFs6570
SLwMKRUr79Z+MzK9Ei/YCRZMaWGBPiSWSXJWZaHroLLSWYp3G3+hkjd1YlUECFTRkstBXNMQq/+S
t74UuYosXYY96KM4zM9IjhNoCMMLv+xUIxayeKyzMSVFNvHO4VyqtuXyIDFzDvf97GA/Ld9j7oc5
GRq6PEyeJEB9wX58zzJ+LzJmJLfkLGBgtbf1No+pXNhJUSHCZxd7OIo322L5UKGy4MmWDfNlK+eb
Cf52Psyd1umNSxNMC/jBzS9abSbkbjqvNjjV3xWbmHqoNg1DkJDPBmdP9MZN9ZyO6U2oY+nC5S7V
Zt7s1mfVik+Jtcx3Um2M4n26h5+DzBrR/xMhdvGgOzg4yFdnTOPwIfVwTjsnCgBzmhgAw3pVF+cX
LQFlQLiV8/tr5G32zknkATazaOFwLHTGMrNm9sdtML24DQ5VDr4CC1FgCftWnqH1KgRmxzmrB9xA
3W7Jbi+3LcW9K2bguFOiikm3nnn95fs8Ghulk9oVLNKzoXC67FyJ7dyq7S66wKXUA1gbG5BVXCt4
qGzeBpNKhhrLvMVlEuRPKER9XgeVWhz5yanH0YCTcPJRaYNtHdM+RuJvn8yxpJlecVJHLu4laEYn
NTzjEd5GQnJzkLHBWd7RpsP/0HOPYj+Mh9ED3oBIvbfhHMOX4Xj4FnMmFn6ZNNLbnPs1eOeJzCFK
ozuhX0mYfQtCfQ7NL0MzoBrz5sEduYKCWG23g71CiVXX5uc5mcLHe0NwH8aA8cfMHeKh0swBTPMO
iw/PbNTaK8rKFysOlB6oauLKELQ7uPX5VX8D9N3O+xESAbhomJJ82ldSwLhHDLibDNAZ0Xjw9tt/
Z+9/G9n4Z97vO3u9PYH/JPT6sYmDjp5ttkaFnlJOC5IU1aIySsRG6U7t5Inz/l2Dnr+35xRrD6Mp
Nf6D3YrkO+5sOpSfgLM5J0FHIxdofBFaIPB+3Kkpu4INn5Z4yxgYvCdals2JqnbKSQ3nnkbiDEbt
Y6rOeB/oTungLCM2lCOUwojKQv+WWJHqSv/+x8GPdDq3lOFmdl8aVhCF5svFqtcM1FV9T8aHYTrc
grPJeRGBAeNok/8RdOi0cRDGsfDwpyDD4XuC47viyal4C3p6vAHvgJxQ0tce1FxeZ81PhJq3x80B
yKGcdfStKOlmoHGM9Nvtk8ZgjcRqH+6yareDTmpbpzse+S1bLyW2Vv2Rh+zPuNG+95477h+5Gxrx
GZD2dfn9fUQB/vrBFtU3B1itewViKI077CZwdnfofoxWbuXvXvbaIu2x1Dr8tLISkuZi3itr4rA7
txbs2pgmP37sxBqD7lsZF8NxB1ha0Y5HKi9u2+7PEYQFkkV7w5rtH6o023Pr2TXmUtse4jxhl98F
pQvs19tEO+s3EUYRapsaCIHgrUuyliuGo+r8yLX5oyxhpIg5mAkasfZ577OJ+bN0dyYeDqosa+2c
X6GtMILBOi3PV3n22z5rlkaJ46SH1wlQ0zdfgXNb33puEXesVe6sKAcrf3a0WkVevHzGj+lfR+Ou
y8eBtkp9E1Rtgj9yUugUjVE5xzEQkaGdw+rkQWTwtQcLPbRptz4Z/5hx/xSE+KO3lM5aNttKwjuL
jofY3U7uYBz2Tx5/VDOHsZl70ccX+eJSHVwRORQNX6pOS516vv8Mq2cdEPXzbSGg/u9/CO1KluD9
E/EyBHuuYP45icbvyD/1cr9bp18Bg7tTdhzKSXfI8fohYs/PLdTItk3nxWrPbpq6dZyUGs+IxBnK
QrsqVk3wc+9KFC8o4TqdtjsBdgLYt7ofH7IxErcI+4r/5Kc3IZxb8AYQX5yXBvHwYA2J5BMX9fyq
B/dQ+xaZJkkSojHjINeDfdm2yDkIZd2uUVgLbPEeRoe+dWU1s2FL+F1/nUW1mlfLZZO3bj3zfmyn
T5lzIQFWECoVOUqihCaceNDcBkc/PCFIAj4uGrbTvSwy6OUSDpbterd0GaNNHT+ztdLuavDmX73+
19ZdlXa9yYr6zV+8/r//Bd1NiZrthi5soEFsU1dXBUVpa3VYPI6MVtUFu5hRSDl1JxQj5aE/mdz5
qHPrOgkW7Ltcsq7Ky3xHUSzVfRHrlfaHw9h80P3fkFdOf/psDorX8fOmt+SJgD+8MIh4x0p/40cM
4hgPGT92hLe8xOzviBDl+NNyvFooNzv2LhDGGewoOUZ9zMbGW9TH5Q53igsKAeLySJAZOA683MNO
KcQIUByQAD5vQA7H6IQUfGTb0PUe9969jvCHDs1Yd8QjGulJ0wYbzhjnbntEADneW40A6naHN6jf
bPOjTV4f4dQdFeUVKMbUjh6Na9uhSMXR+Tat07KVGEygbVBzSTAQ3L3NLllV58k83RTXaV3GwyfH
yTHaWWgQBH8X/G7YNVFEW7y7TvM1TnjyYnvK4H/W7K4vrZnFisyOyu36LK/zjK/fmzlWTRsfJqs3
f39UjbiLeiO367FOsr4EcGLV776bbRt2IUfCSdgRilICqz7m2A6Q3rK4melhdPjnZZ5vZo9A1Kiq
FnGh4KBbhJdzzEWJ2XzcMNRmxcRDRhdMzaaD/Y3tG2qw0p8wAouEFsctEcmdHGuqm7tccPfnUJhA
RQw46bQoE6reBw7HTXvOBMXqQzBYviaBBWoMcZ+HiCpV5+vqKhTW3yOlQyK+9AHcT1EjZtxHo1A2
Gqu51qbtOk9X2Hgw5j6TE+zDujwQVdjHtbUDepZFcxGMSS0tcgl1dUPS06qwqZj32torCGRr4xFA
W0mmYLYeVY3jWTC5zBerPC23G9jyW7Rdn8CIt2UGOz9mDkUgRLRYYxZp1uq4KipzzNTY37QGla6d
0GaPIYP43pBflUcywSgFajvCaol57LQQTGTKHXczmeoUg727FcemtFKN4hZEuwNy+DyFN45wEJmt
ghnRAggCLxdjZhwQO86sDiSiqMsReNYNV4heXeRCExhel3MfYlTe6BuHjL+ZD0SFU0C7WQ5Ffnby
iuoo2PpjAhDGw5Ov/3SKGxEG6TAc+rOnr//49A9Q7P1H6ooJneHRxcAn8rkTtRv/nEz5Ix/L3lhg
KLJyKCOR7Sm0KdkXUm4Gb/71638rEWQJEwuYjxydrd785et//hd8DZmuDfO1aKTiNgU1OaL87O0F
xrI5wqQfEdX0fPKbZDB4ulpFH+O3hjxYeVECp69qtLzTAeCErilnub4sCFgB/o2fgBw5Yy3HxADB
pbo24YyJXviuGGnubZHSXTAkCoaHAzPTTWYSJXGrUrIj/QaqAmDwpxIfP0qbYkEQu+ntAiLkOr1B
SEHGnR0//rXPWsxX5Bf6wS20qbclBsCjnHltbNU5suo8/LUfrhqtOniu/4MdJzhP63DqWCyd8Hcv
XgxjmoeD+A67iVMDJ/D91KqN9+Tc2hjBrHXcqLg2DRID1DXtdY7uF3FoONQ+xQOENqyo5oocrG5c
lrUniICJaW5G0HWaDOz26N7GnfT5UNpiiyJmC0RF0nuAo2Fp5VFPBpH9QUC6DfStJ28utqYbNV8d
tRDfso5uz3dnFkPZArEQpYJUdEwRC7laxPVcSa0ksVyWCYcZVvPlisam3JOZv8J8Z/n8CtdHulhU
NSumFQ9q1AgMAz9SMLm9xDxyLsILoteYIK4ggkO0wlprjVqMx6fdbvzon/ShzCi6ojXEI5c9dCIh
U41gNGSEyxqARE9dNydTqnQalhjvYbnPXkVN0W7Fiwi5qIojusb2UB8797NvOdRls+6uYCH8FWlz
UTXtUwohwpzWMF0r6shTLvsKmPNDLny0qLZ83Te03Rg1EUFnHKCMkMo9VYwVz9d7M6Uig+aabRdY
auD6MGzXRxK2vzmqlkfpETfxHu0aR211REvsCNo4stYJ/ofihkQ8J9LHboDzctCUCwELtrM6ZyrN
MyePht66rK0A99nmAvNzYACPLWjVC8y3a8b7Kcy5i4toucpvVCjKNV9cBu2cDOBGIiNpUU2tQBOl
LcpzMExX/rnHG6aW08RYgOS2huFxOgPBSrG281LMD2COKkiTbMbWArmiGy1SI/bDcXXLCw/7E+E5
zz4WinlGdElBdaDmEfVnwgqES9+2//N9ZEQqz2pn41P3o5lEnG9Cnop56zbQVVT9tmzLlCYktLdL
rQedhsIbuYFEwQZLAAiq3cXqAhhj5CneAd2zOEkqZEolJtUQ0+cXhm6RlijUbYWRZlyCulWgoucm
B3Evg+dHiR1Fm1ZBbIDkzCBjHYrHkqTGHbFM2mSxTB4OFYvcPbcrVVDmLdxEQEa42YAaksWhsPuu
kNu13qoN3/1u7893W1/9q4iHQ5i01oheV9EDH2194Fh0c6flQ2gipoU9+vR9yBJyW7CzbKgp8FZO
u49ztFq+kIX05t+8HoiSlG6KzeX5m796/f/+NStH/GJKilFdrXjTvJF4WBTiakNhlnAX2nDUZnhx
mZ4Ddx00JlbRZrcpko2JVYTPD7nxwSBejCNg/+c5SBx1fpnD/Dx+9Og3ICR89vxVtCoWGGR7YJnU
q8aO28Saz26T6+jcfKCBQf4Gfqyq4+R9TFM5kqBgm908PWvIHoP/GAbAwcLlKra0QMPk0mwSwfV/
XYAGCBvgVU66tRxrc1im6Nu0xoS3SqhQ7WIuLlRH7eALc4CFSAy/cCKOji1RdewasFT04Ia1fWc8
KjJL0QKmY/gf68w8hfCp4QP7GcXpHFv8D6vA0IvvlO1dppWHZ8ggkuw4fO6hBgjMkI+XrJBjdJIg
EIgmzBoeSXVWai6uTIf7lCbNxA1WB99uDip7EpfS8ompjDrXUlXV4bRNH5o87DgX1ID5gm1wlcR6
vafRVZVmmDmk06b+4DSp3u5p0ZCF0568xtZOHGrmWEx0L1R3w4V1EBP0nqgWUFt59xmyIHNEYEK4
vMyHBwp/c8ZFr3hGku0mw7RfxjUkME4T/dVq3LTGeQmlIfEqZDp7uinkFDNI4yg8itFWf2e650bs
yHhS4JSsoJksIOIrwPDx7BOxyNUtT0oxIM3suH66rLmCP4ksTGk0KL8ybK+zrrnlgTJs8U4nL5Ud
Dfd61YDEnBkmchSkI5N6EQl1W2qa+cUkunGMaPxWb38az4bHOjsgDA3Xub8h7T2uwEp3OapwSFcr
pnjzaD7vTauiC13mNFrdwtiFvslbe//uyLxYxtmvpR1b1FRomOgmx73RRZgYmWZwv3GIlUmGKDWU
vFr5/vkJC+UjpQpAxnBjJaXRHVGSnOjdGeISLY417EakEiJp+k0Rbg27tr4YaKmAeYwkFJwtOuJI
PCcnKUXopmCP6KYl9oWQEydHRR42KN5QdlgHExPTWCDcRmvlrNfl3Nsv/nS4OAsD5qYssKeSrDyB
wB/bM7F+j8i7b6Rd+9xxdK0zIDL5DE8a66OhQB5ui9lJZbrIQE0fhDUuesihGh7NN0wWCoUSEms6
CjiMMsMknzEoTcGhHwWQIJxOlaPNin5hVC28bzsMnS1K61hmBphPRmFTkQHCouUH6vVgn1+wy79/
mhl3Z31VpI0/7wJZuOrdJltx10SLBP1Dugtx9BOIz2NOFISBnfb2lOIr93Kfk0sPIewT96iuvm8j
RWborky+kAKgrupeSup0IYJnX/sjEqFHXuNcyW15Fdw0Rx/qxY83M+43T0LUNFT3cVbjjgrothCq
7uBa75nB+yMg86/S74rVTlkfxfSiQ4FWtQov5OzpFCihvCyr69JJ36NZvOo2zOO9JNYB/9jAvrZf
WtAt2UU4Qn23Kc/ErQXE97iVcbzHNa5D2l6fVnTmQ05yBG4rxgRxRGo4vL+HNlagjXqH1j6rqFhP
KTYPpstCeJs+Ceu8wwxClh+XCOLbHHy04+g+EXxwG08KOTMHXW/NgVpwJm6dDd7zAHF84oA5Pxdb
4BpX+ZGyWKgTCPiYVduzVX6EnS5SzPWEDKInl7z2xaVsnThCkrfM6rRXLGtKPne8h2tykVNyQzLE
VyVaApWfgagw7JhGGmnGp8+qPaQwnV6W3RAWLeUbB0CMPqA1tRPjnn/q1MH04qpqQuNRwNrrl0r2
Bk7xuC6mqvLWpB2L2JaIA8m1Q6Tg7EzCAMNb9IEXkBRt+NwYx6k0Ccazq0rQu7EE2bUlAJgwRwIw
K8LWFlhyODl1TnThZdy95gZvXbBvwgvP9YSzZHsVk6w/Q+GNpXDe9C1hVPdls7wZB/RhEOHsjLE2
Thy11EtYGpYZFFRKwOsZVmgoGMA1QSlBFerZrg18uGEjXapdV0/hTSf1rGCJKMrRFg+6QuCyZDXj
IbZ8W74BL5SgD2ajmJGvV23zzsFACBhV1m8XmGKn3XHH+yE4Ott84UjMUFYQDjLRm3/ruIDX27LM
6zf/w+v/63fsAn6Gx0LoS79CjyrcUKEI+XLB4mwr+OC7A9le32fZWddG7Xnn2GkGXA9xKo4GeYqN
TEkSpcCrvF5j6HgMo+9482ljvFrXgoDvNcpGGBd5pLE3pZjRxht21FwWG/szPlufhQ/UXGwa2c9W
sfymaO1W8Jk//zAY3MNEmTtG4mp7Doz5oqouG2cgaZax02lMUcHVKc95XW03rPHBSzQm05t42ApG
UJoGgNgoMLQf0iWUmQ3pMi7GozTtJaaz0dFRJj5XzcjyEU4XHDK9gaFiuH88aJiBfDpBOk1hVxYj
zRqo+yqFbl7YEfsxSyLUvaiuoxdRI24isBC2m4eEBd1lFL+YPeJoJKvVOBmOJw5S1CDnzXa9Tutd
rF7wODWWTHsYpNkt4rn06qLKCmnqdqJ1My1JyPVA09w35pc1ChiOBdNw4EuYaL4wC0o9Ow/EXqRN
vIRWlKpCR8RVuz3dHRspSEcB2wpBoXaPWqVTkg0ucxu3hsXVyHkF8xuvKClGdDONbjSixlZBN3Wt
al0h0DZ8chImYJNAqjOkTEUF7vwLVboC8J7a9xu/AdhO9IPh8GpS6O/JVJeQDdRCvYcbdAAtMnLF
3CT8kEhWlXg4ncL8TREe+HfchRdvKsXD+48eJ4+XTXT/6NcS59GZLZwdjVy6FpiguDmRrseut3LD
YYzpyCyWh7HK6UtPyZwWFqWJAbhf4sNLfIBZ6jaknaZvaSlp87TOQElF4THW/gMYgPp5uax6T39X
4uXr22zVe/QGk5/OOGWTmatNBhM2rznuH/5SgfXhZ1Ig80y8eqvqnHHkzeSMqvBvAx2/XTkeyWL+
5/Z6wJhpeOy9hrOUS3aAQG2Ajbx0rXYsQRXWuLj9Am7UgueKw7l8GdpHR6o0KbtorsKxqJKWcEuv
cXLkU6yA3kj2xTmHeKPkUNIhEcCQQNalK7ZlnsBPfYaGVItCvX2fRYoqJtTTw4LS41AHTg+3VFPk
yFUnAzv2qTszY/Frou2PlT6py7suq/Vnec6BnsRR6150nZbsr65z9SlEU94pjKheRel1uutOhY90
M59udjr6qruwTDda3yVUBBcGzUzsr4XQyt1AMxhLk+k01BaOu9OUfIzDVRQO96zLvVwkvwGJwq/s
9kSDn+NvrLttYn5jcmrwcyKu4VGsqdVQh6vcSQXOQeI7k/I1TwDyvEIDEAgrdbuiG3MT9Hw4q5r8
CINYhZSbISVDphxE9M+XX37+5dCNtS59o5i48TtXrchH2uAoA9TfPv/ii2efDG/PaTbE4vS/AYuX
zx2ZnBEbXkq82RiehPk5MxUmGaso7qCmHleNU1dqMPeCj6gta8ZsrWnkZwFWjWYFC6IZN41dzihy
nr5FUVle2j08X9rhPzMhGNUAXaaaU2R1FGev8A61XK0lNjOJHArzu9GFdRMUBt6HV7Xi9O4saJmN
u0GjYsZj4C6V+05x/Lgj/lmCIjQBdHGdNjdI9xSfpRslxGo1AVq2zvGQJhOTh+mOtUH/Sz7Kzv7P
bdEqQeYgWppqYlLByLy5RnMD1uC5FapTer6hvoluw5kHDM6IYkvMAu6UW7DJT+BgsGb4j5Z5VGXj
JfQl2SUfaizxDpGGbjsZ56B7Ux2pQlfEZp1rSSbLod4bQqk5oB+G1DZzsrsaZqqplhYEU3YPXprN
XfZgu6a9vw7X+boSA8DQ881D1jszE6E/KEdfz/cufDteG7cpFlroVsS+eNfSX7XpdtcNLhQKgy1Z
vRSiVSomZ1L8qyxuf7ceyGGqJKsXr3/a3ZDGNQNQWYwasezq1eVHiejsB6Yxxiq0VJuQORJTtWNM
HH6oiJrpnc/TTD1em9y2STDVrEByQrBIz7HyhzjT7KSi01XIhXhwgNkYdS+qq6sab9e81vG+JBxS
djKyk8iOTk+m7586KoAPA545YSsn95tTVCePjqIv2GvRROV3zW0noyIbnU7wR7NrVLJKfHOFsgK8
xkAP2xaTmY9OuxfRrSto+Ze8X2kf2r4Vjuwo4IPjeIJBCVMfs7izcUJaqLb2vraqStwra3Xipp73
HOgi8nxLAzRK4TjiDjVQq0QSwUNi1R+0ayDtHBVLocQaDA5jD/kHrcAaStVg0PL7FaeDf1bSv2ED
sG5p+OG2pPNZSl6v2n1ih4bA45wm51AYgkNxRZkGXG3UIZQ6pFE1uzm0ybfNeMx22lT/7TAXqXSp
2x8M1PELRcfU5yu2oQcGuahw08Vdl0oqFzqSmw+pxiWlmki1h9RTArCg8XeqgrnjzW6dHnu1fdvE
RiPef2iXoWOSgArjyZ1OnGbZ2Eiy0bubMtKoD7gJHOkws3x4pcIEy9lefBLfTI6tbOwkmqhCp2Nv
k7clKbHELLxgz2Jks0taPqIKl3rmQiteixLu+ukGeZT2J+E9scsLVN8y/cHFTI78C4ePk34Ulh/l
VMITQns7VvTjSgCmS7p/Sfkl6rTxYw9YENIZSU2O5CAC1irkEPxag1Kbnu/dgm9BhIrfyyQ2E+kr
lIpdg8P6OEoWOqejylHdcS3Ca5GWHMpWdhQylUR3a0dzu6f5ZqfJ4E5h8Zp2t2LdyDO4t2f0yXjl
1ixd0JUWiw1qWNAK0GWH6o2yE8XDj9MNRjLIoOtM7d1yRZFEY926m0/zFRDYl4YNaNtgwCTY/U+t
5Ima9UlktjYa16DPZYzBn0TGrq4jG2mAjIhg3Umgi1ps9ha9WsIdxOmqoet6GWftoplHHVEbvJDU
GqBAubeR74inj2+9seXbSyeBgXcH6mFCj3mGZ5Rq2Bhx5r33QEupU1d5KTEtId68yNTpI1J0SYeA
mSsJaSM9/xjYzaRRjD5lIKlhsAQ6t1erOavWKQwT11HUYgZtvHBbLHR0AaeZBeZL1wjgqz40B0gn
0RFm6tJ3LLNiuYStBe2Hck3E1cpoNBimAhSxPDlP6IA94hAIeOpUlBc5HiBkEhzSapC9U5IDjOkO
DkiLPnoiPmm4M0EZjOMhoR4ontRqZbYvIBu7hXVaw2bZYKi7alFQpA669qpwYPRbFzJrO1Q/Hcio
tl466eo63TVaP5UtbKL56MRweK8fw3Pll9OL0rNTne0YyTJvuleKHWGY0Clysd2cwDeiBYYBZ3FZ
4V+1zkZoHRZays15OQi//szZCrTdBR1XQR+w/hYsIIKIn44jWid2zk4Jt6JC93pz1lwAZCiietqr
kSqwo1g9j925KTgBXYvGFRgQ5mf+dotHdhh1DM/n3buORjpSPwf7FRZe9Ldrz1pJNZwRHUiUpioz
Dj+fdLU1iz9MIluXtajG4rw8fyT997Hgnu1dZtIYTvpPxBRX7LK9XvLrm76T08NQbIm/FMdJeJCy
BMujziIrZl9tJtPfk7llDPJMQ7yrhuTMt5FZXbnKjhCofEDKyjLsIF8wBlRETfySWcUEL7di2YQ0
Mrdc3M2r0G8Gteq9hQhq4dAXsOx+Aam4Pw3HiZZT/ymkVDrQUFqJhtyRQW2oO25wON9K2ddt4cmM
VuVD0cDtXj/mXtVatJrpFdt1CetgU5r50iN0zRO6cptuXZmT2Ro/YuOZuvWnDysT64wEz5Hwuq9a
XlBwjh15ZwA6IIELXIfV7JHEAuJmb8IkR+7qYTq+CHXL7no3hqUNvPIDtmKfcfVvSQdvIV1rgRKG
ejYUCSPe2LcTrVd32ZmcicTNaZWXPNjZ/Wb/DtXZpXR0HBWOPLxZddaH7RO3J5pVc75nw4Gvh5oQ
tS3MaQPJUMUgE0AtDxQnVgAFmL1IWWNDQUmUFhK/yTMspWgjpLloUbtB6tHL2Lps3RmrP0z05r8M
7ZlLHCMoGnXjxdpKs0x/U0ct6hm1yZV15K1GBBwDo92lpiRFxtXBW6WWk8hc3imHswkULxq+Ug8g
uG1dpK2VclJcFjCaC11GB2UP3dgdX51wXFWJ56261s4jZsuTTxPWjcZ+zc0ukawqxAFRyow1nFbU
e68jbeCk2UB+iZxlWxbsCtZIFnhWXrnQ9GHvhGEIRHFANPCenI6VVcACyApxVW3oGFz7MHjUokCd
WZDS/RefGWk8oy6koGKOo4jDzhAOWDKQ6/jEPhE51NgZL4KhB+o6oMNW7F//AeUAtFpT3bt0WqDE
6jV+wO0QqBHfNYHuvej169fR5+VqF9HFGstIRXR+hjdrYHmc7dCAUU8CDRDFZzmGNSZdtbmotqsM
a4r/pRUqyY7TAmjpeHJ6wEuIBvhFx0auVAhvvWi7hvJhX6ADxhh21YU92XtIo2/iJbq4SyWajgKH
KJoOgv4uFiVrt5vYveXrr8l5H6X4a9gkWQgv669dROMtt36+YjYcm215a5Ip2XQxDQW989d1J6Mf
5W5i8cIdbih4z745dOL1CHZ01gOr3cDg2POJiSDoPMVubBj6a272OUm8bHi1PCQ4nMVFiksyJFeZ
QbTVNfxq4k7TQbJVpUXq7NQ5dGbUybdsvrNuUydTLfBQofHpIVmqe2dcj0W5vfUtNxI/2DhOUglH
qJGwogq6iFCrdnAKc8xmuk5Yc21jhX6vimrbAJdymndud4bmVy1Wa0bfdi6ZYS7sdDMrjE3nG1KB
k7CDREZB3Z3UGsIg3JXd65G+wBTdo7kgHTXlkFd6kHNC1cSuOA6B0CUbj2qmp0G3ENy6lSdptTpg
c0No2LfyrlucP47+TcULMUpeUZ7G2DHJKB8no8Ea9zrHA1oKsPezLj0z7VnKsVNxr2HIry9KL+q/
WidD8UH58zqRiqgwLpKDfN5sv5lD/O80ZD1OeOOu9n0PONGP+A/qoyky+pxVsmfG7ISDxMsuKL0R
9WJYazlOAJlfqUR+xdg/20Tm5Bfi7AktxoY842bUltoYY7NttxfbvnijnWHkvlY5ilucQBTL5va4
fc253PKBGcGI6n4CBA1qElA6vfh8zbmlbppAyTuVfUN+He5bBW2Fg+soHyoHCK253zdoROVcBQCQ
zHJ2KAC+m9roG8cMjtZy2e4X+7MmkLL8ewHsqGgu8Aghev8yugbZHBYdbi8rWAx0PwgjnTOLbAY6
ZGtVZ6xHkq+6dGWJ0OSKgmRGXst5/bAsFnIdaT7nIyOO+6aaHimwPyXbXx/UlJCB9weVpgyTyRSL
gq25CJKKIw8NgQiiyOeWbp/dFG3ccewL9EohT9d5hmdO6D9yXqdrukzXRLD6I6KRM8Bq85CvhGEk
yFtIeCjhFvASd1M5MnMPaXYADdL34B4F20KwmQXIiZ747yPQsbG+4CDxFS1JSt9SVxRrIMUD2m6X
0TUGs62L83OM8p5YiNY4uADtiPBNF1BMrIFnqufBAHs0jhRQFL8xfGi4jgk/Q9tAA2952tGljbP/
YIhnFfOWjRpib04i4PttPgUuNXJT0HRTz6hsP2l9Se4dGJmGDhmxUJYDmdS5Ol6ku/iOL9G2xAUB
NE4MDgNWcuw9WEewntZFs8acDYRW8c9rlOdylqM0kJcLIBUA+GVuw0PnAxuuRxczkQCzHHpZefzx
ALwrloBIJfS7bix4OYtL8ATQCuIJCHJYphheffmKj+5xahbbGo+GV7uj/ZP0mUwSs9QppnuXXqYU
ThCvpnAEBFgA+oCUIygvt7DY1eGpdfwYUKjZkqTCYGokaTXfVdbvgE5hVYJNgyL5Ox4gfA56ucbA
CV5XM53rmAFFqeLcWHewrEQ0XA1ITuJCYJQrOtpAmxtHfB6aRoa0fNtGxmRibuo77JihCY9HrJ4p
3jPG2FwWmBEJc0MSoQ/Exleh1UQvfSXQq1wNaUma51muQWXM4krk2JyqGwxMAxyywT0fm7yu6ksJ
X93QHW5dklMi8ShoRQ2Pk8fJ+0PYmljP5WcMIXo8RLWzAojqu6wQK4kaT8QID9Axc+RojEGoK7yK
swDAdiDD3JBm0nR9i90og8bNf99lfpr/4YJsR2TxXIvNHnd/Bc7YjqpoB4+SAtoXz53F0HXhSEWY
gnKSAlYdLHVjb9qCM4csbeFjjAVoSrpnDyeYUOLG8jDUZa0IiBpcBYNlwTFdUEzK6EP1xgxt7GNv
raM44EKw6fw+nQC82RZEqY24n3snHnrSpVN7GbrOWYi7N//u9V862Uuq9boq3/yPr//jMUfndULi
TijQgIkooE5LUPPQdq05bwKgbYFWAep2zqlFR/TQICWuivIS/2J2NPiDLkSjXu9sK2aukWRVBDcV
LI2yXBSd1B3M22Cx6htbecY5Lq3ERqv8LaplVRusaTnLEj5VRjAnDm8ysrPc3egco/V5N14KvPTr
eubDGiUqDKf2AH/3g4KXVWYzKmN1jnPyFuNX4wqC3ouFTv/4gUd3t4ZslGAV02Kdr9rqsMa4qFt9
WZJwc1gDqrDbhM7Kva8NpTcRdZtE3m5Lc2TZ21QnjneuKThRLbek31xe9wSPxPXfvSFL3CKnlfTC
z0kRNN9I4duCcB0Yq0j87fESCQVLBGrvCZeo4dc73MHRlfaCffL+9HTcd2xxaLilTsgltXlhr71n
I7zEMIYQxygLNj0ESeZ+zYYdvF59BSIX3Yng8D503M0kYGjpACschgYXn3P02E6v8WeM8I7h7Rxo
kHKERE+i42lf+MaYYjlBFcl5MI7+XqapH0tC+10i3B+6EVP2VdVKOoJ+6In6/nHdyiTHgAySgZJn
Lz5/9uIVeeTrF68+ef6l/eajr17+XTBoLH2JljkMhBg3cE3YpylkYY2Z1EKnbijxobZ9WfDZcpmj
loquo02Buyb0/9mzT55/9VmgroTA4iB6xRk52IpiEExo44YpC+zRvdhXNS+v+5EMxYD7YFjmQBCl
OxGCijcdCUtABzM/7NXdgUNU/UgAnQAUEqEDQ7R8SdaUjpWReIKU+wJWJjog9fpo+JlnYL6MY7Wd
UM+cdCihC92w5Kcbnu9Ki1CkQPRsZBS/E7cjLjTW2sU2pybIGse/MK7S+5c9MoB3+KN3fIyZi6RN
G6bjJOEEzcbEpTvU0+KRqjqiy0Vk4NDig7nlZDLG2pHPtXwgEbPHjnjYB6x8egtYpaYBVTVlQSqv
XEDlZQDOzbbO92EWv3O+Ro58ygorgXwIxHbzBmz7rQ27/d4dgP0lMAol1XrAU8pOledCwR3FwOxW
20zlHUQZ96CxQGtmCCTeGsjh0QUYXoSpQl96m0TvYawQvhCKv8LZcxFEk0DzC1qbknWKjUX8DhdW
7h5lsld/udPpLvBRkio3YU8i5/Zdfq3JfjYCHNHa9WG2ReE0m58VeBUhnAw4zST0imQgPSMzHfmF
kUUaS6hJ6a4Jtm2Q9xzsJfGoPhuN0UKzDJ4UoMddmsU+eK1Rf3K8HOqq4WEw5TJp9NagDjGaiOpu
pvs9BHgHemWgxoOyUT36efGLvdwZv+hDqL2huknTuyCquxFU0SRiYejwtJHMG7QxTujuDRAllrUs
kJguObNyuLBoQ2lqsaQ7SOW9Xvu+NXxVbaZ8XGFoo/qrUTBNsRRWUd+/Lkf7fLOXqlGhWr/RfbEu
GRJG6th390J7fMCfK1msqsYJZIBxb0MLMpTCXm6fbctNsbhcKbwapIwdbPpjOxuNb82PoUVbVjMy
DlgEb5s2S7jXBCGeRFbo3OBwA0Ol1NlMfhKz2h0yfmeHTGKG6ApCxTpEIq+Ndu9JcKxPGTH9+Ys/
Pv1DzLW6kvdwkZYcjFZSKqFZruLkyE5uZBDOoa/h7UE2aLeWIfoxNgxUrz959scpHzuCNlNkQPdV
0xxlmDcwx9UR8MNZVJtdp2WrZ8lOfutRrrWDpYp1uje1eBuTuQjKPxxMXnTosdUpp1IPhlPAftlZ
JmVUo3BrDIOUV4E3bTwzcnMf/AlYIJ6s601yYkvhylhPrbIlS2Tdaz7SWDW516BgXdkrp4OA/+Uq
nx1rFSulF+FSj1xFjF5G8Trdsf6HWzxB1TUxAI1xH6oTeNG9Ogzk4EDSoQ+mMRw3lrsnv90x/V21
pWMMPgPZcdZpPI5QVgUrudZE5iZFbSSEHDYR01wzniYC5ZiCwNFDtKnkvJxWlIO+AOf39bbLa/QT
V8biaBod/xAUhpTOE0vSQm0qcyJ1uOY8AL/N69IlTiXVWQSlhKyHeDJHIjG1kvNx3fmqOjsaSVsj
i8CYuOQDOrehG02AzN5TdilpFJVI0NsBZU6x33rFUHDEZLA0d9K2U+Gkyd+cehV0SVb937gV3uUa
fgWVmAPL6wrP9WLj4ak05ryqjxp0zUrRq6/Fy3r6XNOJd42J65XngWpIHyZTdvJo9N4Ii6GrF54k
Z+IeOLBIEKtZaC9cwLIqb9DJXTnohOBThW3A6NSOMQFwnOPYWoca9gron774jKrWsSIyCTYfNEzD
b+9YI8Sd+fZpwVRXYyDq4sooqYrzma3Jvgkxki5Gyb7FZzkOS/kJXXybaIvFOLjPGrvl8H49FfYn
UPPUKnsFnk+ppi3bJJSVtyixUCy1lZtgBj2PTCGkDfN4cnR8ilkn5HLTxo/SpKs9MGU6dya85mYH
Fn3s9GwPCN/JaDp5BvCUUzlLUFyK66J8/zGd7uoMeA0eV5JaLgeVbBX3jeLQmkrDiLf4F2hWkt7H
9rGIz7xDlRTWx/0pzbhldgg1FaanboARKWb3b5cevFXLOvyifbbS4LVOddQnGrtvZKBCRjRgTVy0
bwwGqrZclvP6LT3ckNGwvx9CpeGUhI8frKV9hlodwy9gZfDm0NWd2msb5SMl4buRjIEWQQbKJHeG
Bf+YnXpUdmznEpOWE8lUYMFJxJUph0VhB1h/4O/3JD1g3IWcb6zlWlvE6mH20pGSxY9QddpLbkCk
i23tS0Nk7ZopcRgPqGNCcOheLJYlEVvA5mzgUJ8uw5RZdd3sofVAu9jrYxsCZuT4xtdGVxkHoiXE
9BUTVFCTXTyU0gt9TugcJlbMBu9aHx+QEA3beORe48WbecQAELenIIGUPkwCfSBUjgl0vupiR+cz
ojEDkCohVGjJc+G7pGHszMxtJ9fW6mU5dWznMg65CHRqkHDuVLvtpN+WiG1LrnWuLcHcZ6SU9HGG
uqraI77Bpeww7Ny9yDE1gF7yoCi5y3S1bQy7Y501vDCVhXLmMhiiD3xz6t088Q4A6Z7DTLXiWmx0
y/IrUabVDvlzK7rgITdSNBFKnbEvyQh+/XRobgx9a95Wlu7KHCV8VOLZfbksy2BydKMdAalq1Mnv
AvNG3kAsptaSu8ZuKTxVgmvnZJ78kCYRXbX6rtgoFwIks7HArh67+SMpEWe/bQv7C032zSBU0DI3
pFnWe9Jkoa/Mr23BkPE2ogojvImrRX0tcB9kB6c36umBfZBlgbhYbw4BEVMziDdFfAS67aNJ9OB4
nOzf2pQRcr1RN2ZqFqR5OuRx/BZcj5pU0uQkssZGkRDkxN/Mu3erftX2jnlvrlyBOPrQHcFbsm0C
Htoy0BsYr0D/aPXd75XcfwAeJ7+KcxBW85l1yjqJzpYzsT7jbAX5KQVnbIh1crr0s3xVXTsnNE6S
ITHoaHMPassxavlap8TLJnINGyZCZbJSiqutnmqpiezh4jtKACHwcptajCPEFmKj/QMAASzm2Xjg
vV4cCCp7qKIiXFerBqPo4AS43mMS9KvBDXxBGncITBcCnhcCojT3UiRTFvWJcbaoUEahexAiijZQ
1HRD1N0S6PbNztZpmyg+2ykwJjSTVviZtDaOzh5uzpY4O2QcpAlYpOgSn9KOkrUXvLc2eVqj6A2q
L55/cL9e1ifqGd3ZpVISfcLvpuIV4MqIyDicjukNUBru1cTqMPU5HoEY8/IKZn4V5v3a3/SPuEJA
zYappoWh1gSuA14B4+Q8L0MWcQ5RemNxBCyuL+hSSBNpwr3vU9X9uXEaCng+mtMhQTf/KJNnwzlu
Gh2gfb+4qitYnKEBZq6lrr08WxmCefet8yW6RMhGgq1Q+Au156QNs7hejU/xvtnMYVhM2TIZvdE/
9kySg2HL6kJVoKfxtMONZpZNCZ7HPS1Qf90WRFNZOM3A89jV2+1IQtSSNbVyMLjobVgCzSLCpq5r
XiB0tqkG/7revDxa+Nd9LVxmJth0PzoLegaI9oLqbNqGkyLQHzIf8U0DWFl2hFwnpnAZdjnu7JZq
XbMHMt0pRs2is8dbwwjtjwObo2vlcmGlsTRhKmRA8cmGjafIHASMPs+ojaPPEA5i2TuUezo8xpvx
+LQjTndw3M2ozkdllNWp8EMaqDIb7XBGyKUaPa5XElF5EK6uhi9DHofvwcsmrkan3sWb/gjOnZuO
/ySDVvcF1Uqd7nGG904wlBxIm/9MG7Zt4ZfWc5i0TUW7HRspyt6+LED7InInSwQwyCNPOdTp0i0/
7T0cQeUkbslkfoEyLTcvLJgvdTZX5W9DZh5lzSya9KyJu2jpDhAPFh5EagT80xmsOQ/Hk285OdI+
3uwBpToaDN78tXtxAgDd1qs3/9Pr737DFyfkGgcfr+HxBhKPus5iK0KIKwxXyt4SN9h+uhqgfohL
dgQNj5LoFZ7Lyj2ZKC3WtMfhHSNxE7kqo+Pkfb4iXZyjlIe+pRw0lSLXqmvd0XUO4glVAjFoVcCm
KUA1CV328G57TChUJe5r1r0PyR2prvewGiJ3LE1iSbZGcBFWd8PfAPjrBe3NV2WoXEK5BLZtsVI1
PtoWq2xRNe1T8n79GL9PoqfngGf6PRh88uyjr34/E1FNLhFflR8zUr8gQ63qLIEP+rjDeGYqCG2/
TJJtqyXJGJK9GqYh3lTkgbsj55cW5G4z0zpDD4kOdCtyqAza81VT51cqUUJoTPE6vRGWNzt+/Oux
qoY7iq5ohu0Uf/QI1Fd40eR4g7OZ/fJR8sjRD0GNBrawWDUTmUBoUxSvdNte2Ff2FKPBIHWcQMWq
3ieZcKNYwN/8rhSbgJ+uNrLV93oS/N1Vq2YOkzDA6cmkPe8szfAcs4ld1dtuge5xbeLRw1En+0ht
Xy/yBJgrEmCu3NcCtgOxJVUefs2e2u9yVpXll3lMfL8eY2JfG9rbk1iZypNIGvDsFQoCm0hwaQKr
gLfKiWyd9Ry60PVY9OcgZiTsC0iJ7HZRta0jvBYOIIhcicp/VfqnkhoNwZsTjiMowoaBnGMLpj14
kDRwJ6MjvHSm+jmFjYB0Irf6YZ25eKLPPx2WHFsgAY417jc4c9C+sdTqgQ3vN0NNGHNQ59ONJKbj
q/4SSgrLm9oYZbRFy6qLAG1w9VulDB4ZUpJ1nHsPpPcb5cmIgSFXmPSPle9rukY7ai3PHMqvax+d
YtVZNIyGndMDIAli5V6QMAzJEA8Rm1hV5/NBL047hqIO8ob8YZ1heVyksV9UnT5tW3dOnTpqZg+a
0miEd5Vhshzr+5puvGpeBUhjZMV4tANfOyuBOEtvsnNs7wFINhFKNrp8grFm4FPVicLrULSMDQna
CaQnr0PD7ShCjGLZ+BOnxa4PnFcsCdgN805Yn5PjU1/8i3N0p2OBdPQpnlI8Y2ehMRq4j45DWdSc
TvzqmGM4zXZypKSaGfdd2jLufM9eP3/5yjvsN6GHbiOuDTppvgVpTcyB0abY5JJflz2JUREt//9L
czRkn+L4ZQgH5lA4WNHiRXsO5xQjtDc7uxWSpTAVPEpOu7CaJBu8QQ8ZFHoHr858vAhjttyW6A5j
rQGx3GWfXFiY6XUyZ1IoqSy6ViFF+xFE8Kqfv9PiS+XoxR7xw/qr4STo7/NHPBMRhx+qd78Wh4EN
m2HpKiJ+sZ18lPdM54gUjy+QntNW4CdDcr7etDuGX28Vby8amNU1Un2pnSw+MNdKP/lMovH4dpFr
HyxHmATu7SACFEz2wDb+KW+3bAK3WzY/3e2WlQkDW7NcTy782uHBnn70E1rhSdIvDvH85hEPyW9D
eBm5Zw49A6i0CkL3L/rnMHzP5rD5v8ulnXu4KIvlDjm5hDeKYlbz1pgbAsOQsOM43REwXvaXnkNU
7yRTyLQ8+s/h2fYdpZKBfcEZnTPXzflIJRvRk4s6cJO3+oAcs7WocDhBJwQqM+dQUAwmXcyk1jHv
AQ4Hg9VudiCbn1kJUKzZ45EoD0J3lkKlLBF+RB1g5Imj9WhigeOHhPS2Bqsx7dvQ2ehMIdvDYLNz
bkFwrKwRvMegNKGR+nwem/DuSPA8EWMtdJQobDjxOL3yMFR9j2BU2sewd6Pfe6MCM2CyAsfHccV6
zTkZhz4OPSmaGSAO5mhN7M/8M8IQMc35LZmwOoxv7DFDuSoxvmUy5UKJNZGWp64V2cKesCF/yXpo
0+enZOEMXG7pn7ioZ+YkS+SPmDfhhz9m1uiyjJq1oyMQOxe5O3v7Z47iSv1U08dZcbqrsLewM7/W
tST0NjiW9ajurh2yILmsuvVhOWftopiyIZnnts5zdrFboPCDsz84YNna8s4/8lTXa2d5qpUZmL27
zpPlv0cxUGWRVQF3eC7Bt0hIZbHi/Rifd1paEq2crt2gEocGgqrmQFfseYDLcGKkTT/kOJ/jUSfK
fhl2bK2UbdCuEwv8/SaZOED34y6n6ykvzU/Gp3ejD8/kpWLOoI3vlKMFNQcQg8wDW55iUPx4p6WN
dtLdABlY32X7IOlEO26neGRL8B2JTGLd8bJ9Q0DH8NxZcDmOE7ytsqu2+tZT2hVa8O4+RQAcuRSl
bq+w/Z8izS5CbtdsRu67XfFPwLm1K26vNIRilStxsRz0aNz1PJH9kb0JlfeqF5JWORYqV9vbPDkD
krI3ZArAYoQMDIBGl0tAq+DbVShKvzt0FIMblN65yjR0VHhjnYOHj2IZCsQO3Ub0or9gADg81jXz
8pLfhmT+G4tViLA9cy+vWvWsiAuqtKcBTaJjHeCAGgUlmWfRYUAJSLT4aV82eKmqLjWoDikUBeGl
Y+aZ0aGBOnMLyWs9iutQFrLYZV1Wxt2Ow2KeW1Yg5rsLgqtxT80br1ifqbqX1zlWy9797OaWO9AK
Zn0vN+wCL+qe3I3vaH2WcayuNrhSrVhbtpmrsS3c6qRmJJVGfnAufYu3OZninad7ZDPfYBzRgnZJ
ucw/cPvXPmMHdo3lrVM1DiUwYz+vwBV99f3kJuEzOSveIn07OZ6edm4FGYvmF9DlH6DLTwqd+oGq
2S78aEvecx+6jDATH2Jju8A0t/pQ/QjGc1Wgb7IVl9UV0zEivYlU0HV6xynAyvMmf0OHiuQcPxen
obn6bNU40/4YHTcKE/SN4o5j7H6r+WA+c+SOZ2Wv27eTLDrMjkV8NkTRgb0ftQDVlu55VwzNEcFt
NlHlSlXJBSEbs3S1Ck/F89rfW3qCNIRpckVBJ4+uRsEwdQccSxx2NCE4t48Xyqo8kqvuLQcgJ5Mn
7+TqoCHsGtQ/FRPuYhwIoeZ2P3z2+BH895vp8Ofuic9RcLNdVtsy+9lHhoHB9EXY1PhX7Ebjd2c/
Z79flRR/D4RBNOn+fL2tqmskeulVkheTJLTCT9lden768cfPXu7v2a9Clv5A2dtYeYDfnZx2nOhW
DUWMUew9GKCQy/SFg6QclsiyjU9PzDXGfd6QnKGeGOK7FC6VLgmy39QHyASyLeZ6wDiq6NrXb4Wy
x6fzo5vWmTOP+3GifJ0DvNgu9pOdPR1mSbBPLIxbEioJVU08OO7xFpFDq7vlr8dNjhn7wccozuFZ
P1hvB4wFjonSI169/vUUunbi+Ubhhkc39szGJkGJ0LlKeYfJfQ3QXs3RHLqsLrflopPySNdqvOxr
nqJ5qDO5CEs9PuXeohWU94socnJiV6NDlIBL6EStV7uwrdN4sooj+PDtZFxcFCITL8RTtOhAAE7b
c9l2fL79GF/UPPccn6+hyLVI1i1ImCYOFlsr3RfCHBsNADvtOLGxsGuEXH7HpUHB8JOM6WsbVE4u
FHS8VvHbwI4xdW5uns7pWr6+XnUFqlGGRk9Q6s+q2y6lqpuof6jOn6EKZCXGWVI0DRUwaKB7UjEr
6IHjDIm3V+xeLaIoIeTCj+uJW0CdSOrTDcS+un/z7OknUIXjruMwsBYaKCbGhhOAmZxhMUWJXMCn
MJsY9aQ8x1UWyOJpXyaO7iG/xSQFHNS/Fhzk5IphTYrCxCxysEKu6UjTQwR/yD748t2pjViYRRY+
emrCVycSitXzTBUVfztus7NG8QvsHuh/NdzLjHUxOruesnIf6w41ZRnalImRemqaaBBHVzQEq8ub
9YrcWWZR78E5EHV0dAQF8ezcHJ8fyO1jGcLEhmsSuYfnblJ06CrJ4K+YQNZFWcCjZWPJEV55nZBb
WyzjsFeoJGi2RCESg6rzXIhTtoZYEtRCswkti48vYFcC2oN/X1RZ6N6DaoQSHGNYFpxh/fLZH559
BmLn/MXnnzwL6ksAmJJhjEKtVk2s2hmHTAhQdTC49+j48fu/+OCXv/r1bw749ctfDTDT6OPHH/yS
W7vYXKqGj3/5AdD4VfT4F9Hxr6YffKBvPiSb3UDuAmyqtuXjmd9vAeOT6OUfX6Dbe/KIUiVkBXpm
o6qVropzdLqfsAGykaPpLH/33XcJhOP3jx9H31YXZbmzEHL8y8e/ij5Ld9GjD6LjX0zff0ypXuZZ
vqgodFBDsIg7uSt+qkRUFL/00W9HrJ1Izql1kWUrdV9SsolJvDW5kykhfppqi4dqhcpOtaowIt+E
0p7TCsDcRyAbs8l61ZjbDTUGi+CsH2auRn8fvRf/9osPgfCffJ09GEcP8AnXU1U/SR78Fl88+i2X
aYrvcio0/m3kWsRH9B1dDp58ff0gevB19v3jH6IHJ19n01PVJnLRJ8l74/9tNO67ukLS+dRyGaWd
gy6ytrtNgUcvpBbQwuPl3kQXbbuZPnyYJImB6d6c5uoY5or++3a7Vp8eRf/HdgWTGx1/MH38a5h8
4PkXJsEx3eRQ4o3GXsIXPFztAYab4tKmOyDndbXdYB6+TtJIsd5i6ROWTLpHNFRIxRgaPRyFsqGq
yzCmPBrougVZCuLMGbfdtlNlOb2Gxh59g+VCpj1KCU+5BR9xuKEvRu4I2VqbzdnzHb1ueax4oHHq
YQO36zlTl8YJP478hPRAbLoIPoxORdJT7fNL0mYeeRnE8D4KG9KAheMDp5BdFw1GR5vv8rSWRpBm
O1BKdaut9yK8LAH/2Y76+ZuDwmer/IyIPL4ur95gJsT5W/wHDOaeynGGmgHpdW/ZlEQh6ccTvjYa
A6VYL9PV7juOm0PYIUbGCepU+Dlap8C8hrJKYTMfyNkZadTVtt1sWx2JrbmQ2/fYZYQCFDPkkfTO
JJeuz4rzauvlOVLXh1JQN7KUHXvbOYpvMwIvOac5VHeHWzqvkm/QtFgpVMKhklRpqoc7PtXkKrIG
JtHo/tlIm/Yy2AtuLZ9B+cdcXkJmOEWA09G4gZIvgL1PQVrYtnk33RKwi+F0SFaR1L962PHCNDRN
bfswYgsI2t+NnNMj7J/llS2dSz2y8zmZ1qdOhYkpH+7kb6b3P4N+3p9+cNqBCmcKITAi01yLQzEW
mvCsTBDVE6c/DL9B/+donbr+E27cu/OO3R7NJErSW/elXLmE6IhlnK9j1d7YDutPklL/fU2kBP+2
pnXDiMK/WKIeSnE0EQmfXsWjr159evRr/45SupAkbNzAed7qaBzxiD+Oxr1NaEdvaQXY/tPQroRu
Wrjw5w60bmeqzBEFG+zv0+7XabcrlrplzMazt3vcj9C95M2/f/3v1LVMMeyrKXnzH17H79DdTLoi
eUTxF/Cq3pHKDglKYEbnYMh93vzPr/8Kis8llyJPPUjEb/6X17/7C24G9ZF1ikFA8iNis2wcplBs
mJSjUoKyfXoUcUzKVVqUA4x+Thqoe/USM2sR6+a0iWkm5r2YOlHcmqQStRHWiBh6Ew85ThvIUxwk
AwZEzt1qACr7opgM5TCd6iamq9HRka5BxyfyeMTPTDuzIcalyofG4LbO2/QqrWdDnNuhDiXCtgfD
V2B74yKkCSkc4i0GQqFgDbpKADwbF4uqXBbneIDNvwQXZnBo6cIPCY/DzJuI1JhBnCLAgNLDpglV
E1PzrTAFLC7CJopvMh27lxRrVY5zMLZWdm7sD1BEtYsStj7bWQyqkIvYjL35dTtQ4XqE5/so5+OG
erYl9cVJMYwDmduD+5J+g4amu5X2x341nmXQTdLzHM+jz/EYSkFrNeriFwSO2zGsnFv0yOc2qTjZ
qfWHqacP49wK6DpsuDn4W3UHv39821KP0B8Z6LB5XSyI28RWnnrOPj+Lgnnhz4XnndBL2MISk5lw
2chH/Y3f6I9eri06tKUoO6VUObYTqpf5tW4Ri9mtKduoKTKT7juWAO64y07P+Tat3OabjronJAGQ
90dscZtM9obbd8s+9LrXXTvhQ0QbImQ4x98qZdijU8TlKD4ZhTpDuct04wDg+kMYtCv82tv/SBzd
zuXGEO/5ZgE6uXwCe7+/QD0pgD9rfuWpUpppqF/EuYA/pHgm4PILDQC5t8+hxty+y4Trmg9HVpQo
GDdohKo8x8vIdjCIXSK5pBO6YDmPh2RnIzufXVe1OJ5wIHQbZNeVTB/0SW8sj0sChanvhen3Ln1j
8WBPtrV7LpnWtdV7Q6GKbhmyGonF1FTNEZJfkTkMTQmhqlbwJpLVJrflL2gahD9Xt8ySFVWSGXW9
LekvtMC9OAN375YLGBSK690ZZj9YrZRZmL7g9Y48uyVGTWNCsBIDvqiqy0RBQ+3M8TfGW9o2gsaZ
gGMx7yznA2TbfQITi9AHEEVvvDWtUKADRRPEBjHO6blu5XV/K6ORW2cPDqxajnHc1FpSFuQfAbF8
ldzbd2kIlM5xh2voZaDI+DY6WlSrFbCxW2mIg2z2Ymo/RqyZH3467IaYPRBh/TuSHNW4yOwF4eVe
EIZ4uJFNhfl4wHS1pLdBOgVjAQmdDiCVa8bNwmNO+Lio08bmTlKKXIrla4dDdRkaFwQlQKTyDkfr
52bS2HBxncmRj+v6Pt7P1piljd4V/3M1yPHgzX98/deWesUTiAI7xnl587++/k8j0rG+gqcCM4PQ
dqJLATLPtucY+MTKT7yhfcPWorRqTvYrIXVyopYyL8U9bdCX1FoVwJ24wLinH/Ee9VRBQmXZH1dt
X/K3SdxCg61bbN5WqP8NBveiVxd5RKFBOIt4bjKaq7hAFP1GZUmXLAriSICBVTViBvc40g8ICxKz
hoiecito5NU5TVPNgcNbDELEx7isGF6nzeAeZSnKMzHLRsogsqIukfU3HNoIilBU27SxMxNA/U9w
hp4rWEBEHzgjlMi2LKKzcjzPb0AtLgns2PptrJMUBYkL85VhU2ggednh2wokJAoZuT7LKa6suLWy
6zw7F2doALpGt3aJyptKhDn0GcytaJPT6Ovy+wn88wOh4uvyz0lE88Xn1u11xQ756O2RCc9ZVNgu
dIinVRaMDZ/kKKsuz4qeabug1hskGQuoowlFS51/jNmZQPDiJ2MKGY8FLrJBUsBh0wrt/PjO6gPg
yzOKXwTSSWnywhCm0FPsPKF6ktYQBLd0xzE8l0vPSGu3OotoS0k3TT5foj7hzOTA9iKbkzDYO+sD
5/B0LjTCsqM4+FrS+pZd0Vhk13a6Ace06AdIk9bHUgYHk5Z80wJo2Qr2+bLiVUJOyg3FvsB7CUPZ
e6gk0Mo1kRM9AdhJkgDpMJrO8DjPQx2XVqZYDrqNupClLIgfgSppQS9unqpn1ScweCrscHip3nXL
dK9osFHMjh9PoWNgS+OIg9s1ZpV3cHhCEE5Pu4fTC3JC+H4YCCpP3TyYdeLZ61o/9NY68mtZEgqV
CHsNHnYVxWXZ8XBbnqUr9BzJ8DRxkQM/kMTLFgbsW1DsuMGT9iAqrHjqMn2lE3HDxiNWPYrknBCI
2YuN4iwyZwKm1B1e6epMy4PjD6bQLp4+PtjnJ+HB8eDYz4zN8CPuf2UvO4cD82rbv6r1gvvSdzDC
g/GrItvKQXCjTs87TJ52AYqy25O2kCDAnROw+D0aL2EJjojbwq8/S0qcpxhymM7e9fagchjKFsH8
GSTJtFawkD6bNjpg8Aq3UQAYp8vK+OSu8Tq9niueZ+MCj4SAbY2AdfvOtPpOhK578kjuQJBajfCY
T445CZ1CCPKVmElORoSDH/CfP+M/T0an065brzKRrPZZc7g/oiakURz1g2hlEwSVUKQQ4Nl6/j+l
b4RVf+a70ww4W/CZpBoMT+efbawjAeAJF/rjKFRBF+QwBjg/3+JFWx0ahZPORSMmbPQwgHaxUfzJ
rcuSoRctJZRt00tsbYFbPpOsjg2YorhPLV+nO0lP1aFxgQhoi3Hu0ona6xjLU1GQYR0vLpEWHlnP
C7pyol45ppYwSWDmTSs7D6AxEERVWsYJDpzLQIdAroiZKBodopVJDcawV4P60jSnDePjQCmBqLtd
qK+qmUe+V7Xt3wSARA9GGPQIfwE478WcegjaGB8d44eGgsGV+YnD/ThOiI+/H3z88YBc26MDZPcT
A3hCJXCENEQFwL5QdixxUEm1xtXK7vYdxh2TLhKbRkKQTv4MKEMI7HJHXltdRCN6pQcfpQp409xM
weY5l4ES8QVfv6KbkhInTme5f9+od9bnmfWgtLoX6do+2XZKww/hVgwYBWgUJUUfcFQbNMst0ccR
I6p2NjG6ZpyuMEOBI8ZzzOAK03RvcnYhe0i/yqzREmCRkYb960eI1w/gH0RNtUE0P8YTK3hHwSxt
bjOJjlFsx/yZNd64gfZ53wAg52LHAFWzqJImRSPxpo4ZfgwIWnyXz/BAlXp++FiWHI2spy59M5Wp
4hGFzlH9SSPNdr1O6x2ve3bSvI8R1ciALEUZnaY7MY8UTWPp2jpCd2w5uN9MoJkCvQ7aLagqE62d
j8d7fUDViZ3blCEDqY7HR5RE3PTeW0MqoKvPngr4eaxHt69k3OCF52VdfZeXDca/YKQon1l9qok3
H+SAVr4JX3XFQ30DxHcxqTYkXc5m3axyPHyaJ44WLi/+P/beZM2NI0sX1LaxqP5608t7XYiKcjjp
AAcpJ5QgFZOiMvkVRekjqZLqBuOCHoBHhGdgojsQgzJVi36JfoRe9LLfpp+mz2SzuQeCUmZV3b75
VYkBwGY7duzYGf5jk3y7QDrFd+FUtyAbrH1ts1DONxvu9Km/3LNfOavosOW0t+cItl7n2w/q9w6T
Jj9Cu0/64u6d6nbae6UeZccZ6e0X33Votlrdtu3ChnWmkHEv3tqR0xtwkIHygNCqK1ZRjSOZi+fl
tqgWwHJJ/zxKPC9W+AyXyQmZzwqdVxvdhouEXBRuNDrvKEu9EKId5sNGRdpIz+E5BcRk4tokz09r
OoG7oEmjIl8cCbP03m1Kdu4+T07MhrmIvPeSjk7AxuBls70qMTICudy65nT0PKTvVgt0gBkOrVgK
0jFutRCOSnW4XxhnEglFRY0rY7qSwjmugpRieF+RZ+92nVyU5caMBWX0ZbEQOVlyy5LvBNWkoeF3
N/pRhpfRguKqYIxbyRYFM3FlaFL0Yg+E3sNK3hV+jjBJ8cd3g6h4sW1tLg7F9ueAAnS/6Vs1A+EG
ZKxsJO+UQY0POXp2plmkC9nJjj6ohOmEz+I+vRDOHW+hxfZRn4OSxJ8QRA6dEAqS506JCrgvPAg/
lvUahY8zy1ZJ2h9+ba7OysES0Xnl3s9yklR4cJF8Y1jmqKJ0qVQG/r4NIATXKfk8+fSx1xjpHR4G
97qRD0nGopC7622H1uQIeMprNA4xarBFoIqw0+5gktSic0oCcXqa40FJhpekST1fX5G0U7lyuNAM
r8jY/U3ttazR+DgAPSTWPJlYqx0mprD2SNfJ4pG1OIihvS/D2MaEm9OxQf4m7bEx/ubc79odDVCR
7hHr84F75O/TeBgp4G6WLuJPRF5RBDAfBEjH/COIRdGuOR4S+way0nCcmhJF2K6l21dsci6aKP/8
EBaxD3uIUMeTLT2CrpE6cMVQGYsNHNZp9wsA3h9VrrrMdYfZcYsq3Dl6n0dPXmR4L5B8dGL0JeVI
25bLJhdjEZyBuqCvxvQs6rUPN/J4OzKjOFZQYAbDlEb62V4j9aWsV0TUdxi3F3IXeywe6UHpsYa0
iIzivi3psAV3tGE1mZBQ6yoduB+9ykJ0UXrffiCp00gtJgFPdGrC/OgwCHrLn24VRena0a2x1CfP
9HprJTj1BgMfp5nrYaVQ/SONt3TgbRd5RGbOGGnwHzBInvSto/Sav/Mw92Jqrc+j23dZUrOit5PQ
4Ihs6g07DwzMI1GUHhIEhj0OLoTNXBxzqPgFCVzcouJ6F2Ti4XNycazXvlEB71EpLsJvvllWrN12
Lks5uuGN13qQSBeHnpEWU8HPnZ0/lTlhb7Dfx21lvWNJ/ThXlT48suIXXev2cWTd6CboGuqXFBhK
ysNgtLovt5VYSx5F/vlirIb1E2qPUwIW6TazJZE21Hx+UsuPB0txGaOgGCaG7nraeqiK3nZa264m
56TaVSUriLd7bpbMdoI3A8vczc6sKSpeaeYlk6R67hz3YkmtF9kvMktNd/40hRdE5tnOqSy9CYZr
5GJfivEolngm/Ngliz/W4A7OMYEp/3Q0poJMQ6gKUd/T1/fxjFO9sWJyNUKVTEXlSQ3dp3r2ebTu
ZruCN1wey6q8klpHrPytGrUX4iQ0JrnbOwI8faVa/vRxFpq03KMZsVJYD4fUf/bgEKrVrryl/jD5
0Jr3w5qyFJ5FRJlCHrvWpSAxnlvZxCWbRNqk0Hj/X374O8uDjhXE7//rD//n/8HRSSoTDUdrbsQB
qVpVQ3Iox5+wDlVGcHEOgoIlZHw7UZ4U9Rmn61HJ2s4X5bX6sEWXiJNidqG/uNmUjfpwVdQYYtPY
HnkHyZffvEy3OnMcjZ09yFTCoGrLaXu1k97sZoZhVfV6d7IwrVNuurWkm5NFUDXIP21TziQgn045
591bwzmXnHzsaGVXhzUkbFVq41vSNH7N8SVoi5IuaPN3G40D8FTWkF0Hv4IDhLEzWk+YdeVyPCe3
T4KRMTSgq446KwQZIOMpy6Q4OQ7RX8KBloVg+AosDCtWGxvZyIKBKa9xV8irtThFz8FNWSOzZF0g
EJWOoaM1qncr0eyNoQsC8h1rG7hDl1aCCFNexmKqqIg1Uto2Cot3t10PVdQPcJj5rlbaSB8vDPa6
WFQ/Ms939IWOfUSHZEw3wJpAkOQvBgxaLKNyVNrRzbeuqU7QwO2VtrS9kZC978kvc4B1mu3cwV+z
GaI+dyPtOSk0M7hXjkKS4t5GWJ34icp+l6Gv95xhfXt+wf6zV6++eTWGVVHY2ugSCrLm2xU7QI0k
BiRsQqjm017I4VRsbRhDMFti8GU5Jcrk3ycSgqaCb6QIQbeRrL+tTtA/+IbCgsggypHeHNmFoQjV
jNHzBvidbug7BF1nFy//pBIQNTn1sqUZ6XlHGO30lYb7x3hNpBOkLn479CTYcSqUgu4+PO/+sqgv
eFQmOHO3WuFBIlt2spmfJDtoibqbFRtCIW2UMkz81ftbRv8FmfiivNkQmgUQKfJZzNCxLU8qhggR
Ql4R3I62nZwBE5zBtm91e3/CLhFPx0T3zdczHEM/06HzPQGCLXH7ZWoqeEeM+Ugcai04R6V8UJfY
WalWZSoRewQGz+F54U9jJfypdgL60R1wkL0Edza7E1ZmEWLAgjgFrTong2c85iI5hemeu0DvTjQh
bNy3tPHOHSAvFTfuULOMp8wqnF8zSWKLPr7kblvOv9DCDd1PKNy4RGOFYzkd8a0kpQZY2REKnMJm
yzwm1s7rYa0JLT6IARAMeeRI8Oel9kwRa6Y2agiPtGMDsqCVI4wBIExyqxHPMs8NDcTCjzdAFkai
BOVD2LswcxXhZ6p0gxhNoKDgsRPt10ntZQGWP8k9ciYMqrpPNjFijtrhg91WZCFXXbWKFwkJQ8e9
8tchMpxTOsZwGbequ5uJ8ymnVZmYdWi38LrdMzD4tDnfbefrq5WNj0058xSwQuT0OZ/apSol+jWT
I/Wn7Znc7DbYVtg+A4FlRuYyDem/Mj8brKy8ixyINne48leXVQ3SBsHsf/uvb569fjOlTJa+3xjc
NXK++Mb3Ms2D0MWOSjoCqMboH/UDGgB321MfaiGKj8w9scAx323wFcCtqcYm6o8oUHJkc02QUNOE
kVEkpYzq9Xo7glc9h5xghxwkFAtFU5HndgBt5stnCFIAe3SJJ3vKkgper2Xd9F1dcR9WgfO4J3T/
FkQcfk5nHY3R7GbnjNCMbwY3RKwvx1KQCmoxSyN6myQZwWcLdfagaCiz9g2FxDCKZznqZz9nFoQI
8zeaBPaFc1jQXRmfAsWU8OtOJ7ufqude8IjG2eKPA3xATPrPH/VzlfdmIpVQTa8wVRDNwohi/Jl6
jD8cJEJJXrW0ns0oMVm2IzyCxDl1F/K7Bd7HsYTXo2mxWq9ulusd3gHfEPn/gVE2ZrtmC09HhaKR
Cw7HxGUG3AiBazQuBKBkxNTdm0Sq8MErxfLnhAfu/Ybgg+yF9uefwp9ILhbxVB833Q1rswnjyhaB
HfQyUzqw8AvExrxsti0wLlbtgXRhxqERS0xyAwQMYRmV0EX68tyM4e1irhPMtCT5JOBgcG4u7iah
XRoZ4KEx/jpmaAFJaCglGfnEFLRGMKYYTntMRIjDIeHwMcyVVZPGavrg6AJqPmdvGqy8ruesi/Za
0c28IShAvLuhBoO82N5Kq+TdOw3T8u6dpIwwfu2NDZjQVGergt4SUHW8uRkjoxi/E8gY3Ywu/5mr
TRp96xX8/F1ystsan5xzzFbN1h87UnyDlpLLUkYv1iFBBYOFefdOepE1ePcuDm+My6WbsE9SQIm8
hwpCeOXYLjx5iIoadAgB0bGPdkCILWe7isVJndUbN1LKHncWGbgZNpPPLS4ydnMjEA/h+TWo7j/K
eSaB9Mez1UfO4PtIPib4RPmYULQIM4KyfANHK6JLtM8WtjJW54lYTo5Jv4VEavIzopMk3NI+M9jx
WGxfKppROZEh2RC1T2XUGRC8vusk1NRaqndKW4nJ2+qivrF//EzAJOGFi0Ce+OwereuzB48fSOEH
qvbofLtcfP5uatcuZij9NNasn5AizFkZrF2ZaZIrW3FZVAtCNZJQXtEpeLAfcsQvqwJm7EIYvXzy
9TOYN4dRvHsnH8lDb0fBriBdmeQhNySNkWIOCiNvhsJ6ZXM7qlRXsjjKoD8c4lYRdlOznfSxO/gw
Go2ytnPq3ZQWipRHX9YdRG8OJkIvE5ijkZ3Cj6jpWZRbrZkFgcX+2h0GdMiHVcNpwkXB37jZkewm
Bm7VePZq/TMD/WL9ZkAv2msrYw1+e2znlzbde1lSbp0nQVOrL827wJ7h1zfMuL61enB5m0mJIh/v
C6S52a/jOMPtZLVC4SG3QraJrxUqZV+cqmkX0cd6ZStWbNYZD76S83j4A2wxgmCE9+pGoavdMkKy
sXA/eLqIXQ3ivjiFKUYE3FZMBu8MeHBvRYSfWZC3JwjmsKguWHktW6ukCkw7kQxYYUa+bjZYUnxR
Bl89f/Fs+s2r6ZfPX6EAhY/y9F6ajTTVTFqISKVWV816JxN9ZJyLgrUsgZRIheWwK0Ljo+1mnjN5
i0hAZ9R7U1WDpo7IXOtDAzXy9F3LbWy1FF54UtZuPk+sdfKZ0BTB4Fd8mqP8aD+OYmu4An7Quuyj
YAjUuYbMlDsbBHlHSEahKSdjm7xibJi91lt8ZZ5J4RUeisdY+LKoK7y7TDnsdExdq3KqjL7yQbwr
tueoVHv3Lse7BaYEdxCsyLt3eE3yL47ETaMf66yOQiSo6/PHrJJAohgKV+D7HQjTCNzviM9cnwI0
zRwaNT7YBKiDiV7xti0E1sJkwTJysgm6wR3g/A++iPzULvN5yw0penJaNiAKAXrvq3VCfEZcJfxX
rVCQqFUeekcERIe2BUMFmgCylhegi3+m3tpPhI20GkI4iLWguDbUEK90Fa09Jys/g0TzG0QylMN2
i/cCrFMjKbC8dPBdb3TKNxvwG0ZtpsTh8N/g5ofC02ouMELBe3M6ha+lg8hLV9ePZqHpC/kdNgoZ
aODWyvXg9s1pgzPwt0IvPX9NmpxltaxmjWC94GsaI+xOyvPissJw5/Wp5icjlgb01k2BOKbLAm/V
P+thpNVqm44RscQomlLWisPX8If5+iedp5AEWDb9kkv3Ger20Apcree23pq1fKwMfPOv3z6bfv/k
1UsNLdi22ffkzRB9hxDEKPJqdjTc1NUlqqYuMSQSeyafgVVi34sR4RTbhXHQv36CFXyi4AsjqqFB
YbjtR5SQVaOSULV0ch8q2tJLsUcaO9IKSKNHKX5Ko4nm0kM59GkiZr+4rKP0cqyDa/VOS+Vdgtja
5H2hX0NJX/XUV5mWT8p2T/xUZVNBrWL/UKVtyeDQpO3OvF/tUE3xPY+1vRiFDBPKx+STqJb6n8ub
lgx+jpI6TI96Yy068tTU9mjsaNppNjzwkudgSa5pbJOiZMRsYi7gpVsuFsC/14iUflWstqyL3Yoa
NwzX04YvGKQTRDS1wlOjkR80xQnGR6yrWSyH0h4U+wGERTRBN582uGFOb/1GzDgJhNjlDutRd5BH
mnyF7IbglusznUWCGysY+oLwjprdZrMA2SC5rT1N05yKorx1AAP1toCbIKM0uLQXxMRuidnYk8y7
Sd3QltZ3lNcbco3hGbBD1VZSFeFDs6U+ofxbspz4+7NpIf7WsU8IuhjCHwP5kgmrSY9Bfo2lN2zK
/4Fo7j8t1fg7qESPkZIWjuDf8MY5YBU2ubA0WzYao9li+Ln1xVSY/RlhmMZtArQtbay2A0zEqgv/
tHPyAwdTjSRPFlO3DMgGMxCZfRoxuFh3O79F9EjVbfsh94IIGiWJGVMmu2bAW+/7NmjJYhxFzzcC
ScsaiVDiFYYjiY6nOno0HaLpeLoXjktUTIn3Z2Qp7PDR+DgS9MdIi+iV3HJTW3BczgulXeRA5xqE
2VmzZ4conem8pWLZN8Ina3yChNZWUhdLIrzvL6T1MCeVkNcOHJPNnEynqxtboY3wDyc3MROfEk0V
gZHeleS51ElfYhNBSB4sg6pcXqqMq4EhPyQUlUnnfrskaonOR/jfY8sNQVQl8GdUDNPAhPsIY5q0
LUGa5u/JMYUms3bhGMsUhsq1jIyZSowo6pH9gV8LtumMq+BfQfnIILCBODHxk8M8fXwuoA2w3ruH
c4li3tCa7czsh21SLZBIiRzjxH4YEjoZy0QM1Es3n7aLFPiEY/dFT3uacE6IJhLIjXg0FF74uC2b
5h4Hta81A/xI1ZfvOOnHKywxXh0vWIQLKgRt04ruJTsFXKQ6j18EcEMPHiTfx+M23EAsglBKiDw4
ZGhs/OYRBRzBN9kvwKT0/G3udPsqOCuhxLR1vUyGCBtzTXwG84zPUbeMi5OpJWl/RAVLFXsMK2aC
q7fHJeGs5PixrKW1mI//KotpmRXvspYM0EgKq+EQjWKcVxtZtFrMn7WM+rpwVtHSfzDUSOQWgmnh
ZaDWYZCGbNLsUaBcwtoIzWeXwaA0AkwZ+NUpbC1PIl20CBl2D6aI34H+JWjfqk9XmFeTrqzImFSq
FWaVLDNmrSNjFbXbMn63R8vqtmhvXOuq/ZHzzRLpgisfDUH6ouc3/EwO7MXiqrhhJpaUq/Xu7Jwh
TYA6B329/f0kiw1lopodDx8fx5Y3S8Ocnfij1jhaHg+tzpRxlxxxhoi5S6nMHY4PiuystgRO7BZj
OtyYqk2bYHx7VcyloUudiBUIAYgHsxVU3zPPXwjV3cypS/TDuYT7RnwOCofn4EsQ0d9OUdNQsWuM
McaXpIWoy0Y9nRA8G+SFUeJkWoJ77WpdzxtOtASfhvzRTyLbKE0fed6QkdAuDvf64iYn04hpEyU2
I2qxGwMMBtZmNb93j0aExkhMTjUj0bOSkNeqUfarkWoratvQ1j9Ns5Et8NWxavZTrc5Spjpac1g0
oDCOgrS45p32+q83Lg52iQzL1LK96iJzChzaTYnQJdzIZmQqbeP+xilPJBl44lmSzKhq8IqrB53X
r+0vT8UJK4i6VGcTcbfqy3LeD58lG8t5M3J6R67noTLQxDiAdW2yEUe4lufwoHXUao+/tTOkRYPu
rN8tN0WPu1hW3Vj7o6hDqUySvTVdsQGNE5T0jLY/1ziw9ZSmNfmyXm9eE7OpXwBz+SMU/UoVCQzW
nqlaTMA6AipiBC5QumFYZMILwJC6dVMpjamyyjkWSzL4Y8iHY9f3TdVWvw45YMWQklF5xjzlMkq7
FCyIoMv1maHhOLmyEVDvz3SQYgqw2fpsVf1Yzs2cOpBWOD6SaB1bO4QrXVI+4QA9TEn17uVJ2/4E
KoicKvmXLhZXtNu1yYaS3b3X9kC5ipj7n3r+dGwn5PzXdAno4PTk/GZzXq7k/TlMFBQ+MEBsAC4A
pwl0WFNOpOopzw5jdlvclICeSipAS+m2LDZaBGRkZKA+uCZ3Nd2E5Jo2nwv1yYLZs1Ht44xnaOY/
NTkaeHb4DtQ+dsjhVupptBTFLj+xBAQTpl3MznVaQRySeBCy0531ZEZfACQf9eJ8zJlkHYO4DW3N
DU1NXKA6mDPPcr0GQtjWNsk6Gz3as0EnyoSbpAWgP/HMfMxnBuRLWiljFlEHImaO5uperidN8TO+
xNKpRq8ORxlLkwXNRI3fddl4lzR2x2NQSOh54lpR5RmPZfkpT9MeWF+jzYF/wcNhff9Ivs8CNbEh
MaTxdHiei0NymtDnYXEyI5GscHX7PPtR13roCe213PjtlIKdHcFXhAo24If74Z0zGOmff3IJxNSP
5vZx2rejB0jCoCPpfO+62UUd7JytMvvBn49YKwBsNpYeygNbsIQjkXxsWOrhMM3200WlFmuxzgG9
/TGHB+ooxsnRYXOc7oW2dqgMOVlM53R9rcVONWdPBw5zUoWUehfPLeOzWlsSUcfq18jENOHq8yM3
HcpmuqL0Ym0wbM7bXhsmkC52pJs4VihjagQt4mS0rjVwS2GOHgfWiDDK4QRxSk8r4sW4SfRKK+mW
wPcx6W1QR2NxkgP9pLkqDdgoY8yslQuqhF5wOnPLYeQWEk8Ofhkit066L+BGQ2uORGdgVoeU4y1b
n92hQ1OZFBITGyBnP/aG1VS2Hd1JKP/c3paW7RWYgptZE5/sJJJw7nnOBEHC5EzwSiToSwLBMCHH
bGsjQWzXVwU+hBXaieTRapgMxO1FsCVG4U3vivrrFXtnx6PF0KHvMfYDXTSxkCxVHfmD/OmVUKOk
pqCZWCtYBq4txAKYWMDCONyG/BAR78LS4bB87qpBCMxBipqlIXfh5Ky6LClLNUWQo+6ir5vpjxwq
e9L4lZfFDelYYNNgfBggFEOi8ZLLYLTHOSZfNnF9yOSXN0N8/aLLGScYx8PNhg+M/8Lv3UcGpUDi
t80lxQE+wUgl9pAhAvmknlOishsVX83BTNi3iwWJHIhzoOlUYcDw1wqTTAkuVBvZKq4CuqKhS95i
fVbNeq5TV13GXUNnu7rmTAydifXcPZdKfP701oxMGRAYTxpWVMT5e6C7jzTCiR80ljydJaHMRrR8
XAU3frqup1DH4qS71bxYzc4pPZ6bWpgcBinl4Mq0GM072JArN+bfdWW3ulyuJVX3EE0qN6ttce2B
BjMcjiTJGo/7WQhl+3GYD8vqHP85GnswsHpKzh4wNg+u+aOgG/ZYHnDFDEXzq5KXh30p2buZ+E+L
NQHDVZAWNXNQjUUiIexVd7IqizBllYgoaiJdyTxthVdYSB4pMkWXdOGkqnLCeYPhH2DW2xRY+IJO
2Kqp5piZD2rcQ1APDI1wtAtUfQRvutnFAH6ERQ+VDex+zyXxboJygz412Y/7x113NHjbhK7duFb/
ZwPgZCcf9m3ujtnYuUqO8NOejiez8CFhzlzNvtt8MpAPNoP4YgRMB5mn/yVIEYvtesDNtqxYVLBX
keFWrmRuhE9T3wIm60dFWruybBsliG7bN75Iafx86xrytlqK64lmtvgEDcV8ASK7hSyCcoZGtph/
0BRTI24ICrVTacKxv3hko6SXSSsxulPBjcfpI0Iz3mJwnQ+kZLaHE4iMGGrk+glKH9xc6nfw/Qgo
gp9tippVkIJaHG+/DDaE3rV9j5QvWh3ZX+zr1nVxJgTrkODmRn4YBOxf1Yg+wclLX1BR+E6bT5ub
hvdn4HYBZdDFN1z36L4xxXuD5LUc3BXfgzcojnRn95B7aGexk9K9CRjYsZ57AJx1bMXl63C9Vfkq
yknH0eBBHjwJ7fFq8cA0e5exS+SRNDjmizKSLFHizmSiRtfu4SqhQrh98lrH/luL0+DaeJPm4+qt
0ebjpX4PmzDHX0GXtlIr/EO0OQ4Rj+blgkhD+M6RFD3udZ43chJTj8PlnC0/Tfg8PF8v5hocziiG
G8v/btTxpLt37+LKi/njVVFBiezRN5BS+zhxUMTQZ2bMyWH9eT85HDjt5hqO7uV6i6jZ+7f88ps3
r5+9+bzf662oKj0D8Q84DpZtAp/plmyeWg/t6ixcR35RUwCCQPHwY5/f17mHdkXEzlE/hJB0C+iK
i4xmOUyOnX6Dl6K3j3a9wRyzAM0w9ixjwGsTumcSZXWF7GlUCzTclsU8ZotEcDdDfOZ0TNFp0Frs
NpOimCpd1H0yGPYPlX9hciRUe0yaSPnPaDQ6pkizaZFDd56TuIVc42OtmIKZs87eDjoIeMZW62Ga
udByIcKTtg86SF8W+BOqrPq89P2sRdshCHqs/lEqFpc3qaJeEwSHFB0C/hLGQ7IpKKaa2Wwfiy9z
qHBZlMWKotMDv5Q4GBvTfJ/pLj53tyZ6haraU7RCIbyn8Av9vQ/xZtaQcLPmkUA7r65zGEWGswC2
oF8pGF9Sq0Cs0kC121eJu+Zz4g39uBfdn1OneQw/DPr8KbLfCsFOoPYMCKwMQNDq9k2fwhepAN2F
IzADAL6NYkjHhI2ajKwxePNbxETSycyzmNhIyH7YOzAc/oFyCWb7rjkzExxCymxIraxwpZ9C1w2L
irznZEdXug51xZ+kq58sCX6+9rDdbGRIgyDqj6OLzB3NRkAXpjfBkfUiAGBAu9VtQ7rLcFz1VjAe
uzNnRB0MIYRK1MOn2Cf1DkJMZkFTCxCyVgRBBHxfgbVxPjiJhYFeG3RqG4VgLvbgF+szqc4Abk6X
E/l3v6N22ixE6a+cVqCtas66dEeBMlU4uVPBDewI13DXDcEZmFr7qg0Qpsjrop+NpturEHfQwcGM
SCsBJIRFISjTxyBh8yTW0kgBGofeUR5ihjXBYMAhOVljuzpHdaJIOuri8rRjxN69IpH8xFDOJjvM
WXB6Y2FN8wQFblp5u/mGEW3MUr5oSHGnu8WCJAQ/h/LNAu/mPnn2d0TV6oLoq3hZ9u23u6SwlWEh
PZCbLoqF5C6FrMMlV3SmJKV7MznzoEZS81ua8zH3xDAayoT+GxO7aqOpsQ8V2amANNn9SEY9kX/v
dG8laqaTABleNL5WjHCxusFE2xFtqYIWR4W7DENBQPffrmIaUwNZyoCeg/7zl2+evXr55AUBh3+u
kMJNAoOW2qeLXYPveKa0f6I3CqPaadJDSx0+msjvbLbQUDT0kgqOJ2c/AOG23s22jBBA+AUEu7w7
ETkZzUM249sby1cpLwn3OXa8fajPqbKjifcY39CN8m/1SvuQOPL6tCbs6vauOVOOXTcKHSxFggGT
rl1mCuNDVbavreRZGaHSF8k9pRy/8v3nNJ139CpFm5pXlsGXB+6XNgpzTBZVQw9UudK/NWALhlJ7
5tpQXAJPJDZ/LwCQvmkJiHCfDhqbBavgF71YrAGFK5lQA8UX4zGfeIDtIIWR4kyqRuxkWjF6QXn1
l+DFOEBlS8QdgIteobzzWp02npYVJEK11AZeTKvsAw3fiPUH1ukxAPlH14aGuy7zNugtZbpRujJ7
aMdBf/eDwUa05zLdOI52E1zGZAcTO7puVy0dAQFPgzvaG4Rls7crjIyv7dTzsu3seySIwUpMNltM
RTWWlMc12dI/VXjn1nOhBaXKwTM3lUfQAyqdQhROfH1rvnkz0h9Hz1eVws+PtxOoNQWthKBPTDuS
oa0tMhoLnmLnusaRamfvCHTdhvWsi8gnTlEkslOVRuRIHk/4M+I9bdfX6k+gA5DDoGj/OItqkwQ0
epDKDYI+OOyaG3OxwF+xz/SWxkBGlvAWrCGZmRc3QPjvdxW+b8RFRhVyGWvpQzma+43cD8ahCoZo
MCAhqeWF/EKpI3SC0sYurNuXsn2MtyIXa+9koUFQhjvY7xJxruj9qtB2bdfrRTMtV3DmKLlQs2d/
5erSLxk77uhmcdfjGJiDIk+6KM+ymQ89UPM2uEPG/ndwcdXoMt+OdkuOHCrZmSdH3YTNiAGTa/xT
dnccwam4Kj04nQ3aPVdbce59QFnOGwn+ulrXF5E2mjVny16UWxVJzQpvTBRnPagFaL1czePswt19
jZGuLDbtUape1h3fUatJBodNxpl4JBHPHhxMkizo0+ucEx8WLaqXYl6hTenE4Thg3+EigS84/+qO
iBviX9RTY+SZ+5c3XErBpE6lD5CLW2pgZ1Tps+QyBqfMtkxpz8oDFA9BP2zGh/OxYoWN1BseNuSR
v4Md0d+kKMG2O43IWikpnK2l1g/Iudenzjq2oMJwiTyyJtl+CLsH/KO4+Sm7mfG8AxKXqEi5rtlS
NIop7bwAWr5v8iRciD6iL1JMAndudYsGnmS5Ri/MFaPKiYlKOg6sBuu6OhM5JcL9W1g55+IxJiOy
Wo07lIhKLaSasxmjnwBF8cU4qzZoqpKtxgjmwWudkqUFd6D23iV4jNnVfOA/juz18LBDrSQTtl8J
PcwjsbEsRCBqRIgYSvCaos+hQHpy2kRnScvWCy94FBswDtWAVNyUhGCAxkrECSSPTwouJMdu7PGk
BCmmbBxQbwNKpRQUFRAoTch9vV97IoIrNQrBWuhp15JbyDTgJFwEaoH3AEb1MvAiWY3mnJCuWmER
R13pArQGCKzWK9qxqYoj+ioZ1+Xp+B20ws6An4no23z+bpQ8d2HOTbQxvVjh/OFVR+6qVjK87XlN
EeXow1ut6wi2qSMEJp+14f4TqKljd0gGCnecRy8vIzj0hRU+ircmsdq5u0/dHjrKPndkvANu9ciJ
1QRi4Ch5Ddgb0oTVOcXF2w/h1v0Mhu+kCXCwWH1W4AK47je3MCh3tyK5L0ZNksKK8xdkHfIg055z
h99lUGjFkinCJuPZjvs1yeJKWb8JxsJtuuumaWy7LBusagjBRjSarrdDm8C3uuVCzlqcihZxb8oa
hEnlaGRlB2Nc6nBKC8Vg5hsVN7EwbsP0VooS56LnANro6RJgcPwsBaNpaUJjDUebOeK4OPZBKjaD
RbE8mRfJ9Rh4pc4fyazYUhRntCfHHY9jBwy5cf3L3UPpKC8sVc1UbXWXn2RA9uS1CIuOTgkRvVXo
hGk1u+dx5ZE7M4KGlIerQ4OoGJtOyTtnGiM8j+iE4HDs7MnJ04jqC61rTsq1Z+YzQ+e2J+ovHSHU
f9D3qo+acpN1tCBT1lTOI9BU3oueCikX6N0WzkXrwD8oeBLGPmdPqJyyZgYwC849HPoYMRVbcOgY
id6WMWiUJP+63nGcC/qr841847pkklCDsUiL5N274fCbb98gBLqKLCNnI9VqH9WyfTt9ysgdSCc+
uoptO+UI7hVHbmPEgdcKrssYK+EmuM8g/Mky1M3XJfN1bseNGa05DRERuVq3WFyLQMG4CnJ6KvK2
uZdx7I7y0OZGNvx/8ObD8sJM2FUdZ9sWIOo4KHeLBXfwbQ7vxZXl17f/zRgferufiFEI00ZiokuQ
BNRmUrxHTCyIyxeGojidlmTFVJXts0ibH3DgWA4Cy1GP/fQs77zMoRpbGjSFPI9w1fe6pvn+zBEQ
v+Drd8+hCI8RGBgckYT48HVena3WdTl5xhkgdSRyzC9W2RZMFIOTT5NbCoqLj5eKNdGqZZBolIO4
eu6rtBTXbkYKDTtD2SrsyGrEZzvutb5HVXterNpxL4T4cGsF9x9859oxcehSVSJTJFzB1yOrCtZ0
xQygZx8J6YIB4M98KVllo+Elaj/b/L33MGRkbZ7idzVdeJvfZruQn//8k/hps/fY+uRP7DomaX4D
JhLJzALFQ3d3F3ucylLDvv2Rww60GwV2z827KkGlqcO4lLY3gDiJo5uHsDdOvBBNIMT9okzZjL4m
S94bRKZWPulR/wPfGz0SfCBNg6SGbnDT8CFg21ut2QZ+9PCbasRNxhF6/JGcGsjtoYkp6NSrs3e3
B3j1n/wJoyF5oX2kDHPB7DMFzr8jbovOyN4nP/zvH330ERwZZZHHBPYX5RxVEu8//mHxdx991FM8
+Cv65Sv4BV2dL6s5hngnVwVlK613GJ+sU8FV9IkqKL9oZn8gCWCgMAhGqDeyXxaYM3qO2AIkLZAn
ikpRKKj8kuGckyguWP8PjGGLaeV3zQ7kK/J312dK/bVu1F9AZOrPZVE35wS/pzzcdquTHQL2lPNp
tR6cwp5rayev/yk5EiAOA6yn/i1P+leiEIVtO53jrp1SBO5qbR/YdYNJfh8PzG/QgRCl5BzZbddf
oe+NHadxyik85FKdF9vC57zi7UM/eb8oTx67uelUmIGDKphFH06KbZzaQp78pkc7MGhghkS4uWc/
PH/z+s2TN9+9nj774emzb988/waTj3zSa41KAApyULQ4xkN5J1azcspY6g8jqvjZebWYT9erKV2f
Ukd/iZQS8YWLZ1m3r1ZHucHjiVeQ30J3dZgUfAv/bdcQu4ZH1RD/4bntlMsNB52bv9zX4PJijj/5
5sxXz978y5MXpp54tA5SBqL0rdKv33z5zXdvIsX5mEaKP3v1Kl4cjrIyUhPropRAa+Y1Lpg2/ERY
nJyPDG2ENvdwLXnUCvzX5clSm7b9tsqBpkNZq6HuwBBb7lGWR1SZY8DDqnKs9m7AtgTVoqC/QrMp
wairXNCGvTKkJKvhTSmrEUQQwirPv1EZi7DRc7okep7HH7LcSaL/CDnho9wmiKwXegxyff4jrP84
tykk88BRT9WblMlzRAy2f3Vi2ZgMF3ESvAZPVBSQ9ZqH4gsQHP5sbeweUZsY2uzsXbeKNWRDg5hi
Zmspw2FTB/c0P8DoN4sHtFVWTF9uMbhZlhuUz/G3LOucBVLcHSaBxaPxqeNei98r8yP0Sh/pENbn
5EQQCmq2u6p4upKT8FS8hGd1gRdMZEoOUdCKxS6bnuU6jSYS32sZiX40W6yb0hudjCz2k9oB/zeg
L/7qUeS7x853vKpmwE4sQbWFwVYwbWYj+EVZT9aYYqXaAuOyXcQozdlNg48DEoJgLbj8QLG5PHno
BpdYpcME4s3o++dfvX7+B3Qa/nJgl81i+61kL2bn37959uprqOzWS+4njx7/dg9niKA5sz5uix1C
uNPGQzft9oKdWsxa/UPy8Po3p76912oCBUx6lVP1ca/9ENvMK61P0myvOH5oYIpim0aPHtVlMQ+d
/kPKNQ10kCg+n4Q/oM+K4g9TV1KM5sTj2s79aK4Kcxn44zXXgWH4QRkJocNoZU9MEcnyFcFCDsxG
5LIDuQxNPRVy6TBz4ut0wy15BZWAIvqdiPFSleC2Bg5O/LxctKcsRLHCZq2Jgn2kmI6E8/guC8qa
1yKZuCujBGtZEidCOCI932HJPEcL5+CYD24hc4zoD/dHDwjdi9y1hAyvY0t86L3v//B36j2K8mxz
ubqavT/44Yv/+6OP8G0Hn4dPxVyA+qJ58hwT6aJKr9hqOPDXuxPlUPY9SJjV6uzpenOTfIst4la+
vlx9/1SawS8TSkZ0XvLO0IJDOfstuW6IzeYEPopLirlV4RE3L+rwuWkiGnoCaMazUcoeBqzq9Q6G
H/6/3kHytCDXcMogAnIgxlQ11XZdo5/oZdWoAB/8fkh4TlBngHmgsJJCZkUIakbvI6cKgb6qnEUF
hnvQY2edJWYwW26HTXFa/rzxC12TiwCTIqzZRpBy5P0kwoR8QpfOqrSeV1iBP+lDslk/W2lmGzkf
u5oOxCVvJJD4cuMfBCgCjcJ/feq+JNK+9B5jqhl8+Ki/b81wqmADcG8QD/kfD5uEk5gO1Bhy3W3u
9ZTZ031q1g+16ugMhF9zTDV7OTbJvGpmwFLQDcWiEqYJpZRhcDhFOYgd8Byz9aJvLOZTg84J9lF5
3lA9glpDT041IQJ4VMQnxjQyXTogfqqxSfL4IXp2lrP1at5IMlP2QyI8LrT/8vMFASTxRURGvIKI
nTtpUSP4e0qz5qe8roAcud679Ga3bSehiFYBySWqI+CVctwlNAlFK/gURoQwsF7TBAeNVK9DNmgC
gWxHhD3hokhhEQGMfoqTdLxseCr0G5Qdc9GFnoiun3OVoOAJiAcXXRIdTW1iDvcgdojDe5QWQalF
qRFPxyoD5BJyNPQobdOW2Xhrj5Ec3Q35gP2w7Tx6X7KWVD5XyWfBDt03vAFOVPwlpybKlIfMv1ou
y3mFgcjPtvXqpnVrLFalR5ebne/dXnb4yOd56uue5kTM0zUzI6EHuMIlJwzlC3Ne9npPXrz45vtn
X06f/vHJK3zz9afJ8MHbt5O/H/3b/cN+coCuklYuqqKxcnBR/lFON9jjN9BoA/cmpW1Cv5uravXJ
Y3G6cfu5D7+O+27n0z9+8xoVY17JJP2ncSqYPJjj41L5DQ/g38nRcYe9EQpginnL0hhLx3cpmj7R
1M+WUKScDfq4VsP3iXajt1Qnl8oxyGkkHQlS7KWC9x6l2dH4sRXlgi7gcnhCEP1LhTzUzIpNSYkU
4MW5La+VaIp/Clgkfau04zB6kPRGzvojQDqtv4XP69c36Lh/jyExb9/+feroorGQjImkeoS3nZ4g
HiIQRENt5Ax7X8p3E2fzlIIfju5MBd1a01EqlFHVFIvVbuk/GgIANSws0W+my1vqyFQ0kEJoW6ap
wax4UoZVgFx3vrnI6Nnxfgevf4T2h2v7RF3Z5TUcCrjDajhyILqe7ar5OrkafaHEqO0a2VvFco/y
HRs/QD+jR2bvsBxlDEP7heWhdr5GuxLUF7cq+EuR1YNUt8AD5ccQzkNMzHxmG5WLB3N9UUez9YJg
YR2YyUF0d7n/8IT6QKvQX7S+jNxrwkdvDj0z1OBx7OKVQdwtIw42/Xn/U0IevFXwifL7gvzd8eEw
Up8toKjfk6nefQrh+kptX6A3HsBNidc87JPr1wfz6AJ4gOOBK0a/sbyMsnhZb28GUpvNy+q/fSxE
8iA+OeANR4+7fnaroGx5ienkwKgWLBs9zUG1mi12c/7lcgin86Ss2x1H1NCtns+L5rxVRscfB3ZN
G3+hvLIQy/x4d5BrCOpD3NadlFW0ELQIyZMkhXGnSYB8zi+geYU+Mmhohd603DsKfNs4YR6uwkV5
g7mhrPARGsFNclnU1XrH/fLZGo973iV+vt1u4ODjkRoBuT3AW/oBVniAhx4zq3gV/tLywPuLgxcY
9RBo+99fVAvKI+QuNblfG5rxTtgN1II9Syex1smfUDPJsTfTKWyIkA18oNM6nWZ2YRGPL644IAu3
2Tzr3JLFjlinKoofVVn820Y8qtmCr6aXO5NFFdDWdrM/uUFvSC/6p69a0dWcNqAJN5dUqn4iH5mL
q3EY/WvVl0KUsgOaitaJc1SdkJNDKgTannkrHLCOV8LmhFcPnhvixDdwx+Quk2/P2Ia1ceS0mhH4
6K3WDMPfThqMURrVk29Z6AIZ0T9BF1dHZnXR4QVmwqV6pogzMNm7NFe0kLWVBOauiMhxECYztt3t
OHBREWaHYvbhQPXToHpig//odWx4azp2pr01r6rKxXLyJ/cuEgpmRot/BqhKWysGh/ia0j0wh6Ug
uNROMO9i3aP2i3yQ8TYsagpFMzySI6y4Wcx3xMMs5x6fZhkLUyGhWzLmtVCDcIh55DLcG3FbFo9l
j5Mzj3Zr3JFB/+Ls+UOZ889lzZ2MmTF9LEcN3q6Jc+WLTBoeBxtsBDdCpzzyz7KVTDN4VqlnOhXB
tKRySmNmGzctOIxFQdbDoI/Gw0fHWSRvsm5ZH9vOprkxbGsPy5v22pwkul6ohBCHTZp+TSD+yO5i
yooKBxrD8fcJQF1X5nMaJv2Jj/m2pjBPgLklj6rxcVStolbVuSxaEjJaq9t6l4T7hRfJrQ0G90z3
xPnqfAldyM35JcXXk5IUd1DuTHd4VrYvS/ot35tUKpjVLS59b2uOb9CoyijaGBTYpCxm5wmh1cQk
bzLtk3xNyXPQSZK6wpb8+Q2Mun3CI6IPiBQV+yFzHhGrsnUyMhSFa6gbMdXpEIo8HwGw0u8QFMTp
VTXQhhNKz6eE80yU2BSuvcQntztJdS/gKKgdO9k1B2zzOlPtFYrw8YiO1gDlAKvOcmfb7uApODhC
p27mYZoh+u7iFutQ/NR5CcFL10f7gKVReUIcsTjG6bLAxOFKumrDuElLFQ9PTaOVbQ/vJVvFerVF
g5YsOcfXq7eqGwfrxGBOVSfRE+QFwinDVezxSgYt9MRSnXKSFrdrNUq7c2owBvyFJa3sQtWPZazf
V2YNsIjOUy9QhPaikFm09b3M6B8jbMT0uiSVd3e3ZKfht6/I8VjJHsitnVI3PVFxNecUw06IIwxG
Z7Mx1kQq2LPlPDz4nrISy4gX6oHx3FsW10CO9swOPKqCEtVytzRmLlY44LyohSYZ2KyKQ8L4F6OU
OOBx6UyuSpl+4AYAkP1Tw99LyhSCouEFskZoGoQBDuhIkR8VayHmU/3gPHBugkt7BYDe2Zas5ta1
DpbVeajXQqOZbhyaOjB6okB9xL7sUZ6h1UgyclP+QHs9Su7vq2JxgX6SKJVoq+MQB6cuKzYX6hYY
lPORt4JaNX/gclNnXWluDrlkWVgJIW5I28JZiyIFTEojL9j0wAtxGzz/hm75PPE0/yM7jOkgblQ7
aDNIHriyhO7DArqZlwzYimt9pXY7cXabA0DoommlLsG4R81rWTdKi6k+Z66zOyYF8hhLlweVwZzF
dRS+cYFUIEJ4GsussbnhNMOjloQoDgSCnDvEJWpsuyudGefHfcdtBqzSIGXdo3z5zbOXb1qH6WId
d4ANWQYGZxbIjH+5RcfW0p8/H2eEEv71M8f4cwYVpwUrB/imntKtyMZZdSorbUSv/TdT7J0UqsTM
aWSUHj5wqpOe8RCoTU86RRqD99G9K7ZqZL32MOG8H/7r8HA5PJy/Ofzj+PDr8eHrvmtaw2rLC6pk
2tNOKN+CrPICVuXLarYdWOFnBF6T4LeY5pVMsCgTn5bwRCgblSFygH7Bry9XyqdLxbPBXbkofqwW
Nx1ZQlgEvShv7PBKVmiQetYpfIT4MnSXmLhLVfU4QH/WnNl+WyAKybZc6iahutWpEh+l81jhnhsd
vWtB23AEUU4TJMKo04g10ybsLIg9c0VXPvXXFC5SKkHCN4zHDLMKBuTF0+mTFy8mT5O056KDoOke
c+itQPxDS99udUGy0XpOCEbNenFZmlckCgUgjirLCH71frcmD7WqaYBCes9fvHj2hycvtNU/vZf8
JXmbPEjGyWfJ58kXydtt8naVvL1+eIL/mSVv61QpcDBlNTs1o5kLdtxpjCflfKUcPrlG1nv++vvn
L7/85vvXAodn+wzI0vQw2cGU7LzTedVckDsMauY2yFTr9L/DU2v44/Hb8du32RdH/318fB8t2FDk
eWbbq+n6t/AyMNPuGXlTOgM8Ei1Gs1Gigy1LwVz1iC3DNTel5paO0wDiypvDiB7yg2Zzmwk0pY1E
BaYk3NwWFPg3Wy/QMjfOpCuyq4ultNm4NnX8WmVBJuXsiPxVCB/aqiazuG1Aat20IwgG3kt6vRQt
tPgDxc0z696eT7frKSPMqpyR83mxtUOcgy3q3gKqb2eeuvSzWlLV9LD5p8OGxtRscl1Wws0XqqFI
rT8+e/Klquew6mbD04JTNUXP04CqeJ4y7mDiFvo1HsKSvU3QXwMaXFQnI/q2g9JY/zNpIScfxlcP
hv8wLh5v36KPxwOXTKmN0Vm93m0GQXJJ1VL64LCRNXXLRxq/HdOdpivDPsL80m6b2djW04Yilx6V
3Y7aXrtwR0HRuMWIyEzaEBJ/FxJTtDeHlKSmQ04kyY0fPHAbzyzPhCc7IJ4ga5XwATh7pFDaESIT
55vWfgnGQ7vjht81Za1QJJoGjdo5e59OsVHOG2ASTF+WUXxb1Qg6psif/nXPbdOx4D/dAqZLyjWj
PvQ8NEk9DMIz1p8srUlxUcLLTcCAfWF2Z+OzqJEauu2z31M/zZxsbFdzIynw2Dur1OU2iGjdhbjQ
UEwpptPhUA1m0gfhk0hh50OXEJ3iaLraUSM07XAdryGdecYsdVerq/UQiwypdBpvydqO7qZWQ6to
GkhPqZVhPdvby/szOSma/iaHTTIajUwCOk3oGfpFXk9PFkwLjiTxtrk3eDu/n9G/r+9nyWB0Dy9Y
cxydoIYOb6FN6BIEMtppSe41nPvtgau5W5M/5hUHU8AB31R25rfnhDqqtHJJUy2rBdzahFla6qBV
8vECyUoLf245SxtKc9BGXOx5tqgwF63jRs6uSyyquTYAdMugnBRXMw0/kxMjiiWJD106oG7WgrPB
LQIbWvgBcVCOfzQaLBQToWDrm5LLu8bomXBnaYskdKdU1eLjiFYVrpTdJV1JzA1yP5c3GSo/SPXC
+CioegIB3owM1guJZy7r8NfAmmHcCbTbmYHCIwQd023meaKJExn+pN+5l/GWrlltAIeTeMvkYca2
Cqc9pQ1zndk6jGq2PsFaHAuiBSsA59Y/avjm21z0qlMHnwvamEIpVT3+2pQYG0vtHglJY90tKZTx
zwNxKxyob0TZ7Hg1hQivuq3czoAzRTsq70o02RLV6H2Yq+H3TxN8GDt784GWAq2f3C0VwBVwDc/8
salmFzp1PKM20+6qJFtt9g691RTHjRKV1SPwv8he2/h5ErWlnD21Fv77p0Ocvmtn6dhwBX4lx1Q6
VpvsEHh3OlQK2xsc1hmxBsd7MjlIcrOve1+iyoBsHWyrKkpWyDJaRCsVncmiaHtUju18YG8P1ht5
0pvdPf4OUzY0ZSGYZAFUF+Nrrxi7mCGx8aFrCWU2vjQvlT/BrOWo4CiofzZgW2P0xucbuAncMsWr
lqV9KHQcYIxxH3KAkLJYmYZ/aZuxaRF9kXAGLoyBhpdMlRTotOrQiu04jvE0p9W1xC4mrLlChOoC
weOuMCwNPSCIeV4RkiLqKq3AZGL7E1vrldxP+kmfBbpFB/JhVMV8UWJQEurBvn72+vWTPzx7HTqu
YBpkFlFKhp4bRbV4ggkgZY7gd/QDTJ+GDXLUXCQExOefnXHjmPkIRhb3LAkHgmXv4JqCmaW9Rnqh
zj1qybprDgq0VFL0cIk6/BH6UNShSxaXGrHyHZ8EBMCYZv6D2pV6PLvAwE0PqXU0duP9Z48fwv9+
N+7/7LYx1sEZNxnu2QqiRp5YKOLuith1MNliMb+5a92rR7+CqTwe71uhz8dPnN7nVV1iorcbtRLZ
7Uvx7Ifnr2NLwSkyfCfRne0DcVWRujISpYe3JP+Mrwx2//ju1YtIgk3NxFMuD1LTEbR1bPFQkrrX
diCovCxA7olBXKryDMuOcgmMgZiPku6dYUThprBW9MayzOl2rLJr6U0Io/gy5o2HFNYdjRVzzqL/
ecFt5NOVPhp9EnN9xmFiCB1pmroSSFan7e12NHuISKpBAGqcO9nXajqs0RsbxQp2IosVIgGlhUpm
61TSeBgCkcSATB5IFCnp1zp1VEgoXM9+Xt/wpappVsd9JAPa1+HnCTaduRRE6fHwHofJ0QCOfW1I
qyIEq6pp9wNNSL91GXboYY6VbQQbC69OfI0If7GcpFepN3Uqqz2SSGLmE0LZFgisBQ5L5LwakZm7
czqy5XVxrbjVrw+lCs232C9PFBBKvu52lPJ83SyRXkbAfYsewvJ3s54mhJgG5QMJVrDQ5MCH2OSc
8A1VJAxqAiXgueqFSHABu5cw63R7dfp5eeF6K0ST4grwm7XiHCAVWXsuykH+BUuQQ5THlAc+31rJ
4ORGIzkXLmZKQZQCBwFmgN70ypO+COKgyB2Et5cOYerHUK10zADyT+CGMxgGnsETDOMytEG7GnXI
3KgdIpnyXpiUL445Y/0izK+cB4Dpug9MAjKIwmNvQqBCDidCPxjEgYo+ejYtJKFpbhMhuE5i2qj8
s337O3fYeshGPX6hfH/i51Rki3+IH1ihGvLIpYn7nrKdXrL2hgVE3pJNkJ5UKRVOs273W8n8E3vD
Y9T6eleTIlLUrKFsQj1BSTvTn5hz1WUzmzzKmWYnjwIGhyXlpKBIYBMzyHDlCEMQZymMAq0XlXvA
GPJ6Ti9WlSwPPy6uipuG/cIH6hm2PnVllBWUXdzgnUbh/OWyWG2rWYs3syiMYCQ5aRDwRYd3lgwf
ryT4jQe5uOnHTQbeIYoAVR+wz/R8zsiK2NGgWN0sYZJfAHemJHPcpcs9g3wlmYGH6gD4OF0UEbGO
NsozF2JByxhBRYJMkUQJ3C+c6HtUyRZRQXQQksDUzOU2SMK8Yf5FSncqMXIYWAu6ANU7bBRGjfLm
zzlQnnvKHOJEc0bXSLiE8Ra1BpTsMyKiCVmlu4wM9u8idg4Rp6SgXxMEIVzskMyyZK1AbRo4pNCT
I24ZNDQjEGELaRaEBwmRBiAdBzpxIaKGsAcmv4x0Vvc98w+VnORpRRRN8xBDiq393K3a5r9b8Qoo
v9XFDS0JNXTrpLnZ+LQRYNGOkIQK4zT7JVZBg0sOoI+jT8fOWw3x1nabuNaU2eHqhmbX8POsdZcZ
+Ko5Jz+8E/Rcv2ZM6w3yto8//viWNJ+y5H7CGl82ayxvdoZeU89Mehw0k4fM5R9SnBOlqm8cGc0S
ZUEYXixKktyz5DWDuolOWuuGQwf8AxVbCDt0gmn3gM0PT2AZKc6QvjnfLhcHGL8/Ox9+MkSE/OGn
o09Gj6w27P89fvzwEf/x6HeP1Zd/2i0J4MPKmUFL3HMjbHmGt9mjcGvkmoDtoAesLF6W9LutYP31
SvdDr60GM+L1u679g0ejxwqUphmbUaK2bjjki3Kov/V9YK3Cqften/lyycwpEwNonHGfzqWY9vzU
iiptDL4skZVhIEpjXC/kX3PfH2gIzGD9D4JJxGbsqC6YcD21BX9J9XddU7QKWs0GRwzuBCwiyIXD
S7gSrpeLhNwCeHh8OYhaO54OVPrKWfbQ0/GA+yOMj0xDH6TcjIz7bz9iIiVMWqVwH79/+tqwnmyE
jJE1ywNGjnSUcxGytNv64esXd2pORQ3oNuw3/OmppVWJqNp0bB4W9d/t7HBwVqAh0kQvoF5sII9K
PyRcPBcohAk7axFYYwo70b7FwZVt5VJ/WCdae5V13684K61u6lKFkuNIJ4gKBaThAmEYoPhYwBMA
dpOcQbzjOlDLlZM0gcDCkqY0DpBhDZ4GTuNx/GgaTsRoLRqVwTYJ9helBfTepS+c9yIwLynkGSiV
EjQjpFm/IlmocnijqG5kAhSzv9otSwz4D0/ZjyDHmJHlpn8fF2hJfjrisyOujrpiFssGt9wHCkMy
X1BOtEMKL0O5jh2DNF+XgNvb3T365fUGbmsQXNgPj+yyvBhBjsNLlSMEk6ss2d2xCR45uI+KlAfV
aktqVCeRbSzwGJNrsU8tLhmTlO/nNLr3lL7fliZwK2HPp5H4Tn/5zZsnL15k1rMHKwiLWDZnkzSV
N3Hw/qEeSUug0OUo3s6+R6VUExEDq+RsB5w9IWslvWu1XDhfU8bcLfocn8MT+YuPv+h53F56Hy4x
jXVfvV6Gi/UZu6w2ZzHnvTx4RQQSA7Z/HzpIhi/T3t7sP5I6dcH+jeQYQObePTOQkvzqCv3hKVHw
xmrj5bBA2aj6BIlqabxt3XjbpnRTTUoGmZjeCJ8xTvQtsncOsqDXXRCuSP3skVQjWD9U/81ZlZRS
B55eSOnFImmBLJ4p00vV1FAXgCFV/KTdZNq432nonUfAq2+NnD4Llqp9hRhzwxv3mR537PQTtgWG
8uIrEZavqBZ4hlbllZu/GVsCWmwfJ/xYbsufN1Ro4xcaqpX3lF5obVfvEtglvXFPvWhwJEj9DUdG
jXrP6WWAsgT7OZOC2pJzdGCVahYEe8Z5JrXWjvAY6BfJi9T95IisEIkKw1dx5SZ6pEw1y6SQdRyT
XoemG2C+pTuvL3XhI1CGxPS8XbWUOdI5cE2EF/129Gh8fBybghO6xuPmG97WY8EbqXtzLyl1rnJJ
Qe/I1ZkSrDQ1Vt5mzjESqmepMwN1dWyL7BzWogik3p09iq52S820847+HwPi7n8i3N0BSInsRi71
2A68nmUTiYPIooP42qvfYkXV5Tospj5iS6ud8T8pdssee2AMV9bkf4GlJY8rHYbDDtCPWlZ1lWAY
L96Vu9kW7bksX18SlCvmCZzbAUBRd1TVB5uZtAw6UuJKFnoynK73cNOTZ5TD+7Bq2hYMvofuZj/n
NKXKHBmnqm/FtEwb77p7aINcu19at/9Yzj21gFFZo+jLAMg4pxk2uk/U60X/l+7d9d4yD6nX//Iy
eTT6hOJGZI/WK5U+xkrHLUlnBozXAY8nfPt67QkZPvwYrT5rWNmTasvxx5wn7apE2+IOg5LXqrNK
deu1haITZ34ajQJ/Ka6hxQx0T0pjjnGG8JRPouV9WCTaPKkNDun+bnL2mnMfWcyfX+LqdUTQQJX1
5vuU3PZqIJLiBJGZgWshyCFmToERr68aOsuUlZPignCByD0Mnr+BD8Oe4N52ZA2ecWJsH/uc7e4k
2G9b3nE/ud95R/ZR3wojkFgWParcG5OnK/HfyhIj0QsesvCdMUAK5Ieku6yUE2Gzrredqs2mfL8r
VzOCUEJO0lhYktIoZ+RQMPwV+kJj8g5U9bHdX2n/TO4PHhaqcehpsvLjwmbn62pWtl9iVnwHzIXe
qH50boWeihKN9tXLr/HRX1L+1MzTruxW5Lmj/HVAtMEx0WXyArfgWwsyxYEHwbTulys70tl3M8Ga
GuUQiRKVh5ZhgR9Ojl4SXxHm4q2CUA21kdx5FqoC9r91QyUhTYfw8ZIB+SzDIsJ44B/4OuYVRASh
0AkDrwGgLXqlUjGmtSyW89xSunOiW9aoke9enbZe/1gWdYvIo7VcxZ6LWDfqNKfbN3BynuvebeCv
8QgauxaWj2IQuaqdFiiiCN5IzCeOgl1szJAo1I8CCwjFX+xK86z9MYQ0mJbqvgPkZuAxzNz3u87u
Ai70N5GVomJSmn08aRVO2sbrNH63+/gDegoFnj3BmNg55czYuZwsxvgZjjY6ecFwT+BdEjgIRs08
L9ZnzyQXjSDreCBtPd2TSoJGHwROX5Tvxkymg3op9yzbxmRsqr6ToMmvy17LiMoi0/AUXNhAZMww
1TWjmwqMEHoHi65l7qq3xI3McgfLEk6Je0ECZEMXXC3LALJEtbHDC9RiTBJnYcjrGrlxnxzZxbGe
f3dq40JMEmtJWmqWlnM9mw1VxxNVUpDruMlJEgsngV/XGxxsv9+pANLF0OjYjEXO0Z1q+nLyv+D2
SD21WTSP4SXNwuqSI6qggUs3wsq63wkU6ATEQpRJtKhvdg3uQemrJeLNtIXX7srPZ5hwoNUGr5VP
BtaA7rc4p8T/l7L31Jn4EShLfKIhC+7Umlr53F7OXE82v1tjLbFxbFyyZ3yHWewxyCSS/YJEDlxq
3C+d7nD0LV3qGGAYZZy8YxO7wvNvn7WWhV3ds+x5uVgwHIj+PfPyluYmNykNHHV/SxA4UfU4yMIk
p3aE8naNuYAGuqEbcqsWxgYi+RplZzsmE+TWar5e5s+uYc3oVsSnAWV/hP0YdMYalnhdSgMjCmJ8
zT4T3H3gb2L6uA0baSVmAcWbb7HLs/x7plOZAc/FvMJ8H5HbMF0CTxEIc0RwmC9BfovAIqhGRiv4
/c3NhmCx9ZfPXjz7GkSS6ctvvnwWRTS3DM3qZhio2tmtCuz/vwDk7pvKxhO53TeKjcNscs8qNx5u
EOEF2Go1SQap0vyneUou1Wi1huU7XVQztASmu5Vc0vhB+Sml4TFO2aRHxdAYNDUNYyPk4kp/kuPT
tLgsqgVChMWaqlaoxsDmsAbiUi6rhmzN+Fn82VNGWLjgv8TsPg9DbrNeGzqRQrxQLkn0fjEf6PKq
Y4AjIw/kY69Eo9w2sgb6I0TNIB7Df/TiqQ2opNo9/yFjw0aw3fnI9qctFgsrjIp0FSy1eWahuUnP
epf+FV6+AMFx8piLqyP88jjkCtisepWfBUPPWgKTj7AKKmkeOWHv89FFeePHQsEEPTvGCL8LA1gW
Cp8aFRisemxmaJYFYVe0jijylGmjQiYewzu2QKH2pNxelXCFaoQqFXB5INiW5/BYucScqPikJi0a
J5Qjay+3UXF1ZUfGnkhFukq3Cje75EDCEzbUwe/NGnPsAEut14jaPx4YjxztvechD91H/5u/DDP6
6/V9+nd0/wv498+P858UEJEiFsvRD1ON5+TU90HHJbDdKF6k/ZnRd5tSqKeWw63DNqMOjt6I1GDU
OMw2C8Phs+fkQ6f7Ed2zYA9wBLaFehzz+8LCSnkckmiQD5C2T3J3igyEG8+ZHcgLIQgfIcM7XuP4
89H4t8ds0T76rZf84kDeb7P1Yrd0XetnD/PZo3z2OJ99ks8+zWe/yq9/nc9+g3I99uA2g5mf7qXK
0u779KOMyMOnqv2cUrcNOGaFoHOarfoS//aU0wgO+RDbTr/44XlEfXy6konKwjMdPWpTLkBbqLD/
oiUXh+bJhjLYtnYKT43ipJk8yuLKAE1eI7mmlLDi4xs5BhkZzQ93GI3RJLbqsq3SnoXQzKIdHIq0
klYToW4yMml1p99l1s//ensgt7s/mvbT5tKsGiVS3b99nBIA6ac05tdphLwlDct6q7PQl3Px36zL
WVldolIUyF0O7eyhN5KlxZJGFgMWzzg+FPt5kOK4f0MjvdeyunResMlo7qJf8hx4MtptpNHK/dT7
Ac+4q7jzvbnHXZpBh4Wzq2qzVdwaagLDUEvyiwxObhsrviTNks9b1YksOlAII9nOMRYa7uv5mtxI
R6MRhracF5sGDZlXxQp/bWmo2fL9viQt3ra0LakU2CgzgXskxwTJdXV2vm1pC5Vt1ZbUZqzX2643
wwXIIwsTNoP+ghJJeVXNypaWBmu0WkF3ql6eqG/gTVovYX0S/U6gUJyspSUTZ0ojAnGKDMmSD7Tx
4nnutpcHyUVZoqvfjR8NEHfQ9oHZxVNbXc7ZXjrgQPDI+Zi2uF3f9XAeiDJUioo6tBe/Gb+O8I1Y
fXyZ4j2CWSTnaD1m33Inqphz6smOquc0knPoq24xDvXm62IY9j3yhBn0J/ThfpqMuxonOt235S/T
zrbksbpva0+7W1Pv5X2b+7fu5uwH775NftzdpHlR79vgq+4G1Xv71uYIV/xhu9TsiF/KHtDZaPQg
/sx7HOf9qPUQWWN0VBtd41QBfIRstsZHIMbuMQyqjtvjOINgJI9pJC/4cPyKPvxz97BYEdI1nm7x
4g6XfxwzFVs2PO0W0vH1I3FOEtWWxPiCpzuJ3PFGgBjvKftw5+bD7a+9MPyNZDb9kkZFO7pkzBhl
d2B+0QjAdsSd+kOOzC//KudLL8WW0mQAXSuIPe3QtWWnSEJ+2GaM7VMkjX6+R1/rTPf4tNZBjbkD
y9KcY7A7iRtjEiOsqnTrGBnAuBzmZLgioYPKnO4W/DuOtjq1YQbPS4ZeuirIIZnEEwoP0g8dEMjs
6EIUQtZ2E/OyWGi/FTK0UioLHDwsBz1QKL/FNhnyzxTOhXKW1YiJtMXzU9S2+CTRygUKhDAPS4yy
DUpGolqvWFEkxl1Le9Ks1QCTU+iDlCkVjv+vrz1RJpLk7jaS+XrWYiJBatzbQHK7W0Ig9GEAjh3Y
tkM3eoqGhjGhTeiZRFH+/uZNcYbpOfVTxUUml4pt4bMeG+HCmJQV+3iism6SF79vyaGjg6aRckGK
qdZxUaE0wIgi8VIa8HojLGIvbImGWy7cOpHermZDLgvPrYfuKhM1KxJTDXpdY5k0vDzsqhO0E2hX
jsgl1So8i09W/G17d/1O9IVBwow3XP063G+st+h/2nU/9vzi2p/9ND8foPXZey2UVeavsG0tKqEP
H6oxL/01RruXqN2uwpKskvFjFOEX8ZPEtrxEwH2tH+AmjCeexl7Th6H9TEti6Rfhj1r2iv1ImIgT
PyF0ZEf66jXRj/lFNs0eDwDRsysetl27V5BZcoqBv53dccGQ1ylRWtqJi5N6yVSx/XYxXNS04/cp
31t2F7HLi8q2dISgarqtcatamZI/ET60LgzLYnkMjDt9s2U692Gs5EBA13HcwBclmGAYt0wZW6GN
e9s5rL3GTyPyRt67O7voRVQy1hFAd2ZtLsczG1fXDCJG92r+9o4KnOAWhnEooy7afsNORDYdO84A
YTHn8rZs/mFJpS/hgsbbINY1civFztwCP1FskVnG3FpSf8m3lF1RWZoNt86d6WdBpc7HL8V53yb4
UKGQXdt1xWY+4NnmesGzD9dE/Kd7pNu6OeYC+I8f7ALEN1+UlMW2EXFU4Z6gM+RyTRrz07UX8Ky2
prmV7dsth5tmGoqsnSVJm3Kx66N25OW6W2COsGy7PpGN9tvRPCQb/1wTjx8CerHP6jGGXOA9Vq4G
0kL2AUqsX1LB4gdVjdtcgyTYykbPQieysQ979t2rF2MVkIwZMht46l+MVuUWMdgeYDAVBSZva+CG
D+ZVs7W+c1t6hZRXEev+7rvnX46T0/nD+W9OTh8P56cnvx4+/OTRw+Fv5588Gp78ppydlr/7dVHM
C6e+GNKSx49+ZeO54Q2X/HMFkzW3g/Xza7hk5rtFORZVifXTC/RveypXyBM6tzDZzUVbERgC9v7w
YVuBL4HkoMTDh58MYTaPfwN/jj/9ZPzo0+T+Q6iWDL5GTQ98/w1cZljM9j/+lvEVqrLhRr8jCp6r
9h7BEiWPPh1/+pvxp7912oPvX64vpb0uPyflC6KiBH95bxCT19X1fEjHKTo++GWhEPxXGyc1tEyC
h907aKpV+jdqIJ6rfBCXjgDWAnpI6PTzoxTzD+2JIcPaFsfG9rIlPqPvKct9RU2etFYVFX7od8f5
q3HMKKvhp/RYJRGX0FzSIhKYMkpZTslb1sPYnqGWlt+Ps/1WxmqCdGjxdMUOQC10Q+oaP7cx+bra
uYXJP9bRTaXomCqCGsE2oBopMiBGf5hPnbl5dY9bW5aXRVvjWHKqb323Yal63NY0SfBtDS8lGzZn
7b6a4X1PzrpuH9TGcQSjR6pbbd1LHj2k/31AArDpFEFTOFMcldPf2LnFrVG62cWNR3ED7QHPoOx7
qOaG62AGD4jv3jw1TsSoVS5Qt/ABTJRRzpRfSorugEP5/wT+fyz/nyWDo/vDY/prdA/4jJOoPPRe
Cc3qUoE93Tyks7bM59zNjxhoE5jOD9CIhi2I8KdLElA84iblTm5sC9ELFu/uWdSTeBZ1DM5YzYua
6Ods6WZSV8lBY3g6VzOUWLoz+vGN012mLq9dt86+dSOuV0lKTpzjfhaQlos2JMHDw89t9ByDNKSJ
zcDyGDie8GZEkriWXPXYiblVOeCfWrGyVFUrdfVRqM/goRVzwTh8QqKuC0bcbep2nw6BtBqSd2PU
uUOYryE6W2S3/LxxEzsUgdiRcqq3QjCsRZCMYYhn9PDYAVSGd66vxZfWvKWKXuu6Zx08LF8Emfx0
SaD2JToTnReXJSdTUuhVQEsfW9DduKNHvAgoODh4S8p8pFt1jgtV7fHJMDYhRiE5Ojb56umbgLXS
t1q8T6DqaI6WLWpIGY7c32m/a1Rsw7BUSWM56plwf8lqdhQxYB17Rx5HIU8HFbnS+mTQES3jXovk
oCNm2rSBrhFosaHQRS9uRzfSGbCDVd1oHfqmO1THqfiSyA81yXxZx1+UbvAC17aCitrVfaQFjvS2
bM5autLlTfvteju+3Zuzuw2qXb0caTeipWybFMkiLb6DdJE//M3w8e/ewEX+8FfjR49Gv/rdb3/9
yW/+W7SCXFh3nxgnnmHdCkslxaaeOjLJ3hMipIEukpDwJI8bBhEgcQqn/lrJ21ekBaS+2YPUWwes
mCi+9jlSjZrLsn1TZ6afvVAhd+iFAfKEuGAcNqTSgn8/DyM4FafI7ROVmz3DWK73f//D//rRRx/B
M2FarUdNcVriQN4f/vD//C8ffSQMb3PTk7+am6bXkwBIGjHdrpsa81Nv6kV14odI1jdck5RgtcRU
YUErQzX0+Yqd/emXEX6wxI1md8IF16fUF/3OuYEWFQGyCki/hJOruD0KILCuK4y7Xs0Wuzm5PZjE
8+SQKykDGr7958l8V6uckiCjLdwU0wwBuFHBRddtUjzWlGyx9tzwyS3bgJV7TptTDD4FulNtA+Wi
md9RGqFbw2yLqRhQMUROJfWaYVpAGKvXVwluwLxxsCRowLuQq/fTvtIV71rxCCJhr/30sEn7GRDd
LrTEpf30gxrl7J+xRjvAEroHpxUcMFGQ6ftvUzttmdDv4PpozFy4uOZnhvtERW1jg97abiFvMVHi
K64HD3NJbqvLDT/JHjx47HLCP5nSfuFh5b0wrVFWx8n95PoIB3SdDf80Pg6LNlIqHY1GlM3uiIeP
pQNC9ajPycocJb82Cre5gkvWdilJ/wZfUOM+MG+gJjlI3mDQHBzDy2JVLRYFDZMOMyE3mDxhhgmg
31IhfmfB4mDXg+sALNegl2hSAl5XXo87kk7GMnZQfnkMhD4J3Ih8tJSpyOLKfkRV0yl9PZ3aAa7Z
7bgvqhtSV6M3Ugs2S9fsWnBKIrP0etQDjSZP/uzosBnguc4Uc61WwpyPEX1AnM5gCx9eH15/Hs9u
oNYr1/0C/Rjfl5Fatjyp5nAwsu6UkuFxhjsjIrrGDjSUjB3ntiNNxf0DfaeT6p5WCl6XWxozgueJ
9DJ5/OlD69Y0+GA4ArooYeWxJq39UJQpCu6EUZl4K/iy+0qwjZVkkpzuVjO+I+FypehSA8RCAGMn
3DQFXQg+JuddQ+VJvTW3rPhYotZUTSWFY7Aqa1S8JFuMLZGnVU2w3CuKd/VS/8AtvqVlTpI3lLpG
xsfH/6pGmYtfaEVNr1GcJ96/D5SsomoUi2p7ozE8YUrzop4nj0e/TvDKdq7+A4otKq+syZAGSbGa
RoQiLdRk5mvD4plOCBvA/ZXVsNHfSA+IUcq/fmg/J/l3RQ1Z7/0//NATeQ4esLCo79Mf/t//+tFH
OIHlGm0ytOe82iQv4T7AblawCj/iZ4QGwe8wq48B72x6Jzew6yxgobYQuAmIk4pmEHVuMMswK/QZ
DPOiLi/KRa4+wtlelrDkq911Uu5GqED8HaHzGLES1W09BABCpin6eeYvn0+SwSf5Q8tleTdoIheS
lc2KJSjg8gRmFWRcx0zXRUNM95pYrtQArhvHiUNOo8tE8lxiQ1nPsJrWQWrhjtuQj+jMyX9pcAV8
FFEs99dwO9BXgy28RDJbUSAXh9YVeEHuEjx/NH7EBotpGtNzaZUB67pcFY+k/8TqekC3Kct0QQFX
w9N4AitUzOwsStviDDWtBg5PvphO7c5VKStzCpGmDQogZe48NxqduHBYJjuV8XQ6bbbV7OKGrxX/
kpeqRykcFiyBNqHEwVyekWUb92zA99bATFTBamS5aco2yISgC7NFiN4L3ylqeVOcDRDaS3afv8TZ
D/h0diPYRcCR9etR2R3EtD9QSMJt+qp2vOVmh/IfjFQy/Ix0TVcS03gQijZoHgbD2OrZHMn4q1kS
rfGRq1ZzuJcmohrG7cVLbWI30wtYCE9Kqj62eli4OojXxBK/41r/UpHJfaDcYFUD2QhVcj5wseIM
mBaVVcELZ5JxvYDIji5Oi5aC/Mb7nx3WeFCUrHU4/5yQvpi4KoEXhG7N4Z3EWFCqvwPxVGgrT4CM
2b8rtY5xOqbjmls/yCjhF6QC9YN9zOAngq/Lez9p9cAf4RpC+ob/t6+AO24QttK2LznLJ+Vys70R
+Lw77BSaz/ASw+x/iwIYEt2bK7VQMgv8cqBXT8ZprwoMV2aq6dOsC/xIiIC6Eq8xmkBRaX4E19gj
K7+BEZ/TIi9OTuq8mNXr1c0yL+ZzuMqbHBZgUW5zEK+K/CQ/ma/zk+osP4Eb+iI3Unx6gs4z73fw
ps5P1vObHFoCdrpdr/JZQcJPPkNZr85nIF3muCHwn4XdAnwkE0iO7pW41vN5PgfJYH66yudVDf9/
mc/h4zYvl/B/J+Xcrn1alYs58MP8dL3C/9TL/LTGRYSvzh/l54/z80/y80/z81/l57/Oz8tinuNC
201UeUVV8mp5llcrNIiB2JpfnMzzRXECI1mUZ0gLiyqn2SMbxThoq4llscmXRf1+V5Y5zGEH/9kW
+RLVTTjb1RqWZbXmwa/WPEC7/mrdzOpqs83lwECd9WbLy8I4dvkmB7k5f583uRS1qgMRYq1miY9m
IJ9VjkLkRYn/rGGkzfZmAR92J/D/m3yLfld29S3t3Haeo2KENnx7iqbD7Tn8H67YttpCjW2db7f5
Lt8t8uvlxiGCAg4k/oc3gRbzvM4R4XdeXuebAr7JmwIqXRY118uUMS9XtjwHb/U1jnjvq8nHDUYq
z5MbBgQakdtazFUG7ZrXxruFHtTDiO+be91iywbiqi6u3GGCzEoB80Vysr5mvcOsYJh0PPqFluhE
zqcS8jhi4xRrIOfiNbW4URFYbAS1NYzeUkDLKOB7diD+lgVIDJ5TOtXYfeTPJDGprS65CMF4oyeg
msco6RgRldTMFx69M1Saqg8WT/XSLTPYFXUz4Ubcnxjx35HK6HsaZMkuVohVg8425bbkRx8pfGk6
VlJYAUWbc8Jy/sPrSw0ZVUPqb9+RpiYUAec+YfW+niJexeYDQi7wi5c+Ir9OKJuYBUBnLhiQ8RDg
2EtlVMFJJ0Rs0upCgbTh0/MAj2JDF4wLBMcSJxY1EkG7Yo36Z59GJUfQuh9BM3t6ZuEGaOV8RQIp
9Lys1768HPbnonOpRiwtDr3YYrZEu53WhIrqfyfA7i72UCRHFmM65eMScXDSy4Reb6ZmL9LaYGWw
qzmKVOAhk1Pg5ifF7EIcUGqcDFwyF5REuLos5FBQ6oHicl3NaffPy8YgaaNAxymuzEj5mPIXytHI
4RoH5vSxwiA8lQNKY1cOlIyFxZws3sVVtGm3ZWFNmQtxH62oYhfm1JV62NlnPsoIjqSCi91GkY6L
ckmeJSd/irxp+OhhGWtwKGKqvORn0bHh951jC1E7izPhHsa3jngJxmz6Z+w53qaRU+Y04sTK+muB
A3TXAr7xlMxbfH/pVytNNvJ+CDQAFp+k9/dakJSnKAWgM8lAuoq9IZkk+m9X/eQ+SM9pAkLBPa/Z
zHv2xxwL9RDuT2zO3tYh9PTZYYOGUbQDqQHm5oFJqoGGli3LYvDstFfKdovFQkFENBARIHZDadcd
jaMLTtY1gwdqAmqBuxZmGF+YkOlhS/LGlnbvx5bF15urPUc1ILwf1quWbY/uxAPZCNV3nu2DwRE0
87lZEtOUXh0bm9LMJXawD7TSs7SK9uw4HUJLxzcWnZuRq6ZRBXwAyEUIl6ugLlWV1qx39NbXg9Gd
+vkIVchG2XRfgToO18m7t1C+StQlMAZ6QPAx2FMr4Y2Tlxc/N+1ayMfHFJE29dWQysVccSi7mRgY
6AiGLi5u01hIijA87nTohe67mUkkOyjcZ5F2oFRJgO/s+y7y9e0Eq+t5tyhn94wmTUyTw2aiQbhF
KUPNWGuuNypGzLbFR20Lg/03u4qehCytQQNwdF25MXRNx27E7RJJkD4bD2Tv3nKVrFH1uXUcwjWj
1uWkHV3fx7g8uCZu5J1H7yM9IPXaixrHjvBqoaIT40UwSP8RrhumYN0T1DfCks3N9NKGyUXxmTfA
uw9fcxnauhB+p67mwGlpjCLDlk0bOLN5IDi9y/351+qa01FYujT1Gow9ETNJi8grYquXEku/FJD8
ID2pSb9C6gVWCKBm5LxmVQkpVkiNEI/BTVkvQ6qF1NYdiHMDL9EdhlMkqPVKROuVnCRKfZGczNfJ
SXUGL4MEdVYJPdbnpyt4qiVUIDLCtEpgcgkNMrk4mSekOEreJw0mCdwkrKBJSEGDALxkEKouymhb
rLTBPUONeKKUMsl2m+wwT2Wqpg9kmx3/LJ5LVh8W7X4Gz+WykrGizQcnIHhS9lvkppT+3iycju92
JqUnJeIqoZwr3uWEtTTEtbQqaMrsvMVR138FceEg8HmHCbyzZIx//APqVf8xzXL88Jn+dqG/+1x/
d0bf+S39g/4diFAq9dHpSb7crJugmqdRmZ0D7Z1OgyAAaOgv6t635iOI2LaQNRUFm3rKk5t9iyWG
GzliBDpO4P3QYcXkKGQUXhJm6d5y8HQZyuoavwVX63bL9bbTNnh1Naue/KtNmOOXJf43cr21toS+
O9vT4W9TbYFWUPJpPAmn2YgRcIaBSonOS2uWotfThCX0CILn+8EP/9tHH3003dxg8CcIdCvWIb/P
fvi//o4M/8raX5I7CJ4PSh1OXQ9Fd+lm6eW2EstEv25sH1DlGHpxtttWi17POIqqP2U4PfHWRwjU
6Xa9IW+ygXUCSSy5aXQyNcwS6ISRI+sO4tikhiXUkuspln3gmGEJivVEMA9AkJ88ilv4oVDPAtqQ
tvgEgDQx2tygwSywjkNRF08h2vhSlENslX6xXl/sNrZJmmO6Loj/DNQiMRCbREOYHGOUvVhlDaMP
g4y8hlRp9WUmLkRKxhylPJsj1QE6Hh1dj+yMweZNS40cm6HB3k0lmsfePdUW5fcJdrin5PHHTT0z
Cl184Us5b/EwHxP5qDh2TqytR+2MF7Ud+ge4RefyDiMpk/DyZxfFmYvNvrkh2sd3u6rJz/T03mgT
Il9d4PFWe8OiudkhbisLntY0Yqh0TIRLOTus0cmOqIK8xsh60TSnV1n+VUEtfKpw4r/+FDquZhcL
GMqPim4J634i34/mu+WmUS3kibiqSZkf6UBId4L59zunBPcyohQ2ZeNjqEkhwrafE2sEgauZVVXq
TA9/73lzk/1oBhzaw1NTieo1fdhPZi8phJRV3ggxwnSDbdpWlQem+OKUOeaAPOxzRTYaJZEnHM5C
l+OzAPJ0wKimU9z96TQbAfOhVBlyrabsZ7YAoQofBQsYBZEf0+81xRTC1w5V07fwX2PV+qfX33z3
6umz1/8EN43ZoVjBZy/fvPpXLMbhRPYiQUHZKObZ02I+Z7skxyMqZ3y6slWcR42aA77E+/PyZHeG
gCvVmbj40A8j005/ONT3Uh/EVrLZTPoETA2fRSk+MR4DLBM020nfroevCZCRJ31cyb4pic7Fk77k
nTcLq64xro9eaCwugMSNENW0Tf3Mmfpsib58JdAL7BFPSc1ejYNIAX/AFSDpeGANUoe06q8s7+Yr
po9qPXpT1stqVSy+R82WHaYwLzfI3pOjFKgBndN4YJQAh/9yApwDb7nPksHj/DceF8NGlS6oD5On
HfSi5bdXIxKT++pQzNW6cfbYHUVKsNPf49Gvh/zXJ6NP7t/vd7nb6na/f/Lq5fOXfxgn8Q5QjeV3
kvSj78X+fEduoamaSiq5aoFj3YyS75ry9iaguiUVmeKU6nhbocZEk11dztmGaaRnRQk+B+m5D0cl
FuHG/aPc/69vGjjxz67hyhGRTYhuRESXZR4iEW5eLIGZTZAu09G/RAqLLtX/PbL76vCY4yT0LPmQ
dZNhhPHJeuGvmMrG7XIa7SajLPhS7M/paV2WP5ZTFbGTjhPvm5+UZOl+PTD2bfpXxcGb3JfiBctx
tbuG4WhhAWm2ZLDXsd6uyR6Kza6n3J/rFazzpJGJa4pHeopPGe4K88rqa4kK3m8tKcfdvcVU8rIe
P2iD1vkawqDZ8rS6nqSptwbPt7SlDemN6NnEk19TAidZkMa4M5yo+F3xTSCuKdKndKVdBxY3km/6
mYAI6B1vn1wy/NwhmSPF5UZsbQNmNlqVVxW6FsGcAgwvXRy9kEAcbP99Hf7oZ3A/dpYKZVP02ZSJ
ZkrBDlLQ2InuVqtN9yGVHU2n+MN0evTwOLe+FBcu9PJPeyGP1FmAjaiKAtBUuX5WWuAguZxfXCNa
W7WunMHKVQCZaqHRnmyj4fas+ellx6OTRKUJi4QymkdEX36DvlpqUe4ny44rwStKD7b39374L9Yr
1hACsBJNCO/v//DjH3RYIvoUl2hDV29O3LmmFwtaJH06otn1KLRb9By1EqvpEsHUF1uU6VGkBebG
RcMB4fqrihxFOC2vQcxaCab07znE5omqwJHiPYSc3J6DXHR2TuqWIBhHaZow8AKIujUk3PMWYdXK
9UwMm2LTYFCSjiAkKrBi5/MVeZ5z7/9SlaHPE37JD3H20+AT//w0eSqxVlaIJpaVrL1PB9dZwlIZ
ltrU6+sbHe5DMRksOJ8rF4bkmoI5ShQQd8Ui0Y0S1DdVl/P4lHO8UcgIH/CTHbpl3VNDuYfVnpKD
EEpazL4pRox43Um5WF9hZ9q7AkS6nXYeuuKwz4RCPXgUFAoajmfgzv4pZsFTy8CrTRjjPL1IS9ey
mNqMwpAO7D7Cc3V4MfeKcTHFbrvGENYZBc3UlM8FQxWouW84oe6GgjwwVmaBCumm3HLIh9UZtASl
kJAJE9x0gudBaNBeQwy4Ncsi+6W2j8hBob8xjIJujxeDonbYRQWYoh6IrAK2Za05a9YlpifcyukU
y0Iz291mIQu3FYgHfngADXGhi/IGyvGqwph/f6PeHAwjzx2hDsx0znmr+Ym6RkEIkWFOq5m738nV
OWZA1kNBJ1dacH+X5cSobKvGcarhDVYDKWr4lRK7lXPL2YcdBnhqFjGVuANfES4+Xb85HDoSvFio
5nxui/X6ghi/oVVuiMaPPejhT5IBXJA5SHDrRY53ZSYY+eipiAi7Ko0g6XvgN95W3QOKifEWK3S0
xgZztU+rhH4Qr1h1L8MaYbQXCOQUnwa0ZK/lUwn6wmWDI9tU87LGPEOUyNDyr1KnakGA1FsSVHiF
o+QlUs+8pvchkFex4nirgm8NWS2bU+V2FiV3s3NsQWx0HEKnSZDn+LqUPK+ya4nEr2JKb2oNncHo
VjZyJjt/Y0Oef6S90k7kwAq/Jb0gDY1W2onQmDfePYLJsjkEMKjt3RzqAE+clA3yk65EBfSnngtC
J4X10nRh+7txDrqyXg0CobsDalrYFNOCOh4DaC+qqe91xF+Rf4jtYGKx2YCPSwqJ8wo4NJz4G1om
5sB4dditAIfG8wU/7jbaXZi2KW2Q78s96Q9XGff0jsoXNCI96FR2Eu1H7OBizdDsTUeAjTRgOZ1a
LZCxxzSRM1ihsVh7QisLaaOqMbsw9vELG3Ll5YIwpWW9dncrlDelkkskyK2QENGNHH4PfHIk8fVT
KvjUtbPjWKXu05E6f8cR/xxopBW2QF51oa+EWT6PKPUKOmFVelDyqCxXu6X5dhCeSQ8RyJtbFJXS
cpoxE8753o28BUwZZEpWDSv87nxdzYznk0MpPo34gF1SN2prCafbc3FnMBM918/QQP4o2oqUsFG7
QrI6wHfECXom4ImkeBDk5VI31iw9LdMvUlk5PZAcePkeQV/KlHPYDA7rjJyFwulaTtT28cxEfeBR
x0zD52pvbsuX2xRE9jzueZ7ftvO3T3XUUhZ791kVe+ZbimbUgLry3kMpckCStH6KPBEMNsw0Tw8X
FWN+SkHZ8CZUsvNIvemt15kDlUkoYs1UXmFyusgJJVxzcaNR1cpLNEJGKxyQa/eQfLtxnQQbAUeB
1oNK3Pmb9ssOSyKmNbw6n+lKA3cz/fIjNDOI61v6GQ7v8zR27TGrvq3wbE0PVHkIW6N4Ct/8gTWE
63qAHWfIg/HrQRbnoLQ+I1qw2ToE3LBfxXcF22B9qryXvWA98aeUQXi8Vt7MNlkEX61rGbpG+pHm
bJ+c3eqOVED5NwlZfH8i+JokQcGxwi9eb5fbwZG9o8fZbSQBQ+3eZO5l/w2Wfb0uZ9O/ycbqRV8B
y7Q1Li1cMlTNDPw9zpxw+4HDdnSIpr7HcOWxnMM8xB0hSgOCLAyrf1hTMmws2wy0TvFssT6hL5CT
sz7Gdl0ISEJNjBYe+t7CO29gHS/uL/uFtyLK/6B3Hv2eU//PPdXuG8KZq8SH6K3Gp6752lmGCFab
FaDgupr8R16gjruQ3QBf6lllPDgu5kt17HWnjpTfVBfbJlqSk/wU7Xf1XodZiu41E2HHrmSixlNn
TrnoJEk5otRxOZV7rH0N15tQ7FYIJfJM47snpniXYMPZjnV7KLyds0OXETmaWCgdvUkdArJvub0j
6mgm/tLQt+Gd9Di6Ni172z9sEvq/fgskFC61rXn319fl9U5v6sz2p3hD1Kgcny7K0y12aH1FmbCx
e9307VBcruQRnMluPGbnf97YJjRjbvnDWqHpTHhtlDATwwlrZxJ3wwjrEtCsU0UDUgf4yWq+z+GF
YvseXEUCTRiZQwNQp3AVZm2KHnv3xHeeen8EygnDp92s105B1q73bj3BVuHICXZPb+TIpQMMvpCM
BmwItIef4Y9Zqrbqm3qfnfqm/p8b9VfZJFiWrj3qHaB+4zuGZzeGoMmkhxntiwXG/dM6k2WgUUpi
+GsDF09ZU2zJn8VqA6Iv0Br8b4x4FlubqeAfYmTHcs8xYdcWyw3+zSulPFt+MqgNFioEjfRJjZFd
MaoKKYtVCA58hE9f9nQm5s9sD+KJXO63UlBks0ynFhZFdPHu9r9uwrzbxWTG+GHXCv5hX05/+0ul
x2peIWt1eA1FZXIafl9FjsN+9P9kPhf6H/gyw/3gjs2sA/F6d9JWcdhZ8evdoq3ivc6KX1aXbRUf
dPe4bp3jYWfFbzGZUMtQ28ca5wO8R/8ujIAGHGUE+EsWlG1lBDTNeEu8AmHpuzAV68TeemCjbAcH
n+Yy4XY2snd7NANoUGZitffvyZdIaKZ9+vlCM8/sPxZ/s06KUWU9LRaLr3ar2V4vYCnrajvW69vV
OpZByFoqcT7CFrL05yov7nYr+qOY2G/Zf2c1iLhZRZgBw8PY5aJsoF02vixq/ObP9mE8XaVjboun
/1Nk/5zig9SRtQstaKOrwLgjgL5gffQ/lzdX63oekWUv+BckN1fjp1EU6Cccyx6h9aY1BwdFteOu
b+HlXW3lrzBJpSW3VuVwjjo6tBbiEvtZ8ermSKod0wTiUr8ab2vwrOzH/YkeBMjukSDp2MukaGfb
LUgaurP0sJkcNjkpIWWMuRpBtlfn3ILXQAvfV7ghmJxoGlKU/jp+QvTPWbzWHbcV66Wdm2lajmyq
tYb38BHWvm3RVaM61tBjG6iWa96yXvNbFmzesmLzD10y9BPqXrL53mv2QYtGlea3LFtcfzg4bLJQ
e8h81tYcYnhN5Cnt7grNw4IfyX1trGav/IcLP2ItQ9fdeJv2EORplyH9tQ2pomaiNbNsIUw+Amun
dfckO8RU97WeTGhOvUXYBV7zdvXnQyR3/Osn4jo1yJp5EjHosRD0B/F92kMGkqJ/GytA9AKm0sxN
NQhTt3nsg/TOd36xdyG7sOHh6+JG3C4N6gxjc5PzKntlLguMOJ9/ETNBDDTxqEnbOez602kfSawj
SZiqpU8l1WnLVB3I4K4tQHnM+XujPdu8/BxpFgaXcUlhoz9nEzjclZZuKlBd/SkiqFk93E9aKKbT
ziH0RnzR7iHP/ko09Tfx9QiYhhypQWgnctbMzgZdnXrEPFtjnJT2Y1eCb85e8AoZheJTSsINDk/6
IFWWPG+tctwUxlTqs6E4ho4jBvSQJLlmwDQ6bMY4DZ3gR2+nen8Ffd9xt3/Z7fbHquGvtULd+v3f
6aohjeKrcqjDkwqFRHu6QyQg5epDSkZl3aKooL0MXFRyH18jClyM3koUbeeUi95KBwg5unJSKBUN
gocz9uVn1rQtHyKXN1Djt7tZqTZRpOPxmJ97nZYtq4Nsj9uQ+UU8Futvrr3Re1+drfbceyi5z97/
fInkVhNWbBdHoxH+A5voc9eYB9wQ47qFuLYqXKKgOS5tnEpcJHsBjI6E9fLkfhWaTyzWw9AtHU5z
0MWxW77LTW4PFzloIeYhF2G+trvcv/PFKQT5ZdXMinovc7sU/Y9LkgEdKiR03PY9Jojl9pkdwygu
t13GaPo9WAH4MguKjbAnmT87aMOlta6301PZMNW3N1vqdhS4Qmr0CevLqIkdRUKMYBonfT7Ezvl1
FUheNZZ5May02c7XO4UoAK9tRAbAe5MkSyT2meWMXqrQVFdLxAAxhALUyGrb6ZqU7TzUofHXHN+K
oS30eWAlz1YFNjcjgrL/igpYtCaESv7k2HmKxCgILAr22eg6WWRznbIF4Sim3iV6NOpdhx9E9bzd
Z92ccynnRAnT90Gs79EjO92jR1WRfY5eu5Ye2dd2MAWUda0oQMdIU9heOR8nihpaqBVp/uDD/wcS
2JNvnycPEsqpm2zWIMQ08OWHN8iZxdVGarlXTIiMC0GLKHliBMGIESV8EhDCkjZS5P1pZtHEAUtd
/TPMQU9NIMSNwb9yde0yBopxZ1p+YxKC7UXvDg1KeKHFfn4OcamAM5++7kTTdhpQiewf0RIB0cwM
a7N2IXD6HvQHPhHmFBl+heGmFfxbYMgcCSTIycP3e596ZEdKjDcu5xVCxBDvQhiBbTKvGAAdmx8l
yevd2Rm+/dYr4H+R9hBrAJ+SwlGsMJD/j7m323LkuNLFaN+cNVjHx2MvL1/4KoWaHiDZqOyqJnWk
gQhKHLKp6SOJ7NXdknpcrIGygKyqnEIBaCTQVTUj3fsFfO3lB/Kl38DXfgbH/ovY8ZMJFMnxHJ0z
bBQQ/7Fjx4798+2LygyhEmEIfoRAAXNZHx/T3xNMKJT3U4eVJ0zRLIwPfNtcGb4zgw1y7NNjYTPK
nBnGbfEPinheSHrDl7Db0iiRLVMiY7KafrcXBAF/oQt0UeIRj96dNGhALlu6hhG8gDsuLEFY5Hyn
awVU+4vCvrZyyLpdC6r6PQYLhufZlE8caQdYE0e93Y+gozwg3gyOIOe9s73w1lRAJUvJs/lkg5fi
/chHdrm3e/cD73tM+ko3rE6CZVa1IeFIqgi87Hc6rSyWOzs5B+10P8s++0ycbuXSzluEAWiG4cGh
CeYz1f2W1O9j104gDIQqfNC8mGpa+zfwX21jOSQD7+l7T4/P++3Z6X/2EQ/hSxapQJr7/1m46L4T
UtfBvyF7Du9+TLRpkWkwOSQinllw9iOJTHcoJJfDOMbmpwqePPHzJ+7n62EiS/oA87oO2PkHZcC+
6Sb7GFqDYf2UWR//hlx3mMdfDi851ALqGSZ6EpS5pOaubF3I7PupLlHD71HbYP41X2LlE/8nxRue
P/3k6aeGvBarcgsNEBGanesj9/Hr3cu8XCmma56dIY3Vat1IlnIqYS6xUQYQmqej7Hn6Fxq87gpS
EZ9Bi2be5ziHT/2xDK6rxWI1OIPfkQquvV4HV7sbMoNf4yqY396P3v1PnF0WISqapripFwv4/P74
3f/231psHoe+szLEDpCxktsVoA4hNgs67N/Vy0+e9zNMSMvR6pgEm7OTDAY47MFyy3TpnRHuYEaw
P+p8vMQfAqsFUOF8BaMdruvQVwHw03g+s9s5HM1hf1s2N1A8e/Z19uzVy6+yJ3PDy6CyMHH/Ru7s
4NXrb7988ebN9O2L1797+c0Xb194eVoQwxDzAAG4IIl4Zmnmi0VxU5mrZPHJ8+LbdbV8RWMctnrG
RN2MOKFiBkPyw//S3TAw4baSvmhco+z49KD6Xy5WTfUPWIerholxk2u0IkLC1c1Of8rvwqAgQBnj
jvBeIRzNXOUocy333hfv/gNTqnmkvH/27v/4byxxIjmKemAe50WDzPJgvVofE7gtZzoWnDQHcFMC
5owzF4wC0qQMcNUtgDoQEks5QxrzQT32oomLsp+5PI+jd3gq4EgbdYuJT2mUptIy8DZqP0VO9AmS
6/bNSpuFHmczxM5D8YlWm0QAvwsBRu71TB1Il70FkNP3J+/+WmGAmWfDtrqol+9P3/11j4Csm93F
bU2it7nKgG8QRiFL1HD3QdQnfIa8WZk0AblePhjhqdCQ1uuHkSaF3YbfE4w43b/ebtfjZ88usJFi
WW2Jyu5vF5v1jGFys6dZ/xl984wVI9erO/0j/G1++iH4qlsGC+Wb3JCVB7Oq2hscH8uUBwou1VBM
peBSGX51gPCrgByLQKtSsRWPldEKJmcDelkBgy4Xi8F5CMTaAI4NlfkzoGjh2wKMwXYl440JcVgR
YRXEFSOZYKZNhIYfZR4mq+C+Vbdr0OKyXk+VR4UrJBqwUiZDt9J6WRLDy8jU6I+V7hcLTqUIokZP
bF+GUULn5ebha0DMH9w9HWjtrwOJXS92V/Xy1ki7V7Sp9MVwILtKm2rkIFd/tZhLfsTtpphu7wo/
TSIs1Laqpozl2ZrCWdrxigRB4PEsBSIUgg9zPSk3ElwI7t/bt93S7Zy/VS6ikb5HbFvVqxbrk6Nq
qupmeKLVxHjoF6srt9h+DYWd3NrqDO4rD393kSzo8BoBy6zmJIb4xzAHtMpXUBjYINwV/ZEaXq6w
HqlBw2rogcTPI/M3pIe5G0nrapaRlk0dHezh2HThNG2qix+PHmFRePvlLjob4HAG5/YFy2thtzsF
TfzJyMjm+pm1umV+CgD+fJzfGJZQbV5ZzDhfzFJ1zObVAKXQUo8vGfU1LjTVzensNx71yhJMm93t
rTnaw3BN3OzCX4oWxvITs8/ECvvh+6dntyZsTDoRRouoTxvUcTT68RsMAXdk2lTmypgYCuy/MXyY
NFnetYiUistiGG/fz7wXzgOhvQNza7pbgg+mxYWrlO59teI631uDe0JYhvogeWYW0wFOPOwPlwEo
dyjrc4hB+La5QiCQdbFYLa/ANQD9A5yqEf4CcPS6wvyM+Dec/MVq9n28o6g/ODVOuwP5lnGdTNsh
+nYbNDgpkOpVgKfMD/tiu5L1GW7vAvsS9H9XSAMOrjzw+qTnJ7x1g8vBsTq1X0leF9R7PJvjC0aT
0pMmOz7+nMnIrObI421GWnz+7r9nCR9VMKTce//Juz//3UcfEXbr9HIH+MBGcmYWcSV221Q2EwYV
gwQPN9Wy/pdKQcmSCxC8eKQpMgaDArA3Wz9MGVKW8wTAhdGzZCgpUczwe5aFwV/yy6uHL7+efvvN
b/9x+sWbt/CqgH+nX//2i1/32oRxW8L0eELflI1NEWqfN7gm8QunND3f7igxHAMaXiPQ7hVgRJJq
FTPqXG7KK8zb7u6wlbl1LhYAoElpJpHDbH1AbFkO8xgA9ZcdYyrFN6YaSYowFHnFOkz5VwU/oNmN
ahE7kCEZyTRAH0edcFCavozKYpKQEumB0qAkEdHgl/jQw7eJgcaqeCg44u3Ju9uBz7QKgZN/qsUh
qvZHiASX72n47L6gJcDcrLmf/OX8gM40hDFmpQ4TNrVOJ6WE1sh5OKy0E+fdNRwz2+K4VRdi2rGl
kKGzXWDcGVcUA1wkJ5JI3JqOutBThxSOdCBp/YRcJ/MqT+ywmcB8T4vSwtB+GQQp4lg536cqow5j
9d4eRYQ87VIbaIw9HsKEagXDP+CO9AkKGwltLGG3jIaS214PCMtC3QiEmWoEyhAq3Q4DkQfr5Tbf
M+8AbzTaeQr+MRRnRBjG/mFXpNO8zWfMJYAeQspH9IxeGAnNcGTiE4J0B6328w7wQ6AzqElQimRT
GfGAVmtv982t34oziBKBnXIerKn0wCncqQ9DZjrurbqzZjq+ifL4R8vh1fJic2PTWmRkttXccOhc
B3NQGVFNFbnT+K4jVGWz7AvtsovC0HJu5l3iTXixKJc3fO9sqtvVh2pe6PZ7OhiJJg8X3cgunJss
MS0sZ55BFSInqzTaatItbIqqPtVKY2oT2vqcf060aX5uZX1Q9Vi32L5ZeqfOxueHbla4U2sjciDu
O1MNWcAnA3MHl5dGTsBPzBMH2SD7OPu0JcmtkU7WD+JKGG+un4aFumFUE+xokN1tIL5ozjj0VoAJ
t5Zt9HZJ6G+VBfqSxBv+Gf/MDzsA9hrOhpLXHL+jixit67XmOecdR4eGxX8+zeRfGl7Am9sPEt8l
tDc/+jaAJpAGRk07HHfKykTfssE8vRuPZChnvLBPD1vW9oXB3MvsX8DLA3WXq1Fm3U08j6BgnZh/
O/BHc3Zn1xmnXnQ48/I/WhDsYnd7YQhsSIL0nJ4OJ/kBfMhl9+ZON+XyqhpG407CXOsznVwFaux7
LMVQ3RTkh8KLAYlzCendvGbr23IR8j9euk11BXoMbwXZ0KLXbZVeIpbehyfZZxMuaBiyZdh5ytKh
L2austqhHzOugrqKMYGItwvRmsErzW6Cf0lYMUfqu6W3ch4LapeXTbXVrgzRwYwvPWmDzh014afC
eXnJX4tDBKH0X+3A1lbKCZUoBywI9Og/4s02kK0LvF/cFYqeUhdN9X4Hm0gnlPbwuvxgZBxfbEU4
kUR3mNTgAt6m65oyll6Y328lU4UaBOfIaJrdrc0PgK86yAAFBgISjZNEcpS9e/cuu8XgNvDHgjmJ
TmADba03gCBj3sBstLUNqiaIaoGXNVm1nRXr9S+/Fx+j69YjAPpByCA/hLM3aFSyrtjq7YGP3yQR
wS8u8BVowjYyyq6r3ca8MSn7SJCvTOsFgNZsTobUYkePDJ2TZ3OwUbJ5MKRyP0XPBryRF+XtxbzM
7seikAEfrkHZoNmKnG+6stKHzbFRrtmhxcN7otF020KB+dFS2B3MO/sN63WEFBzJxKjSU3hOj7L+
Pah+YYLBG8Gfkq6UJzInXKzKzRx9lza7ZEhBWMd6DHaAZbTPW5Gcfmc023YYdFBaqBTV4SPFLg65
8XOQB5k0wZRvxE6kghhm5HJRXk2cplCyYm+m8ENcfG7uoGm9NM/Rejsx0r95HC0jV7PgaHGTc1Ky
cYYG9EeTYBTmwP7JWH4A5gkuB+aevaxndblwNYibzs1RX5QPET9kynqGAtB61dQSq4apS8whrgTv
uuvetJ1hIl2dc1B+yXVK35M8ehbb1WlHxXBFYgc8w1jdgP0Am+WIL+eJa6G4JN3wbDWVAY68X6mK
15CY/z97Mj/myJ+sL2GWgWKzI7NIWNR/vCXWNF4IFZnE7gSDJ834yfxzjEs0E2YJ5hA9VKqtTaab
s+uTatgFEqdOHbRm+Eg7q8JAKqUmd571tlc4kiM8exE7eoOcK8H0MZFau+MjXcTg7g2mnQbODjFB
zspO0jRwdrir2MuH7+xeYMtJKCnG1X1B6xRpes0vdDkn7UIkekg+1qz/sS0OC/lPgU40qjBEpgGe
7JaNGIHkogSlPM7IsKIK7G8IfWEPpg+BUd3h4qmlHTpnXukyT1SSkU7cJBOF7FG0nxOF2HuXPyUk
byzWrWYDgsn+1plDWrWHs1WsGcU3IqQgDBTgqddhi1UbvKY/n2SfpOARQIpcP3wyaHS+GqJ82JVh
To6YDaMuBanPXTugtyizdbX+5OQ56OxW5dzcRlNInwWZ6swFNNiyGN3RyJYuG6aZYyM2w6UADQBa
A4i1l+VNBSIb0E9Mt2axnM1r2J9SXCQ3N1031W6+4lyS/VirT+5hhSwE+7BdYHL0M6HR8xS0k6qt
vN/OoByUv41HWqg1CpkutwSbi5mjCsofpUcwPB1lJCawnJ0+Qo6uehCJtN5dLOoZBiQJHnd2uVui
q1XTO3KZ0GG1Iv6XkEuQtJuJrxhok0oCKUSZ+sQw6d7HzMvRQW5T3mkZRPmIQRJC87SG5zW9/TCT
2iKj7GMkbJhbeHXXZJC8fKPFs00N0W2BXojsiStKQKv69Loslw86tyxnj/TkpKYIM7aSKZWSKuCz
FVpSWn0LX4ChxW/eenk64JBywl9R0+zW+AIxJwruBVPhl6HseehV1vgbQwT0OPmmcW9EBvXv2RvV
UsGwpXMni9pPXvwJEK+NQGmIUYO1WNHPa63KxcQGKDlygliWE+y+EoWxQCv5/DQRkGEDcu25dI42
eKvv3Ixlez1RYkaRlhJ0gTHXMFrt/G1DcjrsWJcIohmwJJ83Q7Np6Jm4mq0QSS6JwegXDM7XvSYv
G04GfLlEEg4k7CXGNnj80V6zx6ceOgY2dIBTzHTkmoCEjo31F0giAr38tsXZZS3JnAJfLZkQbhd8
jkeOP6HGSMkJcfitqMzAOKcpmLqQaRArBi9UeM6E3DeYYSKkgH5ltm+HGOx4aqEk0jFM6ntItKMK
5RvJRqbVQ6F+GxVrbMMP9NtuIl5QlDArWS7R69rpRI4Y5jt15ExZc2XMvIPnr+B202xmfoq+lsV6
6fR7AVnZRvrf9f9+d3X1IMI5S1429We2W19t0Fo3EtZi+uNZfscsJCYmap/MzZ6rIPNZ/tm5zPY8
JaynfgsCynwFqo8bIJuTcFzgYErc0Op+bU7/trxohrFDF5ZY6BBHLZzGJ9NK66DjRjvIccLTK/Z8
iIVt29JJMFcIjoqNxzXrubknI8oHjjEEdkAOcVMr2HCdgxeOssdhgeARSvZR0Cilagw47GyDToEt
o4jJW4pCrWIpr5Y97g5eJciser+VLJle45z33fz/YUMBsdPcfK7c5yk6RLIquuDhVlP8phlKP7HW
BZvLPrdE0LTlHTDyxwtKQlsv1zuwtoC4N+9qT3Y20aSi6uaM6hxnp+ftFM7Ne0RONnnzzdmYCO68
qBvD9YErpn15VLdn/Aw/NxP7is9vVEdmIC9sXMOeLzGYralh8dnwRVtSJVabpMqv8E0mHAPtVPVy
B+IknYLVZaKSNSPQFjfZsC6qQn3Nuon8oBk0Z/W5x2+HIcN1no7FW/iAX6rp2Fv9KAP0e5DN2XBT
QRQ8zLCpzABfFFeovCyX3BUa6CDSHu48dpqwQ2RvJBqiT0DnHjeW7+0tNbWmNMRxtc2AXABxtOj0
NnYulhem95l1sqS/CM6bZ8U3PAUdwwv5hId6v7WXv5W3iJ+cjgMRZgq9kjcdfXF86hQJMiogP3yk
gCnRLCD7a2zV49RcTuaWmmIJUNKp0Q5VT7FqjvLmurqfE68PFiTZjef9QelRxSRq65/pGuei2DHH
mA6m37W+CLp7057H1KQZ9wGdKjPZ/ZaRV9K1eu3yv7fBydpPT3WvkRXKWoTHvaQYjORhNlzaVt14
KDD8m6Ft+Wso80oisEKJhGOQWJulrqXEkZpqwp9oArZ2Z42G75K+4ey4ZC3muPPDammhrTO+pJF0
wLTv62AZ6ITONYSvaSt50pFN2cD58LsFgsVJO1YvnHQhfS4x9A0Xb5T1L+Hub/jvYkp/mu9p7OZ7
CUPm8vb7k9zSASe/JSuNecRB5AiY3MA7HP69WM0f4F+yDW+gt/5qA/JUH0ewLBdYxO0jw3v6fXMX
pIryiAGLK3VHJ0I74HpmCT6AyyW8mBJ/Rw6/ErBFjchiJBsSUHLbULTnln7NKiw0m1+4V0iHpwS/
WwIfEwRkgoYDKZy/jcVwuIjhQmYDq1KodEQtPR/9LA/tGdTMUzKItHMaGYgNCxhyVbOe1Lu11o6y
05Pnn+ZwL8EHpLMv3rwNec8fIIQjZQDvdjtZLebti5lUbNJ7NDinYS9dV7I+tLwOrCE50lrn2Wqz
gWsatESmugBQHMPaHYtxBmVBNHggoqYtA8dKI23OV3gsWBneVKY+wPcoDgi1wHcY/IwgScuF6fV2
vWXRxuKnLROw/TRrfD6ph3je87wxffnAgoH7/pjRA06wkD0nEvvyIubqQSMfJXTsUk4ytPQB463f
R3/pPj7o+o+rg8xq8TDu7/McjV+L7jUpLklEAaMUb38MrXrOZT/MseyHOJWRgcpIvE4WoIvaMG7o
1JsR6CaA0C7B19RXX5MCenttiG2xuqpnQHWr5eIBHI/mABxAmo/nxad46V5Ui9UdVzwtUKNF6tUt
+zbxH9S5E4VBY7NaW8gMNkPAcSr5ZBOwIce9+L6CIEccn/pO83Y/knoMj+bJrzmwyqKf2ArsB+5R
BB5baBHfGaLDyH6A75pd4xKiz/asDCxTECSJWWYHoh8gL1+w4AVfSdyR/foQR/xFfQNmiDKTlPX9
SOnC+C19m/tXeuxjc973cZf8MvTA3q2tOVrFMW3JU2XkPsre7+rZjWF55j/omwZMr7Jmbeu3V7Ga
xHPMPQppwby3h3QM2EZuSJAv8T64PYLaDVQ3TR6NuCABengyygZwsO/v780L3QuLf3AA+N8tJeWC
1M8D7zn7v3/K8Lnv2yUTPgb+RGxvkVZ+qKzeo+xbc91fGjrkP939mrjzcafUMJ+rI1iR3iQ+gHK6
8E7xz9bT05G6T/Knp3Eka5hUIuFtb4N3uXCh3f264mm8q52IXzsEGOEZgO/quaijiZmxyzwS05M5
+sYIq0BEx/ef2sBMw9KKuxLlv/c/fff//pXCB/KwN74CBw2yLP2Rig/jr/JxWwwhR4cqg1QQREjB
uOa//tdsnfAME9b2j49DJThQv+BK0u6Shv40EvpsuxjpRmHLZUz5ga5uUp4uzGm5rmFR6cZjYZUX
wXw1u0HUmsnzkbWAaOn4iO4QuGduV83WMLcvv3318sVX7Awjt80z3ramWD8MGhwj9ikC3K8r0mqB
C4eKL0+gDrohhfFde1HWCH5HCxp4g1qA99i7C40zqsscAvCPw6AZrJ3wUNKthqdrturuZwL9jFPa
TBxzWlPpGkDB0vx7aNbttJMXr46Hlu8WMvJAkw2QeUMcfJdzqh4ttUQHC2K1h2bHDYsQCw15Hw2F
KC2dsrQMpBQSrPsIZ8TSrnxgK0yy6mma1mHx+ev0g1l5xbWYdV3vkeUyYROzpYlWfKOmv6Dx/UVO
mOFxUDR26FPwarG6MEKyNMWIHZGchpPokuFdOzQ2QyH8VbotWypQOIGQZvOmwBXIrQSYVIKYyL+e
uTpdoZa2Wv8zUo9/3k9tLzdKkd9TPMemYQsLFDthXi4XaBTn844hCf6DzZRQeWcMn5xhtpr4+0hn
osYlH/1kUah+8dv5GyA3kkD3NuZydNpamOHKsPIW4GdZRcwUQ1CQ/QP8B1TfQGrl5urD2cn590sm
VN1eVPN5NXd4rYZjaOc26QHuzyq72F1lRz//5O9Of3raNSyHbNk7yO9WVVWudCIopF2eLUsDeXuS
EmZiAaWnXEnsVQsfEN+1ntXbIX8Nlu1tdbXaPEy4uVFE4BPQZnF5HGI+CvjVRH5lSU2JGFe1qf0w
CQcjfKNoqi1HcPjAg4ZSpLA0YqhmlP3rX2h2hmG//8/v/juN0QH/ef+zd391kwCKBHEQZRBmwfJY
/fLb6R++eP3F61+/GfHn37z4xz9++/qrN70eO1ugVEboLpteD/4LcEITzx3DjI4aHA6gAJwHLmjO
RG+vZycBgAhYjIyN5J6ptXpN4emu8A5ZpuLZT9cPzw9sQpK5sqOFj95BsasbEcRfkW+EdpFzkIix
sMx3VR4Bv4u6lwuMALvaCjz9KDTuLvDGUtegdLEnzZ06a1xDi1ip8vZCDau4H44DGYeSNES9eD0c
wKMivGdYsVIv+phAnu3y5WFGi/LODeFABAYt/2N9gVswf+gmltW+JiQu3GEnYIlfUSjb9sHFaBvu
0RFMXxIv5JhChNxHRdnKc/MEXSa4nJbmFLtHMClcET1lhfmGVhjLMtvuQMtodqHGHFrI1fLCiz9Z
R9xX1kSTjNYgUPgeXR+QHQBVbOhvrWP+Bc2Fo1YCIteBgNDGdbkBpyNQCuE7/66qN3PS2wX16qUo
ApbzEsoYfnTmnI5367nh6fj53IyxavzseGsJNAjmF27qum0TQYPGXledW8lH940XKCq+nJTFin5C
9EOJFtKvI71JHrcT/sahiLaMaRT8WKx2S3nTyZSjiEaopPAefujE1KE1T1uJkl2qOEY/GtTznSPt
LDVVbm1Mqbcosj3eKNT0dEg1OJjx0QWszo7Q6VLU2oheAVkDNlc7VF8DbTRJ3+pIWWr6GMBbCBy3
MMDT9LeykOnXFbfFW21K14R0G9hpJbWxDKGhDAscZtOyjmARJaKyDvKE/IynHhZCqdfv1BlwltLN
FYZTESOFoyHf6BNtRj0OE9tStae2HjpIZ3+rpIvH1bAySLDnXNgMARfybCwtncudTlDx4aX+R/9O
L+VWpye6gHOZpxUwvwb1y/5rrBbzhcsw0YjdAbzxd+BHX7SDDieyvXuaMD9izrtm6XKeuueilA1f
i1yQ56DaxC9S1yWW8H9IOYFrxU3exhR9CIfgjDlbEF4UNkTRHPEgDtIOonBcNh3MmGYD/X4SciFs
sohAJwp5OqRS1ZLExsmB/anJ7mcDKDSQu4mW1kdKgdoDtMMQ/mO5yMr5vOaPuE2WJzS9xGQbnUSQ
mQj3r5PDebeGowePPgrAFtGQkPI9XZ5BXnZeSVwRWgufKke2vl4/TMe2fwFNse+zeL/oWrx/u6VQ
rx7JN9e1HplaEJsxc5R5DCq6iQZNeWlWYwggDzZex1RaVscEtsJ5T/BhBlx+wPdtct7uoNSrAlrG
cfAINIoB5aRqGSC3om++73HHoksWUC7ctU02JCccyqxCgXrmArPX3qEXbL2cLXbzKrpU5SINL59D
L9UNYnr6aIM4yM2VDWSU1xmugun8oPSZ2K649QxN1ZFPkpD//TxPRon8pnroCBOx9t+arqfVzFAP
TXSHBstXzYOK1ZRrFccjV+hbeURj4q8UvCW0hP4YILbXKDvZh7e9A8mY02wfUH9oHT0lMaMF02x/
SGP7EVy//EBSCn5suVTl52J7oS9Wd2VVWzVIsXb5r3f20btFmRuiqFC6Ai8OALrt5+Hg9Jxv7VM0
8YYg9XPSIOWnvfFnDXPRiZziljfVAiFeO4xddjGI6PB6jyKFDjbLfeYTDIe9Wxudaj5hrvt+8sSe
F8hus8FwdCt1eDhO1uQbTt5KB2mktX0yw+GvfXzkm/d5GL+Z4NqpJfS4LzGMrt1OyIH2IpQhD21L
Ep1L/+1zScOyjeBcbRblA0rKG1Ra6atN5QGLJvza/Ur3O2EDqg0CZi0yTgnRiwg9Hdk+5VBerDDr
nBEfYfj4Ozm4mL+gBsvioVxpeU9SoIxRZVrSnsn/7pWgPPVWgN2q3PK35UILNCVdOdFadWWPy43m
8SvFjO+TtCMFPJKzXCKtKzgCr5b/Qg8sEveVApHfTMenmbn9SjimSjEgiUvK+2EHZwJ/YR83zQ0D
XVZR89OBD2PJzx29InhM3zlsKrhWkRRt03B0YbfQFwj81JrtdlMpqfMItQi1DtjAVZbsh2mdTiGv
HUHTTrjsfi8u5qFMtT6kvNBXIzhR6l72r/V+JBdgWopWT2lqIcm+YizX9mZ8N1/pFg2NCCfr+2+L
473A84WUmu/1JcYAsS50OcX3rf9kJ+Zx2//Et1qcdw/HZXEIhHyonnYamg9cYoJqAOhdt/PpizDC
L7TEqK8TcQ9TD4zrej4XMMBOUDR9i7OOBrJBgXBvxBErbJr2qsD0bqT51eIDK80d4JWzZ6NTDgac
CebCrqkOw05rv1TBmh6MSpvVuyT3tgdC1JMy3Ld3ddBDoQ3LOekklQ7vt1ckmkOjlOoPBfo3FokB
YPXBL3/5S/dYFRt3wCs8nXw0DPGFTdzVkf/39wdeS/bZN6OPBLV+lkGuInPfGukXbgvIGZNJ5hjE
kwr81JY20as6JMv2Z0G4PqwMtcskrjZyBl2Ai5XmuHgozkFz/Tx6/w1VZAscUvuDNYRWy1m5bnYA
UYIi3Orystpk1/XVNRhyPI8rSs5Gp1Ka4YQgvs/2C37c+a+Ktmfi9iLgJPBrXS7qf6nodiUHc45w
3wYzKEL1o1hptxejbGCeWhDgFSZAoXydhj0lhDPygIM3dye7dZHSuKn00IYWkyWhuQn8t+ARBUTZ
bItQ2QzpgruQEFOVIMOwA/jbCRwqnPCJQ+xZrvgPJZHxN+bIgBrGVmhTAjkCEGmXjO6IE2Y97WtF
iT4Tv3gAIv9ACTWWD+CbfFEvBcrU8wQhVb6WHUM0YIJTpUtmy9GqjEl6fAHgZixSK9eCRsGr9AIo
QFBqE26PWUHJUcidyNySK2BkmiIKiqbXz5Z0fWUDr5whgqWT+GqGvFviPVaRbfei3jZkOZtXAcYv
2rUwag4KivJY5NNndnp5WneqwbtC304Uze4Va4p+bwUqkegcBLzR8Y6uFnkQ55GRDP43VCSna2uH
C2kJXVIW29UgjTwDVaRogQV143lL/0xlqmuHzjax+D3JqvpV5NVPv3owha77M2/3PHf8+97pjZJW
kcMTOZAaFGNBhrZ9hlgudGWo1pX+AeI4h5jdYHiSh5PgXjykx6H5Mho8aistL64XmMqJON9y4rBp
YU/vCyt35o9hSBgQY+5RQOiCudksBb1QnIEj7zxh8fSB+Ov0pKJxHumuXpo2w2hu7HqEPItBs7Rc
jFpcOLUwHClg3tIl+PX5bIhUS2qjUNajFsyiROyUPdCIf9E8kY/5zbzEZWBwso2RyXFZvbaG4Dzo
Fhw0NR+qvMss4aiV91Ekpdx/5M82ZXONpNzxfoCocFR+0AAU0yaVMW4OZKDoBUvlHurIn13eslZu
qPAqMILLopxH8Vz0ZiOhBuHf6/OU7ofUu27tWs+3r/LWuPLg5KpUcmg4Wi0x5L5j0ZCkEEOAbwVk
QM+8bQfS2VSuTR+7ZlNfYVbjBlUDa5BAN7X5m+ROjtyzdckosVE0q9eW1AqT7F//4i/3yJkbALcf
oVnCSbF30Nxz1kBbN0hjVTWnmNi71eaGXQEScXMNRxVCflczkyuzGLeU5w9/BlBGRLFBryNW4Kzr
oCE6JAIh0/jug0iF6HRr7qjrZ2j9yqr3OyO1bn207yPwkIKBAzeBkL2EioXoJtKy1/Nh9Au4x/A6
pnBDJapc/Lg4T6BsWwlmD9OnL0iiZzqoZJQjK3L6s/NIxblI+Dj7M4iD31eYfj12Y9DEgS53UBIC
AdPSNnh8F2LivCzYkj3FVW/X34Dpg6ePk+RBTE8n5sPjqz2fyEjzjkRZfCx9krLWQr2pzuie0uX1
ZH4qPn7QOiOQL1rHPUjOdfBLRHs3Szmwj0cLVP4SAIOTzrVNACpsz4V9QTZeymPc4mX5ob4qrUAd
MGibGQ9f/ox8Oxi0Php3a/tkIf12+F6p0USd1pduMY+0P4cYoti3NISgmaaNs9PzUfaFBPqmYiID
c6kgWEjdwW1zNQg1oB1jSFOc6qCxjXe3B3ORP3TY/ICNpYMW4ibBztsif/7jbBDYU80K8+DMwJx+
fRxc2kh7flUQNKmajpmIasqO+JWJJVPt047acLVYUgz6v+D6zzvq4yCXkRMWfK2VYvB3IfE6Ha1Z
aWcoZqlYsg0s1q6Os2U5kYxnkj/CGOwxgOyJue0ujGg0IYtwNvTm55J5uXHkngfRDPxB5dRuHtD4
3uVh4i8I6ozJDuaLvCgID6TBAWuNq0aUxiSDB5TSBHlUrOnPJ+IR+bCiCyk6X6s2WAMQpQ9QFCVg
6qV0ajUQw2aVODCmSKCkxlcKh1SYti8qI5xhhG9S3pJw+GSkhNuukToYyooinJZizYNAc/4V/rE4
mr0gpBrWPzJXYA3FWALGkWAvqqszS1O6RpSICkHRHaVtNjqoFunNLMV+K4VPH46fgCxtfku9eIKO
aE2pO3XOIODD0AI+e8LDxudC2IMcE3tAfPuyfnu4c1b4byqdRl2pzxLiZeA13yJP8lxfmymAWvy3
jGzsARuDEpzH7r/1lKMcZDQmKUQwidC7ZkLuN75YUl4gXK0U3F7Qi3JCJxpe6OCk1cU+4AbM0Xss
OIeikfQNur7Czg11jJ/FywIcj4OHaBD4ayY0zmBCf8b9+/Ny9WdQZ35Qcg6V8hkHz28MD/FKlOkA
MAFPtchKBAS6RcII1A5Gfm+QXmllJ9S1Wz/RR/DkoJ36arnatCVsrLklkCgH1NggaUgD4vDlRfjG
3j7fYNVhG1y9o+SQdQ3bpQ9kZe0/01HsqC59GpnC6dbiCnnO/NI/xrla+8tbYFhfk4K3mr8gQWeo
6N19FKLH/6Zpnv9VVC8fFOXLhzgE5HaLGdlF3uLkSPtsg2nOcqZjben9FrEB4hk0Ys0/wJQ98rla
nsjhthnCG0qPb7esgTv9VzNGHg+PU4ITwt2O3kfMdEjfYH2f2OFJ/Fa+FuhtPJm/dlkLFO4RakBC
QxtFTS5Wd9PbckOZt/oc0A1tq29ftAcy7OHIliKJ6x7KhMmG65jMRPUTFGLvSp8hsqTKHGti+w2i
I7h7kHf4o1+ABw8eEPQp+FncXlA/5V3WXgrBJkw9bjakvtqB8/puy4hBFJ6jckZ0xHN6iHq+H6IC
uDk+zX98m3fSP8EfEBymNl/lrs7jAbQNQh2sk/CkfWrOI60CoEjiB+sBkAcSFR5jz10s8jEWVxY6
8b53md3/PO1blTb4BhhDKlfdvoy+U+utb53yk97CgV9/EHIwVQ764XTFM1JOQxTZ1fje78oDnoQ/
gAzAx3K99ERF5REfJkHUrWsfeGqQJXeZjrSf53mbCAHs8Avoi9HklSA59fZaw/MTEO3kmERQqwIa
ZXufmJfCxDmLi2GZ82y3tjk8EKwr+cRS67jHJc85VAXRTeB5Eieht7jGJ70A55t/+SyEDla/PdWg
i0o7AKBgU3MfXdbQch9XwRt//HTzULK9Bp76cGuue5s9u8tbIc69IEnCVENpgPS24ZDbTXtpPBTq
pnyKK9DvmEh+2JBdjaen4x80aFTJzUTD14JXS1fNxHBJ2TJG9RTpVO6tYPvdUBjFVlwqpyLzcN82
lzX9M8ITUS7YyTmPMxcsJbZNzqiv5Qmb/dS1GB7K1LmS+fbN//s4CyDpjzB/BcXQeXoZLWuZG1pd
iNIHD6vlrV0060W9HfrpPklc4/EQQSkh6ykP7ux0fJ5Mx8B9d5ww1UGKoNmuyKuXML3o8QUrld4t
vE1UwMGIn7iJC0U9htOZ4c5DUxblXDGiOC4CG3P4xXkJnwAy5CdmZ3/Vj+sWDWCPxEdQcKShTLwC
ouZlJ05MUZbSfJMuFuFuOPavmU4H7ekRXDZBXcF09Jn89XkCYDaNOkZ0+xb96J030AyN/yXlayYs
bEn43tIC6m0hjw0/HkZsoxREVIZrKeBGNSvW0sq8bq52NQr9yHU+VBvwv1qidAuKk6IFiW0jKDLB
/R4oFL3eYNvhZuLKubnGfnYi7j1Ki9bxaj/q8sAeEuQ/xiKOMsDjyfNDNvXJ8ekJUCsBEbLYIoNs
mUvX5lprB8ZtSfPffYcKc2y+rVULo9H+M2tL1mjb5X94xWDQVXk7kacsMLi7TW1k9VZZ67d0+MN0
wMgY7HNz6tw0WOL0hayUuG3vIy2iB55L/+ZCU7xJR+4dEMUSuFiWkfLGOYHol39Gd9P2Lj11wjEG
vLT34+JdWhiovIipCRWDGKvk0gsnL2r7vE5UTMQ+jkP1J8VeiO6PiwektN7wq4JwCOz7hPZaLi0l
qUQpKxsV4GEF/Ei0V5IEXdT43y4BJ074FEu4GENQNU15hY7g6OYNTICWPk5jmT72rgUR4MikSiKG
tf4ZTtfvBykwUDdBh+W2vEHfRKWoyiO0NXO1MeNtUcVrUgSFPI8tWCg89txQnDlb7dk45cHh6qpd
ZzmC+w/8GEItMTIUzgwmNDRSTY/0ZFkjlZKUxz9AwP00lmajsWlve4H4x6G4N73dNudeHaOFaWXU
AfqYZYzglBcXFfD3BfYVEweraL590x0kkvLWxQQ4a7iO+Xqm5vNUSDrS63LdSzXbcn34DwEvFMXZ
qu1hiazvIoFa80iwfK4NZUEg10O9v9p5LhEAZ98DnuHw9W4JmX9TzhyAZW74fH27u1U+VXOzB9e4
F+Cc1sfXollPaZ2ko9TmBMNzUwnc/nIvY44u6anAxXjk3SvmYGxKjnnx5k/hU8RkY6c8Z8oPfCDZ
k2NII+H7Kte8L0rBKXeGp8VPShsiZyDNRL6XtRPfbHMRv+fFnATLm3LKcQvT/8lPfmL4gNvQbTXb
orUeWDg/YP42W68axCrJ+3tSAyrO4vwweAoj17M1Cdk7OxTHtCkndZqgkD4NiaUVOsy9SmjLUkcw
sHH19lr5vJ5Hrk0XgYTBFeUCqo73GaEa5zLujECdWWfgIflZmFQcmymqJVptBrvt5fHPB7GC9iCT
01H29T++JHsxwnAsFuKPsX4AJ87je/OMQnC0aoOhStbGbDUSPQ12Z3UT7nFVM0RyvSreGpp4+a0O
bb1Tv9FC/hGlesRUndSr0AVoteViw22MDmdauUJ4kJ0RRW0Y/OH+QE8a5xNRbrMnJ/cOGsI6+aO7
qjh9MxHEdKPJoh2uvpW6AmNTSP/e33FRoXr7ObBKVWySQw5qx1bO5/wLj45eizZNUVOtJ/3jfmQI
49asejyupk0bagfZVequc7ZtWy4aE78nC3Ujowpu1Dvzw9p0vKbZ5eGv+KrlBnO9u46/JXaW3oEV
CDb9adZujhR2nmSPqVVwV536qxfAIjj+bj93WSEP3IkjTmVBAyDATvQDn9fg9kqYJQ8avwquyhhD
Z79TvJ5pLIiQzO0eavicSzhV8d71+/GF+tBGQ3KXinQYDgYgeBICLGRtjS55rnNW65Qs++YBMsR3
KX1IWJqepyh+uO73r4pH80AsTKjqje7wRph2xskz4ZdJHw12gvG+a+d924swWaxHrH3yxul3HYoz
+6bhrqXN8/Yzwu7mjm2r6omjHQ3nkdMRFQj/fOjpM8uO2i1Dz+Su6BrL82j4qYGTNHcAU1IvVbGs
0l/iz5N4rbZM0iVO8zZMeTLoP+OC1p/C/ZFojYbDIPc8sB+B4VlrQHDoEnDVahhdrMWzq9l1Sr2Q
5pLPWyd2ewFGvLyV09HVcbFazNlfxTQzMf8XZNxsY4wk9ESz1xvUNnP+uetm3jftw6d8+HT1FFLm
nCPNCe3xGGV9UhS39BuuW9BF2yJ460mkEufi7qCJvd230N+BeYT6gPVA/4eae8hdGTGavVgxwVoc
Xp4H73EyT993mPisHXSdNnCcTO1kVT/78zvZH53ukz8dLjxZRHvJWW46We22a4Y6hmTqWeCQecSw
h+VSlTSPKYrBAzSYrJrX4DyX7cBJCSHFHaQ65t/Rw3a0SBmvKFMR7LQf9UyJkYJcKpIm62ysIkAt
UQJsXtOd3QrTUPl3FRtkDtvbztv0UXfpIfxIcxmfKsVh57BRo1tPMGi++vDKe+RdFHNhMrZcluj5
2E/akBmj1E43bgSl2IbTNfm2wRYbp2hSjUz8FD/ZccAXz7PPYQUBzuuunoda4MDNB2u1RxTqnaAO
2q2cvA5mLo8wUB82DNc+ZPYdoTdNopvursKBBg10j2TPQugLwvzP3Itir2eHTwZ1nF1bs/7QpU6l
K5QjLbXTGzr27mxiUGRffhxPZvvZrhibfNDYL503WTo1aDIzgC3X06G6ako6YDcIKxr40bs29lRh
H9tWggjGFE5yS1mEGeMvgp9ksrxi44PmwIUfM3iu0j1q2TZnWOZviB52y+9HERSodRBR4AYzZP2h
RHHQ+qulPAtp4LxYryRgK7UV3UvltSw7I01KZmLJaLO6+GcM75tZv7EoAbEKoFeO0+LM4hbDM485
zGpQNa4sWNyejDSmvAq3xcEN6tspdDYgJ9zOolAOezuo8MElZQZhWQqkmyFwNqgyFPQNVfRT+Axi
dZBf3PRj2oL0bba9IHtf3eBdPvTdkeV/9zRytbeFNOk2mVz5UleiDOZ+78iDXb5PbHuv9/7n7/7j
Rx99NCXNe7FcNdX7v3v3//zVRx8hce2WCKuQNbsaYBpAP7CtlnTtm6JEVzqHVi/KrEUtU3It7kZ4
8G4JzZkfLcVPm5t67SKVGoH5wK9xVSk3cSXObDASSpU2FV+24UDahfB7+fwc/oAx62UyNTlUndqg
DH2qvTwFfmZ+N229MUN6C52Efh5qqEU5n0PxQgrnOik5OlYNdfmcV4KWaboBmdv8CyZx84QBF77p
9NYw81roBaBuRmhCcUkn4a9CO07oLwKDbHLJvUfFokJLzO6i2dbb3baSqE5sHVGASh5uAQ1lWgcL
HT/n1IwKsYgAiNLEqppCy7k3dvZ0pwkXEKSce30VLhweu5a/zar+ihsGb88Ccj4DWGxiqQ1l7dZD
WFa3oHUzXa9AO1+XiymQEJTUZWIsJ/ixIPzekczJBiIFBHNVAeSjqpFyHhHaM4WBB8EozEccberc
I31IFg2oVBDz4gopxuJRxTIc/EsJj09vW9wf1d/TLRT05hKmj+XZ+83jUnkNh05pyO05qySljKXW
KSh5w8Eocl9SidCsnuizc06IRUNGzdv1ApTFd5yuCUezrcrNfHW3pNh6Gg4A7SiYH+yjoew5BVEi
xjEBF7mEZ1v9L9VmKMfHtogLSOSYE0dgCSJR4NE03bn60kG4AfuWztXraWdWoXHv8LTTOnl3Jvry
arR0R2/ihT52fj2fEwMHNvLFYmHoZcqsmP+059k/Q/bXzvPvj97WCUiMhrJ3y47IGEd55gTPiRIj
30EyOtxM/AcCHFDljD08kwXiVsBNo97AyZDLk0vzLdVLB+vTXcSTlRhMEpKcvSlwTaY60k0BV+SX
ZVN97bIu49T9ZcLFWbp0UbeVkfznoUDo4F1ZRm8u6/stxBhO3E2ONc1LmwUTLiFic99K99yFiiOx
wp9rF75RUj8MGly+uRePcl8yIxo0iNC53qxMwVvGxgxhS1aMaW+IB3Bjt9el9n9oQOsHoM0YdM4A
QA46jzqP3U6Qzb4fv/trzpC6WF1ZwNP3v3j3Xz4iCdB8fQVtfvHqpRGszDjnu1mFmYbMZI0AumzA
rGr+Nh+X5NnzoS4lsUyTk4D4/rN3/4POxGo7mrwruCNGoGUZGiA38UnDEGGwDjAEaO395+/+Ezfm
/Pne//Ld//5nbKlHuY4I8tJwYgwDZfcQ+JkEUfNagT2DsH+SRUGUmdqw0R4V4xzzgGTGxfib23JZ
XlUbJfmaVwfasEaZaeK+NpyrXilh2OogZFzU2O3takm/Qdfy/Zvpyze//eY3I/zw1cvX9OH1i1/z
sFZNoZuxse1m/28Z3awxX44oqWUDf5oDPUJSBVf+ujEvhhsjqzd39fKT5ywCrxflFrS/aIfCH9BW
PLSnqkFubI7VgAETc4Iw2LbksM0+z4afjE6U5vK2XE/LZgpxBZitHK+rRL5QLGAK60K5ymyr2gGV
cbkW9ekbM1Q/atyid8IcrM506Seb425lpuR8s2pgTyibmGER5hZAhZzLLdNqHdA1W0wDcey01x9o
CNSfbQlUDM9mZM9QEsX9SyWR/Wa1fSmnoppzigwQXWjH1YOHafduHmEFWPxyvGmqOT5fh6YkPJ/W
d7t6zmp58ylOkgaNAOZUy5yuNqvdugOOEX/PBKYbCDpCqf4Rp3+1WR84fVMSpn+1ubLTv9o7fbrX
zeFssZkxA0hQZAFSiUs4SA3BanS0ZDjIoS0Bg4g8CW2gHvFdaCK+WJh5dXdEp/XVqqnvX4H/NXHC
Aj7/fWmDGPHqvzZEzqdr1wBcBxIAsNrZ5CQgkdk1QlPisWiuzWuU4UtJGwbV8d7CFvxILP83yLoL
wSpwF9WXNcWOldlyd3thigXwyPQTBHUj8ZlxZYjNvaui8fQSTsrm7fGQBooxp4eEGhhcPR/CP265
r+RXHLH5Gf/1FBamhzhQ2MJUFx+MILYdwjo6SOB7zkUM+2+WN/HcM+0Ghdpe8qlTYq4u3FEMrhre
mwcLcgvvpBxSE0lTV1bOveU8Rb2KhZCtCpFsm4fbi9WinoFV5KZoyQuYHo10NBIfjg3Zet1tdwM/
b8XFeLWYB9cOyj1wBli+vqZkzcsbSk6NYJhMWV466rYR8Wjgrua+1GJ5AzPT9sbGAW9GaFgtdttq
choeLDNZyGHgr5dLBkwHjNZ1qPNy61nkEaOW/sa9gyiHR02TImVQuPjtBrILQHGaSFJCYDep4EpK
TcS74iTJJhs8w8SK5eKufGgw9zo0ga0Gp3oRBO1ppG6IhSbwdKgYescs5pXjsW3FluirhEULzO3L
+T2qdWCsKzcQzS1ha9WafC2Gg6Iw8lv+8RJwQexozTf5IzeBOoionzW5mmfx+9gXKOKUXsTeDe0o
4kO+PMmsbGHoiNoU3EjBE2ogqTz37XNEr3e5z+Pe+Vrxu6fbYJLZyx36p2aDAeCXvd7Xb/6e6Ixa
J/ka7hV71xF6v3fdyX2Izgp4H1IzyszDqirzwEPBhbLzXJYzTMog7wEEiMXjRpQL4oARyjWSutnw
cutnjvMhYmkkL3G1flc3t2C7IzGJvgtjd5Ctgmg1B73B+oHWeZh7urYyu+WmgOtOERxvOh00Nhkm
+QwCdVVrNYwvkfY2jcgH8rfqn3BgQBJJgF3vTR6ENRGyJhX81WYA62xeI7mWFg7JiU2B1BR06VID
/fbbb189vvVFS/Mtk/aWMSGGtoqi1E2h5EZpJSWDtsqh7c3Q07WjId2AXzV1+x8gv6Zk2G0RSMaJ
956DSjbnD/hQEh3npcu745I5QsZfPKnIGIQVFW46+D3HUePRRdDDlZcAjHR1q42SIl9eZl7Kj3or
KQyasDbooOGuTrfiJiUyreH7tbl8jvGXhoE9XVpJW9k8tCryIvPmZm/QWbkxj6lyaWUAHHFQH7MX
YyOQZZBR5kt1LwtfvKsXi6wBdc9D6GVKyrTSFMKIM1WryH5vWPWyujPcyqwK3JYl/JlBwutW7Ea9
sAlvVb4IW5+JK7Qxzu7mWlbxVcekREg/idr6Is6y8dQK6XaVhnIK0gXh/yaCIOzWt8at8r1TuKKJ
QNpgpKysGoquimNjOwQ3erj/AYQnfq9j5mawrGBKZzp+AZX1u/1C+2jAXh5Xt+vtA0Mg49GwV3Df
O/SQiKDVsfVaIGgTb4DptHpvuQXezfpBfepQBfyta55rdTZUQw0UFupLQ3tVHtT+abFY3XnRu+24
dagLfJ6scOCd6CXPC1k2oug3z/XyLKvW5eFaIElhCajMM1cNLLb7GvAo8LP9y6r1ho9r/PNHNA7c
yN6VcfMJPOUBlgE0bgyGAjLHhBzA1i7RR6oBm9HicpBmXiQaPo6uSIw1h3YYDlC3KIc6ru/tW/Ie
D+29aeWdoioKE7NGXFnHw97tEb0KD5Nm/PdMvG3klmaD62aT0xHD/U6xuzb8ZHRmK2mfDPOx1202
BLhA9TekymXljXmGF97S6H7chcx/U249m/5MWkRLtLkebUNGlNiLT4yPZNTxmGZgiklVT0rFZOFV
Z5XMekVKGeTY+CSBl4LZ5Pnqrkl7IiQowLtOZtdGLBt++unPeQtysFrOtiAVnPzs5KR3mC6K3baa
650RaYrNLax8sP1pb15vu72/DkE3aVco3aKV5lCFRtdK+avUtTwd6i3YvFblFvgsmlvA3n0jvAYB
m2XSv53/1DCT2fVuedMYgXfy0+efPv/5z9PM7bq6n9dXoPdYoQfANSmRKEMRJscOFfzRzZW8yvi9
Dy2C+c6wRrMYqQtNPWvHLQ5315QsA8OzytOWmEdXDovFAgg5m5kHBHZo7hWp4RPNNcVRC5NGZzNb
Mk7aOvQv5FGWfqanBamvVsvBNrtZru4yAGQ38g9va/Zkg52Ck750rlG2LkVwWa2r5XCwuRh0pI4m
xpSIar3YXWLyIuAOQ0suSe8kuIdM8U4PRxhoYekp8QjGArv1HKKATWNqOuBdswgT+RazBTjO6Oyz
5q1AxP7xxzd3bTpSs2VkNGBjqEXH91848ji5XIHRH1g2W81tQryGmnqAvM71akcNYJKMZjzuBdMr
x88g4+UzKPNsu3pWPsOjE6ZDzbL7+w7BeL4xL8yoQvA/r0K9WaYSU0Vur64uvDoOriNsarepDq4n
lQmKJC0LoQcxvwNBCr1jQc/payM56OYukH4u/lk9bJJyfOBu663ySNZuZFdk5M1zREk7LPTbxQPY
owIRpk9tSVO2btiQH4A1kJ8GiP54F90sA12fC4HcMoCEv6k6aQZTLw07r+diSKNIO8NcIADw5q7r
dltDfqCbOy+CwRuTv1YHPG5oNW/uznAOj8wJvtYJlNrvdnw3b61vUJCnZVC0+WhznphikD3NwhML
I7azhuTzZmmolN7R4YBJQDZLxgDV5afzoHvvN9Oyf/rjKQb7IVUtJbtdCEo21XowymJDhX+ErE7A
67T/ZCjNNxD5t4Z/7L43ISlp73b7kHOnh6GuzceAfUNmTlD+gORxByqnARQasPYtNA0GUFDIkwMm
wKC3btIu1zdD7BOqFuOgmc4EJnc0yDM3IYrPQauPAJ5EiCSoF4ti7jQ0K25tApNVon9gAmcnIU79
wmuCt7u1Ed/SRE2OAfz+ELFYdhS1WVDz+LQbbNaeifSR4jGlGcSecD0zBqlXbDCEFY5uW2hfDcM5
Pm2Px/N4MZ919/dg8D1C/LqahPhZd6ec1ePztpHbtfQ4a3uvQiytjDdNOMBv9zYKaai+x0K03zmY
TCx18fjDZn6x0cG9NirdCHoETgHyXhBo66VIBs6BXAy8QXBCGFa9WGQDqDaAB4gnGYCgbQ6+EfIK
MYVPTuFNvwNTGiQsh+Gju99mBUqYS/AUdYyT0mshJ6HMWi16dM22yMomp1T59Qjm6Jm8OszfnmLI
/J1LYgPhNRpgtVMIMkOnxcPomoEZaHgNmtXrzKWAuYQrEKer+ZCHm3ZNIV0VDHfchrQsiujNVVsJ
HguPKlnKB6UL5gGRYNVmOzyhheslZsO9RPkkaGCMIxbZ9Lue/EfsPLxbWn8S8lyowDXDaVyKVKIJ
r9vBswSvc4U21XpRziooRkhbvfQCy6enSHdPvQV/lPjdIiREKpLUzY/vUzrJYMmbDCB4t1o2hoNJ
eh7EtDOnNcRe1ud7iQ0BTB08ZQO3F2hY2ezAbIYdOA0dPw0pzsDTzAGbREf9Vl0ctZWw/phmcB3y
goqQqs5HdeWpJZWpac1PvcI3fagGhTmqpZIP+YHeUi0tal37ZdlsFd8NXKVm1/VifjjJYPGWxxkS
JLFA9ucNJ4G193lD2gAI+2NKS4Y61KTKoOVRKW6izvwd6wIZGcE0B9fraQykaiih3/pIc1YVI/5j
0fPsn5QXatyfNAvEf2i7WJYatlaOou3INru1EYSt7Y0T3LMGGteNlhS8543IvJxdlxuJA+1//Msz
cMy2inzhSOQK0GxlXogqJVlsAPE6cd7Ro9zp4hmVz9z/61XT1BekwIYMWpRLQMKs6wWGWvvMHKJB
pBb013HGoQGdCRmKp23MQK5Nl3GZZ9ymRlaLrp3x6RzJEaTT1+QtwY1mtIebkS1as7d3BfovCfSj
aTAVw8hH3h8bFk6/u/HEeCe/OyL6DMuet1FyKIrUnCgcrPJff/M78GYizFA3nB+6O9G7Ur/vsPEg
0C69RLFcHtCYadX8SWuVhE+2yJpUJIhUAPJEstkAilvj4canxGnQ6na448LPItDulvNqs3gA4ZlM
ZeTfkfDL5RVE/yFoQbm3AsZ0R3+Ir0xKWn4XQAUZATKRfR1iF9oYsrZZmNkrEqWNlCkQytJTATxx
sHTRbn5DSNrT+FhxRWWjS2X9FRdQLu3AttKvN85PDiV+MqE6PrmZoaOavpp7s41oDSafONCmOvzS
Ujd+55G9dTZc5+34674r+kHe7htP96M8I1lYuGffW0rymZDx74Ml5arpsnvltDbH+tMW7oWNahfu
e+uKPmxJhAL3WL3cVb3kO/2+i9ZSe38/wkHk3c21UaWdw97lSJORR0p2KMpAD4Tt01jIBpClWpOM
Ooi+oVGOj7yK5Wyrh3FIxPuDBlQaPysLSyt2EvPd7dolNRxlF/Uy8slf17MbxyHNhbqi2SwY0E5P
JbDX3XXa6zqt5dRrAQPksV3i8B5vTLu9cZLZxwndCj+a/lbnrneiGRw88x4jBAp4c/shEjJfPJ8f
+0l9WzcHRzSy2732dnodp7dYu8kgcurUrENpM/vMy23pvzWDCWKdjOpgabeJReodySviQntr8omM
n5U//mMS1zmgINBpXYbbTBCyMJs8XB0InA3Whl6RB6wSJrD314ihC9EFysZpSZNF+3NcCOvfdfkG
qcf0IxfUW0vSbtwNDljL/8pJDc2TaMWK+T8/KPTjo24uIHafVugQm6TzHt2sZlXTZFi/v8fVJu4W
ybml18ePE3cWNoZgQ2m/D1eFc1XdIcA98ghHGJt9ZQ1yQnHDVpcO0s60XhE+WT6W809p482+Rw7+
TGCTgFR8B2wqQmVaYwPiKkouCT3ZBAFEDSzvcIg7OSTOhHBR8ZJLetK6UI8X716+eZvS6AKiOQhv
c8yNMsYn0TPTIB9PCkvlCLvtNUiBz5ioi0RrYHJYlOZcwXN+K2AW6HuYIuA9c/YOWOqt5Pabz/kh
BhVmQ+jzX5Kd4ZiveuT59MrPhmbiTMuGafqPGFylvIApPKx2fDdAjG7oXoMWTcTTG4ROOUv7FMwo
seTMDMAwzIvK89VsN7V0CiCBQcS0N4AkdmktbQtRJjwmFGdN19GcaZ/Mv7Y3VW4PcGp46pUvQVgj
pAm42YLM1rDBr51GveHXNAGhRcGh3AgcdGgnHU3k+mwPWDRlAoVLB2eLG5ZxtrTw/RPJZzZ9S3II
lDXAajJTMW7Rgi7aV/QRK7ZILpnb6GrrVCwjUreklKmmXELNQp6dojplCQTufGxnIDoq29IQ5X46
67k7m0C4g4Y6hzrQGfgvclgTfK1sMi4aibIbo6kUhgSjUT1bDJR5BeovuBPa5Bvbc6yk3fcI3EHV
yB4CX+4nzMc1OoTkm9RyRLHu/nn5zR+++O2P0RthPyNt5K5fZQ1JhBgq3AblBL+iyAEVKrdazOOQ
t27H4JVSjtoo7I51+ObbF9+8TTXhEWQHPEGnaldm0WPkkwBKSBapbKazu3mH+pLrZVyR8xTTEjb6
DICgMN8JJJST5bHeHDB+zMNWmiuyb5cQrQTsC1CRttwdxZjAMpjmWuJK3DLTDndQ8UNdmdKwCp1C
oylg29LIDgubGXKvIwYvi5ldCRjBECmisQuaok3F60hFTEpexymOWB6icS5n+Np4tK659HXNezKK
DSi95ZNNPsie+P4Y+/I0aJU8WnMoA1Qj+Fky6ld7Rh30tn5Y31zpjJ43V6AD6zC3YyeEB7YuZzeQ
DkF8axarFYaroiuFL/5QswVFv9If1uAB1ezNg9ifgQ8zs5Pd2shl84aJp9lCWKkloXJpw3+L9UNk
7Lu7Blw968YGtwnO4Zgc/uPs36/dPY/uPRmvE7ocosN7hfkSAbO/zXgoVRLZr/idJPpvdKobshdN
KJnJO8lftuS71ntL7VEZt2idHfiCXZkhN2sNE+NHuN24UaG0PVC7NBArchtKjV2/BPbq3onILUmN
HOYfHEwUxFte9k5jZTC8oGd3tM2BBcnFvPfbk+g2ydwk8OSDvByIBWd+OTs5D3BVMTl61ucuGOFF
ivfNzecre/gH5Q3VeMIARCc515xORKc1HAIERm18TCf0vYETf4vY/nBIfaI29xlckrUIDYsHUrxx
8OSqORYXQWwieDgHeE4c574fuimGF6Ep+ugiTj/19mEt6imcyJNNdrtrkAOUS5kEZs3BdvLvB/OU
Nnx1yDMYfCRATb5K6JBaDNIUePpYjBC79/Y6EOUlU2/q/agSJIO6FFYHWwMFAV0ZhPtbJGySmreN
/DuBmTzpOPHKifg7gF1uN7sZKSZms9UGjwDDDOPd49V5a5rlHxGewOzlerd9Bt2awe4ob6SCHu8i
JKWLSdJPIMMGpks5uEYa40NLa9Yf+ZqylsskXrxxL81D6aKx932euF9EpyI4qukYCk0FXSAoAbfj
9p0iJFahqvg7Kd3uymxK+PjwZ2Gd2HtEfEBwgBAwZoTryWAguE8HDFFawH+jKIIWN+6GAj7gE3jO
Y7Si3IMtEYvUvstiEQQrsjN+v+AsXYFPUPsldwTCtghuzWy1xnSjcAQARhlYMftFVT/m3vsK464Z
EZW2BZQpAuEqfltRzoAzLnaezLWm7/m+Ekz6rS4N0MERITZDdCahOZdLw9zQpRxxxTE3aptPPFZG
oBTcsjUokWU7iqIw598wgNkNvrKx9XB+l5SjBBIXCFZTgpvCD2fHn47PKemCmROk/4B/k6kovHax
7jiM8ED3Af61gK2lqKm/gbwy8CQ7tNm/O8+e4kAGLcN2jSuvQVPH26C0Wcff1J/s3dTWqT8/7x0Q
xdw0imYtZAQ3s8eskFBneG36GB2e2NIkxVG6a3AoKYAwPgcjmSW7cfbSNN6hye6E7dp38PaG8uHx
wLTCZTYzQtbqNrMjn6/gGdZUu/mKn20tYdzsDQFx0U3xOxwPyHBplsHV7GFi8TvtX5SaIFHO4fhj
ygxY3VczcsD1BDIzlnk922qfaX8BW8yMkEph3w6kdd4JCnAa5YcGxqksRB/AQrQGEwTgvjdtWor5
arcl3wCMlIFGdgg9CUiVAHzHLpJsFRsFcenVhvKZEGoKh1cDeiW+Mqi1+N0Ogp3Yjo0kY98V9fID
inYSebm93qx2V9duLM11tVik5TzCAN9d2HYJvuAVec6/evnqhY4e/ECo1859d7vBUJMPSj63a3c2
oHWi2FH/a8Mp8GuvAxgDfIcGoDNLN+cUwxHujIoCgl7gqodmoS/TDkJt7pZgAah8rGApcFfW28DA
m7CaU+MRoBvuf9LsbUez1/CNL6gtsPKTWCGSHoqZX+J2cBNPDsf8dshwPKFerLmz2zlQY0GMdgNO
aPgfd6gPC4pP7dQodsfH373jiUGQM0jNTIySXn0bfrVhoFFX/IwH1na52i3nWpnHZhk6Jb4SQfkD
v/ri7T/48XwF51GQ0eh3hb+TW/sEk0Nqzje78uMNRg8/MwvSH5bslhWkNVmyVg5nMGLNXcOzrHwG
DSanseEVNTIJyPBQQiQAtCCYe6DfRIM8wYSm5n+x2yIYNVSB8rOHK1O49a1I4W8JBdqayNIlTEg7
ie81C1uzb6ezDAWedWkROzF+qLq9ZI2MVi0/1JvV8mwAuufBuQRK/6I9KHcwIFFmya1hvorC/7Ij
vBZJgX3gW2N/W+9fvsfNbmLsJiLl2Sm8+cc3b1/87vW3374dnLeAAeyRYFpBCQ6MHeblPdtUhbly
hoMnb3Csr81YnwxGauSsOdzPW0jfjAh81Pz5I3yXurbbnHm33eNBBPNQzudx1GgXfXGdp/6eQD8v
3r21XfGzIAZVxtpCGP1+3ksr3lrIC60W8znIK6YQNdayJtF5vc/dMxyRukk7Bpcytvh4Cj3kvAfl
mdGO91KEpI6i8kkP/kdp2jt5UOcr6Isvv3zx5sAzpD0v+AzDxQdxP/D+5Fw6/G3u40tcr27B3wYu
STXZaAPuA27wD9/+7oXiA51nP7mbQYN9aPCr1y//8KJ/TlFxXld0oB73YApXRUdKLpohxwGoNQjW
S/3Ca3Yk19YR3uflgvGerYoV8Owwh3aAY00Hy29DUkean9fAtIIdCOyK1M7ACPgViNjoei3mP2/m
Q34emGIEWleav3bNDozV1v9O+72ncweoEyxvRm4R5ED4jFZuakZPI1hE/ZPvRA/fkmgGv4FLW5cg
9koJYh5Qv3mCVM012s8PWBowj95Vdh1Wuw2FbKYFE3rMyMQ9yZuG3OJHwz/CSqDY7PY4T1Fi2iZh
15vXCtFqJiA5c/s5rz7/Hiw8f6vXHJLBUS4Q0wefeXNVbqrL+n4y4Ax+xwN/Q0bZTVWtJ590SeqG
Tm6mYO6nZ83pz57//OQkH6PSYnu3yublQ5PaVvPAer/T/jOcmJMTllzhLlHmMA3+7Ov9yvv6dncL
eRALeuNybTCqNZDmC4VmwqWwb97yEhqmqUcmF5gwVN9u1OAIj1IPb4FuETC2oRmE+fIYKvoXr4jv
hErYHkn7/enJgxBfl4aeYY+HiTBSDL6BAriWkhRmt6WMIigGDcnFFp8WbGmkNcq9AaOEvxT85qQ6
2jR0sdQgUtzOI9DoJVJ9uR1eLM8gclzaOG/FoXcO820yW9P0er5vsWSHFUrilWFiQwUJoJtY+ARV
nQbkkpKXzda0EhqiCEgwdgYlqskAhKfXJpXy3ksEbsofgNoItj5pbzEFO01Y3qDMB6brEdQMRMYj
MoogJRnWm2a4ya3dESnLxMjTnBYSIA4AOxJ7fXqa/1j+52lfc3sLcIRWoqGa+M5tVS7RM9MwGAyv
39H9U16ZV3BqpS0hTHg9x4/QNDoqorq9g6RNcjHRtLkDJ3m4w28oJpkn420XOZcGXCortx6N02pB
dkAPykExfX968AsbDmCz2bsFRxK8hG4f1pgOitDlIfdJ3pbFXBodZQMVRJoyrUhJL9gU6Qp6OwjU
yzbBAUstlTFb7eZhSus2lVqpQ3pEli1ztG+GedbU2x2qgkYU0iPuXXaxm+vVbjFPkTZhE0MFXFBG
er6rma0ToXMzhoVjKvTrKtVS3dwA62+qil0tzbn0RCjzfw08GcuNIfyvEXr+Lm2WCEcltMa+XDjH
YV2YA3RX8a2caMg6OqPye4MIFcvaCHcXlVm3SrWYF6lTp6gI1KK4Y11Xy+FXkaUHhtE+IA/KnjvI
89LAzJum7SuIztoMEySVeyfbbKr1bRXhpBcIMON/jyuFbAjmu88mIhRlxzicloc0oKyTFNHOJA5S
CmwhZ8DiUiIJGOmgtTQkDLCbelgdVg14si7oRS6a4fb58fY0zz7r4IltPBw3FDPQa0GT3A6gtWp+
mLpgv8YNe6KzRycNBD1zsTX23Fls+sHicvD4LWCfYzwgBG++R03YFXiy/5pUNj/kHWbo6F6ADkve
uoyyPxKGGP4FvgTdapVeIORgjj1VKVoFSCHCThpacfH7Ny9eD841izMt7e5HGWRsWfwA3UlHf998
AXoZ6CuFVL9XZ6JaHrAAPHDr0WxmGRuBd6gVcRchJSnazM7G5j8CPnk8QOub+df8V5ruiHtoit0S
8SCgvSjg4ds3iUF7zDTVIosAQzOsUZZsd8gNj7IQkzyRdTVPdB++6XciTEYv7vCNHv7Oeegc4ogd
NHXrQ/+7YmBO28w4bSKWVy1Z6JOgMYtcbkZFUPeG4K9LCF8xjOEKZAO0HGLhS9h73OEQO91b9Eum
BEzzdwhowz6QdaSmH4iznva+xqGygHcAqjolLlQRhVExHKsNGiYENuUprqxb7PsWgaphmbOT88LI
XYv1dWnkGAZSMl8iJOl0kLdnGPHwnMgPz6Ie9qd9gGzNU1lqKCkxp+yEruHKz3vvf/XuP3700Uec
Y764NW+d91+8+79nlPh8BhKZn6gcAy1MUTRKkgQ6zsDJCDxqULk0AnkUioANdU3BGKxl21Q9lXfc
foLCo4zHIF9DSm/MXW7+7lmS4tTniwXBc1lnhN+RM8TvjCjLnmj88Xf1vXm+teVTwOZ+b5jWV/XM
pjiHz1gtaoYTtPNimWkuQQclCdNxUFMjIU2X5sD2zKDNplyUs5vZjlU7vs5V2hHXm1zFlPeO8C1G
zuDOEfj2FoQvw+uqnnkYv51++xvT6gl9fvvizds3X3/x8rcvvgL9HH358pu3L16//v2rt/jlc/Xl
N1/81vzy7Wvz9Sf09e/ffPHrF/Ldp70ekIqRURHdGXwl1vDs6f/TWXn8L18c/6/T8+/uPv6bPp8C
msq0nM9XGDU6RLlSOCn9UZhfDaUM+5CUBJzUMQbYkGzfi1oGIDb0cSk/rMzDESbPPu2rZd9pKCmd
B0Qq90cSgzkxwuXH4Oj35R/ewD/TebmZNfDpX82H67/Ap4+L6upqwI+8o2BktAE4Au7qCLVqdlgc
sY3Uz46iFtSmkbNBRgHdAA0W9g0EcjXgs/7HHz/Dpfu42N5vdR2r0nMl1g+wWubvj6dIOeZvspcc
sY6eU4LyrCTd6LB/VRlKLRdQG6gWjwhg+VV8jjLaNUGaoCTUajcHx/ewdsfHQJSXZte25s8Sq076
zRaSzIAiVU2MAEx7kY9Ws530bSP9qMB1tVhTAU6YBro5iIWF4pRNCBHiSrPCc1zsos8OJolBH5vn
CRQ1gzW3cfmh3Ez6hu/F3XpTAegk2K96uR3xiLkdNb+TrqGT5pnGDM8jqAuOwzB0SsZTtK/0MQS4
zdoWON0raADoCDKAL7AzCuu4KzdLzCB3UZkzXO3rv38866vVQqhMXg/05KL1MIcFfgEO0TakxarE
uITL+mq3oasDueefoOKfcHfNlY9Hxrr+40mqMoQBJO8ZuHzqWb0NW4IDVoiJu5XsmScD2bs7w5u5
t/BcBjQZRO38xTF/07kfNGvUgnA1JM9mxL7S5LGHCo7bgGa9QawfEJT7oN62FI6MgJXrDb44FipD
SylhfuJiHWy73vVjyhXVd/2SI4EmBgqlCsZA9UjxwBG9bq2z4a05LPUxwj9X81z6PwIHxyVFUJFi
qkaWD0u+vDw2C3WM8cq064bPZV+aT3jcm2pbU75Sbgkwiptst77alHPrhc45xZKbvLykC3kg1Oy+
Ugcc7WIuMFnWAAulNt3SO3PmQeNlRXUwUKCW2Uu58+pid0WzV9318YSznJVhmSvh5d4BaadxMOSA
AtBOXr4Ip942ZW/mmLg8YZJ1ObJwxIY94ZS1qGD9/0U3JafGPCwM9xq+NM1O4D8js/d0LjcT+2mU
fQ1uIV+jsvgNLciE/8117mtsi5ue8L++zCILVw3pk32GbSmNOXyH6Kr4gcxJZpEvwInyQUR9JlRa
58JecAok2ivA9wnKaT0CzSrXU95YHgcwjnrr8my/uTG39Rb87JQYCDK4IfxbsdwJbUxkUWRS+lcc
H+jDduDaxIIkzWRJP1QoVkaPuehlx7Oar9QqBtp91eJpqu71anVT8Fbw+NCiOOQ/Jo3e1VSzz319
vVk0u4JRXatTw839fWOYYkLhwCD74BBrKkwhIfkwPzs9LyLMfaDz2+YqneNEXObJl5WfoX2UsMfZ
k+a7JaQwM7VHeZiWtm2bnIj+SIwXUwpmQW8QeFUUFuDlJc5u39bccPvTWjoYcpsT/vfQOagnSa/L
r/97Ddk8dY14Pq2kpAxy5B/AR41VXkpxFmJsGrSQKpiW/BVf3Nfb/BBqgBc3vJPH2azcgWbzzXpH
CdVcQz/5brkPAk6mgO+ChsTjFOxX20TV4zGt+HC7gTYAwwdf3JNoZu4+VL643K/Z7GGmAxOkWzzX
cP2FUBzgkWAP9OfmRI8P5BRkOxq2TVR4xyhhypEVmMSL0j6w0zb2t1umGKCIL4uduaYZE4WRBaeg
35uv7pZD77qKB+PfVbeApVtNgWr864rrJ+8RLM6Xnq7q+KO9ZCxyWXjJbFfm8kR2V0u2eDZuemss
r0skQ4bIAfUIwF5a95LEfjqRMc33E1VYywSHJ67jX/Bh2/6ayZpzBL2UH/qtJLpzmj5pwr/l1Xsi
wkLB1MLaR3BZ3W8NR7gd1opvsM0D3JwQxZ+gqkQ1QsL9c1UarHCw/PrmQvgpErfNkaZE9ehUh9o2
ZXGk1rCNxm8BLVWIbLXhSOtFdbndiwclqwLTas7qp6eRL+nL5by6T1zAEcwYEt4og5bgrq3Mq7ra
lIgzqvpQKyfLSRDJanHd2TZ/F+10NRWiH0LBSY2SqLQzkQ8BEiPzOTTsm3fbOhUPL6XsTY2I5WFN
jysQsShi5MS+QqkEceWLr2Jz9XEquaK4tfP0wQtWni74G0ZuyZt5ap+GCHywTrbk/Wk2i93ezT4v
dvPKHAjXm+lMnp3cct5zVyoXV3pz1TKkDAXv9rPQ/9y5t7smzr01FMu4bq7XI9/efzDb/2qzun8Y
kvcpryHhEVEEraTpaBJrbeO+6Vc0aXigKwSR4L8kvFTuhOpsewkTzEhNm2GGK6Ady9bq83vR0SSQ
NZmhFFK4InnuyfcvBC4xlcCjKRRrhglETCQyvNgsooCiJqnPJYZqeZJBt26kxZogvamiAuOMvGrj
cYo9DB5o5vgahr590DYZ+PUScC0DdKkjwNQzT1f0FDtGR0B+IaRS8FgMbjZcqDUUWpO+Lwk6najs
GyPD/oZwPpuhtiu0kxsYEkIig+8w19S88n+w+LTwUxGA8hAuwW25uakwPc2/YiG0UCF7+UtAj8Dc
7DCM8N8VN+AhlHEXZ6bOgaEDErkuoFBtoVmxi4DumLGLGEiV+9ewYvGcRhkm9A4X2JuCWSospJua
V4uO5UmiOoMJbg7v9wpKY54iHihqKPo6YGNaI85dSKRNVS05r5A3zBgM2C1k2g0UWpJc06m1i44a
DGgItbxhLqplK9TbglObFXY2GpPOdNS0VZSUP/nBsHL9z/TJorw0CFTUfI6wRPbUjDLQQbnD6HN7
kH5Rn0UFoBmrbkJJFpRRKAWhTkspObebil87rkKzu8B2qobi+cXdbYTNUMQGwaeVCxwugf61MgLK
wIz7xPo54raSNIoF4CC84mhs5Dz2wEcLMPg3s98eop84tD8kmZipsKMI8kHdrKo0s5NGntLClJgf
RW088GriFUcb0nlvZhY+rtAXqWpSdGAoOi8Zewszbq4u/aadtkw+ucb5G6/1INjHRulLP3dlI6sB
+fjA3DGkHL14CvMWacFeKNjzKBvQL4MR19JDkDP6TFh50B9YAHBnG78zy24m/j2kjpoQDKZqRIso
+ocYkWtTOnZlng5wP6Liiy9fvycsP+XyUwkzIMbluThiNlFoBywcqMY278TLGmOuLuv7LYBEV5d0
EkmzDHtjCNEIPvT4DPg2EOlzQshw9eGu+4vq1o0RDpiM6FdyZbvMeiBRpGAoedsa846vUbsPBeHK
v3+gKQF0DWwDkzZKJq2IlCz4Yxja6gZb8YUlsnIgVLkdtBrVEQ+rT9vkP87ZEPkNnwH7YzWHUHLn
9eRauoB8cObEO/ZFz3YsQVgilGlMS1h9+oEFzy+RfcaF8Hsu85K1ZYli8hOX/FpevXFJ+UlK1smx
wdfSK70Hox7N1/0gYpLAV3AJWmTyRaNOriqib2Mo9JNJWmCM8tosVkZ0Ltc1GGyH/efFCZgt4ait
LomxPiFIDyupmp8jGanvZNfp+kEecOCDhs/foihypE9KM8IQM4lm1O2Gl1NrbmIzxUdc1GYKTzZw
KwfZMSFpJnibTDuxIvzVHnB+d+SROv8IrB8VmeGlf2vOWHkVRh2SS8QWkHbYRB7mToXaePFzA9ao
5asQvKgpTlOm9ME0hmarPCkvG3EZiUiobzMkjYIrA9LkuXpJIbmrXWInUauxMtlrpP+kGT/BzXdf
n401PBTuY631J4aOeUFx7hNvEyb874GYIEB79XxiJbh6fmBFN9yJ+yjMNKOXJt0s5pKq8PZBOm+5
DKjv1G1QZuPxcVPBxQR3MIMTzyswRICqbduEkiJcrRsATukGCPceVdR/+JzyXVkT6Wy4HuSNs2Dj
yAvm6YwJ9+ooc7n0SdavBu7jadYfj/uSwNbJi8QXrsvmupUvwI9Dtcc+nv9uHSdiaRTT2VblBhXp
naWmt9XtCm4+1C7QoYDzQYK16FZzbfCQn+EcTKv7KUxOvnNnmVx7ogPnqqcOMldqfaFplJ5NhS/K
IVc6OzkfSQNnp+rz83OvD4mp8qea9iz1h27LdhEn3H6yakncejt+BNDan3Ei4EJ2WX0jbEDjbSvO
1fOOXpvknAG7qOnOGAov1Nl1WS+7QNUhVy+E3IIYGLySwFdFkgiMAsyoEi0lJMljoP3qMnpoemwD
B+KjjLDW20NPJQ9s0p630Rw2JfAhvm5bNYtXXqBbopoM350If4efFSD9fM6qC0HZxT+ChZw/mO2o
Z4hoAKAoJRcTfABGWl76+c3/9Ccq9ac/ZfzuKoUlw8tOnJw3N8XHmdzQHaBnU1XDehqbz19Vs5Xh
96tNSxZiGsSBiYh5XhO/ZW5D39QJ4Gbux6uZBOHV6iibXKltZX6Hzfbz9AvyjHotLPof/unZsvz9
DfgOCV/baEdxxd1berUR9HnHhnjDs/mqasCnjpESuR0W30pydvfPyYdyITegzAMTHLUwaCjeelA6
SeMlOhukiSQmFNPPKBt6pUe2mbwd48rU8xkSvv5uvEd9OhUPWgOZMwUvewblRpG3XD4w4/KTUXiP
e/eq7+I9aLpjux1ugGOgeejxoRsXDSVynKRSIeI0fgP+CqG/WYsAcnZPei4fnFyN89wjb23UaJGN
DjWFaK6IPhdGRHEp4ZfR7ULRo4E12LA5YJSQcpRSECm9lzQ6V1wSgd/YLmQzDnY1Ai6hHxQ2CtOR
ucdwfUa4cOb+BY2COIGyegPEtxboFU8BMsWi6GhReGtxWS8FrNXbAyROeedFqDrAYCgQ/n6rlZvZ
sF6CfRD5HgPD6Ftydk1A/fZ2EAUpPwlRh6HvX85nk7xv5UcB3dSvQvopHLrfJH8q0vkVBChF51fY
LSsbdWLFIvLOahWM8cnOAtPU5gq3flPN9mERJUS5vLW4v7J/rHHTKYM6/aQub4uvqcpvzeNxt05F
3tljTQ2gIaYAH41yC6PWyXwuGDGRErLF+RE9x4zL3WKB6xRidsJUzRN8edXveCELxLe32EknODUq
jIewgd22nuHDEPn2gFdaVm8jwxgMCfG5y9121U8Be2KBLBj1Ufbu3TtGGLDrZw4FLBuE02CuQXMF
YG5OoE/AAiVrxDB3CLLbX/aiwSRVD+l13l648ZuxbLZdE6ACBwA3BFPutVALz3YInBIMyIhJt1d9
YEZxh04FzSQxIffr/oaQmPC/I0sIE/lg7cXu7JkZhWdRjFXWrDT8xhmEgc05e5Ocq8bq9gRfQaB+
nVuTVVbBvl/vGrTvoac63AM7ACUulVHL6n+9wZAwaV0yQ7XIkoN1opCAkUpaZDGuSSmkVM1kx6cB
tz6zoAV5adnporzQsEu8PLnyyGkX77dAy5m+pkiAToQJD/uloW9zkj3zrd7CVhZMc+AEVrAObiBc
NUo5meaiSKN6RyKRCiJ6PQaa1DKZh7AphP7NZyfneTr5WHhTBHcEn+A8ULp0bCP6EoMBFKIJAO6W
ZBI0oMwoGeJuscW4OvFds1TcnuXLV/UMptZCNhhli/L2Yl6OnZm5sA1qHfKhV2mkZxHTXZSEV/i9
2wz7nVdyqYvaz8VsR15ek1ZHGngdqrqqcgL6Q3ey9Hth/ynsLIhgzVP+2UVLW8Tnhtb75es3jovZ
T/sdrR5t8rZWVd9FjJfMXIz39/ck8SIbQI8/JDOwv6wffukFWFtXrhgAzWV7bn0obqqFa4BTrJDO
tGUDTZlxMuUKxuAueomvvQBwgEKo1qOs/0w/3nfmaA3V8rM4XdgV184FssiJ2Imki9s+hTELBpIS
W+TFpJzXL/SBXujEPSJmym6AKqVSiQ/DBSWkIMicAWkwnvWjjeHG+VP3Clot4IKmzAQN4dBqWdvd
SIS7W79ostSjOwHfeVQYLJHhFV9Ca/Us4+DOD9YChLYnlzwW8XszyEpshCN5HfZYbbKpHG4KRuTB
Qy3VYuOu/MCL1k/5++O4pxBpUpDXD6DJKfmYmyckgwKgPtRTOPoleNTw0GVHMMw06iVAo6Au+DIl
/Qc9itp0mGjSlw3QF/5y1aU7ENZHSycSUWyUErNW2Fa3KUlqPcaYpCyC2IiaRx4gBqqTSW7r73eG
gLIGfEo8qrTPXbzpAw9STgAC+gMOSBj2p6pprNQfZf/6l3w/ko5lWVjrzNoxT84fCTZsG9rHklQX
MZJJPIrYYThY9KGQhK0Fhp+GEPPoi+fnURCZ3m2Cwiqj1Rd7hfxmveN+B1qrSLxHuRXk4TlmLJLs
BOzgyOHbpfuB0nhpFidhiTHjZBaovOKjmLYwiXd9ZYTHRtIQcjUXcupJiNMpJYbBFDcDVss3A0Oy
iJv28Ekrc4t8vdXYi6CsL3TkPk/sfjIywwxY3cRXUelT4ocVWWQ5x5GNRE6N9BEBGDgUAbMH7FPF
jNm4T6egsyEJUVom/B0lPufcj38WYLtC/Xocd6+N0wwDYtE1dIhAAikkaEDCySJZz0voqEOAJLA0
JaHsjdqAzFJeyIZXJ+5T4kiMXEn8kneG/vAHQN8VvAeiL5SnBf1qHhd3ZUN4FnnqTtLb+DSMtnUR
x5qEzDrjm3BoES4imVQqMl5I0M/nEymQijYMyQewDBigBx3TnswtNAZ66nZk1FOd5io+W3aWlggp
Ib34bnvg9Vxv2zPQBvEi1D27znHUiNJD+1578m6Jm3MhJsLL1T5o4gmC0Ng8DforkgeuqiUKamEC
UvRC9HcXvmqXCkhPIm4hYb/Qo+vMp4p04B7wV4DGgfJDzb1GvRZWp8ZKoV3NJOgtCdPV0j3Hgqb5
pnNkv9W67/bVVnP3TiqGhidVoBI0rjbACxd33HLYD/rlZKfUc56oQQy1Xs7hvemdbWGlW8pc5HkC
xKQbGeyCIpttk2hCKEX+Vb/jq2PTEv8u7TGFAcaFKQSklRJV9CBEnsY/OgtjLpw5lwxUVwiXH8By
DRP3aZ2gKGbZ9M9kozHfW7blWG+LXCp2ZwK1HILwxNlheDlR1eSYT7oNlBEhOtfw1eFyRQqBJxvc
iuUD6MyebHJkq9JirN7zx2Nzx8DlQz0iTAF5AFIz0GMig+nRu3fvxiR5eR6S7gKNIBeGH1OveYiQ
Jwcv+YgxO1GQvqY7IB7KgaErFQZvI0Rq3V5HPld6GXPsIX4jYxxihFbSmS7gNS1qUALthXMS3zXm
24jFwG6Px5z29rZcU349OieJgyL8hgw65GRP8EB95DZtVdoZTvszyzOe64ukBRf4oa4W8+w+9Qbj
90dLgkyInTGyez2nt8ZNxc9xugObVbqWRGFJegjEyxXjCJh4LWhEWpKxx9A+9vFARCAheYCi38Uu
3E0U3UA6llmnbW7ho/wydXmTT/y8hSAdUz4lSPoRplNit2UBijS3X79eGsmwniPNPYH87jjdvBVx
GnugfO6X9WJCivbsfpzd+3mcDnLgBWhhUbSg/D/KLi7JgGf21zDkQPjppkHEa08ky2gjxJih8Pqo
VeTZdOS2x0cwOhUPW0ZDr7QmMSoZUUQflC41JbbWWgD0BNNIM2qztitROLE8NQfBFO1B5xMdDa3l
uXbXpdg/MHX9Tu0qTTw7QBgZyOTxfZYklZD7R5xyAD/PR0Vf6sHrN5mpUKhtidQEfbevrP84P5ge
Uqtvgbxat6ATzmJqLgizKB/MmUE4O94enRmGUhV7KVSYhZ1J3uvygrKnQ45DgJgGTutpyS4ZzxYW
9V4S8RXh4/iIQ/5dnnLQlw/ECZLUqtQOKMqDusALMCSKqi7qmyoTkD7KqQHGa3DpNT0Hdc3pmiGE
YQFQ2ax/okrlbEvZurDpGvg7ptEkZ7lQp7oDIHcz4ma3Xq84xOwCs5/AJbgBlUEYmxg0wXMlGMDI
bkhAr8SxKF6+IwpbQgX2CwRzTOdM6JGgdQtWwur4Q0tYG1rv3mGk/UXA4X6+PynO5dwBOwfVcQJn
z8/BmASTePWbX0+/evn6xZdvv339j3FrISGb0wRThQTc+fkBA5b6pvx5eNC83NROFkiJDcof3Sod
6MLJtjswAJESBjkMZwehJDLVfd1sg/gUEj1B5eyJHWm/HzoeKdmVlTkBewgkGX41cl98sEHqzRPm
QXn4ORTsZ0a0JTteJB4p+wpJ0NyOSloZXpSKC+eH+Dyl5o4Mr0HZHRcY7wI8ipl6ah3i+6Rbaa3a
9uCCyk+zeLEReNznGlYJZn7W2eU8kWZkFf2xJKNfH65aP67yKG0HNqJCmexwWlulLCcAR4AF8z0D
5Gw0AAPyedbHpFhGFqaxP1oDgH1HqbvFn4gsLMHAz8an5wn89nnlhTIdvg1MxNYggy9fFvKT7+so
plv5GQChhJGOnk8EmIvlsUIVTseag8EzW7YwUCLJe1z6H3d4zROWA9M3WI/zcVu+gMREE8ORFx0+
5g9LxRUHenqjcsatYJGTOqm43z0ajOuyYSf4eWSViV4j+1Qcpi/2f59MgpwFXWum1SGKIs/uz0eO
EPL2BM3+HDyvXd+LtZyROPAVZ7EhPGWM/egP8z65WKDx0DpBtpGD7hPR7Sq4AmR5cliAU/zBLQh0
kV4QO0lz/oEYhidhCPiPuXTooPhITaULX5Oute1iKar6NqQh5o5Sss+let/vVMYTgFatcx7BrMUn
gZ7CHjBJ6/v83+k0XhLey6EqRV9N4PaBWujIRp1SmD2GLHq993//7n/+6KOP1g+gjTAVp+uH59Zl
7/2X7/6v3kcfAcLxuhY8EUIFPH5e/Kz4BDITW/++9UPvKPvyH7745teQwvooO8anzNRIbSBqszAG
wr9Fd2fH5N2yxsh6ij5rekeS8QOkbIbwIh9/B+M6BYe9YQUlfNAmI6J+jWV9DZ7AvaAfs4yYaB+y
xyqIdE4552pCH8gCsBuzr7NrgBSn+I+LB8m9XUAqv6ktbL+hGAXsxcVq7SiKRpyVqQ2a/iirStOD
YQeEzAipZ6s70LLzeL+B6ZmX4ogSCNYEliMO1OzvRY39Irte3UEUJgbkYO03D6YoISy6OUJSFdVK
A1XKBar2mcWiHxOZWTfgWjHPIU+g2dIHwzu2ZPoFhSesPOKMXECmNIHDgUzW0G3Pmhay1QzDVeZq
EwRVwQhPNXifAAADBuC4vZBauHqLu/KhsVkU1RpKIkJYG27f+pEfZV+oWB6Oh6CbgEIHqxKQCi53
C94nRpiBNTTPbm7DPdIhTIPU7fVstyg37EbKA9ErzITDLXz++efiSIBFT0f84bmPPkfFudKXC/O2
g6kBCMhuywklLkxJQPmoG8MxgACHtkm12xZnH2cMau47myLVLTCiW5TNDaWV3NRXCEe13qwuFtVt
IaiMQ8VX+Qz+fdlUzjko05GwUVk814Xgurw1f3kVsJToG0BAxTnht6iGAOiMOGJvyiwCLcTAKKZA
vj6LYB/ABjuYUEcWaaTXU3IBriWhvXALeikP7r1J9H6UvdwiMFTJp4JOxC8w0gSC5tAY4Y4OLz5W
IXNDTyx9Smz2FC4AIZ4NJSEH2eiWK/MIvrxsqq0h5BJRg3PBsnN2ada02J0MgsTiy1Y6AZc1+Wi2
rP8Z0eDn7iGKAxbBesDwPP0nIEWg1fLJ/LvlAMyK4bh9dSyPvV19E/aTockSWuaqBQxtPcyjK52W
p1sxNDMXxBbTFJgZS4MbanFgusnPxtTMOWSYxH4SUuzS3KDUiOFwW0p7kA1Bl2gYWnnR5NntDk92
dgPbgVbgRX21vA0ToUdDGg5nhSFeyqNAQS+wH4NsQJCoaEV2NVKD4+S0gEyCZSj30gwTIBNCCk7w
FITh9apR/mGtW/Ck+SfagsGA1CxqBLmOyd7BHAz59npRO4ccMQ/gFWszyPUBzMHKDzYy2oI2yK2K
hHd8bLYPrmCzjIx7lpRORFmGrYMUNMmmIhENqcueiy7feRwP6E/qjXuhcZ8zBRD/SpxJV85mFci8
uVrwIbVSvFDBCMcxc5F8hyS16Zm0M49WD2R/Kbob8bgQNzD4bLdEiQSSvZnJMnbA50BreG1Q647J
v//q3f+o8ttBnBGp696/ePd//i+U5Q5gK9D/HWOZoIjIJs0IxUeKO1LpVYpMZ7ULU9npDHbmLlyb
AaKZ2M8eJ7n1oOSHEhXEzSG51NpSxgxUyphBa/oXnushyaUow4uIfQxmt6gv7HoBt+EJKFmwI7MU
wE/1j4+h4X5r+jDISUMlEqPBO1PvUJz6BsfSnl1rPUhkOKJeOUDfJcLB6zayV7tMYh3ZryoQDY8x
JxC9HKjxKB9SAnsN2dJbwXiHJhiI0UIi7zgP+J+Wq/GfwLP3soaEs8VVa3NQcr6awRb+qWhPDoRv
JSYhTICFXxzLN51Z0w5LACd5q+D2EvOVS56n0ifZFF+tibKQ3g8eFGww1jhwiFjdBSgKG8AmvKeP
2dQBHQ78DWD8BlGyI8nbgId2OJ0iFSCA9dRmP2KUU+/HgrOGDfN0oiHschxaOUIDaj8Yn9JkAzwm
pnAlK8jgbhBlr5hivUtCV1SwHpLBhNlBw3M9fgIPpAf+B5UHcCnN7uYT+Bc9WeEDZb8JUK84HyY3
OZ2iQ733d78IfZ/g6S0FMDwzcDGhjPGmf3DHlznBWwedLaPpKrW+kXpwjpshT1ZZAKJELvAPpoV0
a+3RiRGq5HJ2Fk+BcmAg219pHB5zieJDV5GRzjLiOR2oOFTxpx24fRtEWPJqU718t21Tgyiz/RNL
5UZRHfm60a7lZtjbwzKfRIeCiUEdiyhCIMy82jH5t5yAgy9AufyeUGZac/mJpiy1CMMkSSuiI57O
mXUstAwXRpcynb9LHENspXHkf0SPpWVrmeQksdLTrO/lGWLaPOlZ9Cl/mYFPRnTVkgkM7m0oH81m
XwYdOwikhbAZFlfvaH/rVfGWMSz+SHTEyavveJpCkiw6mXnwJxbosencVsI3Q+qvI/NnU62HWX/S
t2kXyHRuaBi4gBgLpV7/jAjhHDB4jznZquitEKPGv7iZYxem9J+3q3v8FwFyitkl9TTuhyPrhS4x
wXTBrzQwh8GEST+jZZ5EPQjHO0PAr17gFpF2FGGNS5+0AgryxwjD9FTJhk8a8k6mtz+qe6InjXlK
Hj//tMnQA3kItUlAV8AvPH/4z9nY/AXYLnf1fHt9zktiVyj7Rcs+mm0Mtgvh+Yxw+aGsF/jUELRt
GOYYiA2ZASSYxe/31md0nLgB+SFsYQi0vsR3+GbOCnSXQxLZ1mqD6ebhlc+QQfBnQEji7idVc+4I
CEVwbx2t+CFlU8kdO46Wu88/jTN2Due/RzkEOOkwM/aD6/VsghkhfyNdY4nhQACLScAF0VPe4iw2
k1EZM8SW84G54M+JIXRxzHFKb+bx2/Sk6U+ztA1GuPY6mK6nsOjjAd2uVovGAnWZreJJaecVAjQH
JGieXge/NveVRojHUnC58801wHgy+SGPcuJtCUnKHDgjhWXlVrYLui/WmxW8nqd0AvErXkeMNM3b
9WzmYAIaqoQ4t6hhJBaLYqWvq9JcOZ27k5ZvkcJ9XCH3TmnbjF2D1GllUvPJvF2Pcfp7r2hf6NQO
3R8AK/hRlzXXSCkt2QzMJfJeeg0OmCzhs6VITUJ20qROgPn4HcU4DPPIuVITaSIsQ0meMXGmfFXh
rqYepdgBbk+b/4+6d19yI7nyxtb+x2Gs7XCEvwj/5YhStfmhagYNDjn6dsPwYrQUhyPREi8mOSt+
brXRaKC6u5ZoFAYFsNlS6Cn9EH4BP4Dz3DJPXqqAHmnXYUVoiK7KynuePNffQagGt897dyZeGt+0
TJr4ftkmlYWDk2z6c/5nvkPveLBn8vSwch/0EZSL6OdWrY8O0yJHTgpeO1kVnmEBtLbWbLUgNG7s
E+VCkNXFR3Aho9sNbQk0JM/kTeuHF1pIRt2IlJVTQld+3Sie7Mvu5RveWTC2t6DAK74K8klxbF2I
yEaVCTOehcIfyXCK0Qw+QI52IIjCnq8MYfTxzPnBFqPMZs3i9xBSgQj6rqkTVEQa9o/9J/NRFgLE
1C2izNcLzCWRwF+OXIvYG5eMUpQWKz5wFCRF00/9M4eOqMV8BZ+YWwHHmTh/NPk5RQvu15/WwGXA
N5O8y3uFZ87z0ImPaWI6qG+DQHlmHs04VhJRmET5yUOJ7TRDTyUyxO3qqkn4SLqX421123yuiqCK
OJzVdAbnOmBy5TX313YVnpWpyL2DnaNtrtc3XU6tlXgrQHAf+nELVwnsMQUhdbhZSQXobA5nD0PL
jGjMpJHkHvPczn1/PZadxSmzXhRcXW9gDwhKdHZlzB1tpXab/h/mmvV3Hh+JOAr9xHowZLl1gZTN
Hg0+RGifpPCWEpwYmRqGliQnvD3f4pt/oSvDLADjxz1qJza6kuiOpV82Mh9xyci9LV1LCqOD7yQO
0v9sv2AoDgqD76BN4nQq/m8+c54DN/bHQTAxhMXQAgKzaBtnjLs4m+Wl5OVzt0Zz+a/R1aWupvmm
FikWMgW70nKE6H2q2TyE16b6z/iTc7WGptqRVOVxB/yN9kWCfQIj8ifJrOCjALoACiXSmbAhiFUN
sP/Nn8VX8hi6xM+wFeqOmbiffvj4PygD1m2z/lTdb2Av//Sbj//3/0QWLPUUQzzBq6Mhp37B3DFb
YHfPdittoLJ2KYdVLkYdBVnuMSV2YVWzxbb6CQyNbg86DyizAy8uVNmLi4yrAGeHz/USvY4qvpY9
KECU+QhXgE197QgT5sGItnWFqUYbw0mvP9dbQ9cmAxVgZBsct3rB6aDh4Rrh8TTjC1z/9bfLahV+
e/gjTjYIfMrG+mWL5bmrGfzioc2YkRfeiATohRiArraiz45o7B6tCTNuIAgj0yUpvbcCuH4GYVKw
ivWCjT0YBHUJzoBLsIFTJMoOtwzuI/FARy2Jxbq+mTsHffKxu7jgjl9cDDiegdy3wE8NlYEAqws2
fYF1gh3jY10RN8gdcuhGlEpu9xjTNqI32oZtVdiPdZPBka12PmLaLU4BMI7qcAhxwaH58NZUfgzz
4JMgfD5gJTikqPlczehgok2Hfs5UxKNvg/acm6xHlvpqpM93iU4841z4ElUuTGwJ/lt8fYnTyvyy
bVZmNm0XMD8aVjzCGjF+uJM98HplxbR2p7ydcKIpgkxKKh6eXve4H5hDhZY1/nxW0BcCscZAa7PZ
slngdXVsIJqZYDVpYTcc8pT1Qb0xxA4BqJLsDdN4uLUBOElcFXlWAdSh3V+i8+ExHJfeIb5ylgfP
HA3v4i2HWY0Na/Qk9JOGXcshEp6I0OmJvdW5xTsjBGn5yAnf3B3Zd9mTNCcMFBhVAKC1alJOVm6Z
/dt9t9vGhVV1OqRF/neghiMA837+snMyDMPqM5GxeZ7pFGWqV0nYCXgzgqkYqFynu1Y25lFp2tb4
wXeGU6BfGKm74wSOVKcib47t5M5LFvq5II03V5Ju5zFcco/N7fOYrxPIILC+rlS0YYTAFuXn5bpS
YDd88aZeLe6WEYylnwRIHwy+GKc0AcH9GMDQQTITu0rk3NWsbW2ARryt/yQRls2KXcsd2vSv7631
hWisv7XkMnJtLGuiDBioqRI7/ECa0s/Vuq7A3/m+2WOCB9L+37uUL2ZrXVxQBw03hm7XLncB34To
+2AuezBwovvqstnBb0XkRy5lHpiqkTTZei4rGbLrOCVmyV5QqoiJztuTYNdya7rPBbkZACUAm5Wv
e+wvEsqWEz5cXNAXzGTSxX3Fr5rWPKbgYy8dhsdH8Ar6/AMzA7jt2IsczqtnDfNGKkl2GN5foL4L
XmhkbD1qQL7v4I8OeOy0CrgbOrJnHCQv1jULJYxBkN5mL11swysr5hpkJ3fnMEqyB/vWzlmROFrE
c6QIeR5+xeVpsxEvQLvLbsp0PTwFbIX0yaV4n3o6O6cAtCxHzHrRK2UUMEea8gtdJ0dLk+ypF3iz
oaTGX3esRoogFHnn9eA1XXrpdsmfjKi3EbkW23qD2cgQ0gASr9QLEroeY5lAeQfaTJZV65Y85Xke
AlZL5oLZCgvl6rIthfPhEXUVSxdMI9Wsdb09e8uReJHfYhJ/DG3nnO1uli8u4FOgLiA0OyI6gtS5
HhW39YTUfNdBzQ2N+Fw3+3Z1HxH2l3AmXdsQFWFtwbCHFDHHQCOW2p0V6CFU3RJ1Z20S4t5N1L2+
yvaGflaYq+0KXe1iGsmOkFDETIFzi9lWpzwHP4/2sRr934PEyQbzTl4nbfMJlP/twync8eTu4VTN
hczGw4vh1mjFu6YsoF+HhIjjKMJxmWB7aXLAQieX0uMWUUdDpMTQNZ/mxFyh1VSZE702EpA7wWa/
c6RglBFLeFg9btWW+RnTUjUO8/5MFJ2U70OTwnT/DxNAn+zBuEa8tFavAjfEzqbmo9Mb5DDRpi1T
x0N3kTR1/P75K+aR94ObT28fgP5MQ98HyrcA5172A6sp0Zjzeb6t0Zai98TFBVZ0cTHG+bi44AoV
u4v03QhNkHcG7MDAXyJdFSegZCN+5he4Mrhm5oy52Wy+BBsNJIETxl71wTU7DnIrIieViO4Avxlu
B0GNWaevNLZJnoz+/dp++7X6IrDQSZo5Up7aUj3cQLh0D938an7jM7BzcpmfOBH7aZW8UT+lE/yv
JjqB2jWJdIY2H54ss2DmpewnAc0yf4tPFV8TgvneRqmGNNnntDazdv654q6k0adVAc7aCz/PFP6H
PFMHErYMDkUNmLTHncMkfYG34wE5C53zBbVMssM6JskDtyG9Q5wfdu57CbM6I8mBWHxX0CokXSC1
0kG5oKdSB5EonePAQ/MVww/BlPRRPNNCpHWHyQS9cirzEjy3DKfTwVBqSLCSY3TG/pZMM/ZT/Myw
TItPpuxzTslUYy5ZcFEy1KQGGDUSTaqrK9AG7derqnWIHKCTMLeEEcMhKUlguLJmAGwI+uEdIoxG
DGw4DlGP+YXoVtHSbpId1MJEWH15hHdSZCLqkG3MYYiBbxyP0D0sIBq9w0oOKalyVdcaN5tweVBa
zu5sGBKfB0HmIElIIkOkmUvA6QTUVYyzhxU1ImcN/pCY4vOYSQ06aNmZ5CUfTW2cmauXhDlyZdGl
VPGIKYhLdFKGdGyxPa+2bNlJPVBl+dNvP/69sgNvlpc/vfz4f/03ZP9FoY6dANFl8hrRFOAKf/v9
r0nseouxONn3+Nrc4EjOKCJxdrUHH2QjRompgQ06LB/Y8MblpfwEs3E66hGyFv4VYYw5eiXMV90R
fKemGw8K+zJCmvnk6LgvEHNhwvSk8uQtefJAvcuQ2eLcwKNF9yCKxvYcBf48NBuVpNaheKWapSnK
sX3+Fz/epTPQJ0rdwOOLg3w6EoS8XV6+XH9uPlUQEDU0n9b415CvDVBQT7PCPHd9G7kui4M0V+vc
+q78hM5Hfg+344rUTN4XNpWDe6TSPiTrSY16oEORFqtqvoYwQjJlQX5lMWfYap0x421b7c0lZJ4x
7FR1hREADdupASjD9NiyBVF/IlMDDSOdCBFWANtyM2BPstm5W8mbsb7PXr4xN+Nmh4k1x0HSejLp
ATmD1NP4t9ljVzMvz6D52vQyhF+1hMsfRycJs7Ukvhpb7+gip75KYuA4wQjVk4JegOfjdt/CanEt
Rb2eBT4Kx4QAheEL0VMMA/kOnNT8NShevjm1k52xN0tzdVXmKXrtz4EGZYKsoduZ2S+qadg9b+GJ
2+IFrlhpLXf2qE7C7DIOfEDolALyGiHrlEg1Y9cMYaOSZOKYletYtWNXTM2G6+3YZhPlXkcJdSQY
FymvS8kJtsxUbk4dDcf5TaGoxRMF1+CYC/CD/16+/vDi3etnv3/x7t2bd99JiB+CqXR9ebXatzde
ZmpwPdg07e4WggVvZz05o6HUjIoVu0uJgIymS88UD4GQ8u6ALp1KeLichHc4n9UW4Lv4EcVZ8pcC
KIQpcdsKAgZ2En4O+BBG4l00GIO0BUYXtLXmVr5cVQJudMUQFosGrHo7yCK8QOgiQyRbEDBdmN5R
+056ueWO5+V4trtLh+W5g2vnNRcXks1YZbXe3SW+wZmFkZlTLxFYR65XuFbSJKwTxW4pnbRLXiFr
2tuArOmzjIP2xz+uqy8bxMmz7p5Cmc3SAXwVAoy7Po25ih95MxiBqtqu7j2UKgTGMkw7NUeaPWpP
GMFEs4MY+c9LLzxKfRQ7Gfg53SWBwtnT8wTASfDJjH9AYcF7AVRpxF9bG4p4Uy+X1XpGtx9Kq8I9
ZZiuCjQP4HRCr7JT8Xchd5SaAN3g3Vl9DsjCVzPK+U0ZzSLjQx5729cOC1ZA7oUr1tvGy3wHVwFf
CaomjnKbYX8kx5Th6EPSBa9HOECuY5z8Krx8r7qBvqGuwxObhnmhzgwk+PqtvfcQGdFmINqMLZNt
WHtywzJ9/Ol/+/jfaVkHme+ffvfx5dP/AsUd5sZxoy7rdtF8rkB6xvg7XLztnoz+2nmQ1VwRZovY
x5Vg48s1HmoLxHvK4Xhlfn8PDi5zI3NYJ1oCVkSEQy6n6PDWCk1c4YAyK0cB6rMwQt1lKxgMYKI+
UIQtgt5QuPzrNx/ev/gA/DSqrUzBurUuEFNlvbVpONmQ673EJwO4kFFvPFV+wWN5amMvr1qC7FIO
2pRfeQXGD5maNqt2C+2DMkfUo2VlbZR08EReRzWKOWZ3W2DUlzPPpZt8vMx/x6pAiIGANQwRgXI2
b4fJz+Ut0T4eCI0XVzAc3iCNMyplMCOn2c0l+V6p0yAl7KzVt7AC2ps8BbjEfuTwDw/wgN9ZF+gS
VlTfYk0PSfqqPh+4nMcYrfwD76BXGP3cmcPbnMwNJfGd37YJc918v2vM7cXyOMGbLnZf5O962UYJ
g1HbD9WC3AH/+q+oKVIrmB/+S27OvOVf/mtp3ryXn0GaMIxSqJcaintGEUVCYnle/GAF8ZdIvHW2
rX+Be5CsW2kIH04nLXHkGNQN6SAQgKK4nd+TdZ9Q/FGfo2UUaXvMhIXrUfQBRpDag5QWliIh6KMC
J36ay9tclpgzFAbLGiwjEPBCfBVLRyXQ0x+o61zaya7mpM23nRfw0LpV37Eifw9u2wXqvMjJG34C
aqZ1CYe8Bw1FQdHSq5akBfIGI1jDK86I4BfB9oAN3ppW+ZxfVVvwt0M3BgbnxIReNrX2HIKfoUZz
M61tHnBKmDbgdCj7FYUz4EJXLbYjrLzGhQF+j3dBwT+czpnwCGAY3t2HldnMV1CrDIsi6+fQ2w3j
nTp0XLOdyC0Noe/0bFhIVRvigfefKjLQ2065n/wrcoa8lBMIAsHtNCGDDR5txCbAdlEksW0a6eRm
vgXdMuLmXUUHxW1J62hXjvjoALtPswy/JDGu7gdt4gmkE5ZgSmszc5uIe4YDorWWhOoxB2U3QNu5
m4BdAXMLv8d9EVW0J2+ane4tn7IJUBhKjBXWj6Zi0F+izEagkivOQA8arYTGZY2wFaYhdOPBA5wV
pL2n2YRGoNPmAjFS9KLeJRKz8GlA+aSqlnQspCO6l3o4hkpM7GwLrq8huIiObI662e2bZi1IGHgm
PBLfNqSlM2/uEdVZEKBlVk1tY/ajgnqxCEUfWSc3/FZ2Kumh0U3KzDRgHS0wYY/d59S+Ahy2mhHi
kohMEign30sC+YiLLrcRxyRPlKsenVShcTojnaR6St3DRc95sLewtFty9yy8he2haPtsGmLF7Ugl
xQrhKzCNTKmVvnL/YmQv/eWFtaQ77jMK0kW6Osz/WcWOt/Ls3+sSwtZOu64iAi558fHti3cvX714
/eHZ70t9O+3mn8hyapjdWw9vvM0mm/sJVDO5EMd/auPCAk6T+Nwm7ifoH2yaeXZxgR0Ehz8S4gm5
9OKCRsUOI2hsQ0+QXSPb3VVrTvn7iqPudrvN5PFj7lGzvX5crR/DLdfuHmND8snN7pZ0DbeEj2fk
8f9/HgDFb6J2Up+HpPrhQVv3ON80rwfBdgdyy2GShPBIVzsCL9MJsJYnI3DMKI3Z7l5H8rK8ERoc
ME+BSwxHad0KG9iMW8A6EHtuIcseHGaVq2YsEtASwWtyEuAdXCtd61gdWI5tCgiirQUzdHhcx2Fe
cHQ2BZ/RbVGOPNJiZyCK7FoCls3f3Cjp2SSFIyPYTF639kFmSlAaWjSoI42VAIcag0xRjkwzn5f3
EgkOgrLZooaG8xh4iGYQRmRDG6JqG7QKU8Ad0B3RUKjUvFzX4vbsQK4ay5GzHxkA6BHaULp92iKo
7Ag7oFVh2JFiiGv41XhzD9P91Yy1RMMy7OD1qrk8bXf3K05sJqkWkYBpLRKn5rPKpN5OMoPeNU9F
Drx3Pop6Q8e4iltnn/yjGrcsfXfzuwc27/GiDHyiO/MzYRj1fp54SIBWfAnhkhzan25TODFcZ4QJ
mXfpTgSmDXFbsNTYinbMwm/rP1XHqFNU/k687oj6IlQOtqEalaZUC8VXLIlRLvmv5M9Pd5Ti/QhT
v+SQx/VHNTmhmOeCOecWONctmwY4h6/5Rfjf5YR8uubBgovkopzLc8pzAY49DA23rK+Qo9/5giHM
ztg1wfwycMiqMuD/hRw4gHX+gqBQSAAVT72W4OiBR0AhXNWFDnpSBXKe7cHK7AApT5OuTmLQsn/W
kzc0lTwxVOXsyejpecmxZSt0sTUCxh35pFlpTlXHrAoIO3qKSVRFIgg1Tym7lCTicM+fjlVdIIPF
DBlzY6q3CWYMq1dVVTTKlqsvH7yzOrQNT0aZ+uvpKBuPx2aXIe9MEuacpD/YRKo/StTVaIbUwDjr
HblmQU9UvzIZmz5ULOCjO0/Bf/DB4r/GIko4jw1m8V7RA/vZYWDe2O0HlHrt2BnrpjrMd+xb08RH
yE7TkHs2nMjcuBUZemKQKeH9rcpRF4bZhDujXjHHZT5Wbb7CO9A8ox/mZn0O95J5gP+avyWFi3kk
P1Wlwhibtz/Y3T/8DRHvZmse29/qK7jyV5ZfgvGaP3lhCQ/mL2b6A6FPtDelWgPa1DEOidKfCcg2
c6F0+hYLyN8Lirvm9hZzwoHPgLnDnP+2td4SfoOgNXtAbfeoqAfq4MN7g+cTvKpdQjT2sO4A+lYK
fdDTOgWtq8fy1yK7uzezusX9UC3lyzBrqa61+Ep/SjdTt1emrJLfF3kaNcJFGdUngZOl2+YFRnlS
6HjCo5WrPTP/AUdNaRv/7hnlV/JhePEGucYlxzVPGeDyUl5UgGqUtcOnRqoxHGc+ibPFwvdjITH9
CdZVTnD+zvn2Ac831FzxMJ3kEnvH6cGLdHrwNGpW0us2SFouOcSDUfVkE0+ldN/cy0Tfzj9VnPs5
zivOUx8cq45Pg5XiZl8lCvTUh7EYwWldSKpAG3ZqTZtbhCo9gM8PhZIyssvFOAjsQj661YmH5c6d
GZNFc282E4xD74UTVfkJeACZKvcQZgMJw+fZbf0F4H3JeIQsFEgnz+cYuAgxgMsscAJ0jeJXMAtm
A4IvXSL07zkbkN1H4He42Bv59pZGl2ORPOkogK9sGnDcBQs/VSPjjkvlcKTDDhHkZmA7puXJ6aWD
KwOjrlz6qL6BuY9dIE4QPsv5QlEyiP36ElETBZqqgKxh1S//8dsS3EkoozwkCjKcmc7hnkAlycWu
m9P+GkSBqZZ2+ZuDuDeZDcBcLsBYPc2fP0W8RcyJMo3VYIv5mhy08FNAF7FZ5GwCrLmzgsWxpo/4
NKSWUfc+HCaeOhhmEF1lj4BId802MVS1VSzv0LtdDqDICvZruyu8Dbu2UnXhDn0pOHmqiwds9sp3
YDu/w3xJ+IV5Ortazc19+B+zb5+a7WVr9BXKaZEUXPQYmc0Z9DCuYm0EK0PYFjo56okQAMP33HJc
tbJnNNs2UmwiJKp4nII28TngOn/ZFcQoCVAf6UimocIxJ6rLJ3yxahNFNA2wpsO4mHCVeel15xXQ
sEL3TCk4zcMi8Je6Sig9OwNixGcJHXg1Y/UAqB/7MfmWWJ9o0w/sXlejujnseFuJG28YUhw248fC
hCrQK9SBUvY/RrdSDoCsj6XV5VWxnS+U460MII0TdK3Ac8bMIuC5xwdB7LQpbDYQckLsHLJrzF+0
qUT/vV6s9ku+5qehTx3w8vac4QAwjg99UiFGg82CnNYUxslIL56L/OLGvJQFAkqAD9QS4d/jLeRE
9dKigD2xJ285fnY4abns8AShS6YZl+zrQFUipJJUA3QYE9WrfO3jar1k0ExgYuPtqXK+n01Ovz1P
MZ56/Trzinsr2p3YGOc2jTDWz8T2fEiv4nWENbRJhLBQxL3CZ+geZubnDLwFzvLgUFjPsM6T4ZUY
4yVvayC/ZgSMD74nN+7FHMMhfxWwEFJTKsaN+ENwwJ6DXDPDVLHUg1BwgHSUbZVR2fqyBmBQXyzR
QbzsmHJGHnf012x2ngZvB4fSluQlvcMWeWI/2lboh9lnT84jWHf0yIOmkyPrk1VbEgJsJUl3Pr7W
ZLhsEBM65crEbn+SYrVe76Lt47c9khbclWbTkRfuekvkKp+4HRPwvS58PYh3IB2/jR31xcnYhBAz
ZCGqLlXYzZahh7sL3Q4Ehr+un2Jn+Zv3kmY4PHsBsLqctoNsrOQEPtdgS69/PUZZDIMotkYIaW6l
5pYM4lX1Sdz1BCNJTPmqngLogSRmXt3DftuaM7gQY+ncbKe396hphSsVfFfAqPgrDw+E7q14YENp
eDjK/vyX0r/YAAgHmDnM/GBBim+3jqIZGoBLZHPYeU0KVeZ6LBCUjo2u1r6CaJUMj2a0kF0ba08s
t85lJItFUlUi6BPQbvouSl7B0lUbhpwEcSTlAE6NVS04WSLVHdYUpJ3ve3sTwxeh0Re4mQ71D/Xu
zPwT3+UryUNi3qoMTGOwIRefqvspI+9hEDr+d6wusPJs8vQ8IoErd9rsbGjMjUDlwfSVvGxRdSS3
A0+pkNSUk6r6qlvLk5agp65PU9uxqX9Z++KhHgQ89QdiRaTAt4KZMu/yNmfC9CMui6JSqYuBeoUK
Y7JT/AlVgbOoF8FphM51e1VtZ2zHKbiHI/hoxL1TPsG30nqHRURl/bbaWbiwb8dOj2K5GDsf2FoZ
WUfNh6/4p+uXqmSkr4J+/xlT/1SNaBoO7BrCutodKy/1DaN3iW9WVp7xZhrhsFCtHqIA0sMuBxj8
UKieKVlEaf5avdr0pCizr+nLVO/HGzQo2MSvMpdTawN30J++J3eshpMCeaRtsvZr1JoFVBatCbZ2
X/mCF2hfSpBsvlxaJT3kJ7fOvOZkoWYt0Ps/Va99WOHl0uLObzAwW/bqDJD5aQPKQMy+ulVAZnJZ
4Ogk303fmJHi7y9ZCsoftWeP2nOXSkjqGdfLmLAn5mvKdXnzdtBDTFqZyg9qGAiUHJ9DVRjifdds
l+30z6rLE7i+/sLKrQPTqpTham49GmHDfshV2/P3JnrUbG8zApQRNy+yoYNTsFNocS2Eo0ou6uwE
aM1ORT02wjw/dWZw9A8sg1jWO3AtW+/YUR+Nf6SqZydKRC1fodJgW6HDAVeArs2Y+u1azMHfV+iR
vF+QKyb7UVtniLEfgAvea5ttc2mqv2f+D6dmvro2fNzu5hbP3V0laQ0ARee9WZqnMo+Nc9RfzLnB
hZF8d4K8qI8QsL+YmKu1mYoG3Ucbgxl5OiOhNVxC9BgBODfGg/pcdcw9d1yUy3a9wCEVneRp7wEr
iWvuu4FIyMFT6zvMDCE8k1AflRrkqFPMVkY6deKjgmoaOQjSyzTHyAwJVyGQdfbjILpIH1d+d8af
Ar8o7fst2BHPcD6m6gngAbA7V2FHcXZephus14bxrbwWIbg1aKDsa11uLelpFCYaTIRdmgTdhMfr
PbAWtoczW968wCBaGdVxbrJZVyNWlE1AvEuTel7wQXsmn58Posm02wID2EUXemITOpp6d5DGop6v
FHkYtjb+gAgOBCJ4NEcON8BjMZFgugOoY6i21eTGnBOkQriRxQmcEsDas6HJgPJ3m6WuUe884BLj
puOl5F0XnYQTplR49uccnIOjNHTipr6+QbCu+VqR/QpFUVUDOtQDwDCM4K4i9y5QyhFNA1dwleTU
9TtTPt8nMEfV9pQ6YATKuh1nf4Cu7Fu2zSFnPF/cVAGFRCD2G20dQYWtBTJoXNiRg5oVqhPvodCH
OgJQwS9/MVWO50EWKfoOY7Oh7Awe6HtWeaEnXK/TwqKIT9TtqQQ84aQoIVG18irgmN30YEATvj1d
mWVaOWGJqC/UzAtRkP77LqE/plHaTyMvcnSCY5pSs9M4Z6NELsQt4CSiW2pv67N91lOJLXfepzJ0
m2aaqcNY+AzPKBsOR9kxBAwWmW7MGZ4wy1ohH9X3ZbzxeovbA81ABIJHcPTUuefnXTsvLfPIYh+a
dxe1YJ4ObCR09/QEflyBBxaWtzHJpjiv1kubaDURjez8YvGfGeS9B485/FNPTRhvbP1Kp7YOv4BX
H9tN7N9x0YByh49odg7L8h7yC2G3UGICI9TeYHRqtmDjFNA4dM3kaIBmax9xHZLElGq4rbbXlYOA
FXMbpW1V+UNXS3KJLoI+JY2tJHZjN6b87dg9e0hkeiprpg5Ss5UmdWLizU0uaFI0kUbSPJVZ75Ny
XR2uNG9N8Ucizf4PNXhVKvW/clJ87lFZFxVQkbY4ALE4zj5LegBIP1L/qVqiJmEI1uMhq1oY3Q97
z/fBIR15n7ZojAELFChXV6Qqi01re3ORF+JhikXG0pZWuoVdiw1lztMJfm2Zs4BRokvTosowjRx+
3b0pbzHJq9Ixmi1JbRfVGo4v4zsGCFa8Sd9jzt8UdgLG9GO1vLIdof3iMWIdgl+iUhU9SCDfMQav
YKDPdpeXUQd0xylL1au6RV/ARKcEGU7AdoryLDC5Hd1tgZrHqJpbbnLyx3XeVbJainKUU1cgwRIj
ossJkK4jI+St+AXFYvP15MItWxf1iz0UbQB6+eK4HtjOb1++/jDJKJms6fXmHnlN0/HHGdg3sRU8
qY/N6aUAzUQt+3Vt7i+0rCDnA0f9vtlvVU/Z3JoA+3+UVePIzzbORmynW8U15Uib/UOcxOACzN96
CZcOHbpbnRVVEtOZmjSo4X4TEwnzcGZ14V/Aa9EVZJuaefDjxvMcshiE6usOZOXj6reF+htIczYn
5ladaEu52bUAaoQ7AL2qWkn9Heg8CQefIoDngPdUM5YAZuoFTY1TD4ljOsWoaWGC6iIMXUTZ1YDX
3PkAnzpI06JSiuohl2ffnCfhj20J5bJw2KfO+1DZFtDJJmFy3Bmx+vvmbs3RBuSe5Kferdfpde+t
c2nq5G70VPrvvdbD/w8WG2BNk2tsk06ChghtehMEbX3Icus6roLpIGZZJ7e0vxyiJbn8Hs0EiVLT
Y3u6DfjmZgGe3w0p4UCHXrP58yfg2BP4xapoTN9F9gZT5SRIusgYCJeOOuIGs/QpGzllAi47fAbC
J2njjXOMJItCXpS5b4Y5P4ooC95XkmYOVZlhgmTSx52nyG/BHVP71LTAMFTDkS5dPrgacZ8+sp7Q
B4s3cqKRmKTQbKQpyqEZSbVhn3mzYZ+WD6pAzUNHDfGZVCc/nB0rt8h2S5zTHrFDeak5b9AiYiAw
NeW/j5xho9S6JY11dWcFx1Qnet15taCFw3IaCYSIcH7L+FOROgrLuGJBHOV0iD8j49QPOiDbOr47
nI3+k465QKot2t0IEpGaAVgSHZoapf0I8fIxZ+H2c7Wk1Qz9CPXMBEWVtOyL5mpzdPrCxttIr1Fi
e/cuUSJVRmqbpvPc0e3TfZsAhWErMRNP+mIYxKIxLZGijl8JSqcyGcVtyAIe20pc3tUza7bcC/9W
YD9uaTxxGwRfH7gW/NIJlsNn7rB5b0QP5el6CZ9H8ixV22z368qB8VqA6QBXOnFSQKlSWrwkLV8x
FsLVfrXCmkNF1rJSkJPPwRaQ5sbI/Un5vqJ63XxDL/BnyjOW3YJoSOYbwdC1z3zrhC5qf48X+52O
11PtTNXv2GihalPVJTTIutl1R7tJH7qjmzjUjJkXcFTH1giSNcr57M+bXwX7u5aheAABYt/+wy85
MB6s4Zf7HU0gbhdMrIhwuuAPEXyNCi2CeMkAjAFMMhCbzGFeSNYRHg0rCGc/sQt3lyR6gIkIgIry
pJskGI+j4UJu7qfpacVuYHa7eh3P09kTcOzudv3HLxEAH3NSYweLIeq7hvpw4luInQcLwebeP52j
jPVkqwYSH3pnVXpUbbeRjjgIxi+TWSJ9pGrzYXyOPezjrY+H3cGaeDyC8CfRKGV8AdYzjBX/6wcV
2E/D2Wn2ZnduQzBVNhvSSxG5jVCELsf0cI8bDeE2zRW/AJupSmlGm6lnp3m3Rt/Wk6poAdOK7K65
CeaDeTAXLxjMdJql7eBDT4hlcrFzqGlGx2UHXAWP4VgT9OCqbbQBnRJ74CX4WC40MEpvms1+hRD5
aNLSLt/1mhy8L++ZOUS+MCfQoxzsRWAqJyhMMtJQ3CkBb5QdTDR2AfHgxpApbs67RGkQEX3Dm62x
g9RmUA2pY1a3jumbPfn2l0FapoAh7GG4VmGu9tApHOhLPcowWgC8NtBp016TRRmcWuejhyY/7XOJ
wHTgifilTB10i1sHP47J/vpomzHYgvl+bb+nPUPpmJ1IiT0qy0HSKb3Lsi8uiGePluCAyJDq/Soa
+83wUTvEr1KBY/3O8BEmMYzVIWISwgtvvfUp67WxSjfsCELusCP9ShyRcNoCp1NrxW2nBKok7pC4
XH0u6HrpqRafmY/96XHFCgQZYhtiUkjBcvoEESrqjs9oh33SfHT2zflII6t3px3m1aRvEl2AF08m
fX4NXIPnquIdD+lPWJUg+gUHis33ompzUwl/hjz8UBRkngoFhax656Nkw9e/EPT6sXyXjLWheB4B
pXaAMSHENojC8B5MVAhnZX0TCQqhQx4OQruRQDPaCGFoa6J9wF6tzfMAeDRfEdeP4Q01w11Jvgzy
+0JECW3pFDt8U/1r+8nMEhvis+wlZv9orakYS0aALCdIwO/YX4oS8jWYNNNMwWWNZjHMw43dHPvx
AXbQh2MEkqEBGn4NpX77APeHL9S5GQ4iDupjlsMBOL2jB0XsHigeJDNv2+gjjFIg8D/+ZvnX1g8T
WFd3Cg8nuK6EuF7VHWA26mtLDe14/Dfx5Kh2VT1J4NL0YAeDmaF3lKRW54sQFb14JPvx/wmXmgCK
z17/KuZDfvoF0lPHXglLdog9O+8A948+AZxHI+WDDsONrKPMzDIFu6JMlsF2+qrS7qzYHXacI5Dt
FkOjrpWXtHlhJr7Zbxeh55B46CcGxW699EbxqVb2SUz+Aoq75fPWQoUUtc7Ndb9ZWm5KHnol2TdU
l9OwzlxKxuGVk4deSR6WV5CfeeV8n2Fd2nvjf2O3DkJ86O3kl1P7xd8cyVIzzUb6TxPlZfdEOyq8
Vxc6j8Tiplp8AlrR7BjAoFo6vzWfV2H4LH0mHKiWt5IHEk3kyz2wK+CFS2wbWC1LH7QBG0gH8nYl
HFEdC0hYb05Zm39QzyM4idhjeASfGoMHeDKkXg4s9c8CmWHHXC873KnyUwYNuJ1vCsOrgSIMtT4k
Nnu7TU+iITSIGOWAPQC3VfsjfgZvzR3gP49szM7IOrknXD/JX9Icm+pLGQU9jEx1sBP+VG+KsI0k
oEhy78GuGwTerErnyaMoBYnN/B3XrCITziUnC/U60Qt9quUL+TuR+vjQzqBKq9uN6SZuRYs2Gcgd
et1E/NCBXR4pE0LkSJteZvCwc6GQJKTAmmIHgoVCcTZ9TntW5UuZvkelXzEJB87aaue7JuzQHEh+
gEP16Dje7mOcaNonnT65VIiBffPVt39mKpGNM9HJ4XiOPh/PbOpwkBoAMrlid3cb2/HKxsB5Fjqf
6WTg8/Vwh6EQyNKDkpbyzhBs7JVqPfeROJwrnI+rHxMqXUnKBmguKfBakOpwHi8uFGpre3EhYAyn
T8ff+v3QtkFNQvX31s1V4nrTs9rNPzqUXC8QmGOAMc6XiCsH+qayQTGmp6T/DRhRcRIL/EDt4eGQ
WQ+FyxVQY3Ue7Kja7XHxnsXBOeGXyTBXGo2Eez9gHBT8lmCYZ5SxSukZiOl1iVUV8nHoJG+RnQHk
hPz0w2QaIz9iJlwe2ITPlksQUrxUOBx0owCpfFRqynYDRYAeOfeCK4U2LZbz6/pztba9NiLxWz82
ESgPG8NVWNKS0/midhV1uFB0czNvK8p8c9/s7eklHSiI6OsWcKZSrDxEk4LIaMruCDabZwyuGgzp
bMCNiD3zJTqMaoaYsLFzTKHkPiI1GgkdcWlPW1DZovKNk+MsK0NQ4QfmQmIIaAlK7TK38NpaKO3H
qOLiqcUvufo20SOa+glmyJIYTrciy4pSrZqfYLJqrnawMBEoKywIKcVZdx3Di3NgKK2EwIGHsSwo
YkP2CHexu75AuK1LJlQDArZX6etUbW12V20VLLU6Atntvt35GOavTwmC3OfO7m6gCqT3+Pq0WlH6
FQdJPqeekFrbyN+Q2KjCLPOpToXLIBvLpn2ixmyIa5Qpab52s1WvfCbtkrwNYepaP7YGvvQ7Z2vR
6b1C1YS9KesdxUGzGwruTXeCyIygTmHsMECHkXLa4Lbho4PhhvOdPrap8/PwbFJurdvAev5zs0q5
dFLhpB9MLWU7E46LU6WBqGS3fb0jWiAJhzA2j7vo0paNvU58sAXNUbGaC8xrYrPTYfYQ+d6iNgYV
vdxRqD+YvTJIGLHFDEPre9knp3ZvEAO0lKyQqHHUSnKcbCS3Zs2X96BCX3An+UZgCG8AgMREeZw3
YU65AdW1M/AscMvarMfeiAyeRsYRvGrXShJBziiKgbHmBfiXqaqaK5tgws+Mdwe3GXyLk877CGMc
N5vVvR9rxRrZneMhgSXSesW0UogagHSnbqtGaEm1EC3fWKYSQQTkChMeO9MEFRz5qW0TpjDTSw7k
8kry9wiT/7D4bv4Q1Z7gMCDjsAkzOnrASlArn6nI+RpBrTo/POMftrPnVJW3UH73bCw/9TMErEus
kB9YrwDl1BImPhv0mKYch9Znn1Ixi2dfxkABN0XpBE/LMLUbw+wX+SgvoSlbMgoDZUQB/KgEK/6T
Scck8b4sYB9Ri7wd7evzcHSOtxh01+fkR1PtV36HzgdxEHY6dIQju1149iABXEAwAWPUVBRBQLYs
iNzDUSCBxFBCVBoQRZTjLb/abDBIoEK68asuZPxuBHxW/zEOQySldKB/pay5aDhaWwQe1gAed2A9
8Yk0lG6GWDWEWaUZMASTkpAaJgfKLQJwm/t6CiKasLSQBg2sgbzMSeoVDWv4SEzT7nZEHtM8NxUO
D47Pb21ke1LG678M5lqSAwPYwTaZg0eT+ITEFmKbaNHOvDrz1frleSqVgNWvib7vuGtAqW5gxKwq
hFPunbIUKbU9VnAf1kbR+8XY6kV7NaLtmRrW+ZEXir4RAG7EmxlzKfQqVzsAaq3CtXNQFofWDbLs
kNSliBPF50uK0PX1h6z+qJdTRf6oN+pJIHMXzllrhMGIiu8pUSKfo0yOAD+RMO6lH3GcfY/IrLn0
PZilXze7ys8wK8KFjBIx9zlWV9XMHF5Q/WZbA2lcO/kdFgKm2rBvwF5hdtk9O0A5zBXTk/9s5Him
xyhIYmim9EGfaU/g50z1NnmUy3vlRhrkpLYculVROgaTtyJiWEJ02PaeU+NBKl2UhSM22EavdrRS
LyeObRc0xmsC/IBZAYga0n9s9ltz44jga4bpI3WinhLyiEOA2jq7qJcXKDKKXJKxb0+9jBPjhp3C
bQYaCyc1qFzJlygBNi2nA2agnDDBqU9TXQLt3Y2RTa7JDQJynCpZ9eIi1JlqvamibdboLFlIgWF2
jJXT2cNC+Z7m6suOUB9t9U9qqbXf1cOub+WrWsjNCfFpUAtuAkR4UNBdY3H6OjKbj5IyrLkgkeAz
4iIMWcJ5RB4OxLlqmScsD13WArS/wg2jaKP6HK2zu21oiKiX7naMbuE+E6f5EHmcLLLuQD1AmrCp
PmO6Z0Vmg0/K1JOi+hYZUpIQ9fEK7FAVeEIwM/3AGx62xgxnM4SuN626uvyKkubCWBhBYbG4WjXz
HWJhgzPudpRdNs2KfHvAW7JMsBvcKevvt3PzcGb7dV5+DS9kyOUxAaqJikHy0vtKW7msUZe/K72U
slSWliuVuJKaR7XNzGaIg6gUQkcAuU4S0elq9fNCjB0z3QDm1+R9Er+w36Tz2rHWbeai3XDH7reG
GydzAOJFrAxtXbnERTZlFaCA+cmC2VFCvQcgQXqTcDtCHZn7AE0+NVr3dq12YrRV55NJaFo/o+6O
L8ErslpxBq3tznxfnmdfYxvgrKgWlqqz0P3SPKNjtNVmlOWPBch/d0czUTfjD6i2nq/+sK1dDMfn
ansJUd9iVgI2Ek9WkfMrqYlRdNPOcQTWZhP0eidZ4V9d3UZWo8kgxqVqQw87eOYQAmkCItR6/b26
v0KJJtl2Em3HA8rSkFJaIbLC1M+mqyu+wgtXckxGVFpi1V+ZJzmcBVwO6jNwkjUD7OPZg0ZUyoK+
rxKbzXT72IbsArhn4v/hhoRo2vYkgr3D2vTsagjsOlwktwyRw31yiGN6+kFVIC14LkJ+E78Qu2GS
nHOWFAWnnwsxS+Vt2N2NMVlomXpjThmQVHPSbHJOUfsilXzUgpKBx1bGVYRTE5g7wfWDD+Y/TbNv
NIycIQwYXDLLD2NFSR3fZd+keSISaPNHbXZ6yn220y8LcgxvRfXwp4NwBlWpUXa9rap1gDL0M84Q
ZVyPT4F5Ppuh6sVTuZjHMR+LQWaAp98sRD/4x3XfVsgJPOdr/FJUiH3z4334qAXsDWiNDTV2S8O8
m6GPEgfRDI4nS7Dk3BxZvRRPDjNTdCdzcC+dt3V0HSo8DcwKaT2IaTg29lL77kN4gaT6wrLjRJoS
kCIgrdBq1xRev2xHwteaYzBbcfJoCfNxtZbEIl8/MYN3qcH4yFKuV/ZfocmhR8ULdjK34W6j7CvJ
Ts1aduf/wncHGr/m5LF+aWbo02MfHphb++eobqJ+6yVz5POMA7fYBRAqwBTId+Yty5EfQIRhv5sN
JbkAa+nFRQdGlpH7OFaAvjfiLdmJOLf50/F/QkP3ZfPZHFsQ8G/npAHwcY7BSUbSIqNpiS/vycTJ
At999x1pEnku/49q23xff67h0kdBQy3meDyGf548/oa+f4MgT2hhEs3C3MUKoZGNghPmRjA+vaxO
WS/C8cpBL7o6MLKQLqZhd/b+yZs06Nt3VF+T6BUoni/r3RY0FLaDkqOaNCBhd9ARqfhSTmSnPnn8
Rc/EkX2/GmUHOn10PV+mxwz/GWyC7RIge1rxvKrRNYXQu5gUEVI4h2Qsj1+L/Kr4psyP6Mdb4tUx
Hx6IybSNTsP/UeH39W0NMYMAYzbfX9/s9GnCo4CGSdr/Iw5GqhFIHNCMRcuElM6sF7pdLDDymFfP
7LqO42YPG5pKsRo4qqiP4fNmxL9qu9mi3sdsKVPXfoNOBNdmV0EYqVPs8Yl9zr0ytcB1rHqEmb7g
9Ntn2eJ+wZxAcXER9u309Lt4SuAhhkab1YSIbVgumQPot19S4Jb0F/CcZuuz6SgcB+AIaDNGnShZ
23WLaFA8KZ+qaoNx3zJ7dkBLJzsilcXxQfAnBKIjOTBfhY0zlif3lWugJKgr8KzYG2ZnhUXWQMmg
znqRWABS1L6vKut90VwxqjV3/OJit703M4vRnqjGNBc00gBSyNms88tqZ0g7Dwc83La3c7XGooeb
zWxI+E29BOg85YdrLsfoBoHj+AxvITlWAfqjRfQD7EGzVfzCqixLhBkg7bX7S/JZtEFVcn9qkejE
JqJvJ48fG5J4uV98qigZ/c3m0y+fcnb6xxjg//jJP/4DPyCK4O51rRmYS//Ge1gk5kd+Tc37nVcZ
a0mQy9IX7G0Ljo9Fbney81RqVstTiqqW4LBm6+FX5Tb/L/by10buUJwBCAaPbMZOL3wxwUegITpw
cDZ9gu0alU6rlCzkCkESmY+PCMCF4T/KwHaEH6iMwak6o55oF//umuPPWLZzxus4l8I73AeSRjWu
YpACx0GnAgjEbHean1hgXsdp5sWbxBnpqBh8qrYfhX4C6CdmLYS/iyehgIGPx1czJDYtme+8Mtbr
PfDFEMxJ5gKZcpKLEqla/aq7I0cCIJT36GCJQyox9V+9CgVOxYDD9Tzj8UuD5FAOSbGaRYjKAUb6
Zb1cDz9ktxWg10tpECClr1nbAFYNRtpT3OavgmpE20jwCGbim3tzmNABUDM+HPNy4BwECYQDVjeh
ZOPwOt4z3TOLftnI54O6TPH6f/sOenaK719+n71+8yF79+zl+xcun69/MA7FF/YdWVStx7fGtINU
2k8YNkL0LqppyLu05bbDhtxXNj55Xd2ZwskJSSOEcR1ek1/UaL/abVSzvRcm05/dBtQeSWtN72Kk
hmYjrgvTjbD/cM9dKrQ4C/OCdvv0UoB0auRTumTnreFDGP/SOb4ZzsKUCVDTQsAOHslLU3iU9UYY
oODqqocvYHLI2dOw+SvyuhUnbUa2YHsxcFzEkhMDFNhfPV7mulqj4tpq7hJ7V2d8YtgCMuaTQjlw
oM90ZD6mfeLCjHFgVu/9iw8uPGwqQWeod44CIzyIGYsuIx30u8b9OSbxmucCRC2LCSKOgWjDOwv0
kjQcq4fGUU26IN249CCyWlmDVRHoYWC9wHrrULRA0P3zX8p0mh8vtssGYutYNu4zrEhX9knuivVr
sZ4nnbkoj2pW3h9o9q9pzTXXUtoyiVrlv6ul7P8oUjqMzz+QSTBEBUh3UnakA02RmBjlEUMWSOxf
eSBcpyNKR63pAfy4hHdTaDk2J4q9EXz8evU8dZ90Wt87FyAZaUeZuTQYXS4dzctRlgBuF8Aam1FA
uwuVGQPiIkiEddLrU+VGaHiqA4kc525CvQnuzFbOsDxebqaOWE8hyirXXcoBQ3rr0uqRZ2LekTrV
RTPruv0QRQGo6oChINeBnjA+XOEwhE/FR3ECjqFFcmQuJE8kHg+RbDXkbpSJvAPJlSFjbMYSt0E5
X4k5a2A8P8tLQMxIpIMTsRFsT6dPkoA0lF2+Pu8dgwOM4vDzrmk0UjYM02aa1TNZUGQjeICRRzKK
GwS6wtNrlun06fgpA5HJSrLSJhkJqQ97dFo708Lrg4KXZM4YKJ5p1oG67TEXaCq8k5WSvaF0PsRr
KlEu4nJSklE7f1O1XztRZk/Ysbm+CtffnDA037LmeJ1hELYfyxeJKR41SMZtPyRBDGjUe5PDq8Tw
FjfWpocvU4x0+6neFPk14AHicJyPGzqqWmvCIwi5zay5ZtDrOm3RwUeqT8pylubjaD00dKpCd+G1
G6AY/hSOCdwxzBMRywa2OfX6LOcsCeeoSyJ/CTDh2uQJXmHKLWajq6M6vjZ1mBs7HwXfiaIi/lRq
hC9tqVEWfG899qPvVc1QhS1ohiBsT852QvxOaAdBqRMjvWwWmomGskuKaqnwmyKEuUH/YUfIgM1k
UyutqLPIQpwthYSFZ0h5F9rv0LlNjY64Knhw3ok5iTJ+7HBNSi3/nBRg1Ea1lXVKqWHTUkq3pdjA
QHdTdOR2tGBs1K0Yec3OLw97xMCS9F9tkPZsnN58u0B9/yo9KP89s8SbIL6sgykoOdl3t4n9Tsdi
FvJJP8YXt5CgsGpt4DIXCUPSrMkTkwWsM4LCNFz6xUXkp0oCQ1tZW4T0B8KVdZSahFCs7hPw34HE
6Sh4yG2qyxG1bPKHo+YTNyONuO2ickvmw/y8rPCyjKKpQ+Y7BJszlb/HuBsyqo0oYvkq804qkgxL
czCBC1Kr4PrqC+AhwUQY6xROlO9KpUG7PHHGTZAn6PxVYf6hWtXVgb6Nid7+TPQBX/DyR9Prphbz
VOsmpllwxhS7oUxKmCmwIMd8wLWzFjXZQkQuuoAl4ArGEpaFY/oy8NlVMGy5efVhC8our7nkinmZ
aFPw4p7fXCJtDGUeAyMKqX0lYQhGCaggVg60CD5mdYW47EpPSgbdZdd5Q0vWc9MPCBAOKgAy/mkN
eLJzzMkMxs3HQWB0R4ZLNyedcjr6Fg4OxGyqy0DRGIvbHzgkdp6ddJ5IHXqru06pa82i3N7ud3iF
UeYccI0FnByY+Qqyy7YMxaDwhuSwBcfP3wrflNlp9uTAXoBbpDil+r7LfE/FtkyGqvFd9nvDge83
dB17c9hJHfT8+OMRDkDNJblOd51pUvOlTjVneuQbDTBV+A4013CzqDFena2m7l7wJQzvPKv9wNAs
3CefCSsPS8LQOS8YiTtGyeItfaEkPZAo2OERYGLfo/poFY1B/5g9dd00N1Wqh8SuFBxITjIdwUFE
0Tx4qvmgVku/e6Z288Yl1nAdJDA2c64EvPy5n+6lvuKPk4YbeqVGGQOfdaRBgcHJu2B8jUTBHz28
Ewgo2hH6yX6DFnDKtQUGd7GUS5HxB/Of54ZN+iGMDuqFntOTNoN6LKd1pPiogIECkV1xLP59oVh5
cV7ozpjrbGlad13fQiMKs7T/tKTzYmLyF9IWeXnDfs5W7JrTaCNyNunOI6SOOAqXXdQHMyC29y2w
qYiPq4E2eDiMuiA9dnltjus7daDrCIi4mOqgvHssEBAqnE+iuxVjhFmRO7uEb6XGrt5IhEh6iQlA
CEuITfKoGZB4FB156pKkcPipzdvmNWtKulcOzd7uJfKWY/fF+dUO8VrcycNkF9hvuEbYlalDmIJ2
6vbGU/vYsE6foIA5f9/uEUEFd4PLSNPezJcQlQZelsptnnVKCxBEImzXZP46Dhieemyp5Ur7ZlAy
umt/jhULYim+10cb6Fo9D87faz7MH0aPp6o/3P6U/1UDQbQS2t6CMYt/hJheFC3KLymak/KTBLTF
xW86d4gP/goZdvlqv5IIXApHFccuNELM5eDxuXNwYGv0kEs0FqHm0JcQmTrZ3E/wop5cuICt7aex
h2Jykcq1CPzkjnbS3EZOg7Okq+T1s1cvivF4XF5cpONQ01pPjxacUV89UJJg4EdcYFEYJK+jUixD
iQrf8Uq318Eyi+d2zLU65zmCX+OENAH9cbmEQyEjwQc7DFXqijpdsXpThbrog5TQawQytNzmNfoS
5PptPoql7jLEoHS43l0xu4TPoYN86drpDgnWrzWfL0krkjKiAoSGRMTLmdPRc9oulz5LXAWQgDkF
iyGy27nZdinUPs8k+I4uEtKdAZ0WADx2lMczcXGBrV5cZP/R1nRxIV0wjykGFh5iR0AZtgZvY+mG
eWAR5CgyXl8dXlWOd+G4cr5xBHys3V+2cKus2XaoRW/XzztEBdhWdLD5LqKBmW7+AXa4tQt9i9QJ
Ie1UpA2uMIIGXFx4ywChCYatEv960eCBVLoChLA5LYZHyiKNJEVPVNdG2K1IkSmZVzkWXo1JwSu4
mwDJkE8IZRInrhXTtar+TOEHZs0/182+hbRTCMJmJyRAIYOXQE7XzalFOXCBITChVGHX9+QiaL3s
CboO8BMNw3dxITVdXIxgZoFc00/auxcXfhqMLS4qXotm3hFzmNpHT2mzLvB7VV9V5GHdXPlr7XdN
tuME2CKCKkC/dtDjSl3mNdRSKABWueXTdJ89RbWdfCwhvEPaM7h/hin3FI4/VcUIcp4Yn7tq/mlb
Xf1KZfYwJaCH06wICdqom+Nw9KD0qwryO6ludF9rBJWFhc6kP0eCgB/0etMYT5J4XmkLY3bLv49Q
Ef2Kv2OLCICEmI3IwBSk6p8DwALbRPKkFiy3ZJDsnOoD34bQCatUSI+VLaU8OBuIwZqY7tAqaFfQ
LUECpxtUKMLH98qslGrF0pa0r8SyWnUuuw8ESk0WHijagxhxn09mYcc6NXFMoHd3dmmK4cL7njS1
KENswfZSIesJJZcJysw0T5HXZ4gN4y4MSP4NEwpKyArBa6yMclntdsw2017DQLiQZREUQXO3ApeP
msxdRpaler3ZK7hUlqRCYE7nBCCItgiBgzeSkX+WgllLQXPzNXbDNs8YtX7KUFQp98XSUSQRTV+N
LvjYTmffsstmeZ8mmqFtYDZHHFJlw7Hc0JhvX3Mi9yuACPAtBokvu/ZCv14ppbE+jqxF9UZx9Hac
gX3Dcnwe9AW1FfPRXYBxZJabAtFC0pSnzy/pMN+21X7ZcOXfV1fdyRu9eZcbZ5RhEEOsHwvmNGwm
TbGD1eCGnNOGPuEq/j5hc0gx2VRhqvAh40Rql6i3/gYU8WUneWW1OoCtZDSorrR4hNT5ZDLo3EAi
23B94F/k3nfqKxWWQFcOutXYsIXVtk2F/Ntca/4HjN/QtR5uyPzLyG6oULSJtbzpS1BxVfPEy92E
iQ2Bs9xfBpb8y+oKwtWAxmMmsC7gqZOsWM2RQivxJjUSe6wSWBAeoTskpIpNW1XjywcR9RB3RGdg
Ec8pm7ElohtMMwpfbzBSWoIy3Qg5aQ864PBMgW96XK0IfxdCOm9riM4EBuMSEaBh+7kgNclSpBDi
QDMN8FY9Lcfjl/RCgT0v7NOVQykDPwuNNEb4w4whyujGAmwcVINwyzuESaPygnVh9SO98Gbk3+Zg
R+PR+EmQ1Jiig+zX1cvJyXYjlNMz70sFpKpO0DR7v7/UzqwjAXHGj0c+8qQ+Kyl3wXl2Y7ZCtT1d
GbKyEp7Z8h3IE2HkPwCKbTMqFQDWWIjX9DD/7aSIEziMxOag8cpFiUE49NV+FZNPhMSfqvk0t8sc
ssbeu1zj5V8prsBeOyiwOKTXQyLLqKsKOCcrjBnFEdRV+8c14aP0Sji+pwA/BFARQn3CKSqPF366
CeMJq2YPHT0SmRTBZW/agudh6lbLS1EPDPwkJiaRhHCFIdFAP4jsoSpblDmVsqIkkuEcVPFrdBe0
khwPs62actsRFND9olS8YQMmRjZ5gDDnQxU5Vt7jh4JTxk3FKDbdLrzaeZc/D3x4U/b0ACBMgz9d
BWC3HKkhEcUU487QPcVXCjNGnkkv/GpwlizyHPoIT2iCH7Udp2gjwxzJxCifYYzjjBPfQis+B+X7
VcXUzVLUqfasm0TXXS3XOCJbYz4MyP975UXcLat2AwjXAP3gHB6jHMQd9kkPzo3WdoZ9m1lvNF1c
xuLr2pYW/hslbzc28uUIB3ZljjXsaiCoVzo4EcagmXLVr85+28gVxS5IJl14prw3MaP5rCvd3D/5
rq94oB5tv7NZpenYip+suqH977zYSHPqPf/YGzM9yA5fc0xk6DA7iJCAH3MBZ+1JuKJyG8fyCZG7
qicNoJzmZ0lPuJz2MOFh+rLOsMN0AjL3yjKeqVR2nn+qz8CHMmVaksRSmtIHo1Jv+tx7LZeREjUi
H1nHk4TRVSmXWZVRtiOBVcIt1n4UvOrxhfU+SUxzYNeT4l5ysgNuseFQPF/Yo86nO3LASLnj2a14
Vx7a7uDGLF4Y7k5+7RGTAUyggixyiZSikug0M2A9wuKGPeWIv6YdW1Tj67F59l48StYVG8/myu1O
NAjohU7yA/hiixsKu+jQuOw1whhyHM6hs/GFqRbc1VO6aA3LnrOugCnKurrTt5jcJro6WyT7zn+h
ayqDQANtlFa/1Uos0HAIdGQrllHeS5XNBZgVknEcMKbXgGC7LHsppmWRLem8ba+TCe+c3iGkc/j6
OKJJ+EbqFMRsWZCBDqFmzH/dASE2CA5JeER2lwmG0NAvRBacymvmhFw73KczPY7QR/PcL24Y9x1w
UwAcSwBo2ZdJ9oXREaMRK26JBiRj03cFvOmOC+ZO4r+YqoYVAejdJCePfSfWWcXYbNVu2IJ6qu1J
zqZEil1GSae0e1SrFRBXHG/Eri/UnTDKDXjZLjY5yoxuGdRRNlO8LnCPaObCdx3f8dKaAwxaykfM
mxNyZlb4Hfn6ScAVc94iQZ10GNQkFk6SEjXmeF5X4y2jTcYBxNIlh0xZppTjWMuKa9EgqEOzxYcd
di6EJxsk9kwCGN6q/rouHK/XMVRwHPnvS1UJ7OBuLABirFzi69Qh85z3Y6PFfKFzbEPrSJ6tQkCJ
pyPbXpmce64rPccRGnCs96IjrPUaCGTc7NdLdw0LYY0//XoKOgg150IpJoz5CmFIpKGwZcrueuAW
Hm7ux8gtn55aANwzeAAn4HxIPDfg5mFKAzPZKlUZ3ybxPfQOKKx/iEZCQ/GaGHkXQ8+NhjUJ3jX8
0ZOa1Zxk2W4WyVQ1i4RNArpiQxp2yF0D/CsAnnFVmCLqr/DGWjken36GBWwHsYwPvNpzb9qx75od
TwqPfnen7QoWrjZhcOOt4i3ASMHT6uNLs2BVITwpaWhcemnpm1+RV42auG64Xg9xNwXXm+pzDPVs
q5EAZ8vrrjSjq9YAyb2/IV8xvqqHqcMezT5sdE0IFZjWxCELomevITbgZGzesJlGwylCchlokQ30
31OGGRUadYVkrq0c1dIZaWziFzB3I4eNhQfaPO+nNeSO1mtT1lk0LegjDYDciGjTzXfo/bRf4Gg7
+wnOSlr4hvPOlpLT22phxPd60XJ0xg5CdCgIFTGQuB8IOGqDAID7x4GC4RZiZVApgai+p5Rv1Drp
dtRh3lDqFjH0o4HMgi5Bda0SNYDKmT0BUbQNyCdOw0DCm0rdwk1XXS3fNKtl620EcmCwe0Yl4/5e
5Mtttao+g0MxhQND3oB6sQcUVeUf8YySMAL8pSQNtZXWVA+odW4vEZe3/kRuD4wGewrfnorpBxya
+VN+C7l0zNNT9MFdqt6umjihkllSc673G5cL1NNfn+r6bTLKuURHPMZd7zCa0XbCH/qu2VbaJ2BZ
6zHJ2SrhtQ44TS8Z7qTmdgPYuDxJNB4KTBVXO9tf8JXUOZXhpcy7FCJddj3OqnH0oTPEKvgatcHQ
9IvMAveHA7a93mP2Dxr2jxu6hnFuTlXe2ZZcy71BQ3C2O2fLkdv/WBf6ha/wYuBx19twIwch3MC2
GZHANANcDENy8MLMZkQWE87X08TDjrJwxSfLw4vOOHI/q0hol3DZLjrysItePZWPPaEdioK026pa
b1b7azPb5NYWRU0DIai2hipA0Y4y1BBYR1JtEHM7MwRjxsfTZssscnEFd5k/TD1Frk5eXpY61boe
75i6Lq7uYvPRAWT0LlfWUdcpHTbO0eim855Pm4LRYqQ5Qr6kfAAeVhZe59pBQlTjsOnzIHY7RxKJ
PXTWP/YkdU2UR/pKpQZzdrwfqIfgojNwAY3XzrTUs3zdOLSqyCPfTMoBkziIfaJ7fHJEliHvg9cd
6ck54kC5CZP8PFKf90FrqWrU9tYXgPP6oXq9PRqFtBOllSqldj3Luu7OCQvqUd9gWlzDV+oSA08Q
xliXLhQCjQ9l0Q1QuHjakwOHyxW60Z9jEA1tPGUHOERA01X6Lr/fR3YhGF15BE04Y1/eFDSF74nm
4EZPTshfHHljgICStIgufAJl0xoY63qRmXsN8h0ir8M3k6TbrmyFEnBxV3HkFpq5d5DLg1A/q1Vz
F32OzVuKJkhUSDVnzkQumCb43DdV0jNnUVYXRoogxfmmvAMbO1dFSUGo8vEMQpEqQa3520BRnUDA
IwkCApRaSUctt0789p5TJ6IjTQhEAcvgOEtwH64d4Klieik9bjFvOWf8Mqjo8p5ZF05uHzsaYRYm
FBV0xiBYXYyE29yngPRUFjHl4ZCwvG/Gy3qLv9Iqow2kGAJQufxxhw+rbSrIB7ZRKcASFctnpubx
oZpDTLVAlKTtEufjinYr5hzkHS5BwCqe0OxMFKTDs2JPEp8RLxkd7jnLPSV5EvARpBKFr1ZwByt5
nGisXefVG4c4HmjmSnErZlaCWEMbYwZI8kr+IqLKkcVwAvwgw5B5891PJJeW7NjW0YwOBjABTo+b
SG11TkKW9LSmd+ndU3OyS/4+vcOqLzsjAW7t7j2rJ/XXT9JBFNA7KY9ckfzBgYf5pOuIUBRykBcr
NaViXbETqLDfMc9FhnKetwpAfCDZGclr2p0OPBO/QeVNID94TWKiNK9XEKTobDtpAJkv55B9bizt
RE4xuokUOy1chOdezPc6KbGRJ/DwD4l9DiVeJTm2mWX1roOY2Rjahmev9aRiI9mMMbjEXLFyuQJl
x3yrug52mvK1Zvd38/uRTURvz1iXGR6WdLNBUxNf7BRBZ4a5xbCRbQU67pryuS+bqs0EK1bVgb6R
p8vaLMXnaush58yvQbmBySHm5mZTA9fe1/IpdARNC8pdu2U1DsUdw5ePRaQBOM91a54ruUqZO9wi
hpYNPf/CvekQA4+GWYOGh+93W22vqwIDaEC9UMaYzZi0mXNepW0eFKghhzfVqfRhTpUUk4mpUBk8
sZPp+OZesRwsmysUtE+fBAEJ8uoXU42A5PUlmAxXWecHqclLRV53z4sKeqnXWWdmzaPoYQI0qysB
Z7w50hBSHQavh2FieVYovbSSilPMEW3ZIR30y1mDkO2QvN8IqSO3uWijg+QvavatZ3930LyzKdrC
fVhtcRQLf98ZwiKLC5xyLWnrUro1iB63gW7RPgwMr13WS2+xyXHsgP6B1pCHruISikTST67wASKu
YEiiOkgCKMNgk4fUt2z15+bPchDzqKJbggSs9dU9maRp0+Bv7y5tIW4HwvA98EtygPbAHtqzCcVt
NdtltZ1RrVSf6oPPjzu+c9ZsZwQljVeAzaoggFNT9C/3pU3m+ntcB606ktk2bqVH6uv4Yqy/imQZ
KRRcXwBpbquz/K2nIu2VihPlUS6xT8ojuW058GazqY/9tmnIorSypSRbRjJGTTDgwfxexFV2Eu+T
hLGAIWVCXXvGavjC/ItwDxiUFEYNIV2rslyENK4+JzuML0kzNs5UsZgMrOPNqHiPUOlO+sWzEErd
lhWm7pcPvNlsH82Zme9XoqikygLQmRR2O1zgURfOA+1pnDOeah4J+bXZXkKoIc2WXkMWO07SRJz0
bMa7yKyeirEAgO49OC3ebRvlSBDvCWvEOm4zpdXAKkvBobXpDtx0FktPQpYzAcfjKNrMyEEscfFf
D7ksEMR+sfsy5W/l7+O+tjRUfozwouC68JJI3/lpbFZD+3lbcrqas/Oe6NXxzbydSQbgSZfT0qG4
1PQi++CjuPnWTSaNkdGRRKUrs+d2ie9RnKrRTm+2S6tTOOqtECOmuSqqLzV5Aeq+oI0YRHSEJxGt
j9nUiQo4/FWZQ7EKhuheIiA16/Da8aBLeXF2RYyQ4n9s/nhvDc7LzkWgWSjqUda1DpYijvnGmfRr
KZIOYiADdGp2epVANv08X79mwMPhyKutLFNKBJQGAnfeSN0VZ2g7jFT8QLQBZvM9NXe8t7kUatyY
XHV68rV2IFrBFxQPQ7PbxOgTbHSXXJTUvqkj36mIo1xGOhgfvckhJk9MToiYr35r/LPu0DJXvrSJ
dNmPHR2ixN1VZ+jG3UY4MVF+w6hCnbQNPQuzfPLH9R/X4KzVAnIQQRaB5XC9K8oSCtBb6UpMqzcU
tcesLU0FsFMyFbN4LqwLOqUKHNmbQVKpX9knkxDkqG5FUGy23gBDka9nMWJOpb2eUuaimfWwNn2s
QSFEq+3S1NYOkFJp+uudDqehpIhRJkTMmeuhxEEqZ8j092U3m+UJjHD8Ism2peuCp7mWx+HIw8PC
VyhZgJzyCByR4HtFHt7vms3LHSxG8lr0bFPdF+DD1gpV+v5aWa8jiDu5pWtoTjEmQyw59OYkgWUm
85HKhIlJVP+tNp3P6itJghNlgUL04gIHcXEx7gqbfml432q+NEwpBEYSsBspY8Hasq6cM9pjO46u
uuDrVcW6WdMT8JanqGf0MsTVrhCxwPwxjjZb9wngLoHjmu+vaSFgKA4J+ON5jZ2mDBuR7rk30MU3
+o+skcapOshFQgUOpvhXy5wGwjtxnam4mTgErMP/AEtTt0wh/oFMgF9E8zuuoM7j5LspU8ZWL5T5
mICenpBCBR7hBTjRJ+LNE5BCF87YSiRjO0g6tliobthZT6C+eFQP8HWZptxesBJZTFOJ/PQL2J64
TgVprZYYoL2Mw/5cLOXZ+YNBj4NKnAAhRVU0FDqbRZFQIcEmvXlQb4Lm0mYJ2980qVgXPMwHcQrA
pYOkJqxtboEKWrQRcZTafJdCOWF/YQ+b7GAaQIUNlfIJALA4TvulSroJFUgGL6LY0xqaDSd8wNLC
obU+cCYkfUeQaloebcAUi1y99ebE+b7dxRkaA+W2dwR6wJm8MLtuILME2BUSxhlO0exTdT/CvOpe
XLA/fZ7h9O4IvCtlK/lFL0BYRyiyBADA9CrT2+2967SagFTgdIgiFmDu+xspwYT533cq2mU+9Vxu
twJUmZxAEES9cUzd10ktHVR4UNOvcf+3Fdoiiq/Ml/HBTvNj/mUd9vnE2oodoCvoBFx88OreV7Pb
3FOsoRtE2gtTI+Lcgj+14cTABIyIkYKp5RBf19VdFrpDqg0SEK++LKYe9RhEKVLlGpj0AMQhXZSd
J6NLpkCPUwQ6NknXliCNEZMLMyUeK5cQiyZu6Obc7wP4zZMs7J61vWOOyrtm+8k6D+TQjdxUC6vb
BvXMW05oWS3H4R6O2ujdov7QMTGFTTA39l2AY10cfvULNWO9RmthidxfY9TrGmYx7HN5WA6yi/9A
8fYhzqIeu5KUuWzwfkcm5ggzkeC5NZ0ZQRIMyLE+Qy/vsjwAjZiuV+idX7NPPX0yciz0QWGxSSDI
CbMoCpIU/CBGGH4BGsIgBoXSYZkeJJTivEtWVQBewG2z+DTbgLIJU6a6IGQnlgjCyv72Ei79K8UO
oN/kfgPeSFCRelPA3be+L10QGrubhJ7VlH0zt6+9W4gVH/ZlNF2Ej4dtT3FlKViHbNo5PM9H8XOb
BIcLYIO2RawteYwFBQi0tRty60OPPule526XcYytaQSBhpCd2oyBsAP8j2l3/P2LH579+PsP557c
CA3aRkrWuaVEAk5l6sQCJayRkzK6MxDNg5xe4839sDU859Y5lJ+om0Mi1esWpdYgRN1cTSuFN+FF
UxEvLjePLolqn7stsPvL2Sz3joBXn/5zrD6RdVLyVKQm8mQtNwpKjuGG4aSvbwaWoKtO/MKNKln3
19P+QxSD8vrDGgSxCAHcVSFKTfP3dn4HP13dJYDxMlUBW076YGFe65kUGXYS5MSXM/vZbDaUQ4Kp
2WhcZuSqadij8qc7vK5IdIpIOS6DP3OzOjlVn/kHofsbU25gburVdbM1e/sWDye4OyJOyJpzl3ms
mcUYJ68UQxrrdnDC2NLKb18UAdPpN5nk+S0pFJrOLyXtbKU5UwcycMyZKCQYU2S/uEE/xCa7NUzb
LeBjOrqaA3gjFc0HJ+yigW2Ijq6lo59y0ZjIXjJXUTtzYPYDEa0cgqW5lObr66r4xjmPzgK6H9V1
ZmEswagdC24E6UXQXoG0BpVwJBnwH3oRLCcBZQoFOgZ+ppHhCgolZN0z+A66Be99dZuaptmcQGlo
ukYU2DbyB4lwpvEMh5/W1+uGvbr0t7bjVpNvp/y7aTzTcJLg0NDyZf+UfRsdEXw1sP45s2WzVugV
EW4xFSLBgT2BZi0yAvQbPSQR3odGYKr6Y8xIrepFpXdWx3jdhiiTHQg9iewk9rKEwRCSEy0NiwlI
FNV2vF0pRQGGHZNXNNI1wncHt2lPAajkJzVzUm3K0qfW52tvEMHcQAnx/NbFgjmEq8AN52vdC1dQ
1tCuJzMGPStIiKQzbz4te2Ak0U0NSH0oeQF5w0NNoTJ7wo0N5VkGyB94AlclsIBAujjXKsnNEMiE
gaX36Hz9h4o6y37ZG7aun/AkkAl8jUmwJHacZmLdoH1Ub3JoTH+tZktOYjDwCZRdehI4/GXEQnA6
MBfAr9Si0IpsxR9OOzBsEVvBDHWOo+E0JoMAg0cIpMPgqUNnZu6daJyD7iLrSvhmsY4Jv+sVPV3l
/GssSouF6Qruj7LD2RfpLo4VRe70MEMaYt5z/1WTaR1rgnSwP2J93lO4JcU+kf+ecnR4I9AfTRfu
1D4EowXwFFuJsYetl5a0vXuvOx7kZ61rxxoj5qY/veSGbd/+sZfABt9KIi1c/8kRpBkmXVT1/Z3u
VrAlludwlfrWoh9h2u9kafS39AfdmzmheOgl6vGnTG5J7eD+awW245mfSPwWAojrjKry1crXNTrh
m2gukkjCagAwdUrMKLTZ2RH5orO8yj8lWBU4+1pk8XREC7muLLj64IgATmsg9/fJiamOkOZ3lQXb
AspmWNZlc5vRtd1cOalJ+2Sh2nDNwTAKtNGaloXbgpAbgHi5rmyNqhI3k21DCtmqhavrlpMXABe/
2TaX88vVfcp84aGxovtXa9Hzk/hhoGlvfUh6Z1r4hWMej3TP1azndJp9MxFPcD82LAAiSHW+jF1O
dc1P8Fog3cpDqh1lKmPsgTaeYhuof/jZTfAfi1XgKUkOLnCN0RH9ApohyvZkXad5qUB1oCTspGd1
fRX7JMN3ZcyR8qmGt06dEzkzR1TAKyJWY+J+7S5Ff0Z4h5rMpTqMmMBTzr13hrl6b3Q5p/FUCt4o
P7PzlbtsjIj6EoCYtvuNugydOrVbiXuCWP8W0olCSAh7i8LkBXizQLfMK7MXPmX1LWYJiKDkT1zM
PGntEcaGjQaRs8kg5djHcKqADUDeGkbcxYJD/HM4yTA/OClThrT9zUPK1cxPpQXznB1dAMF7NPiL
vQsU9DYhbWjQVcDVkHNv+4GsggLldvgb/ubyMMf51OdJD0UNGhGkF1UpTvP9+tMadBqkm/CuvDXr
pzhtNR6xn37/8b/9u7/7u839GPxTKFD6p1cfX/6Hv/s72MuXpoH16RK8dlqziquKokrI6QUAnyhv
CSACtONB8bzM3jXr9X329mq+XreLm9t6uRtlv21W4Gryu231qVplp6fZq5cfMuDi1rDpYZfPZpBb
B+JkAHUz/2b8dLysPj/NB+YNmDHg6dnw5bp+jj0cjrLhW+gIDnp4Phg8f/Pq1YvXH57/9tm791DD
yf+ai/+OKxhBCiccc3x8QT8frP18fPCj0M8EKRIm3fZeWA/LFFRfAmx2Nmt3PWDM6H7pgfFRz1Rb
Xz8Z2dotJN17ClL6A+ptkaaYB91zJFkbU2iHFgApBXyk4ibcmKBfjfX696vUYDocxa+Kj9m52M8F
iycPK0NTD6snOf+rqcTstN3UTGN/M1ILt4FVycf8r6ub/9VewZjL3pARO2nAzva22NIitGe22XOC
CtLLYIh2vPz8ZZCkUuoLh6JDCNT8+/07evI9Ll0HQFH0e8G9GFH8On4eNEM3u78nXJxc4DWtXU66
qqENfkau4rLFLe04uLvpyAA2YdKBjs8yOCIjS0TgJZ+rdV1prwFD2BHeMA2HayqA68Kd0SMMu7vm
U7V2zvFIiNGjtrgKsYRTrk/4YgxxrhrKMhb9ks3AUAhWkwCQEVIrgu4iLX0Kzkz2ogLy0iieQDXt
RqElpGy4oLPC/kRMuOz5zmgx6hL5lUgbw3VjP7wxl5q5lBixcJjKlcfjOfO6dh5TazGhdCo0wz6v
/VlJy97JASz3ZhEgJbWt7NF2+MidsrIjkagiLvjj3F+kbi2AHde6o6Kf1XmsEntOq310t89kCcj5
zp1haopOcPr2Jl5JcQPqgvSublcpbX5XKdJfj4qzm8HZeYIYexpovdXTIN9h5RJoSLTIP5AIPwv/
ka6HUZpgqE76J8omFfUjoVgwperWSdJIbUxQx6GFespUV7hcIiqyrzsHw0AfsPeq283u3i4OfDxM
7TlZuyhRzzGT4NRI0SSw8D/vn4UHLAjPAnXpAfOwX4uDFJNY3bPElCCgv7hMdmiFocyMdgD+5P0K
v89On07Ok4jz8k33Qv/sMXS2Bx3ryNlMXR4+wlRuQ4hgsl/wvj6wHXC0mAnh6wzvy1F52MPIHWZH
ZOyR1lL35Wq+/pT58OEY8rFobkH/ZQlCCDRCWQIUWFRM5nszCfA+g5ed4XUjn9SdRMedMwycfXMO
cu7wbBhWhelIpRMR4sQCiKWW8PqzIRD89wK8LtKZEaQ7sFggd5/nk86crlDuCSzrqNOiSFMggxiH
LZ4EhNhGg/OUjOu23QA221FRToo58oY7nBqh+EnSLpRPcpjAboIZaRGSLn3ufYI3TfW1t7+mU8n+
HtHYMXQB9ytwGOYkx1kvRCFC0eqM/I6dTKxeknR3BpLSZvA2QlrS9a7OUNKwegGPEUVJrvA+LMvw
kJnPepy/7aY2xb7OnqSk5vBOP0J+7gLB5dJFHy9XPiial3vTJ2gHKgS5Xtl+1sN40zGQ5oNAam4/
0JSoFg+L6UnpWPWGZGSrq+kQlI/rAYentTqzUqx+YDwurxek4+68WBZyjCVbzNmEkflkzUNiPfjp
9cf/mnWMqCn+6c3H//LvUb84QJ95VCCvm1O2ey3IgsAoZy/fgH68IT8vSBEAn7FCub03c9ZQGg4j
jrGkz6q+9ed626xhCDp11TEpzR6140etoR2gSMusJs3MKVQ/m5lfpEeezQ46TUdfctLKB3+3bBZH
fJZLphZIBGXWx6VEacvy4NcnNonV2A8M6PpfeVAxiYnRzh61555S8kED+9ljis6u2dqzOwjZ3IJT
ysaaCp5OaAONX7x+YzYt1flt6uGTf7RPX3x8+V6eurK//vH9fx5BlBlKGwvDyG/rz4ZsQuSJqejV
i+9f/vgK/Gpu22y/xnw6NSD6UD90Rz58//IdVf/0m/Tjf/jH5PP/ZJ8+e/78xfsRWknX9+RPdVmB
WfRXg794h+XV/FOo9QXl/2r+pxqiZAgco3Xqb++AIojhpmkJaPftm/cvP/J5tK7u89bmBN8QtoeR
x6DIkA2SJcA7rlbkdCkOS61Ows29DQ712EueIokBXC6AGbbiLEN/Ce4NQs7pvTeYZ81neeqy8E3m
wZVhsRPQUIddEUptbcKrVoMsYhny3/Zk+taLBaN72bOQCu1dBSlvVYX0rS9X9IPXq8k7M98dCbth
+sBqftqGSPLFNbpARi3/kYxUL6AAJtDB54HQTB/iBO3uN1VBZLzg+kcZPRh10IA/Dx2ZHk4gIxXW
1+VWPWRKNJyYCwU4NyyNq/CXlGownBtJ3QQrkJhQ+86uDuJXV8sZXGsSeotxOV8R2IQNz/IhginE
3TmpY5SnSrNnrlFrQEZnpA2q3CCBmAcZfDA3fNfWoJD5oI/hzsA5SjFxXsAOFy7evKeVzDrubLWz
mEuHDFwcReLig1KIaRLVwF8x1UmE4fo965ohSrwebFI85SQ8+HSvUzCK0cC4f3+o18vmrk0N3qc7
r80JcGQumFD7MoXiBDi4W+KGzQ7hBkfAY90RbhjESzZApnlZYj+WbpoFP48Bk+oUF7vr1jf3Gf44
7xQe04TJn8FYOUrPTQ8gr9ejLaHYUDBeR6rxow4S/G8wqPTdRPdtOfjp7ce/Z8Z41VyPzf9/+t8/
/lf/I7HGEOWwyMyzawS84gM/N7LzPcaWLTlYYmuI3KLaPrYoGK0hLZDCagBBRGi0R0QYdJ5+9vbl
JDOc1P1llW32OwyhAK/dFtqByPr7Xyn1mXlouvx7fJV2BoeTNzX9h+6///D9mx8/pOnrsrrcXx9T
EKQOQ9OmfoAefIWHPL+pVqsGMhLeNdvVMveL8MeJUukhYe/5N7i1II80HY/HXXzl0cMIxqFklg3F
NQr/9apq2/l1ddAoaSRDM5Zly3swsEnKW4qswJ8RPgX7E+sryMiI4AjSIQb18d0KZRSB/brqANY/
w5OUT7g6r8NleaxvAxlrqDFAfBKrNI7A5ZF+y8ch5maLpZFuKrA+LUvYAPbkwKFgP8y2WptJvKU1
kehlPBoO1xJ93Idy2obsCjVyid7WpuZ6TQB36MzU7pbNfjdSUWlLQw4ouSQIr7vFOPsRDjRGGwB4
t+G6L++zt/dv70+fjJ8ECbN4y5jllF+EN244g+aOs2Qs9u2uudVQDjLjTy2dCPnhjv3Gv25R3cBa
ILj4fVZZLlr33RD1fOFdq3YqRYjJA8mPHSaK6NvaFGjkepc2JfhFLM7nzHuebtV+45c9Pjk6kwrZ
lUYIxtTo/mGwYywfJp4MZ8O0XvcI2cRu/mmgawi6BMYMShzbL41IfZFIIi885RS6dPH3XyW4XEaI
kpNooQM2wJpsIfOcBX0q2tJjbD3IGG/ZQP5wXwUTr2P4oYZOFaoDARDS7VdlmQQmSL+j56+wCx3k
vYucd51VyEYNSG99pDKqQ/vgY+yRrmRE0HEHOzJerKr5NkwzFxfbb5ZYNVbqddxbAUdnujKh2Bmw
ORBs8gj77XgQsNw10W6M9IHit01rZJJ9vcP8xrZKQ2Lv5qtPmLvXJqgAt1W/OsxHtPg0shEm7NvP
y+1isOK+zlOECF3kcXyG5BdXaDW6xiYwxypRDOtRpmbFj1NywaEQmmjncZR9M8pOnxxjS+rdLmfy
BEJ7zgcP4rIjT/f+jQl6gSEPeGgNDjN5r/ZP27l/Rnb2g51kPtHbiMDa4KFaRF80hiij7S3C+iDh
gWsqMLpauc017xtEkledue/BW4uu0OjeK30DYeJCtTSm51K11c6i2zKFQq22MXSxTN0jH+434lwM
TvyPLLYP5u7FqO0tfY6Mnq3SN03ZFQidPRBtwWEBWYzzxHomBHspZaYHL41O2V4NI40oCNgdNhkc
G7u4LxnCta6qU/Bwx0HaZgNpUBHsH2o9iAME0562c/KcxYdkiQmPQ+G8mmB/A/nhIpmbDMeB6juU
GNGs2LewakMsNHTXJ6hTiDcd41wC14+tfY2ptLE7MedkOuzdcQXHZeuz2nlKkzWOD3xq6+f7S91c
nfXpogPvDuUvfftizDlw4cHgZHCSPeeutOYvixK5qhIq9Oba7QmExSCAKNhNBW6nUvI694FF6lB/
igGT/X/lNr4jYBIQ7c45p7JHTAh9fOBzcEQdlhHY3wpTzXeybb5vXQ/TBlOQh7nisP5glzGMbzno
QNOjjwAnY7Vvb8KDrqrF90Xp/P93NwdWBqGmYB4oLhP71ZKkB1/3A3lKznfyA2L8zUFCd7Ca34PC
F02eAtN5uYfIN1A7hEk3GAIbKwXRHX8kZpGV7TCL8me4mLYRwOeU36GHj9+/1OTCBOHG1XKKfRjw
orcA3DX1RwFzO5wPEUX0zsGI+t7QMzejt14C0n/rjRncLAzBhg3m5YH5+FmbG1oLFujwnoYRk+Yp
uAj8zey2MFN1IPwBoTePugk9NPLi3buHNQLAf0ffJhx6cg+6x8MtYGAsls2W8+q2WTuFSMqHf1sD
usw9e+EGmVPlZVJdoD7F+f/9m9/MXr7+4U0QBeBKyc+//Y40bJGZwTENm/8pvOaR78RZNRMKvO0M
3mDewxevXrz7Tfbs9y/efciev3v5ITOrmf3h2bvXL1//BhL4vHz+IoNxZd+/+PWPv8mFD6WOUjXT
LIfRA7I6PoiDEUUVQKs4omIja/P0BsBvy/LomOOf3tngtBkkhgJ97U/vP/6jeWa1qYPN/bLexslq
712iWpdMdfDTh4//vfmawyXH7acab+Sffvz4/wwp4q3db7BmxOExrx9/AXBYFZcMRIyCOllu4K40
rXJLkZ/WNjBw+l/3CwF8Bypf1Hy5bNB0V1CYHa/G9bbZbwiluCVuBJ8UOcFkr5is4MOxq2N4errd
r7H/vtlzjkOZ5pDluJrttvsqB7Gr3U1z+SB3jl/xhXZTrTZYlBPpCqau2cP3aF2iVEAZ1VR6Q3Rp
W72ErSQhQIgRdX4sHXFbollhBkNaOXw3UGw3fDoD7cQa5kBcciFd5sTuUvrWEHUZoqmy9IOh1s2m
6LbAetHw6N7YbMbOLWKaffzBVFwto/MRtWw+5FuU+26WzVAwtErN0Ocq502Wu6nPYUPWVzBzy5rC
GHGPKp0IwkNb6zBgsZoq1QcKTz0nb2RMBztHmxVbMbPsBfyYM1j9BvkiwKEjBBldBVnYOSkRGRS+
7MbZiy9zsDtR70yHh3AZbFbzHcBvgTvFH/O7ev3t0z/mQ69HUJykFkq8fCXmyoZaxo8yqWgMjjT6
85vdbjN5/Ji3SLO9fgza8Hb3WM75+GZ3u6IPygdPPi6dm0r0+G3NMWL33/2aM8uhuCng63gUaEQy
Krs6qufzFu334r0KLSF2T7x+kDzAXy1VzZsN2QhX9wwiYY4jd5NSAaP7j3myYbw1dB8wPcczjr7C
rrL7Zi/B2bCz7uZgRGwEkDoezzh7eWUWyrZdL3Rt1isBFLawpDLYUQYNAcoD6tbQaIKqLZjFEcH5
X6nWAKpbVyz5RE0HwR2w3lFyw8uKhwlwGzBlQOZkYr1Je2820cN2jqZniGyh3bdFkKwXuwK3zBT/
axkeJhGFynIyDuOSUQWKkACU3Yj2BkCjUBI4UhQ6QliQEE/ACPiA92aeqxrpJsO6YAlRmVoR/gAB
67lLzk/Ty1vI4hH02rxJ2yKjpC/LwRd/nIpUiiHUnBMmO8220wxK0CupuGNE9yFwkwMxx/9M/g27
e9sIZeDr06YjJoXVrQUBtWTjVV2+bJpVp1EIXtLX1GzJU7pu1n+qtg1OplThKr2btwhM31FpLLgM
zYEbql4xsL0OkzMlwvA4ndy1GNKjYSS00PPefIqqX0oNYVoU+qi71sLZPJgzwPNN429SzuNd8BZd
DkgJmYvQ5fvdi9Sw5JsziBJ5f7/ezb90ufBQTP9ZnuXZV5n+cNxcXYFa+uvslygs/p/56Dz1tfA2
uWpnIqtrugwP82N8cKgn9iCP4Uad72aWTs+AlBdf2S5OngYONzpNU06+lxXzDYayPNoCydoSXoUR
+Abd/r3mfxB21VPkUdv98pE+itzbDWQZMjWyAwOIR92ewmFqKO0/ydkWw72JqJrDpkVvQTB138NP
dEcfEksxnCjCQY/+krRRYgGd8TTa90uxpgkyOPXJCDbh5u8SpuJaEGqWK4pMtks1B+mzKfDzRMGC
hrQDl8xfGYdv28/RpaQjftsG2Maed2IIg9W2QQy9NarjvQGJEf4ZdIC4eUQLtpNOVwDuV2T6KLth
0xQOO0KSIyMX7olRRrUv0872/cBsMo0IlE0XbV72h1M6Gxc4hzE7aFgl9kbNEHFfchbH6Z8TR8eR
AGDjBJ1RuE9mQ95/eAdah/xojPn8DvxoyE4BV6ER61pg4ywj3I7z8vDU410LM1x2rXNf/Gy8CyPH
vgdvLfjfpZmVT0dQ6J62AxAovvKp7HCUhaTMBXdBcT+mq4Pj4ZNEIjDlHdef+pUDczpfz4m990kF
vPKZCtwSw1QauQZxoFa9Bj/L3Wy2SVsfe97kR8yvlLWbapLZFIOylhFxhA4OBgPJfgzi3diQbPQA
0HKBkarwX4IoQ7BE6gHQARTapz7Py7CCQxKZhxY2SsqPheVRSjKnxSpssWA5BCwfaA7SIJICOhon
ZRHLxkATZvjEMKcwIMZ81MPc3ONVgl7s9FsNNVGDKsNJGDtameisAopiduuH9OCC4epdZh8mJlRv
RFuMdu0etizCnkwGHWSQJK/87PWbD+9+fH2Om8mrJliX1Ha5BddglFuL2ezWHLSaVMki+sBfFgYY
kyOgCIf0FjQw8xa5IIRKJ0R1c4m7uNh2v1hUrL9S5jVe/bjg0KMOGxRNVK/Gkp3KO8em4BjpN8TN
QMF8EufmsZl0toa0g7BNjsg3zR1PSVatIWqkzb5ECRqxgXkri813H51ejGGj5Y9GU0a1NPvdokH5
ML9CcTSPQ/Y3eicWMB7gh0EosEDGuNnVCwgEUByE3h9K1i/Te1cly8RFse86aaYU6JTI6qss7PfP
6G+c5VQdS9Nr1EcVToUcwUkds38Ora/XbdKAAUJaz8KiCkevbGJ1D/ftEHWxeBybMTfYO12dVCyi
UlbypxQYh8hXYrknXcyPqktrB+Ip7mVze09Rx50r8/4whveYhbWeTjiKPjL0wBEckYVsM0jVrXZy
920wOFCp+nNwIjkvLu8zw7/fAtoX6zq34G97DVK3U/J61ww+nKHZxlCZfVvQE3fdujww8BxCKLj7
uX/+6X281zVj9YXnEuweuTV++A5pXA+V7KgGLgCq5qP5T5F/fPvs/Xvz68/Dewj0uDPCNtzMfymD
uUGdcTA/QuQeU6J4PTlSdtbubw1fd1+EH/MM7IDDD99pIrDbjunx4ma+VYLoiUYAkNnJeAoz/lNG
q9FfvKph4YgjJ53HN2VQlPpIVnc2Z/z25esPE0zTOzyFmAVSwoKgVoEm3lzCWR5XIlYZmo6Mro5L
8CTADZSX8Se+bMNX0EAgLVoHlYW4LjdzyuvQMV9wiqEIHOAvweltDacw4ymMFooAONrApdLW9TFZ
F837Q+qCVbz6IVUZgaV21QU+TNhxc6PnPzx7+XsIju9qoH2fbIA2zUNH/uJndZYjZ83n7969eec6
y4gjrVbajme7u3FbbYp8CgfWNLCl7ZLpbZSXETAaDDaoTdXoYCjY+++YjsNJGWWk2JS09nTI4vOr
jhX8sgMMCRN0F7iFep2laNamaREtazMGtN16GQEttTZ7LnYre1RsQIVY6nEd2tbU5JdjxmJpsB3Q
l54RfXnwkNi4OPVuuu5B5x9lu4MXLYw8zh3E3F4S4slVlFlZnU1M3gQeOMs8gVTq0AQyPXYTSA/S
Exi++9tPINx9AExCeBq4efwZ0PrGUMvoSdXKhxoLkaiBn2u5T5wr7EsHHZkCJo+Kn0Hj54PeyIET
5Tpjn6F/cvbdd86TDC5/rPP0tm4B6BijdH09Dfy1IOdDyclmWLsN+OnRLIBX4TSH7ikq1NFrU8kX
5tsKqdjT1vKgvzAq/RVozZZInduCabS4Jvs5vsCmToBEIQdFSPn4frxq1tcg0YbuvxzyUQJR/9bM
CpYOwHiX4PfM6roC8YvPzkvZR/gBlV35lzKlBP6MORUgNUOcBmHlEBBNN6hoOQKrlW1fgKY0sT5w
Yx3irYRNmvo80JCfDx2wMejFw0k9cfyT2faG/3EfhuyTzzrFLE7+yPFsyAeNrLr7dEuYKLcNJl4y
95zK/GkrgFmTvRGwUIEV9YocYqbpfeUFreGrQM9iR4L38QzvY9VxuZEDTToyqftbc3G2Pjgqkyq4
+RKtedR7jPFULbgRFPl7Xows78I0cDQQfpz9LwnISJ8Kvv/dy7fZ2aPleQaQ50tGF0pWXvSMxUz/
T//ieQTSP9X2pz98/A8X5BFYLOsWokBQphD1t+cnCJ+Aro0VV5hqkH6SkOE7CnLmyIS3oHMhBDqz
sH9tK+tPCBYZ8R9kN6gBZVpYY9SZ5FrgP20j+0sjGKLqr9MREatxZi6pCanwjN7KLC1wf9P73zbN
p3fgOz7wy0BqJinznszAo+zFx5cfZm9+N1BpdvaXq3pBbjTFynmrvAFHIoXT15InDCCrgcLaUC3Y
XCCLzM2Bmi9h/veQC6qFzllXFa7h7AsuFGZ4WeElDrg+v0Bcn/PBX+WDublf1ZfdHpjr5nTXNKv2
tFmfwhY82hPTUCv4rlnDV4fdMYt8CZhtn8FzGr4Dl7m3zz78FgVeyN/QmPluKIDS9PL6Bm/H2yVi
uXH3j/LUPMk+AJLEbX19s0PTIcnbqDmDBBGwRc2Gx/XbzT9VLkCS4tjZV5C3yexqv1rZvAgeP5Es
0Q1xEhaHoIKWXILnl3j6Ra8pjsECDpo3i1wR4kRF4SMQV1fg85X/zzCL4MAExNVQj8DbldP+zmZc
gyT09n3F3uK7Z9tr+9rGrsibTo8ov0JlNqQ0LFMp4NnrbsyZ3VYLzKfE9cAjVYkuYSqhQ84fwLvx
bKOyf+nSRPpn9KfZb1wc/gt0ry2THYVDg7D19Z+4BVufeVy3N67CiPnSpb2kH8vnZm92zhwDFoO7
eBsHWA2RJA0FNpKKBXEeM3DyM/uIPSu4qo5CAJ08hEdRrFUyK0YXtIBzr5CKg9xqy2qVLc+4++cx
JIGbGcM+F1999WhbfueQ+Rjz025AvfDdGRPwlrudr+fXXrwmA5brt0BB9d9h9vbVqg3h1BHolbsQ
oriFe83tZdprsQ7eWTlsKcM/g+upxMAGTIotBv2yf5x70p88hp1ii0xSSUptSQ1H6oYX4F3BCtDs
w3olGK6ZOqNQVczIUSAVJtOG9j6b1bcjL9MpzgL0u18k0O9C/3bVyZHKwTFGeyasKl2bBXWkDM09
dgyqniIFu2Ynyq7EOZI4PvzxB95mM8TmugburpAvgqMIlVpqZ1gavUc2t9NElaMgdgjwaaY5U/9c
eUmEVCwFDmsnIt4YZIdJZaNLjbNuZaTogstj7cqR4X+7Xx+aJdUrgYNwlCFYcI6AomV3HWCSR88t
wJbOZ41Qb1v749Od5YDi5Ovmt+ys8Lwj8BGl+2tlM0Of8iifK+T9gyMKubZDOn2FR9a1S7o7abq3
t0G2M1DBg2HRcE6LOfxhnolnPaTdzcx8AuN0v9F46pxgzbB0d9tmfU0JeF3uwxF5SqkPMITlsUSw
UGxnSxBgIJ2AV3fLtepmDGd5d1NttYes6WoR6lHywL0GFp4yeZTtowInq2wP+FTZTUREXyS7Xg8x
d3UV3NxWLnAPQAGktkepF6KW4DeSIkjDkyDpcQxCcIWo6wPfpnAfSFqh+9zCOyRBoLHEJIa7X7UJ
CtCbmgXJ+mLVdgjYx3qcxenkYD44WQueJ8orXK9J9VZ04NOHZALTpuqFWPmXPI4aOBIZ9iK+7sSk
O+uZPU87ha4y0dL78GcAkW2RURE4Rs3gQWi/2jz4RsG9EvLMFANZCqnPvod6aOexbzk4yF5BVtji
STm+mgUsJuWM51pSWWHQcwo0hVQEGcxvYlVOvV6O7OS6jDNums/qyXmPEwGzp9Oe/AGkIijy189e
vXj17MPz3+YWpNZbg4T7QYGjGKnJGXGzzN72uB5Is89/++L57168k5bRAwirLY1Mdvpd3teNfocD
O7A3/W30NpFExPGmIvsarpYlAvIfd3z9zsXznoNyJNmr9IA9R94FoqDgaYdePdryVmNMTLX7PCDA
ZqOwbIMQnYNHiVOv9+3Tso8edO5PEIf0Xj/v8rSA917+BWT4w8lYkNw0Mp2cuNkoz/0vx4jktyzO
8oxtXWCo+lI6JZTrkoqq8FbBhi+QeryMrqn0TGtnW7rMfEQIZUBYofngyUiWNEEtDcs0SCs0oFfA
D6Q1Gr83b5/D2w51CLx/BQpK5DI7KuAC6Rrgr/+XvbdbciNJ0sX67phBsiOZTLo4MpMsO+twkclG
Jauqe7tn6zR6hk0Wu+sMh6SRxW32KdZiEkCiKocAEkQCrMLM9JjpUm8j0xvoKXSne72F/C9+MxIA
ObNre2Q7ttssZEbGj0eEh7uH++fj0v96NVvIC1xgs8WFV8puwpTtdJbF3cD4AC0tUSd5O/4ijZK3
t1+kIM6LSraev7T8xoNWGVAnoUK5iAZJS/4ar5e550wnZhC21azcx6oKNGbJn24BVTPjTwcKCPRD
36ZooupKG2URwcEtq6r1yqqBIKCi/GnxAhTUhJ7BNI7qwn9ZYN5XlQhDd6ux00EgRy/tuqu8BJob
XTVHZNSzidajMW6WZhIYqVp915IXqpknr3FPMspJld9eEVV2CUWvSI9Y4bXE1vT2OjLSLOO/3gjo
vH6kUrUqG5y6BFVYkL4hh+80THmUX3g/iuunFHEcgzDQZFnMKtBtVP4/XDYcYTpb4FYVVywjKNUF
vAl0jMvfgAozBcWhmNdrIFYBbCaWnWy1LCqmqkJHKTeUTQ9N7/joaB/wPOl6X/U2m71DRkOtsl9C
mQbTKCkkekkhsWeeeFcEoPnQPeA/PHwSvngK2dJA9EQbtsxJs8DoBgeCeU3zKcZUR/RgD6stvVE/
049ASTUrHI5WAkhNrBH2XCsK1R7iKQujNDlD9DyeNnIpwC+TRSveFDpEVNMx9CCMNcXvFLmsFQ8i
B6xKSW1RM5AN6fbDokDlMx+ruG5v5iyDHVvrSJHAHktdyl7XZJBQIJDudW65k0MJHJHcfTR8ye27
ZE4rMHFucXVUDPc5bZHLSdwyHQ5o2Hyp790+VMM/hPGEKb29iqn02oR3eC4O/5B5uaYb36MZr/1r
fNuGr8bfm9uLbkqdtYMilmQaeknD0nckOCCvUObdqdhMNfMvZdq+Z0chvzZ3CMZXGN7bGSdwoXob
h0FVrIltrOdw/NUeOwOpOx17pkENFYY+SaueWPoaQCu0zpUmLTFoau3bPWsGenKqNHb+0QGbqwpx
SUimTlmoboZD54uk+YlC8wWW3lafy6bhMFCAbDafbD999Ggt82OiqunJcFwTVii1rJc0mWr0vNC8
eSYykG6QzYvbBMbTh/9PW4lJFsBX9IDTVaQNG+BsoyiDMi+G7yCwXbxeTQ5/FYdNDDZVy3qImyFp
s2LV4Vm121J/tPrT14HxDdbzUjzGjNZ1aQZDoqMjtvJ35JRaX6V7rz+3uQZyKELZ4yRx5ZJeLeMw
qERREc4XhpJzXX3M5/KXznwZKr4QSDYpi/aCYRxwxFy1RxPzOlw0mc6qY7Z8aMe3oys5AA/60yab
cOtH3k9X64JjzRsmXCuWX2wmgsxKn/XlA7dSkK73rY9q68LuLeFMXVV3zQoxQSFM+eS6rUq9NXXT
UoXXJiwprIiFYZPQfpFedvkY6V65w5CxWkDrH0H4brbYdLeSfnW3+qvqh+/bGxAZTriiXDTDg34A
0A+9NMJgfuy/YfG9TkMQNHwXZRx81EhC4MiTyr5slTZUeafO2IZhxiaCsGCjMjhVEKJduBKzUqxP
za5mjQj0MFEP2SHFX0XclvhkQYXyl71e0VFZeqC8g90EkrX/tXISavhRnJ5qJwqkGFbjDMfF7XNe
39FoqfqMHfiyIQIvFFN252m475KriViK5EMSYRP50Zd/bZrUViOLYolXewPBQEgu764QzHBOx6kA
hKBJbHezfN3rt4sssJTIq774woWSRDu8A8dqzQctOd87Wy0LMrwYxy0q+xFztovgTnX/P6Y5N643
wJR+Ble/eol5nGtrHzkwFP5NF3+DO0J97ge+UWJxMSIrQuluyUe704sv1/RF27FzgLfRoolC0TnC
4+s0U+za7AjklmWZq/VOqQOD0saWK7pLVhB+4idIreGNN6G6Sm5J+pC7YEKJoB6qRt24+jK1em6o
wGPoNz/FLvPbpEk0fi6QBAYuC4U7DMvnUTo0hENOPCe9g845yNXh61FpqtQauwoQ0i4XV2HoMd2T
5P50Sw+PW/KpSOonatJN1qdFVVKv8Pow/bSRhIcginFoEFYhUJpro9Rq+0CdxB4ywhTjWvFVHLzM
kM9URISq+YCh7R7g6nuwAmKPq9t5QFPGwpfHVw3yerzg/hbJppyLizUN0yVM9/BQ+BuianWvgvPY
0ga1jyOyqGm35E6qHtLIbsIur/NB18pzr24CwYxctqX0cesRb/FHPKrThi7YcCK+U6JEELVlpKUq
4TCemUg+TdvawQ4KheU2gREzQlonadvSHG97z9YhVPFEyZBFVfn3S21CjaShr1s+8dbKMPSXf9Pm
9oH/2qef7JPmdZGzFUZ8s+cvCr2SXMOQJTRs5yGX/i0mPnfPNv246adG39omRdgUZDwvZosQlI9r
8m7eWxPXUgEhpqq+XLhacrbCf+6a5qw55xAFE9rgmBK1eOW+zwZAQqCbIhvvJtvy7vl9MZAqZr2F
UzFfmZOSwErHlULcBC3mJp9ODhmQ0+vMgcoVkVNio3WNNSODjmpCi11RVosIznC168aSzHO6oXmZ
b6y6ECsmSgg79ZYyX+YLWCUYIcB33vVKcFQxSAGTWDPaGiH7uqDJrHYMrHYDfoputGHmelFGsXCK
kJko9EU2rpz2fEm29Tvpa32zXuFRkFi5IdtvNpoDTMNhj8FNhf0L76tW4Z0xqBuyNgzaIvEeHRcE
7EA9zmTtHI0W+WgYWgxCcRF10H6sL+fjgBXXlhrrhhhB928iDweEYfS6Q+AU5eah2mzF83IgYIUt
gnKPCX6oGceNje8pTt/OEYGSW5fI2oAJUjVt5EBWDBp0ql1CuWkx4JiyaMIdGCEmbNjoIwVFDLnk
Cq7cNq1K3AniecS11E/guGEQ73LVSDby7hbvubebrE99K7B0WFkqf+mErC6W/EhZI+2zVnUncIdg
faaMGWiePYi9Va930E49GIV2TQ3rmtjKU6IMH8rk4eq7GtJ2XNhOhHTiD4YbopLyd6ZJalhvPOWP
S2WDWTGrlPYbcHLiD7Ltbk5631JhxyurUCIfKBxyTnFCUp2I1EvnWcw/cPBUwfm4/QgTeHzZffHz
xY/Pn2GEWffKhFrVxYLN506us0t3fvFajYJ3R7dj4D1ovv/AsbxWpb2o202vLM707vayCwWpNfi3
sU10hGP2gsasRrsTc5Kp0XeJ0ndo46jUipdLA212TtJ1VBnrFmxrDfIkJGvJK0v2PA7d6MQ8Dmuf
LE7aysEA7XLiT4gKMUgBsbVg4tF6CV8ioodrrrPmZ3JMuxDjVjNedGgfPUb/yNvYuw76lY3/cRL6
7mTndw2vDJSt+hR+mOF/vPgR2giaPTgrRK2AybGe/clJL5D3vKqLwWQMrNRJZ4AhBZSYIG7EtZDo
j21lt7nrBV4SSr/n+32cURtezycnjcdbaL38RFovP4rW7FIGHQYZHfYwe7nTdZjXefYngyFsK/hX
UoP323g9Wwy4Zt7HOsnRlpK0xQ2+RIPZa0+/RHn0kTNfz15mh7Dw7A1uVc/7XFz/Jott6OY7YHDY
PVTfKMpexYKcgqw/WTTgoF/zPeMZXSAGIKGpEnH3Re2E8TZWFaKLqPCUaqJXBfnlTBa91GWHi82w
VBytHoEksGIRyZd1MXZWeJuGh6Zv8WFivkR7CeUpD3FVYqqqJg+r26rM60wzDNCRdBoIhm7odeqj
V6JH2y2mlwCtqppLO/BzgiiKGGDN2ZxG1XKJypqcohip04TBHOVz7A+SOk4YcB5RBxFvoMcVp95N
r5kadKKbgxLDGVOkG6BAFtl1FuW4XEhjhRpDsGp+a40g515KqVDf51t0cBuBNpaxYF4gkLVJoW3G
v5NubmbGW0qrm8pdS5jKq6C8eh72KuXxojee9GivL7y340IBBPJAOBw1qdKVycU5evPxc2ZhaevC
9Akq8+euUqtLLaDqn7YscdyxgZT4T83rzXvL9D8pV3RMj0XXjCY3VrplmqnyBjRqHZi8wUjfmsxA
5R47yi49aZsfxPbkEnsTOD4cxaYlvzfGJ+B+wEzvyDGkbgzm69kQIyYHeLWqQjl1XYdxz0u6WyxU
7p+qQufFvu0f59vObGtX/17dhYlY9KIGsztoMaIpDEAyJM8nQAL0vlKXqnchCEDXfnbgzuoB62l1
7T6xOmua6WfdXqOjljn0zgtrUK/ceLk7jpW7cnZwyNBq+rDoagvp5dHVroOBD6JY2F6spt3yKFjk
t/OBszIYxb8nyYgGeKCjJHh8lB39zXamwyObDBHZGKMtieAdLbg3h9TfBtSiWkSeUM8OAbF8ayea
nH+o3pFvU6TyYeQLFPx6oXNYk8+O1gNBGb8HscBZwwrGjVvo6Z4psoY5JY0KZe8G1d2f/sQZJXbX
dAkNjNmd2WK15ClQJAL+8WV21JJvllJ5LDaLzQADFDAdCsI5dRmDt/v1V7T1dCKyWY753htWT2fm
sbLDr7+KhuWKRRAGENLIcmqheZnSYtBR4JiPgzUL4rga8LgqOLn2bbV8h/JJCXySZBSu5Neft7fl
IDZNlkUxrMctK3nvVnU15sqwupYErgGFFGc5I92VlRPXKQ0ozAkAuVl7HUmtffl3L8MoF2Vdwmsk
k3XFeRyshdawkmJpDWUEWhXI2WPUPvZIY4TJV/kL7drXIr8/Llrkd+U/f/7s4uzls4dPcRIOUXM7
5Ir5kMSL71GOadB4V5Ktk/ZtOJZzk5VVVueTgmIccTA9jQbSdHI2SZUDYSkepowNseA4LFunhoEJ
USgzW0oZPAcSpfaMkHOzXFhO1OorTzDdGtjYWpn6yqtsR4g5JX6T2DvMhgfql4XXos7SOiJwLwZz
Ujna6BDB+jInh+yu0XJ8oDomDywPEMxuq9DN1BV5wJPfGUq/9dbeXAiToVNd5TcoeXknWMCeUOF1
13F0BFpIzgS5+MUyC2Bn/dgS2j6pgz10Ypn79+PWNOVEH6ni9ga4CTDEFW43hZ+GZzz1EebNnplA
DLygiLp34DLkvr9WWkOTRMcl7HNK8KjyMXyugND14xBcqREI2XVCeU2QuIhrTvlWHEbTYtUFOe96
jnhwKxx8Z+/o5z1yVjVkVHVvoEamYlugR3Bc42MV14Jjxd97hlcpdCC1cCIKnFc/KFBRobcKokN8
ehq6q9ToA/BBw69gGkhG56ItuMHG1s4zO/7e8hRKOLsS0zbBtH4egkBM9DB6buiERAB/Fx3v6hVf
ls14jnM4uuaFveidnrXendnE1j2KpmlLzLHFZiRnp8tnuh+1jbsBPoN7bdeeS1U6E8ZFbvarGJt0
WfW2BJL2OLrhLprKkfW3Rc9qzGI7bE9Do9r8RKFD78djnCmLQz2M2h2yTpuhA+28pZy4+5ZyfcVq
3+5I1sCVBjeZRjsP5uDwaLW7hhAUtqFr8HvlYE197KmmevKJfWkIJGybYLU+cX82EAOclWEfgnzP
La+0pzD2o3+ke4J/cl/6R06L+VT1Gf/W/cYfehkF2vdv2dXa7Isbkqq2UVAvWFOyAWorRVX7piQ/
sbYLg1B5ZGwKPAwAcXrFW0J/beFdhapoBcdikbO9uQbgl5aiFThCi9zs94DF9rJioxUI5xfF3er8
uY26xbQaKESb5u3DiSe8CHEpNJULsI8uVwD0Ttg1dbpJ9YXEcRbSPnItqFIaCQYjoBMjs1vcHwnk
Q67dI9TIjaUlDZMmU0qODYDjlqiL4p39locELUHF6kh3wvmaQBSMxIFYPQyuK2QWAtuzKx+0KkaR
D3Mgm2su8EFz2x1dRd4GIqd1tJr53jM3b10JroGOX4cQrWSFNMIA1ci31+d+1FY1/6Gj3lwkC60S
YRkrGtyeh8ESJMJq1jZW3ZA2ermz13oPd9K8p7P5cfCmThJjINe6Q1FSOoo2UAom3A6jxMoC4mUL
0BB9k/6Nkb2ogz6qF/D2xXpF6Gnl3NNbhcz5ZKXDtidzbzgC46MI6MH4tIB7cC3E4vFfi15yvzlv
IZotYFHdl+UXxxZ2+EcPWlpqWWFtS0tB3KMX6DF5Vp80DGf+za0pKve3oUvofdes5mUWJSwyzIEz
M4XduGGLYTtS4l5gTdv2yLwiqtFQSXZoZu9ldDPueHNirR7L0RMAN/M2mvqmJXYY5ycu7vKRKCyn
n7S5tISoVqhqdevG5sblk30a5g+QTOXKfKDa+sjetgGciVLqTtYW1Ebqk5TeZxDBldCaQVcPGRX7
fYZsVq+SxXXhjwU8WxbomI+O05r13ltKGk32kkJzKU0zZgx48+a/++yzz/AKFeQKdbK///nN//IZ
JwwgdM4HzFUInB2VY5SB3v+XN/+DfEknIkthoDq+v3zzf/+3n33WzAiw2HQ63zPneKgKc6p1J8Bc
/q0zt1CnQ8griGWF0h1v/+iA77NWPfY9NxDuw42SC5cFztdyIR6L+HmJGRqqRc3m7oHOFm9y2Fl/
Gxh/wornwgg9GlmFmMs+w3fT6QYNGFExGxZjREeZF7fMmLDfRT3KSR+BJVIQwD3FleWCU4pxuUWk
89bXp9Hb+Z968J9fyEb0dv6XLIouFAh9tLqtqFYc4Rza4ky1BJw/x9QPY7uPNa8CZRSlCnNlsXIK
msQu7CFfR0n2AaSh1YDwStNexL+0vStJU+kXiI0rMqqUphYyceEzqw3KmVpzem0gpUbcVyBj6PaB
3/HpVYJQm29w6Y3LyaTOOrYkvsxvB4rz2xOHh2+3K25aSfftXEBwD2QSzMTwCYrZ93jlAMWZ3n9x
s73pli6PrkySmSnjLalXcGg7UunUCRP5UzfFfrkPfwk9/Es3JF4q/jDd5uLAHTk8vkIwxu5bGHn0
hUIL1YGccrQe80CgZQLgVPE59HtEIAzqUeOkbA4V8y94ow3EznDN2L+ASQMa7CLpo6i7T1pxLM6h
mV3PMRda0cQibRvH7fFeuy9Iq+PgW1XNURqKbJWXQGKkchf6fZ8apK/Tw2O86kf/GyQOUsxLZ9eg
2S8+zVQ0VKBgYMhu6KMzjOYrHsIllUAaEBFUR7eFK7V06S/d7SQC2hjSNGhihX1JCR34JdIxbmJW
EmU6O0pTdQ+LJHjOtF/u+U41wc+z4EfbEXyCdxocFFCTgz+jCJBLSAvAmgZegaO0uGvRhgLfbm03
/vZyuKzeAV9WgGaYfQht7/eO7sbftdq9pa8GVq8XlWM9gm08aWKp3E8IKNiHDU73oJyK8p1wReje
x89C0rSULpnBh6FfWptqNClQNujptQItEF0gE1ZQMAeTFn/Yty+Mz8tzeg4n9V3LDVGjVUfJ2U8W
dnuNBhfu+RgEoTH2OtVhLtsx/Z49P3t2sWUSgn074MC/ERzqoChVUTUarZcIq8m3bSyLLdFmTngG
JBuADBSoZ1QRvhtDZNco5aG4EH+7XM+/i7NOcLK3Lnqr9URHffVQWl9PxwMUnwOT13Lu2FvJ3HSN
C073xgnTUFCEQX4Xh67nqIYmw9CVK88x1RI6Loibi+3FE30XJV+avLkNjsXRVBxwpCRrlU1KU2PA
yGkx/uNPEWce+ADEQRcMBifcfBl3DF1Uri3WAwZ6J2BtOk2YVAn7xG84PCoY+Ukv+prkoqTVt+cP
0LFYwfe19QPEvF39sEZjPXWXDPW18/7tm//JKEwquR952S7fX725PCK1qdP5EVQlC4OCU+lhMeVU
LV9yDgNc5yDVduhTK5laz0qjBuoTEP6d4K3b1MJcEECwL9uynjH4G+GRdToU3TGARgcjTMxQa4vG
qJpWwIlztbFD/lscGnLazG+lU71xHXZ6q3N65fG7BstVFVCfXOtKs8OO4r2tGX3pLpoeejaVwLkJ
2EV5uGkqwYxUdW8CotK0h/noR+wkRTDZcOTg86ysRqtpctyT0tnF+fNHP/x0/uzVf+nFb4+OjuL7
v2L+cVNgUrHebTnm+EGqL1vPFyDYJFF8A/8TTPMojS5PT65sOUc+juhrK8GczupNL5I0MBPOpxjr
7Y25s590YaQKX5o4iJ48fPr0+4ePfmvg/aUtDMGwQu0oDO7R86evf/fsVbcX/epIjnjfexdjEND3
R0UAOD2OZvkG9fphdb2G/cBeGnU+xxSVOTogukGY2JFvo6+OTr01xB381ZFNZaGuS1RGGW1QGrOx
Yz/X1DDFAwxEl6dNDl0E9ZEmKoeODzgehXcelBPDKLtYi95Jhw28mK7rGyeIFU1yaFFsoIApd0Nt
XuXph0YIKObOAOWpeqBpYjBeuijqEWeIskIl6xFp0PCu8X1LkJhmXlmJvqYb22lUupV0RWK/7L69
Ox5e3qtn6C8OPFoMMmS2hnau0ij6oglNjLU0H3NdRzNQnHkNPXz26pzZD8WERTEoCMUq1imCmeRe
70g7nnc7/mgbLGfLMOGzYxmBu+aGfKg2jIRMZiJ+Yrub43NM5ojkPb7aJkpLza4hGj6nSAKMfU6Q
KqfRk+cvz354+fz1s8eDn348vzjrBdyi5mSyCicb/fK4lzq1vDx73As6Vy2LcUsVJ14VP7w8O3sW
6sj1sijmLZV8Garkz42OHUSbAo04LbV85dXy/dPXAZJgUNK0xa6bfPn3gTqaHYE6FuslTHBLLV/v
qEWIBCLwJm+jyTdeHa0zfHsDsklLJf+wbyW0m4KVmEh5tCWim7EsRGL/xGj8BhydGRezZ1zAev7c
tz9Dr2DY4Bc/64KvLh4Pnr++ePH6YvDjw2ePn55By4fHx877s5cvn7+0X584WYaFxRpu6uXKI2R2
+O6HYvVqNf6RfiZ+vdv2aXsNTs9TG8CYWFjN3zyC46+aFucg4iVcV5rdaptr3fEJlpjv/y46ujua
WAarV7o69Lgwhluut0d1pF6+HjTEIJ9Ex7ovT775+leeRo8MkePLsNTlKZXx7BjW4XTJdThBM/h8
a637j0APPiRkNGrVBy2evl45eqayfYh2OK4Gs3z5br1IyCtfH9S+uPPi5wFIPM9fvuqS9ap73G14
OugTYY/Pj5qfG7avhEWJ88CO9aIuH1ES3eEcWtFbJ10vvPabvzh7+bsu+Zt2x+vZsNv8AgWJVn2s
i/qYtAxV62w23Tm5BEoKCJGsfiKdyfW7H1inGKZMSoZTEJf7Xx7hFdq4DwcSnxN9OFeE2ffhdAib
QZCN94HrCy/uA/MmhtoH/stcsQ9cNPzt99TuV9DuS2j3K2j3B2r3K2j3Z273qy9bv4V2v4J2X3C7
X0G7j7Ddr6Ddn6jdr9raRW7YP8YLWBDi+9DYEMSWd/2/Rz9wUPlW/W+0cztKo2NCv8pXRaQdq1Qi
vTZTpyWHqm8kDbMJmW+i0oblUYOGRfW0WDIsny8RYD3Xr93mFvMdcOrMRMO7pnRLnUG9CtcR71az
ct2a3Z0TE3+IubT3ilhC7CcKkG7RSn5SSmtbEbytsCypmTdnHNqu+ERp1MGG1cvsIagcF9VPKLby
iJHoRe7Fm6juEOiI/AnSp3JgFf6hXoHG1gjmYd8NaR3/cd+gOXarEuV6GOY1s1RUaYJc1msX7xan
BHxxZDmGiRYmy1trXO761XqM0/Bp6Kj6mygsRj/x2Te+t+M9aLR2330Am3rU9HtWaQPf3TY2I71y
EvIqnrqPI9V6jrBuc+nXqWRG5Lxgjajzd7ecEqNZMTTpYCDqPvAH4WBKPZNMB9aQcCbt6MlCEQv+
Gt3khF6x0hxNL0D57dESGZleokFuZq9gd0l7yLDXVT7FKlSqU3xGl6OIMVdHU8xsCv8uqrouh1M7
/+l6PuZrf/RQHZdkY2Xf1WIu8Pzf9qNmu7vscaYFNIQUgoBRzrlreC0P7GI9m9tYCwJni84AXh1o
NOXxrFfio0D3KNFtvsFBw2FUTjYP5sWaPFf+WJDZ36skIUi/DS1HggvErrDfEtVNA0y9j+pKjDxA
SZhyNAMhfeFk+1CV46iYLVYbuZKXwbF3Sss8Htq3uihx4nJpvxM60ElsT6IvohO6rgReNKXbSpSH
8fOWGRLqZ0WGf5nvZamm95/tXYn6X6MC+xsKWtK1HUYnLZXQV0n7Z2n04EGUuE25s/Is+isrQBLS
lqKX0f3oWcNhQaKyTSg2fiPbm8SWrS4PZt62EKxlpjyqwljsOkIdNeNIWr+zQTMpzQbw5bpcrdnF
xuypZVXNiKmTRxHsJl27QGsuyaDr1oYRR+VoPYVSvNuBl9QlM5Z8xfZJU9G0fFdE8SCKnZs2YGRw
AExRCqgXubkoZXs4klI6UKA4P9EsrhF4RTzrC3vw2g7pLvKmlwqa4PxvOk1PcnGe9fDLBNuEUerq
6xC7h8enweA4YwulLxO6HeEjOJQOiq/6KPEJ/JkG85YbeQYlDf9oplrotdz8MzIgHf+m83vIwnYl
8F/Xzo0UwXSBiZHUeuYLJyxrrnHA+91ug3b0vRDY7xvXTUlTcVsmfjpF/owdrgyOSWE1aU1nGM6o
RUQL5XTWmEPLgho+lKNldYNh70oEjYN9tJdV2+hcL0xngMtuu4iqd4Ylqlok02Swfa/LyeSETuq+
V92hVZ1FLvPBd9FRAHiS+xnDvr9vympFnNQFMamIMu7q5pa/zL+SrWYbyBreB/vvw482zP2VBjrL
uT5fT1eD4TXnE7ONdUdPjhrlxaJqPgt5uby7JceuLpoOYB/7aKh7W1a31b0sPqHql2ePw/7musew
jT++WjSWb6+XTEQfXzFZ3bfXzCanT6z6zztp0+bRo2r0l8zRN81Z+zhD75ZDI3DM6fZPP7pZyzqr
2I+2muyC3qAOzjCp4LjdSNVm4fAYtFUXFLJ+hcWKcb7KA+BJqnIvVQAUxu0K/6hMZ05pRKYsQH8b
FQ2TitWRhNq0wnbIGh0MmuuQS5B/W8mlXFaOVqoGu++EHS/Ii0QuPuUtNMHvxfaorkVHiFGrMAJ/
EqVyxG10dl3TNK5orDsaZ78/fPRbGnSfF/0RXdGhuzLZUhrFX59FdvFjFHTRKKNuhzmulMzDmf81
bVT765OWr4nHND6HbR05jX/V8jkw1MbHdDdmf/yNX0Iza1XiV271Zc2RAHU5KVUD6KixlZJAyOsl
hY9xJUTa5qcuVY+DnwZoa9Xh0/Zkex0Wha1KfAp/tb2SZYAMPp2/OfJL+HT+VbARn9q8qH98/vIC
TbO0Q7LRoL6BDcR+RJxk5Pnzl48Tef2KvIIQgt9sbeC/xXRco8feZdJ9A4cN1ZmGbwyS7s+6xJXV
zKvfPXz6FKj16GL/tp4Wk9XO5i6qxc4yL1Fj3Vnq+2q1qmbB3j96/uzV86dng1ePcM0Mvn/95MnZ
S5iWJ8/3H8349lX5R5QviOKtvRjfPlov62r5oqrJFrfzA0vA6/Y0Z8x+2vZNvWTmiIPVE7OlS7/L
78rZesYfOcOQYP+BLbma5YZ2vek0e1cs58X0y5PMLtX8Dn1MlUvdpR7IYxzJVaD0sqixBB6bqiwz
bn1UOeL0u9JBzBRL76BZRqXwCwoQ7WNr+WBbZeEB8yC8qbzaWk+AFN8/f/7UzI189WqETOz79WRS
LFFLgA/MjWr7nLV8vav2rcPbCR0v3XnxnCDYkvYtmF7t7EgbfayFEtCdLDmLabWFDRgBaks/tPQp
YxtulsUkwcqbUD341IUbaHqKfpLuKGMJD9kyxr1aD2uMVebwOU6pfiM2cpCfdWobujkglgWSar6w
QU85lQ4Z7FAIfjuPCF2ZgxW13RzlNYmoy0JUyJhzZj/3nJ9vosPomH0ijMYAugKrCqfWrTSaOhFl
sI5ieBXjQYnmQ1wQovubEiS93N6UoxslWlazxbTEO22BglYm0wy2I90SorlztaRbiAK97EdFJFmI
f910hz3Ay43F5uQbDnccVh8K7YZN9qHKAlcXu+m8qTwcyA1ltK7XFOFJ+JBotVX5lmEwdX6NoZLl
RCZBdJGydhIVLci/ocJMMDZq/FSsNVrv2gP4XA3ua0yXNMND12BXKmRtGRmDP9biBe7e7LeoM8Gw
GKejSsux60iDQSW74NvFsK1GwemaMCIjalKTxih03A2x1upQoN18wkgMFpEnMOMo/Z2qwFF02lbz
jqZr2LTF0jSrbbMm3zUXPuTv41SjZeb1qCzjrevA7ur7f6JwbZW/a1mMbvPl/P3gzf/133C4tkbo
ifAF/Fur0BvCWNOxv4w8TbuKYg6a0drqe/E+13jq8xGcMhg0Rk0nAkhqgqRfKiidn6QChbCpER/5
ikKyZdImr3H7oO4LuivVcz/6/e/R4IJs4LpabljZ//3vTzV6Cho9pYsugKT6JNMVMaQSfU1/apAf
M0Q+64voZrVanD54MK5GdcYY4Fm1vH4wLYfLfLl5oD7IblazKeODmDAfqgTvVqVX0pGy8MKV28Nd
vrGOmWo65rQzeJjrduWRDYih39UlhmpLrpqu2Ay7bnIzcuMcSC0+wm+gFTZTym/rrAiA0DpVc6u3
nPXPXwiy5YK14CeZ+pk6zvqU5s9aigRqh/dWahxS8E9d7RU1HqAfUPc08p78whV5TxNc3G2pv32g
KfwAZ/n3v8evEv+j3/9ejqLy+hrnMI8eS2Mw50IQd1lYCJpAcpwLAiQoR+WKvWnIR0fNESJE2kVk
nulwtp8n7YnMNbqjG8bqN96owe5ie8+8Dn1yP5rNd5y16hPK+d0sKiWacgKHewXnUriwl9blI3qw
tRdNOM2dkDV8C+ZFPGNKuHHJbm7AW8drYLTNNYfePLTMU2dzYQicwUWmbTqWL1qtsiLw9DTHZd9C
TpiiwmK9Gy6+9OQP8Yjkv7wsaFIdan7yp+dlpoJC+7rB5o3xvBKIg3nVfCmvtCXaY1B7YtnRMeK4
ZinwJJnsZG8SORCGTgNqc3iz0ozO3n860j0j1GEHDraPJqhTtowwAGbDouEF6AWtEuG3ER/DJ9nX
n9rJRoc827tXg33cWo8D563zjV3URttWSP/Tui9T56EXQhmSWxjQRRAXx6qZnmx2jQij+EVxB4uj
zmw0QsEruw2CldWrJhJHWdfrIW2B5DYz9IK+7sQnqxlNokz351pu9koHtIy9DKHdnqlf+TlrsYXB
Kpcbey8eODNSgXCpSmUi9QWKDQbypyo7GOjSe+BwYueayJtKZvE/aVsugZXXef97gl1S0r0OYM6s
KOT3+Zv/7d+1oCuRzaD5Ncj5U3WREwS26KxdBKbBqmIv1X/d2Bqk/b5Q7AGtAiCLkKMEfdzvn+CW
OLUWXnLX26TtdTg2AtTybvNN7VbqpqIWxzMyGjAEBqjdShGhxtiLtQFWYOprQFYTT67YebPv5ohp
V2fdD+wc2/uAkKwT+T7In88U69lRR/Aw2IUzEvzoXqQ65OGMqG7+G9DIvwGN/BvQyH8NQCO7wT12
YHtY0B7Nk21e3Bqwjv0hPlpqg+N4/9reD9/899ZZrdwM34/e/J+/ZVOceiSJDcjgzLZMZCZkj6ME
WZLot9MheMESbQXXVTVWq59Q16rqHZYVf9wP+bKs1rVVMYJ1QxUoDWpxABuwhIOmxIAGeNe6l4/H
FbH6hHJOq0Mdb70pzRw9RMZET5K4OcSYfVz0DwLY7cfXxRykUeWESV9nVmvdww+YI/nwEJbJsKrx
tjMnO2U/Jnj5uKFpjKG7/ViKQzti7kJQeASp7MflfLQscpCcuUy52mSxXLgGmn/Pzb9fl8Vq38ap
cKjpcfExTS+7Vv2q4XpVLXlY2BITFA3NtdUeR7jMgMHBeoC+8mtTF3cG5UtxKycLcL2ezXJQrmkj
wpquF8WI/BcQ95DqiJJJSqj0MJXeyOPkLCWWDaJUnWpo/eQuncgXyZtUgPOT21TLvu3zPmXCYy8p
eWHdSg1QL9aGJOYDiyIcxhikAJelgBGlpcBAx2WNlwg0dqkk3dLZQ8luseeEtc8V1BiYqsTYI3vk
0Hy4TLd0ZjXsWlXWq820aC7VcP9WQymuO9jN16uq2/h8dFOVwJ36l/I+6mJgE/5Lbi34x5wfw1FE
v+Ag+lB0rxo18RA18SU6aYY3KAnW+YAqfID1POBKHoCmvm0ykItSfbyE8Oeh/N5KBbOQwqvG7u+4
wlzCozWw//nGXTr8tYSAQYmtfSWHIXu66MG+0yWF95+sDbmmyNRQ4bYJYU8mzccF5zeBCoD8D/BT
Gpd9SHDCyPWySPgvOSQkjySPORPOjNFX7gvimmJ6pJSB6JqofBVfyiOpuZn3WupqSZymaoRBqxGp
R12NaON2Z1wM1xQE6z6mmeZHLjT6bMNQX8kqRxuxp9qay79mfkoq6qNjcuf4xo/gY5P40mr7KsKM
vY4nrvSTCmWYqzUDcVVkCLQC8ySdcjFYM6rDqQaR4lZhoO786cdWflz4RakqIn8WubAGMZBiVmKf
BkZ8/PjsxcuzRw8vzh6fCnOjC8Eip9gixVsjacDbGq2o8sHWlTEMKEPSEcLiSyGVHaMXynelyvfV
X0HtRfsBS2GglpyELWl+LMp+gYXjsI+5XeEdn6f7VXgXWxPIR3h4vuidmjTrkZvygCLGOEtYs4Da
RFhGgoxNb0JGQ7un+JV766DeOgyGHw8oVyxob+taNrYB3eDfjaRMU6Ag8ZM4izuarlK4kUTJlK6b
pf2ESabwk9hefFLczUjnhbnpTydxc/SZZCDqSbme9zxbQ6+XJt+OGJGZU7ba3NBmYd3QcLI3FSNd
ozBE50zIhZ6cGvpUw0dd23Ab8Jb/8K5sdJtoUtE/Oj4miDoAtgyMDwaNYtHsPhbQG8B9qQVyZYEN
Hlpe0h0QH2+AS9EUerV8h0gEjdKTmtIk7lkahR/WBclNzP/G/wQTe0smM8pBcNTxcwTlxMH/9Evj
+RLzX+uRr5f8y8kdnqT7gX64QHQ2EAd3cnWr2rlV4QcK88MBf3FRHlR0hTsrLKIgigyKNYFrMmjN
wZJwbh121Qky0h5VulBrioDLYr7Sk+3YqCRXr82QQyfvFjyMRi/MVQM8wjpkQ2CosZWXHhkzzH5X
TpDuadS9I3md2R/+rru/ENoPwye4Mc46GTCfAf44vCCZAY9+wGDcCmCGniEfqz3oBSbV5/0AAQNz
4BOY/3Bt7+5WU2s8G1JCxym+S7hIGp5jErm8GwH58NMrZf8t6doXKAqmnZZCSyfHFdO0mNcgXA9A
FZyUdyr7Hf3osSKPiWBDjhR6qTuEA3Lz16d7kyBIfq5k24i5hLOdqb+n2z6iEr6zxdaOHFpJs4RW
gu7o370FCfJXkoG2eTBUDGNOoCSOhXesfzSZQTeL+osgECvdqFTsu/bFHepBd6skkL5H3B6swrok
LCu06dT9tug0j8qB1ezE6DeGJIHZu8ajsBVWy1DMtaFe/HYZS+6ALRR0kGJclJi2Cdo9Tq50oVEp
Aq3XnEBVQfpwOfimH/fjcJR9uGIH6cBaDqHoa3ceLDQB44nAojXZr+H4pQmXCou7Ed7MeYnPVKIP
f8Go0naawbbIc9FoVUp3ivX7LlJz1zh0jhvdnVbXyq/DlmuNANmQcQM37xYjX9WoLYsZJYlVCZiX
SwuTyNzSO8J2go33/R70rc7sDLGQzvZ9wVz63nQaEOcf+d2cTzaEDJQFRKNg8fMQK9xt5rCMGC+e
vv7h/Flkqj9V2CzcQM+/TGckZxAZKYE7Yf/c0BjUvZyXhZuuIJqgNRQPEK3xZmu1nuerYrpBh28M
96qjar2MdD6qZfvn1/lyOHXLM1QKecW7i3TrAnaBM4Ty4wKKkRCu8rSvilndTHfqrbmu+a5Lqy6D
TYVTzJ83mrHyE5MgovJG04LpRWrppXZMALMxdlPF6yI6uWBOVOKyYTHh9Nc2FMwxZpMq2C86j/gy
L+LruAg64bhA6cOwmUTcdj1Qa87Xck7DiDqEdyHjIborQfK+HmXbNLkiEx8Mcewl2vH0s1am5cq0
qhNY3ZbJYRlZZscxV4gZhG6FtdGMH9aeGoq3eNkOI0jfSdIMdDGGg9tqOaZm6h2LkL7CtRfK+yxn
CbaLWUgD+hS5q7JRQ+EqYtO+c85iWQ3zIexcuRByt35nS8SFmiyjBn/b9/FNpB8KTJGSb3dxPQr8
6NbZ3jrjlC9bZp2Hufc9vZFR/A+bH7n5ZZGATUR2fR7QW60c4s89urQjdbnBEiKFkUEzTnGyfwkb
KLclEvcqQ7yQXVW1ZjX36hLMjUB17ayj5iUh85gCD8FfYT6yZSVtXTUhpsMTZaTChq+Jt0y2ld+y
zloF1QDM6PbxhMTry3s1Xjzco1nCD7NrOIJv801myygtX++igN9YQBzcW/vTF1JkDlNC9y5EKcMe
wldW3/V9lzwbS0k1BrJMlmWY1Zgge9mdp6Vn2w+HdqNzOwePSYCPfS6OFXf8jRY0fm+pWl1mbK2c
xBV0gbUSGEhbkmDQ5W93PZWl9By+TK+2GTW/YPAskYha0Z2882YbPyfiCkvvnnUDcoQct9KJxFFe
nTcKRBmUqL4PAhQWstW6+tY/whSGi54w0sIFOsyaHELmppcsMlpnNU9VyyfK5Od9VIqzclAMi800
xDsSTFqlKSdsp4HrJ7ne3elNySRGk+vcpPDowwiBcfQgujeWIsiZ+C+H8KEV7n2vVjZUIH/uu7rC
NHObAE04tK5soDmLVbTT1lc/PBC5BtNDz7tysiGKBh3WvRVssarfyJ7EIYMyuMGoSr+hGnRVjIG3
tA951DBA2EVRs0MYN/gnw/+4NwuaK5vbldNtEqFFE7SUkCklZn8mblNUlThIY8Sq5duVOPOv5Ru5
tE6/tDeLKMIadPdeHR0eisO40oVtD8aeaiwI+U0ZvbqLzWIzsNv0D+Zd/W1U4HVadRyX5SUWPrxX
w/9dUW+l8raavrxyB09LG4ZMlfAfSPdDZQfYZANdwUAzeetZukWs/y46IhfQJtPUfhlOAgIVa9j8
AMkKi7cYlgjg4l0M+mNBkaPm6cg4EBq9wEIYj5YBwN7weyhuvKhdp3gu3be+7EXqeqHvXDZ4zcEW
Rnram8i20U1g8a0EH7JOP4WbuJ22r1qt6tjmIysz6Imj7FHjkjV9e+rN1x6PdkPo1NiwCmXHIjtk
y+f4PwkGHFNU0rJCMLmBExVocx547rgZy3qOW4RjqRv/ufzm9KrJ/3XCdiiRBngYMg/seq1sZ8i6
eFNPt7J1OHBKAX4LsdzWi038HtSgTeiig0xA+ihWRwZVnXnylmPBMUKFXCumWzRg4tCf4zDNaAjL
Ac13cVPOx+lGC4m6b9yrNTGrZBiqxFfKiepAG6KDF7gjD4/+mnPpQGLucO7y5XUg8k6mx7uqOwga
yulAIz3nnrK0JuUXx7p6Gwi/ZSpDlsgD9FBkf0tEAmHRXaBiyiV6daOtVq2cpY1Wgkgr+TsEVVgS
bMy0QKx7rOVcJHuo6V0eJTaQjUIeAa5ULCM5CGoCmr+uJP0lBlUtSwUrPUP3SlAwP1WS3lX8MBBj
Rb7dnqeEE0kJZML5JHJt5Q1YInNMod3TUziFjlM/EMttm3MhEC/Fn7TiOU71CIXk42C3uAB9waji
SzRBSw28CtJdej6vtnvIkUgYTqxK073SsexJH+USZFFoe0GhotxDokk5SU9PkZWgcXmPUfk3KSFZ
UmW6tyzVFK5k/B73GOO8KMaIG6F3jRomhoeObnJg7ylmNkfkIHQiRJj3FY3PxdygRFvUpaABTzrb
b7Z3eWrypF+F6T9cFvm7Jp9uSRPPyU2mON5mY6at00BjXKUchPBVwPBFnrfTTGWEipM0DsV7s5QE
wkMgA5+epcTKEH8I2yy6DyJdFHd2LngRWrkiXPDTtHkEy3noHL/FXSlm+F40GMzW01WJnn4g3dqw
DdZzkSvbL8vt6wlylFANIP2TI+AeveikF30VEuckhmPAWnDIa0GVUKfutjLqsjHo/dCUb3VOH/k+
8V2tSZJtGxxM/UlIMBEB9F2xGVb5kuOwluvFyuvUuJjKB42Sg1kx86Ae1Aitu7o0XIJEjqS5FlQr
A9MhfWlOWp6vAoe7hXDE/EHGbmDLRICU6hbLIbzVbvZNo6bWJ6kv3ZZWu+lHUtoIF62FvL6IN4A2
wu+YFdam5SPk8svRMq9vsqBDqaXuozDpKGFAg/i30ta5aivGrRNCoQ/IBjp4JJC7SLq3U7IMn43N
0QUrMrR2Ly5E9SLGO1A2S30dJTHA42qWl+6lr/sJouiBaMYggnjtOpZLcVg5UR7FD+JDTvxa/rEY
R45fnXHUo4Rk9Kc5kN++xcP4QUw5At02PQ9RZbDHnC7fHipbSXhkabP18CUxZikLuN5hKgQiSeBo
QoFMPhWyKSHtspuGi2Mcf1//qQffzdCREkS7FAnOoUHyLoJ3kpYln9XtRkKQYkCyg5ZZAaQm0j3s
rJdqkVzFvtcM141ehi56pbB+VGKslYVXvUFWwtdEqhmfdYQXIN9p62u1kHbFBbeMUCnJtjEvZk+S
X7sDos3UOpRGaL3CK5GbQHRAcPdl6meQ1/CugZD5YOR+SxPp5enfe4L/Hi3YxJCpPBDwEj6lStzQ
ZC2z5BOrnBPeo3RBskpYLnbNVHJ3Ab2b9YHLq3TrnfsdHj2L8RB15XlIR9e2kbuGr9fUcpjz5ZAW
L05xe45v4xbcOtfRC8dhPLyCV77q7ekuCMhWE7RyFZMxxE2JmtB42hvyhMKfnARYtxSMscut7Daz
/eFu1WHqZM3zxcE2R1nXO4yjRCmuZV7Fp4HYNU1za+Fpo02I5FLo0yn+5OH509cvz14FSC02pNYm
to8SlRKchJaYK8fXwOFGjvfMbtPr/vAaVp6oEEPfp12i2yAOZPNwF2C1xunTIj1V3Vg9om78s64d
vub851k65H+6ZeEY5nEp/bhqy070cZMS9DDBkLE2w6YW57asCx5OZN/Acqxou8cNRalRvCK6YsWn
u2vPV+y2hXa6/atXPl57tqC9Qbc0svea3mM9+29D4sRO0zKfxCNlOSN/fuE+mbiA1y2hBjSAw7in
vm+sdqnu8hDtR0jQtyFSqkb7uvzp4fHVlugGKRbY2az+Nu6OSc4YjNdLFbln3eZGhy2XvsafAZRB
ApaJ+UDQfoDitGC08qbpRgXARnfykTnIRXp3ZwKa8sQYbDxpyi9YUOJWsUSTqvhUiS3wt2lmkTPL
8sQnablZGTd2iuKb6/OsuB+ljdXA7FC2F5XXc/HMnTU69iGfNgUc+opuO0NM50PIYUGPRY0yvjdW
Agfa1eCjtIfdSVM/MsPcXFEFjVvye2S8upedUBRENR9LreT74C8nK82j5exHScxOI9fXD4aihAp3
lvGCWFi1+8K7+/UcE6l6YMHBxprncdNJMljBtnvufnt6PnNASdyG7znXrK7fb9xoBK6sw9EpTZNY
8zy3ndS30TVwdfsuHFkLy6nhtIobZIvOEB++41X5znNsCLeAIy3uFku/idnWJmYMPRnNGjx4errz
DCKhAjYP+Urb7Gy4kWrDmHiur9alTe0r2HqxuR5OXe9CcUdbDhgTVW7Wkg92IDRu+2bM7gcJ1rVd
PYIYyKR+3qsz+j92G8GBfLAzEiQasbVnpeJspnpBvfiDdFu7KChAL6V5Tp2V53oMJlNyAmCv6LSJ
77CxnR/uArLUBvMBRZste5tL3HU678dv/t1nn3222GQ4ne+LN//PZy7KJoai42mNCGd9UHwHaNgY
DCR62EXdotVCziVwUmkfK7wSSi0YMlN7z/+Q4y9gbUh/ulcUu0dlOgc0+BmeFA9fnEdwpC830QJW
zAqx6zhBSJcWZRf3PMbPo3WG8garcPpssem8n7z5ny0IM6T7OJ/CulnBkYSXt++v3/y/QJSDz6MH
63r5YFjOHxTzD4LI2+kwQBnDhcSd37x6/vrlo7NXv2nJJzDM6+Lrr9SvP07LoQr+f1yOVuczQX7Z
lQtQGm0E2ui+yF8OMutY0OmU9+Z6OmXBDa1aPoyAJInXN1X58pqAz2J2HN4N3a+vzm8xwzNDnyFb
IJzQbxcabDRhk+wCoSVxblRDflAVZmCXRO0SiRNhLnVM2o7ZPYCSlnG3XCGNKMYJA4CykE3MCVW3
h6t5PdMwbFCDAsGPv4i6GsW1+/FV2X0zcY9V3jJ3jvGbnRtiqzTeGeuyRmAje7id2PB3VBoZWrsl
0RiWeCyXql736CvrweLddQMsQJjmb4tNwOjXWrVLzZaGKDDHQgYiVWC2wJSatb3Ku3gZaSnVEt3V
t1mN7QJuvjTk0Q9Tvx7oJsI4MPLjvfqByJLqqTVlzQ9xxoolfeqvKx6lJ4mp78i/m/K8mbkwOE3V
AAcM+k7PfDEGJjMYpLBY5tX7vHFUWTzXq1HMqgOeobBZ1QrlkonUZtTU9fOvw3gaWypwV0LDkIrR
Xv7hFKvDKT7dmnHEgvPklQpEiykxhkXBaTVKTyPnJ8b8mmHJyVeO3lnpeBr8WKfCkYw3Jk7RzUhk
PuQaM1widYLnBSXNmS1gxHXCp4mk0aEaEnUyyAJ1z3qV5fQFVYrAh16Hdw0etxeKK9XIGf2ndBf9
QTCi2fTXEgcIK8U5EVU5IxwUq5x2QAbCUrFcobeA+ljqYpkAVsJvzp5dvPz5N6xgq5HR255gIoJM
ojbF+5s3/6MlEAzgQKJsXCAQvC/f/B///rPPEGE0x3AsPM5vDuUt+feBSKIOMI2Irb6m/YyC3RR0
uA56l9T26+/6R9nfZ19THXw8Rl9mJw++zL6MEhB/bScyVAM7q4oOdRJ0Zvl1OYqYpHTyDR6+/OHR
89+9eHp2cQZ0+JAhXCpBM6MPLSrjPTzIOxSmPK4KxmzGPjEWJqjZhPjc6dgD0AXr9UJJetjPk+zv
oySfYq756xvOP3STz6/lqgZP4A6e27MSFPsU6nyiEjAh7LzVgAJcVWnGhgVlmEEzSbkEgWi6kbDd
Duc4QgDZKnq4vF4jPPQLBmalOrBWmFTCky0IqKdWjRAZbjEseFOtI74AplRmnNisQJGCAoYlFWY+
RRpQCxooVN0zIKYaZXUSZ74Y+okc6EHMmK/84NuLh99/F3csoDesX2Enqm+imNM+gSonELP5eDxQ
TSePWNHj86RaAitfIgdG54nu/e6uoPs008Pvu9TodJ5jUj1YDgvOOEdxBDLMXqTL1WpSrokFrISs
nXGloN7t7tLk1Mhh4M2GFxbQCCbUQK3CS9OrTq7zieKL22I6zaLkfOJuoFrMVrKJegIkrFbTTWGq
UboAtHqLadTwwyGFvY9xWbx6cXb2+PWLTp//R8uEhFTYR3aT9WhZLla0jr3N3kkeFKvRA3w6ME+z
8QPeFIdWLVl9E6UdzkGo9sxiWV0v8xmlIdQw1mQ/XC9Uq0ZqBmV6US6yzs+wZBHrG6gIT6GkxXww
nSCta8awnV/zxCCWN1e3wpkbTdfkfXcQvfj54sfnz2xOMXj+W9BniKi0AxoDORzdFKN3h0Vebw5l
Fg6lcsUbOtZcy+a1OZWlp6mNQFhrHI5YcKK7HBFFO2TMGEfMR8Od7Zw/e3Xx8OnTB4/Pvn/9ww/n
z35QE2r+17nQwxZqCPNCZWNOGSWdtU/NoxJBFktQEc00dHisIMcf4mqn3qOhSaXhCHcS3STzJVO1
WqhRk+uArAWLKFCz6qy1y3SiLZu52TDezi4RYGuXt/Y8Logf94SHuiWjP6zrlcJHsBukI0OwKYln
Ag06goNmsVdJhG74Z6T5Z9dhEknaOUdicIK+ymRtXFL6w/w23yClMUQB9Lpq7mxMio2hJU7rIyH0
e4RzBcmVYe6ZM41gMBUakg0nE8oVd0QnmK5HA1o//WNoDZRLfv1Rq/8/ihoLC8nIu8Xopor+I6bj
JA5EP49UKkc+ssmmjIttWBRzTgXTi47lrgzHrlOElisaXa13E6wpmAJcPRwxua51nip7BfaPnRGq
HnYUQIhw9dXNEuSZ3GhoUZdqOH921lU6OqFnVIsC28X0AMBnGD7jeloNeZo9vpkyGnvAEFLVnKgF
P1UrFv9W5pAneb16guvqkZo2HloXX8ie0CuOPum2GUtYdqAchGxD9Kwm1nuKn9G/rMuyATu0ekBk
AfgxGC6GnSjWB9Mzr+ayZyxmXdspixC+tWb8wZovYbkB9Jw+Fb8WfWlBbwaMWog2VPWlSBEKfwwU
JqvObd5VTo1HliKtDxbHvo2TNKRo20sH5ASEEHWnxDUSs+j+2nvauB6SoR4e0yWf1edTsR+h5Aid
WM+R4iYB8LyiA7kh+Ej/FKIM/hSakA55v+v59qt3/cgWo9prSdOA4xDHH8hHzfs2NaqyBioHbbTc
gQddz+jFJnr2nV8oOUt6nEzLdwXRoMdgJPgTZw2Oq9SLuNPSiXJFujSzfnrVBD40Qk6Huw8qRLmE
7ykeyWYvXZ2Dyt79rMmoJLXwFI5WzPdLrNooDdnXp2y1eoAvHnAKjzZl/fL05Cr6VnJmWFtYWZiP
06YtXxiLLYqZY8C25rOe6dmmdNWHxzoJrX3Q9QNcynYgbtFuTAN2vxBl3C9pmcm31WfS/TY6SIbE
939wsnT8AbbS6m42ff/uzcn/qhLmsshQUPI1BAwgW+x/fg0lD9/87mnEU9PjXM9Kt/1xPa7xVIYV
iuhoY8oTci335NAznD0Qtb/Pa+aEZIMXzZLllpcVsPqn+e202Hh5O6xzQv4Cra8te8eBikU+yd5Q
d76Ef9WyK6flahPKwIIOrt9GX1piFHZspK2icNLNrRnA7MU3SFP4r37AMMGwPzl7aUWAaSjGyTH2
n5HWGB8M9M6eqVytsgRo2rD3PxVaQ0ItkhJ+r4d4xIqwWM4/5NNyrJtcsnadj1Se41UF1eBEHWdH
crAj3+SvSJDEw5uJRHOVRdGPBaYhviXhcrTGq46OZBMab+AooRiOjQ1PfpMvxyr/9AoqQJ0J1Tnu
Dpag9qAWhnst69PoEWKonp72o4O7f4j+DP99SP99DP+9PLg7OTqEv7958uSKf58dHeGTJ0+ePL7q
HIQUWSp2fMTljo+g5JOrzmBaXOfTgQKZTY7ujv6hF8F/H9J/x6kqIXTry50kFDw5wiLfnElqEnjy
K3qCnTLPsF/4FDtmnlI38DH3A17ohmC6B0tcGpcyZRh4g9eIKVqFeSkl0+oWZGr5cQMybxryTsMt
h0V7ERWh+yt7NIFP8OYWpLlv2VCW30kfrsK9u8EjyRxmNjGv4Mx0vumAIOtWsWSWiuZ2NdTu5T/d
q6+6MNStZgldvJvyVa/TEtAC41zs3tgPZOzWE+kgXbQOyzn95kzjmGTOuktDb/WEsmVXwz/YV16Y
53k51q8yzu/jgoaUhMIFCyF4l6OHdHB37+jkDZIgKnf7fTuffWV/JgWEgSzz28SdgAz4BDp2TXuq
jDXkNP1b5zWyc3s4qT3UmcJZSOjXIf/0knl0nDgmzOsBxVA4iq28IPLbyRtjfyhZO5ZFTtqqNBax
s6WcZaQigCZ/XX4o5nTFme3oPotE9gj0ky2DsHLPLHf3GWokiU5kOEyagucEZfHGzWcGw2CN+2Ud
ERI2Eh7I8466H2Q8SjzXkIfD22l1jQdzPUUvLAzLrKPkbkx5R2W9q6oVxp1y31TIDV36tpxDX20/
TukHrlLo1dPqGs6mROrqeb20iN/I8NGS48Sq3iWQHTHmk8hK0617P+BFapzHeMh2upOpPZjt/VvP
dQ+5Z9Q1eHs9LQhMkOaZLrSUzsgzjxBSJrwoW2xiwlo0TFkWCHQOnYm7SSq3ofScI3XUn6aeB1BL
N+s6Oa6plJJOZF52eRvAKsHdpPReO5KC30RGcSvuFrBUQERcJu4j2CN1IuVTLwCxWQ2a5QljXD3I
hxR801aD7KcGQDe9ZNckR2Hlj9irUl05ahgpL9mAOGFKMY1PdWT70sKWwlZGIOq2IZypmW4sB4Hm
asKIWrvBMAk10Y6bq/KOC+nY5lvrpswqnQaIpfREEl8zPbJwvX2Nz2OaSt14DSrmHco8DBiFVxZF
+r7aqEwbWMbKYbKLt7W6vIMFwU5pgpA7HjADbZsNivPOF8rhI+lCcWT9ML2+PzqIwcbLOI47bWAE
tvczTijdl4sTdBI/kn6hswo5B+zESQ5iJnCXAxYEq5dfaGdo3xBhCgWy5+bXFoekmUf2vgF2Njvs
fqFabkmfyesF6kh82Us3mdqzJR/w7LhymFmGuDpU1VjG/3yAO7hthu0t/oWdr5g9tsOrRSH8+Q1J
hENbWwcSEKFXrBtpp1OmZQT074bAuOGHtNrj27wm3+8gqrgQpDERvFmFjakU8H12Ij+UyxQBV6nx
Kmo9B85MHpfTTZyGYuOFIzrUC6Qahvph8Ny8IpRunlqUp37UCTxVo/FWjUfFUO/kQ6wk8FaY9l89
8SpU18aK/5tOvz0WJiE3tYMg+8OsB/CHYv/4lEONqNVGAbWw/kVooBr7q6kQpoPCQPTp4Kx4nxBb
l8A//zRKbDBeD3/KNO6Yvk/lQvtMl6pnr+lyxqwYVORyka2JzyWHtQnTxs5hKtpqruHH9QQ0QBJ0
WQce7ZXgakbBhHl2/R4FzLvLf7AN7R/JzBGm6J7GTku2jXEnkdHzsx8rd2f4Lt71hZoO00jjg60b
6ZP478eiy7cnxHPf6vg8TlIHerHEG21BZrdC6mxB34cDDkwsSyjbQIlb4Y53t7Vf5j2/R8yXQvVt
wdr2xKBPQ1nePSK7LbX221bGvrDSJmo2uDI+mtZt4O92312p4dNo7Z+7bXTYL5tN43zQBo58lTf5
lqpgi27YabKrsKLoluFuYqs9w+fVGNiXIW7hYZbGyWNv4VxcRNUZt6NbGTjfRiTFmgKc8P0gAOK7
Gy0rbCRZqKAnbVLp3oLWSV7B5fy6312vJoe/svybVT+qxWALljCXwoeDcTGl+fQ/PAyPy1gn1jNl
JnEUJ1ua7vhDEjj47re/RvOa3K314+PsKDZjimlM8a+/s4blfm8WD3Uvae5PehcwKYTXgSTyspZ8
r6F3IABE3xqbWwL3nLxW+d1dGwX2p69oFrBfxPeyLycEse1NjSmbZsrsz0hs/aO0SaDRtKpDC66B
PyaJtjwUMhsaxntlx/BhGHZsnA1xMrF1LW/YSxY6+X7q3Cfj7FG42OzN0//A98njsh5VsB42ZDdW
8ToVIvmND6flcAldjmL1YSyWe2YvFEfFXm2DyRqFhMFAX+QP62q6hm7zb30LrJQL+4a440ajmZtl
FTsGX83rSUEZmUg9XopPkhqY3JeaRqj4QJV1DM+LjeLXs/xdgRiIiQYO7ImJyJg5DsjnS42fnb/E
eX1MJCtrLI2hTesh22qBeOUKc8TWq0fAYn8dcHbgs66s1TdoM+k5UR2G5FeZqiltxXYV34gz+gd2
th/bKEPZFDX5nE+LVbdWUAzQWdvsjI4EqsFERZ0h4mpfUynVeXCdwjIXj/CNsplXCjdOwn+wFzwr
NLGT8g6XDRpa+UJLHuClf5K2CbsH7IoLXVBpozBn1uimnI6X5E3oeL6rHrTWZqXCA7nSPxNGU81p
YZLsKdRXFFNESR8M1IyRHDAYwGnhJ1ywyIxx9h6mP2uMfa9eePp6QTTt+qHzpXRZue8043R4RKnF
WiSo328FXzyGF60N6S+3tIUS0XhMiQfKP2LOefnGPv4wObS9atjAnWaqp3oy7AQWtjUYt73ejrLh
sbKnFJq1x7Rh753BxwMy8Q8Qo8PzOgzBsdJRIxjUA7OA6VqJbmYnuXglCqtXvfVQ/jmWDPeF7n0S
inkTyBnehGqTcaBb6oyNfFLJDfWG0wG6gX3KAM5hQdgcVqvmgTxeEkUvjwR31npRRSSozD2u8VIP
mKEpjZhb5WyAL2BV3aUeSJnLpxP5vhfRvDAJ0kD8s+q0ik5x+ZSL5xmkjBuJKIvCLeUtAlZug3To
wkvsUTcEbYHaiHzbunPIxWLlxTcSHtjqFu8ox9lqWeZTfXzHWzA01isVsYdgE6ppUDjXK32OZNLh
MNhGC4mtUTp0tnKie1/IUlW/ZU0NBFm0kZs2yH1NdjfUVozIix1A6vM0uCtR9ERnHbr1yK1aI97R
xim1S3epbwNcGdU4DFOqCmd20UR3JW2HQ40Hy+L9Gqe1Bf+UXlI40VTYjQP9qrjsDtxVZzzqm/2G
5JW2R2VmDxUVXAJavuWP7XwM5ESnLwHhqJCloEwC+W0TpvYA2OftMl9EiwpvptApsVBSTkTrKCEM
Ad4oEW0UffQPC3TcMrKQbsBaGeYhHuLmV9fpUHu0talxsSH8u0xLYec4tlAlIq5pXIo9sBuDDxWo
iTHZs/fnQJMouR9q30UXMXfN8bPnF2en0fnccgTWo4leFhgcCpMgIXVxqzEyBq1iMc03FPoPpT8U
083p27kTiep4djOnQP0uiTWaCKFM4MgkcVUIupbwPazPDfF7UQNwty0tQ17WRUvlp7v7S7hgpyAK
v5ujfNRCvC3EWjp0BToRGIu9MLeTwkTsu8MNIM81RirfBikYxAreutZ9HixR7E5C2YHZX1a+Nr1E
zWXIeHxm2ckM8wqyCW7PYin+spcqnzi3w39NpXateOnQrJKN/FsgXy3LfiKlO+GJwSJtE9PoJTvD
jwasRHkklWua3XRQA+jH8R5joFsjSWNJA2kbyd22NfZRQ3mtL8VfrUejQrsX+DPgDoCbMBfqNX9L
9010X+LMa7Di1iMNbXgfcwSKdLZd5knYy19JW/r4XMDXhWb8rTDvnhCWbf/OmmcpYEHBW6dMOV35
yaKm+Ww4zqO7U5Rx77LJEhMXTQYccCbpcYyO3HV9dXXVwbM1s3tjLEgdPx3cpFzWq07gSgoNPXL7
gPYeDqY0AFQWihO/9kVZV6qitBmKjXkeXFBvZrikU5JSWBylu496cmTEinjmPwWYmdfZQVgsClFo
sawwqGUqBAjljPh4QnXD6guD7RhVx+2+rwhd6lrYyKf8VjyQGWFoyh8yIoVHHmo4EneuMERUbzO0
U04LCn0dUHcHyvzaSOmACSJG5F9sds/gAwKLsS8x/aa7W+vnsOlnrNzEUKL/x3xZ9wNp4ISSuoUw
HIunMdhirstBPwLNV1rFcWxvVJYll+zTpXWi+5vu289EVWGRU5Eu3V8a9paBP0FmblTd7Tfq9sxY
f7eKoS3S/Cf3LPW2RWM1M2umhbg39vuupC/sl0Ex4yEbRagP3sDY1r735hrDuupr5HZKjGAvNMv4
Rpa/P1aLIqP7wgmmMVAwOWiAONdtmOIuAyqZDcpH5y+lDx19Y2mqSHzO1jPlRaCEfirp9TjtvJ+/
+feMxzcoq0zWy/vqzd2hhuUzAXJWWJyJn0MUO8Lp6OgJpDGUlerxK0KNOX/eaQuFpPKqVOOrUGQd
hml+2VOoSsqQc7c6f56o71LbxX4S8UWgxGvnqzyMuW+dFHyBLDdoAfYm19rqio2L23Yb1MvVHSU6
/r6+eII3r2h4Y/95D39bdTxrdNWKEeRBQruGPG1k/x6Pyi1UZ6pJqU8n20eQjPQ4w3CSGEmeEzJV
RNzklAE9uaJ0b/J0FhhQBWsE9TYEkD06jbr1akxJSo/5b/a+PuEf6IP9iwYCeCye09xhvF2UBxFQ
elU9ILrmEudT1YfT4kPB95fjglEIqiVfKrbFN2BWwWI1QTgZ3ixyxILm3WeMWzWCRrJr7E6NwTOq
jsg0quAyCoQonBe33g0yTjheg5JvAIa1F1kUneNCV91AEBTngDPQOXpfZxfyR5IyysSwoCYLD2Z2
Dl/crdD6XWR27z0HDzWKvh6QI1DrfrEtmNInqk8+b8D7TrAau580YIyXTrq3wy+8DaYq70fjNY9o
YnlGxLQ/fQdifT/ujkLXJH/5GZphwmiMVZ2NxTqsh+5oEGregy5D03FN0N6aqxCEqbXWL1WdV17a
59twCvqlk34q6JzSOIVhCBNE0E3soTXU5OevAjIE73eDXZvE+PXY2zrEeCmuN2Sbi8flmHC0CEhG
xoAH5Kj4ddxEbzbrCxaLjimzJ81bQTJL5DgzLj7M11MJIYOHzwcvHz9/9vTn1CcIzOlJgtv5qPGK
18tk3LhO8ZOJ8eyGM7y2TbczxKte9Liar14W+fgJcKhzjJBLtnquqp7b5MjI1bNC7J2WVfrP2H+7
I9bCRBnFX5fIB9dzqo2mdTQt8nm0XvRE1qo5qN3eneTFGknsWZAlOQSRpd1KBz254Y3QWIGfh9HI
Fd3ronhn69V7k/gjySu1NCA47M4Y0tPhWi2vw0c+TgKVIEjcpSSZRcJXy/KaQnmJ5mZzB8lOLHvR
zrvTrdwoMGNcoV7KaeMghJcsNrDAsFVlkPLaMYoUA+vEwBNOzu/hesLuPf2jHnE7/JNOcOtgsdRj
RsIg8tOBzWe3tUzJLaSLIG7QIAGGEerBpGNfsNPxqN+PtXMAyieIlq861aUgrWU1NYc8v4vq8o+w
PcS2LbOIcDfqy5ShdWREXVsipLTstzcFo5rN7Vutmhn+mMGxcrznMncKHdt9Xo0WISPYDRAj5PDO
mVvEF0JGDQOoV4B8usIloCUSpmoh+1EtNmf5EKuf6EXiUrTP/6BvD1pwis4eVivT38Cx5283Bt2F
ObflAsU/2pFjj1zTlFpU7Zfy1mhMnO6Qo32dgnqylV+B12E898d0LFKned1bS96scPR7gL0yGVue
Iu7hM+Ehb60v/bhxSi/PCO52/MST5tIm+UXatz/YFZY8AB5X5DOrWt/azQVgdPKX5w2tRtHXNXic
Nhie6CpV5KsR1qnYVwCBJAT112nVc573q4QOe9VpkJ8W6YFaw7sSxzHAJoQwWfeKnZ5hGIm6kMC/
MeWArWqyMt2VhI26RCBVqbBwa85EPNbT1kCNphXgulSomWVfClkbovY11wXKnux0ha4+veg+Oua1
obMZDo93ID3EWeihx3DKGRaQO3qWL0z0jgwXCkX6VpWPVYpdeUA+xvLDtequaQdrgFK2jOGFScO+
T/WxDquLKwDh+gEPxI1KRilLFGGnH9IoOosG2nNUPsJKn9bbTnTG5MYuJT5Zt5/Tiq4Efo2XDoWd
rsjgdpsZ6Gha9Nn6IY4hdK1i3W9RVQH5k14Qp2aDwgM2JbALsgCbqSBsphgizbKgJG7HW6Q9qt0X
9poq1C2mlp3C6h1vuEOxvxe5mx5Xh/5wPAB0WHRXxiYkWRvkKP+K3iJvTMeHgybItWkFS36z0DRu
zBNyKSmBHfcMt87HiXsOSGdld9Dfn/cbzcurYPM8BFUi0LzzcXMF6cVjJfepdWh5eOuP1kv09Irq
eb6obxChjpcFegcWM5CV/1ho2ddbGNCcrGmcHewuP0nS/aYy1H/qvfC4V6uxsLknjxP5y5JOLxCd
i0sSADmhgSouQPyXnjx5fEzEf/L4pGPzi1m+QaFzjhJaHj17/fSpWJ/wk6MoyRmZ5oONHIBjlBzK
srUweTBZqvL5RgGUHfWOeye+dmEYVllrCLNy5eAxqx2ZOZJh4LQHQokxDuglf83Ku2IsEr1paz7w
rXb8U5nzGmIC49Agn/uTs/RiaDM+pSXmPocewHP4r/ec+gNv6F/vHXQL3sB/veeqk/Gp7q9XAvoN
L+G/5vkvAYtWsq+RCU9L+4Smj303v/lAJ3gU8lx24aF1P8qMwyuCdlxTxNoqpghad00RIlWzED22
iinSNEuqN10HDQW6uof7G1XFQ9VmZvRYa5qBvTXmymWyytQfQT+qoAWuIdYx7z5tsYs6GWu8Y4r4
YJdYQMjoYyqpPBmlnTA8wYYwx4Yw8u/WwGFDtl0kauxEWUfa1X69ynxL7qeQFlZeE5pVhDtacNvo
tr0nU2sqkC/sNRVK5tl+bb5l/tvnjneembuTf8G5ow1uwSV9+ty5tnDkW9u9kTEsAHhUyO8YnmsT
e/vX1XoV/BrnfffXAewgTYOGeR/dcGZFixwLb+Rk1ADn2oSHkkkUEE1aGby21/boqso/Ay3BKPFl
F7Y0oQea6U5DIDKrObROW4jsGv+tfdUmpPrb0BOnWmbDbcZajG3CqN0MFt/ZTGDBeUwDl16zGpyK
0LWSd5QbMdGZGHsdaRm0XeLVki67+3AgpqUi6cndpQeRSJSGNAil3IC0KI0l/tRuvfxYe+hazaZR
6kpD2kNb09Z0b2taaVE+zS+VbmHlwGq20oucQLLGxYEo0L5qU8xH1tUhDU8MQ3EgTg7ehdTyxSZD
MN9VOc8GmKf5bsU2jQKtILHYFy0FdJKtlhi3uCrsHob6bLJqNRSTT1dLjGr+Z1l3IBUfksq1iR1F
hTQEVq4Fs4CSBZVcD5LS00iy6BzU3Pl0Y9uN8a68brb6Z/oishPOQJM6f4nexdV6dBOt52PMd4Bs
GI2S0WOj3UQJImHbGptOd5R+kjpjNBdbsdmit0zHlXZs5CE2S8gW4RK2xKFLAC1MiXLeCUhHNvIn
S5q4qbpBRiCOPp7ZQ5sZdglkLYIYD8MXXknusju3Q/biWsI91JaITkA9gP9+mna1RXJRdPFq0nNp
nXaNzxqUMxNsnV6NzxpqkZ71yDqt8O/A3fW/IolCL6mGFBE61pskla2zQ8YImsECF8N6+TSEjS29
cXTj6bihCzREkaBVrK03Wya6b+/8j5Ez/qXk1X9WiYYXQePIb92O7j7EgANqMEnDSkLofHW1iPbl
0yra4Era1jDJOdsaJgUkfMYb+6mkIfb3vT7ltRMAUHo95OMe7YDjgvPn4a0yr7CSMhMUeHPckwxB
kbU8THI/vhSGyQNGwqlBqrouh5hZCs5/unVS9sGOXFhMcsxeOF8pzwY4jYtiTskIytrOGygw0Plw
StfUmJmQVd5hMcrx4EYRYL2qZrnGlwMSUp6lEtMxrSSV1WiZ15jTMOe9gnnVIgRsoYzSxXTTPOlJ
0mPueN+7f2KR5vy5XBZgSbogIlmGaHd7QxfwfD2BUpWMP1YozvmYckZRZNHYeVbbD0HcWElOWXpm
JYKmC3ZvZzUvMqBNyoQExHmlZnVRF+txxQwCAUzmlaoujU1AXIlH8abRAq83gx5AN3faY6cRzqRt
G5bbl1zN84POlvt/Tlqjkld356uu7Tdr19d99vppN3An7pV6AL8f4INu5/3izX+w0G/QQWJJd2zL
guSO9+/f/O9/JBicl4V446gi0cNXF7imFstqvB4V0bwciYCroLBqO11WbmBroNC8Uj9wZlcVOo6o
B7OF+nOWL+ubfLo1fQrwitEqlEwFc0JbuDkuGo4ZhRRYr8opgelwAViqoxtYhEyGFYLub0Y1p1uH
v/DlYJB1LL0O6iHcodVglV8r1e7Fzxdnry4GFw9/QLFrtsjkfYIpreJDfh3bCeQtRRF9ruLFZrEZ
2M4attaIMQPI+rFQ3NEyJB6PmO0dI50JHfAP+Yc8bn72B4oWisO5danEaGEV+VAo4dvuT3OcnBwE
/iPDQ9AlrLCHNVweXfG/x1dqkxHYFuW7hRl48fOjwdmbC6wGkeOBTMlgQOnxYPujSBKPYnSdiYEQ
VPji4flTKo1lrX7gD6qq03l59tPL84uzwbOznzAF3KvAKCgf0+eU9z36Jg2nhac4gOgk7Tx89ej8
fHD+avD47MnD108vBmfPHj1/fP7sh1DFnJdHnUgP1brj7QT88seqeud6AcCueXH24sujE/Gjj26g
iOTBkW1Zyzass3ZX8AYKHAOy+AIovZNAMrw0+qVxw8tJAAZ4pmH+brzz5fzFjmpQaOA4JUzzL78b
E3Ss5KsPBoiR5AOwM6DzScwrjjLD1v51sxmC/GWfBHMvrbwCPFnd+FFjSkxR1bXk7zbJ600napPU
W3qg363QgY57psY0kFnCV25BjtdMYqvbKsveBPMVMlaZi75hQ+ZnS4HWz+JedGwh5uQMxK+R9W1g
/QWxf1/9oFOh1ffpAA7MnPKlSOpSlcz5y57+Mo8GOhvUC8xrkPZ4uXo1kahCaRdBjpnVypkQiAU7
ZsUpo9AVJ/Nchajb+CLBvxvewpg4kF4grzsOWN/peywBm9EZOXe/OejgVQT6kU3mPboSFm5uLzpF
eSZL8NosmJdt23qTfk7G24FxJuOGQwGNAt1esbeXJ1d+lfiOx/Di5wGmvzt/evY4GFfpnm+88wd4
lg7oFIxbECMmc6FR44tkMv+YIEuqaDJ3U0Toow7G8bkex6vnr18+OmtWc4CKQHcVIfoEJ8umxGBl
ne01CwFY5rmV3YO8yBboxs8bc4HbhYBFYHOmQHs86vEss4yUcxAiZF0vNlwNpW53aHMQndfc01wg
o4En/tqHZCJ+gw535Ypyikzmqb+Df8KsrB8KyWNMSIajfFlgbln0DzNKBK3PWqdYo+xso3zuVQcS
6vU1euRGo81oWmQB21H4qGnfWpglPqd0E9YZ0Rp0q8kHBSm/FW7/tD301uG49IEG4Ub9hOLAIiBc
L23F04uGoHi8+4hl27adWyNHAwfclhW4Y0gJem6ZCCvM1jojtSqVsLfWcXpkOKC8eAKwBDXJUTUF
iaTmJJ3W2iTgRJFQKKneBW8zqzZKFlzVK84YnE8RKEsnBoSDARXtU8JeY4WYmEdPV6r0b65MTlcO
TdPfGJGdv85w/VNCZWI/bhmrNnUKV5ytOmf6YRc3I6pjxpmkMZXzstB9JiWf0jLCI6s6ONWUa5ao
FDI8rVXgmap6TumwQcMAmlVR/qEqxx1nw43ebSKcbKx2TNwFhn3LibdXuUquXeF9BflLzj/kyzKf
r05x/uxe5bRSoCk6uKe3+QbZi8mrzX5NNOTni4LT1NCFRblSBLBnYFXNSij64vmr8zcYS0C/YXwk
g0CtnAjsBoa5MXyC57LP7Avk5XE1XwmSOsZnjpRPPFtaUNXifL4OxzVMQJIFE0qh1s3ceC2qfI8z
HlqYvcO0trrZj/N/KRpwAqDnZKzvhtAD8BCmt9nZ2ZvzVxdhXnIQnZUUA4GTbI1Rp3feaC/N4q5E
3OPE3iieEFbNZsgPEHUFFakSY0CGcPpgRpXhBoNFqvkhEhxzOGXR+TxqrwyUTEpZTVgyt0V3OlUA
9CVxc5lVXE4t7kZ1HWCqRBr4/FJo8+z52bOLXqR/XTw+f3nVRqvnc7M58YjFtQ1ceA5kuc1ZjMoN
5XrExayrNrcydTTC2JbCFv5YsgU4PCK1tl1gSW9oZtYfPnp09qoFqcJm8XRLSLm4dc+Fk0eNjZB+
UsdaDzInwoRXnmgYpuUhLAB8CFLal1eieqNibmHsjPwNrLdYz6rVPnmeVSs0KRF6Lbpx0kY284Ak
OXRJ0ovOu7PousLgB4t9Voy1QGJGbrFAOaDQkVQcONHtZ5ZPsyyzcuNhyAU2hsvYsB3EGxv17Fly
OA58FlQtvFlV50CrDCLiM37W033hXcXASFhfz0xFQztq6wjT5gXoYjnZqKNXGzhJ7oixRXKgkKns
YzSVAJ8l+y2iCw30oUuElI474yOKjqp0X8GH4EjVhDqmOprrIFFte8clLrkryjRJjYfiFo2JAc/p
ponByRPZk4VuN0KgRv7aPgdtur6Bf0Zk4//DmkyOU0LeyKghyUM7JhEdY5dWCCEOT4Dvo7l6VXVs
NoUGGJQwFnjBT1xba+onX/REjYD6b9WdAp7JWP2oWuLusTjgAR4DTh8y2xHBwx5SJMSOz4tbRSB3
wI3DFkplejhI/2xUDVQmGE/feIaAjSAzq1EoEzmuaxxdFqiZ1wTVbc+rKcDwvgPlGOwBtmlPFAw6
GSQ4sfwZo/X5IemnHR+cqkGh3VF7zU86HesaYrDIR+/y65al1yDwDmuFOz277BON6449DBMBo0TQ
ICF1WwaJ3/4wgPP97NHF85c/MwV+Y0XQ2N5L7ZbJ0dSLlNJ/n81rdOQRakb6E1R0cV/QwhCPHFpv
AgQd/Q6OiaGy0pkNgVGtGGqFWRFQFeHg8ErJzhQtMMbbRsRmDYcuNSZPXVrYQ/I2Bd9JYuSrtR9Y
rEPxppiAgIGxrozDv6QLLw72cpe63QSsb1wXFgjYfgujY7GOVyXmYReNh5NwoLYkmgYaOVbFWJ0v
pKtyUsaep9tFjzliGA6oD+WYB5AvkMGBXrOyhC93BHpNyP4mqC2KpnPLebWr0Gsjr6rzCbe+f0bp
vAcXxehmzqnlSRsbk5lJ2Vr4X9RiUVySQwl0dVgj8n3yKGUuDW0Q3aA0zF+MmmCs7n/nBV68EIeH
sw1Ys/AmqYNnOIt+rG4Lukch9JUuauerFWWQpmRe4+JDSQNCjfM8uoEFKhUQnDdmVWHzEN4x459i
EPIOI7TQiqI9y6LkVaFqwXGiaiFZQ62zEmqsPhRZGoiSXqg0MnTwx7dD0drUsnvuLTnj/rVNwXJE
BL6QVGIWTYLk0sYUZPRhn4zt3AeMnbTPaZhUUWgwPkhw9ZAK2h4hVgXHJ/yAJ4FW/gw/5EnAVYiJ
S8jGczgu5iX6FNgK3bAgo4BVEfW2WFn6U4MZezSV0FV120iGFhunYCa5d8o5SI9mZWf03JKTMHCs
b6/9jB79HSWM5/81W+Xr2Ay5axJ/O51iInSst0f12b3gq91svJ4t6HydLPhlw0Bm4SNYw2d4+pfP
8Orv7fLtPFZxyZKhJ+3wq8CLzqiq3pUoj5I/QSaLO1nG/3QZvV29nVzdP8jus//q5Wn/Ch9e3b88
fHubXX0B33///HeD1xdPfoVX6W/visnbu+EQ/n/SFR4Sls/N5d4F6OvK8ZPDOXkx3Z/M79vxnbyl
xgrBIAsiDWD9pIplBJxjzR9NHL8iJ4546W2vs/mHclnNcct6+8yS8HtGzocDv/Xm00Yjp0LIr3EA
AvxgRdlryfQki34q0ZllRRaosau1cfISwdFWX7AvLVmr0OgFnIXc5jaE61Xbx4pVE5SpgcnTK+7b
EpOpZNGrfKylymEB7LrEiGNxnQUqrTC5vSNnq6VClpMcs+pgNgt930UJu9nThgD44CfHVnP3ctsS
OoWhzQ+PgYk+XEVT4A9UcqNleBHQoRsIvlGuhEIRL946S6MLu2d0uC/pwIHRMNdTY6KziRUNOIFG
hd1zxeqJMI5tlTqupw3OHQZBJwbldiaLXgMrXq7W8xydiHqkstuWVbZ6rhcUjo/ZuYbFEk2dN2u2
YqoTk9V0WLgg437gFPEwuZRrwqoO0wmFp1QtAJKC5pUD7aDo6Cg6kiFnnEVP0G0F+Tzld75b4f11
QemHiujg5Ot/yKKfQf5DrVLpVd4V5wH6v8t9zbK8vrFkPVhGx5qVkhiexA7QOxQ4CRTo8Zdf2Be8
QJKE7KZc1srrqdgSeS+4IfmK32V8W8LfXh6dYvVXqY2Ats93qlP4+Yn5vAnXpz1ZmAfGA3Q1Q44W
ujQ80LNVm+xRa9g017DT6WaBpSvhllvNEr2mcYKFAt0DP2S91SLcCqMqE8DVJXFej8oybkU+fc2g
Go+p9BYI1IPoaYE+UZyHmtKvazjObNdFU+8jrptISXVp0lH2U7XExY+lFuaG0gwuvZPsG763QAUE
fQmRGRbv1+geW4FGfmxk+YOI2MwyYizxOro/L+8QmYZc/DJ1rPgOOqfNU0zIrWBmXj6DwaIT1bPm
kbhaFviFYXL8rXP2WcYu++h6Wsjd8KJaIL+ixae4SnAqXHtUroRkavg0bIEKz5iSGuRSK8FRBGRm
MgGqYwjL9PjsX7JNMkaDRbzPUM/R7i8uqmJktY6OUkXJg3IACse10KBx+YYarrZispXqXDYq3bZX
wMFhLcPhBvofubkW0+lO6sn4PpJ+bEURQ6nIYh9pePRlNJROLcMicQ54fB/K39fymOdx5+Ravi3n
sZOq5SfYbRhTdA0HFoGajKbruoTjTxR6qF0DdZlYHq1W8gWbVd9ScicDfyQjoly+zsWjeKKB0gzR
91RyA26B0H10jCYUDkR0oaGgzYN85uXWr6z1teBjJJ8nxtoHOfYyFysAX1aP2BWarBWVWDjGUh95
ZgsDwOtlYwKAogM2vOJ/vxBnQIIPIlj9RTlOXDT9fWggtaYNVDgeaKIKCLk6OqSOq5WrayrAiUk0
/n/k4rS9UI7irBLo+2IYC0ojwIJQhnGXYcZGl5cquM1ZorgiOS8D+njAdqbjnR1Gsi2oZZ4+vtyl
j/s3AeQhNllsMUs2lU/WO30gPUf7JMXTx5hnFCdQDkm5OT5pWDN99QZF2sYdMDKerpm0e3V62vzw
Xt2lxKEyo4XX19B9yEH0SF85KbAQhlVCljhGV0Fli8gc2Q6d2Qh1Cv2ajk/wI/x5efrVlfJ0srT6
qFo2HUlYBUcAS6OEUx1fnV5RtYmjku9DkvYhIGXs83UbURqrgQ4zZQlAk1GirAD2PGrUv30n0K5R
zrNPmMImTDeyCHIrzx7BLkNs6b2IxzfOFgv8GJoplJ9KLJS+mDCrxoaRvDRuMnI+8wlLeO14hOMl
htr6nisyZnHArHdUozKH1vkEs4IwDJtu5ocCB4TvCEKMDvm5AkukS1YtG7jxAMK0Lm7sMAI2ipJe
maAXvknQBZx+TgYA4A7kg1QuxQ6uJEixWXKe77Exom7Y6KxdeEY3OUzNSlKqqrNDXSyZzmStHWC7
AJsK1jJKDgWCUxcEJSIEh9sYoapS/DuqjFOC6hN3mkOB0L8Kuo7RzvIaetV9O/9Tl9Ex385/6dKQ
2FlBhq3Q28QW/J9fPX9G/VB0VjNNfSNPw7LKnClV8osde432c4q9dlKur9wIbSwSEBC8UmT26DhB
2IulluFXrAXDnoI/8C9cduEoDZoXdS3SnB7kDIPA4wPYe+9zWcvynuuc1dfumn7CpnkSVnhGzQqV
5cuIdkJbtBcI2hqvihrjF7R8UpslSocizOBfuiikadNI+0ojG5dlzylmaHpZkQfIhJeNMohoTYo7
VJGXEG/E2p9vUtTXNWFiL/Fe31kjBzQ303KItywFMFB9g5GT/M8q4KzICf0YfTUnGpi3oz3ecK3X
jDdLpp5yxVYR2aC96IZvKdiyo1gGXmfnUkk2wMQXCLCYSpu6Wk1RqFVb6aB6VXsmVfyo2gD2w35T
ZA+6hf/LOdbOJq6qNtuyF3Cl4NnnvqLVnbh4mjWDZFpYYQPZfcZe726ZesvOdGvQBotwJ+u/3W6l
IIfwPp3/JU5Tk+yI+uYOxv0w9KVS1dQhQ/dcPO2cC4uuIS3KygdO4D7iisbqwjI22CjWgFBjwOss
AQm1ucCwqqbVIrE2HvBeGE61dJuMEwpBojccfkTBR/wf+p2yN5NdEwV3pbG0SIlkcHAEUIyQn4u6
F3HiKmT48CH+k49uBmbEdEz1GFISS+Bi/iPmwEMzJwmJUEua+vVs8wjAOHdxulxa18ntedGtjxxb
WSMDjesfTRl2gbfZYw67CQij7TfL4+guyytDFu8Hmh/dPARS1y4MXy5mzzDSDc6d9TxfbkCsXmjg
PLQfPatWpxGlDrlXxz39+JwCVuDNX5zHr1+th/Dw0H34cDyGh1/Aw84vnc6wnFeLRjvfl6vnSyj1
Z+tDePYGQ4Hif3IfPpxjfX9nPXz66qacYHe+/dZ6+lI9/e4766n0xnoinbae/A5WFDy6bz16XH6A
Jw+sJ0+mVbWUx/bz31XYwL17oHQcaMFsok9LS9zTn5y9hy/6fasSoDs9/Nx++JSG6Dw4wyd2mR9o
wM4DLPOdXeZFdYujs4d3XsOT0pnimueeF5Qz9/h07naWHpK2MKdZ7qiQOHJ4oPTbhECNB828QmPD
dFBNJlDCSCCv4CTDUpH6hnL3IrHYyDZaL2tKE5s5SCyT8m5X5bJFYi4QIy8h396BQY1xNwu95dIU
eYV/ODWZJvatzXxB9kv1w0m9Propp+QbglSliG96MsAKahqkp2/R4KlMcPQdXaaVQO7xMi46LYGa
oB3xVI+LfwSNcKXPCZiLh01LaKSzeeWMWKz1HnTYj+D5crOAY4P0BYIXApGPtK9UgJLnajOhB4XS
QHnNkbhJQuLqhiyFaCET0OWm1scXyodaMZypG80F+77wTeWsGKN7CANQiNEN1teKTbKePgfSpnL8
uFmtFqcPHiw2Q4xKzYbT6rpeVKtsWDw4OTo+fnD0zYNhcQM9PKxHBchuh9XkkIX7+hAEr0Mj4d+s
ZlN99KFsCCz2Q1mQCRmkRhl1tXxna5FEyIgpSR4XUEaRkYRHIhNOH2U3QKlQKNXR5ud5MWPl2KU5
kgC7gocOc06PtFCtclgY1zKDH3BtqDnECBjs5VyeD7gGvHPN3wGRuTty54a3hqD7gfRAqiNtBzhS
aTFh9gY0oVuzSv4DbAlVg9EYJf4aOEWBWS2BWjv2itquZ4DXVrVzXRgLrF6q5cS0xoZADlZGN8fa
aUQMAvyhWWC20ibaLMnsdAV9h+K6CtSdoBht6WC0LHz6UpgpfvuB9yoGAsBg5jg8euSsAsQxIBaj
NHQz25nENhXIYWgl8Oc028w29EE21xVxKikyey95UukS2h4kH4awPh4zjAgOwyxJHOg7lMooIA6W
CV48CQ+IbDZLn5yadXEaPRRGgH2x1ou1G6x1I/PSsW9jZuSmyBVTENEQ84+qUC68GFtPyYdvuAG6
SwG15JlE8lBVSzonlCbjh+mSKMNsrkHny5zTa+ARAfPEN9K6b+yMMNZGFF2P39fBqMKErSA4PeI/
GPekQLLiRlrPy/frQncS1cpibBw6hxu77ij6R10OxViuB++lcdLs4Vt359rvj/AXoMPxbxYbsTsc
xaq/GOPO7mQUo7VlwtDAxVkoKf+KdQ1nthWvU6ivVugx4n9DZkerMtCmBTLDIqzya1OGDtrmvHlp
LalmyByuBmAt54EKNVziCowoNd6kpGh2XCV2SZDu85mx5dOquXdo2QBpcd4QiFStNlK5dLdSR+su
ijXU2mWR72pRBaRN5zE5NURtdFe0UosUvfWVkiiuIOwkQzUCz9IV6L03eoeDpj/U8gzThghT65g9
alD1I1us65tmw0iAcJfkTp4QCvVc6m7O2RSkKYvGUZa/GZNHxYsZOs0Lfa6iNUuxIQb4R8ZNccN8
FHkHIfHEgp3ZkI4d2UjkCE39ED5sGybtFN/i025s2coop0SboDH7PpS/73jQkVeqB/mJUjmavYfV
eHPaiCwg3yK66a2ykGezdV8+5y7ghbgy4aqAaTEcIg6cLABxfUVTcT5Z0Tnv+B1VI2U8xP4PBpP1
ihLEqipNZ/JpmdeE+3CJ5xL9TGyrDf9rnLd7zG/U8zgNgiObuuJW8J+Yq6Ls1/kyTi0sdHLnGPx/
7b1rjxtXtijmfEkAJkAukOAEyKe6FAQWZTYtyZ655xKmBxpZnqOMxxIk+Y4O2g2KTVZ31xHJoqpI
qXscnzx/Qv5T/k6QX5C9Xnuv/agi27Y85wIxZtTFqv1ce+2111p7Pew0QhXBtoIB33dhKUSWuO8x
/JAhW64l4vWB67CoIwBXlGMbhUhDC2LTJ6dF86qMOaEwyr+7epjKuVotJD05Fh43KQ/OPkec+frZ
d69mbMaCIpGp3mbh88rhB2g+l2UDZ8gyZVHRZfITfWIgfzrFiBhmAMPsJHuQsEiK1i72CURHxTy4
/3LAJocACKvGPqQGSpSq9KvsfupSMqMyPO1/PwXVneB8yhrMIgw17Uug7T75gHdm/g96vieF3T00
7vwUMf9MJMJpLBhO7ye9KFE2gbooL9DOPNPOUYjGp2YYE/N/douCAeiL4cocjJCzgiP49LScjCM1
7bhGiV7jt1isRn3ekn3MgnSikO0STUOQffXEanyTlKitYtcuOjO/QRIlbyDmTDCjPkuH7yBFphXt
i81+jdIYNdwRtcEfBkv5jjXvqMnbrJ5vGmTBCNLjzvJmEmOy+KVwyyTJYa8dUSHa3WZ1u5wCmxrr
LN0+55evDkzYYtAR3UlaR1KLEJaYgQ7T/sp5jA0ypFZ664KukOKKTZI0w0YWxsj6D3Zyy9PaWLRb
wpnvsvQwocthG+ZqeBE+Or7EMvdx3E66AAe8l0Ie13En+x69sNyts80XA86paFtpJOzNjtnCHYd8
wDCUVx7ZZ09rJUGIZdMG73wQVQPJJ3R2taKOzNR3/hPtFzhe2smbvspL4csQhD4AwGLtHry/B4CA
uDcaADxqr/cwQrGSsnhYvLehXyTWEFMqJ+Skc7qqwU7ojAcUzSBR51uwKRnqYL9lYzjzm9aJPQZu
07+Ow2jdnh7Cm6tOcXpVrLYFph6mqn3uwvXPJXSEsntxWjccxZwL23A8dHh6nRusYFjbyWuOzQeC
Zf3M1ucaNtxkzi3BnRpgWAzCBLBhmDm0RsdDjhMZmZNgpIwz1eSZJU17sKL5ivV74aISE+0eFL7n
TZ1LJOfvmN+Dc3AQ0A2Osg4cisXZNmx6ofOvQovLVlmXdI2mhbHzpqRI9KhygEv1g1VDI12JP6Oq
We+GYnw5zt68Abe3+8PmzRtSVepmvRj22DxoQLCAdeD0e5DWrWqn3KnAEehdi2I2byigYJwusEoL
uQoWXrBlkagp9uXtKWJaPD+1j8C4wYxDhOnfxetg1z/c8zZKlk0J8fHx8Zi0UYQW8eJ4GFCEsLVG
HKBG5eS3WrP4YU76YdsGG3EITDjGtNML8FLGKpLMyLzuOhlxUXSy1AabZC1lcefLpejGbL2U8ges
VcgvoUW3wXNU2Qus66yoMYycrZUbop9ueBBOsZIeQQtitSNHIjQmanzk8OqoqkhHYraWdqxmMQFR
VkkpVQ4oz9JbMMAh27wKKs3XHUb8dbplqx1kD3i1/DACTzcnutAWDZ/oIUHr6EjJUmJ0CeawRT2P
vpmXS5Xte+yUpB6nqRwkhe67ObAGJoRWy5IL+KZ6TX1pyoYLRVVfIiB5G8K4Wl4YzrfFjRVIDfRz
83uI29k8oA8/DWkM5XKldGHEQe3qlG/lFzuo3vABLDVpN5hDa+hV5lp/LDfPyPQFUWIkt17gxqj6
GCaZUCpwe5LrVvL27B70+fPYvctiU9TlYkbSHPMdvtRrkOGfrKelFU58Z342LkFkNYPx+BBWTSrh
g6QqK3sEyXMQGsxe41CSaWQjxHKMxlh4WFNQz9W7A6Op0thmrVxWcOfgXyrqS0+ZW8icHL5VVDeK
Kh/jz7pZDBwuf+7tIp5iHmFrv2FMHhAKYp7awxe2Or7JHiH1FkU/H+O7hJC0eJtoSdHF+GPyJkEJ
p34kd/HCOd9vFleUesJdrbkDeDuzkQdHHkBFzMMdxgiH16a6S+Z4gnsm1z5ozWxmCjs3R4CKS+kM
TarAVsuQMDa+AbrlDfAoSfPpRS7NjrB/EFx8JyGZzbq59Mm+tSGWMbNYNwhtjAcj3UgYS1ADsf/D
5iteECCu6mNHMCw9DjlN+v3Obg70AflNVhTI2h0VtpuRmJAByKVD1YyO7+CAk2B6pBt1Pq13ATD7
sXE0RJPQYDTbPZb//NvKFgmwuF5wJRRipSGDA2YgZ7EMq3mBRIT3r6bZ54lk2jPu4wX8MIBahM3F
S9pVL6wNWCsoTfW8Pbcq5jUuKCU6cZQJ7rgLEjnYIAgbHkccj7v/98bo0biOo7tdXynKaRvx1zbp
a5EXOIdppngFW3Kk1hyG76/0MJnRKqAD2LyG2TfltR+jwLuuanZrF6NYGV0EXSmrP6jhSIBcM8jv
0A4tVCe5LsJTHmedVKPcyb4m9oD5bHLPQXEb4X1BAT/naGW4clKhurO5026SPQQ5cPO24UYWYDOM
6k9301ctGr2zcCKi+Oljp3AliRusdZNtdCtsESzkBpWN5RKaIHPMHFjEU6ihLIKXFRvN+3q59LT6
oyDSPjIyTAHN4cxHzbMayN4pjW7EXZxpmrIVXH168eR6m0MzzMoJz4b9ONopk0lqY1u5wECnSCjB
AyWkIPN2L2FNM3s/rzuUsigPgBQR8Ki4p0C4gNVKbjCzmSJ1om1tZI4c3GwthJgs68khVPHQNIFx
taVaz7Q8IcwCcObTaKfYDWt4iJkuk+Ar8MoSEA0uTLlL4kf9q9Oj2Krn7LCMNlAY/22zHIG8Xe9O
FmW92JNh6AUbHPmkpRxl7/3rMX840c14mYiiDjMuNxtkLBPXceihgxH7wKQDY6huawh5s6qqLRtJ
ApdwXqyqDy1ZtJNSnGGloOWRGgExU+KAdaAt8OO1NWO63Q14xm52k/C40fd+KX1cJoVP3iSR/EmC
VshU8ZA6uBzNIQUNoNpTkUkzLAMy1WwHeTywGmAIkUP7wygWbJl9yTgfYw/ixtRzDfFuJWdtIUu4
YgtjDl9jRqADS7tZdsIzhWKH6ptOmF742BUXA1rRO4ChXhlcLcUwp5hX2sX9UaYIIkJnvyanoqFP
gTuwyeuuXRviITFT266TZBWdJLKKdJSgY4yX0IxMaaeZdZk5xSdDrCHwKljPz2ZuXZkSznCPyo94
r0oT+L1LZ6LxTCqNdC/DtEZFxn03y/UoRvFBijIPn6PgraPd+27W5xWM3LrxnOJTy9xXxcWOdWzy
GEybasNHNWqIHsXV7HOyHn71EUiJevndJsP/DTGIih3BiKehWz8EcQKKmo9MWzVSt0Bey5kBrC2L
OEKKqCANoYQv9pD6Gf5NQADKj+GbYkTqSywY6F6gKbiajN++/ZB+DxFcwLRGioSJTNAMqL7M+IJy
DK1MjjmRTEGfYsnYrBhn8NcX8WVOUsJfLVYbf6jqpR0N/z5uRFyY+Ix4bAQhTYS5gq1ovqdOyGjc
qjxcm01FA+F78cH4AfoxRGVdUpPwqh0YR/9ee88020kaCq0dU62D3Sb65Qb7d5tcdqnF9lE2MP8j
51LbmgIyjCvkItymEbwaqVUcHTJUYvjaGp2qa+7eK+Nt06O01/aUgBS5MwePHzY/3oUu4eknBIw0
P8rcU0i5HM1x7UUKciujsI7c/Pb95AJbIigwXuyu3Yk6TLqW4uz8Cwdsu+fUJmhzin8SBA37CbZh
mhi7KYgZK1QlQ4ykhPaLlmaLPK63JmNalhCPoWTLSrlJq9G2HxfROcFKAD4qzK8wrWC3kIAHF7be
cg5Dk+ExTIyU03QFkjPJbRijzzplYzPoma2YAAiFLLn7nLDx3lsrbOw9BfxT3SrNPogj7c2ItHKw
nRIW0/ckd4Mm0I7RQR2fyB17Dqb7PtUKrzdu1sEb9kuHlwL8s1DiBe4BbhBnzJDBsMtdSH6veeXw
KV453UAkFcIQrNUbtxQIUjdrj51LM3IyR30QvsQcreuEZEjbhXivBOulphLXjbpICI+8repAQ+aQ
3MwC5EeZ8dlRylAtEitsOy3PzuxOroORpPdVYs0C28hkfJcgYsAF2hCBmvG9Eb18LSPKQnz6eWJX
GGyhPzpoNAvzfrXfgqGnWWFfbrpFZbfNf3YTHAriZ9a2kSCSRwDnybR7PfsqTJVJZ02g83y0wbse
N7nOuwtswZXV6TaPvdxuE3xZofGufv0Pn3zyyfZmPFuQ07mgkGFI3jWv39zHZNU99Bzy7pkLOIJ2
4kxPxKncJDyLdlc15Fe3JoiPXr4a93RCdrmmdn1Xq+V4e4NJinZ7sGsa93Tqa5WVGrJghzmp7WSi
pNSJmEaj7I80Lv+aq9frdYWhxATQ7WmNH46y30FmY8lr8LIoxLP7fG/Yf0oUPa7qy88wIvKDL/7j
fyDneqAJsOh5n5AGXIZQVoQHFtPh8VsMfQhPqBqHBzBeibZnH1TNQNCgxJ+AmYLDh2v8M1how8Nj
2dwZmsrGrbwg49v+d/s1/DF0FP5Yvgnf7c+bRV1ud1gObwFSY4GvuL/4WnUGVzo0428Mpw0L8HVx
gSOBQ4OfycYCZ1lAXj3sHWlr3Muj/aV8yvrPa4O08PANXl32/wreHwQ2/GlWE9uH2764qVf1DcVx
wVHXN2xlx72D8Ti0hLjlnsCLJ27qCcSRhTXAaxJ4Ar0iDtFME5cZvG9oNTYGH/cWQoATM3FmaYpd
fsnuBnPQfuENiHcBSEikwHuryrgeQxcPo2zMviR9ROjgouI8uEPejoB6jRqC9o9vyA1fJXU/clwH
TIiOHFSyFSg/tLEtviGlZm5j/rhYFhTwkvJqkeeRDWQPtxdAC9szjnuqnmm/H/LolEovTODRan1J
RrhOdYRBVexwbCzHi5ruyuhuD9S0U/Rn494pY1U6GLSEB5hmX0OG+afStuEksNVE/GWuwswnxkpU
UZQYshPvFor0x1HWknDhDKJz6Zz/2jhbam7Reuf9XMVSRIMnisOMxsFgSjVHO616j6EWs37EUvS3
QHQoN0NkmDXCaGslxdaC5iH4x/7y0rBvYCg3SbUHTMbeGRgp86rz4gJi7KtIJWZEgBQnJ5uKg4EM
+xyrCxyDq4uLAl2+Z3B5zout87lDFqqaww77UZ/oNUY4w9P1mxovzc0yzADU+OuBH5HMIVeyXx5X
YqEmvVj/mji8pbzGcsZENKieCr7QVhEkQSO4moR9tacoGJ63ScaYFR4/+AassHr42ufYKHigqKiU
5RB+AMuWT40sk335pUgypHBoVTlDuhXUWGF93j5gtY4SkaS1z/1Z3j9TvssRZEAOAINmzc74vM9E
cEkTHO4V/pw++P3kTC8zhsPr9ShHV1Wno3FlyXBcWTIeV5YMyJWlI3JliZBcWRyTK0sE5criqFxZ
S1iuLBGXC97dVa+evMuSgbeyOPJWFofeyuLYW1kcfCtLRN/K4vBbWTL+VpYOwJUlI3Blfgiu1oBu
WXtEtywZ0i1LxnTLVFA3PlejQ6Q1ZJQtpILP7KrsclXM10APIWz3Elq7JLJs44CNs67jl6hUcOoK
HcS/tzKNvgOO4atzMEHFw8Q3ip7rMAhesCsdZ72D7fFP5yf2lAjMom2gaJyNpCJzR29npg6S9KQy
gmAMkmCus4weDoNoLfMs79RlZKn6wnieNIhkrGdPsqNyPc/5tovj8wFIcavyUyh0dgz4/GwRnWDU
0OOMmL8m+NpzmLQYSAYhaUDvQ3iJhvyUzw25X20gb+dOyC78o4FEMe0DUvRjbtpW4cL9L5WQLpsY
l++rPjUV3SejPUbSQ1BfHNA2RAdAvgMImvmLTjzrR0rRtonpUBqqCxYFjrUnTCCo4JJPQcIMt6BD
KLXlIWV/1Mk/JLdL/25NOQPBOJDcvzC0L4qe5gXyDGwqp3iPBFaHUdUjZCarx25y0I3Ld2y2Kk57
gUG/DfYl+4lSpiIjBh+Td0gyyZAUdFCL29yUgJKW08fgZUdkqOBaF82uWm9aZ/+SBFBPqfElTG3H
NUJCve91LMrjqGPuwqcc1XZmbTI0P9ehxw9Y1rSi3g7HdnBYFds2p1RC7wwjoMB2h8N9hgtjhqFe
oVlFn8xVZAgHqXoqq8Btz8VRFgxsqnDheChEc5mqlf1lh0crbJNT5U1HHadN2OJAMjykPxc359W8
XiK/Vu+3u5ahJepOjghXc2jbde4O56GElx1K0tUXIJr24LuUyh4/KDzTw/D6jCwK6yJhSujvMG1R
eDwd6zZntlbDXRbDqTMsuKUk8mUvHz2b20kb1QjPQSJPyStI3bM24EliL4ETG562GVz6OMS+Lx3R
yVttwyTIQ3v08tuv2ccxHAzIsbXzi1agxYTQn3mLGaB00MldqLqaeBeoGB92cB3HUlt4mPpTPJYt
OYJC3mLzdZhA3oJV6DRuVGPAV+0NBFaO6fO+ywzzSLtIO6DbHfjRjBI5eI8+66OD/ucwuR/5cI8O
dr2Afxd8bTcjteZ0tgV4c7QlafrUwN8/6jWF2v1sgo3/pFvZJ/yED1uPBt2P8EVqg0T2pFBQ3PXV
+Cx7u8kNgQ/4yE1zKtUg8InrK2x579uLcJ1h19C90slVPt6C9WiodNi0fjz4cKczJdU207vcsDJ+
HXZDNWjG1JW+R8PjIJ1uwTvkjrW5PRriBwxwExCHwv1fAw379xjGt4WTV/EAeFKGwbcDTpuRcAI0
bz8sm18JND8fNkcAByZE3zCWsZnEKOv7BsuWp+y1St+x5bMGqOrA7zjsjmZ+4OiFLmx/aBL9EQ/a
e/f8ef/C09Cxz5F5tDoxt0eon1sZYlMaTtht4hbu2PMY1NU6kFpaqajnUpO99cap+1Byce9FLdiP
11aN3WrOxKX2Fy6s08z+LC0hN0Dqv38zFuic39ZnduldkrtIWKRH+3fMhCLVdsLSO7kvUe0tBMfa
sv82TDDKXB4QfsVNG0ErsqFH0KU37C10beElzS9q5riFlAy3rOPHoJKSNRwC99iIQW3qfjHsCtZi
lA1mGH9qNhuMsh9/Su17xdr8StgCw57ZEGIfEWnCjsLMat73EKO3R14uHnNWfOyTIB2OyovbjGFK
vUbwTZIUQdUgrBBahvgteFYiSHzN7PFvNmX71x+i7RA1YjggbuKHTRicPCyMNib+q9MHv5ucPGxV
P7CxCpO7CAaR2Y6CyeQIxT4SpbSN8K+uc0/hgRpuAhlcUGHyZ1LQdS5Cti161YIPpvaBw2k8HiPe
O8ulFkhHMYQi7Z4yqm+npxxNXo9NYr601lEh5716qdgw6NSHhfyQOFzRoD4kW5XYtf5oDo2jfTyp
kSSsDuzYzjopcdLuIMLkhL1BTH81dnxktZPuqvdu9/rfffLJJ5I246qq3oIlzLv969c5eh9k8Eqi
xC5Yg3SB+WMxssx2tb8sMeHo5n31lkNtYloY8CIAIiW+B1xyzClT7mQnv9Z/PchoUu7K+ar8G9nX
/pqNo52kxKFeLhFEOU1mbYB6WShbKA7xPBe4oANJBhnq8ZIdEtxA3FBJhIZtGYIyhwDrFDyY0tnp
1s3ReAlpcWvqmSJnD8l0WY8N3VS2kKvWjYfXH8NsVhe4mU6+4kzFlHd7PV8WLK5A8nBxT5LMZcAO
8Urb9iUaKmJG2Uhca7aHNZtjDcuOoacqRFu6M8OIy2C2zOFicAJq/Iv1EurMyLTZAwGJz9G0Sll0
DL66uSgvOQvzCDvimGA2xPESWyGrs1SfY8wlbSk0cjPJARpmAsdIfYaDy5eGlSkWEE92CMSlvLjx
geJUqQwygpKMeRxBxmAMFcmx31oDglAD2sRvJ83uZmXhThlmNiW/ZRjRlaDOkHTB3h/Zem8W2mCF
tYKjmMrzbFIXF5M3jNRf0t+qXhb1V284jU1PQsFgUGzM7UbRz80QNxtJX0XhqOq9pJGcgPKYZjXJ
XlUYVDeFQaQMt3Rtsr2ZwKDNkLDu2IHIHJTD7EshaDTl8fOg1FdvHC/PvQKYLsqVDeKJ+zHVjymI
ndgG2jszRaEnLPmMlwTiS0PUkxrjFi8gzFKxtI5aEvIcOsa7n8kbXrWwl8f4x4BfUB5SipkHTKvK
2fRO1AS4mjlVGVDkf5Ju1hUzHdj422bb1WXxXofNxjVNLNi4awAAwUO9I+goiit1ymOY23jd86US
0ATFYQVVTk2PKmDKvzmCe16jVwhg/HxzQ9ZvkG5S0s0DSYZJvnnDI3vzpifpBlDdAdfSHClcBrjk
LCCmEk1KappBXUN2IKgjC84uG/PMoxZS03baTSnhlGUi5EiC7EE8o4sarG2FEGK+Vp8ULcgQF2Bl
OpiLGta3guXMo3jAcyI+6BOmRCcYBryFMLBmcNAJBOFK0lnkCw6QWfS5ZNIOfpMXmGGVVJxZMa9X
NzMhvCE5dOOmlYCmmPJwg5k0iJhiRn5l8IhSjl6k8LidKFsYtK4AJUtLUTMyuzsvig2fiF5+Akzb
QcySEPDU0LE6zLDtQDW77uAYxYkFWTkO+ma2SXENSfDGcZu8yrDARvZBE+fjUY8rY4w2rJMLiDCL
FDr9mMk2V2YveOijek1iz6/LS6rhIHp/NG7SdZSAJQOQ95sdkVmkXWV+I5Dh26Uh+KAupWwu3qaz
tQ5yNrbkjBgWyFTWyKBGmew3fN2O6A5sc0ZPngS4ca3nN4C4UNKMvC5OkH2wTCY2bVCd4vKP491m
R0go0o59R4wGtxXOtAQsDzszgh+EDOAmwFTmSmCgumMmFIBJgcHRZRrTDpRmZpIJF1RRV5QD2a2H
Y70sE22qmdXh3QMkgAmUGemyNCfErjKnTMNRDjnNtiSnWMMeFhmNkNaRhOSkjsSImXR9w1AAM7zN
rp2O1HP0x4bkhnbYN8HsaWYpVHXdHTu+C7ZlSg3Nmr3bjsF/BbAP8/H6GwjaGGePNpQWFNOnk5qi
WKI7i02E66SJN2+oS3PCQ64OHsCYpdtVRQ4vdEL6EEjMBJV9Of/Q7jWZfZeRQtC2kzqVYBvx92KJ
SQ5VSx+K7F+AwbcFhB/HxIRZy54z0g7kGaQ/yXEJ0e4c2bJoCjWsJn1s2OE0masAQZ7F6iBqdz1/
a7F6xmNNgVHo6Zs39utYdvjwzRsO2s85jjiv4QtszsPURHe/wZH0nBKt0ekvwlpdUPLqj3tIbW9k
tjB10j4c2HHzjPPOOxTp2HQBLZQ00QopMArter5bUIJbAAIn39QNFCnaQG3aXSxZm8qL7KbaY34h
EW84rbZqnXYtkWFMAbWsKAMvjIS2vCodEdwU4A6RNb8O7AOHy6ILNTJKgshRVg+o8ZnamHCUSJY+
EYJgfWg0I1mWccfAkTYcHDYaPMCS5fQcUB4Upo3EVRju3a6hIPLY79w2dajXSwqpUcxIQFgXuzla
vrlupUSWr00bpZENhhnGZwGRDbVGphvKWRgM6SOoKPky23LBH2/7BucBHS2U8BYONu1yDlBSIugI
Hdh9rtsIZJfYxHDcJg7MhCuVXorrXYABgWQmFRuDvtvPYBk+2xnZbll98Flcyx8SwbDHA+ibF6s9
SneL+dbsAXgqRFFOfJNmkYhU2wNZq5+gvYlrmrORQUZyNVI3KmDphJHUzcikJ8QmLMyBaPbY8mRX
nZwXJ5ggzvWRC0HEK+eySd1lcNb7AnhVw0MZ9m+ztKmoRImolBqYoCbVkNJi+csmIBcd1YSpCfgO
FPPNhJhbvLszW6PG9OfEsHr6gUZl4uJrgIgahphyaHOHyIcukGATI/kIFG7BTY2RjxHsUBZkeVN/
Q2wocKOroovR8ZAxT5Aux+C+CUGINA+rQN7BtpZdqahhzCRbLPaU8p2G+eYNlO1qUFaufcN50lBy
2G/e/Hz0Fdx1iJFCPFcBIk5JkzEO64SKaRQW7q24nsPVBc8erp3G5vTijU7cbbFBTeoFxAsyRL5O
7i3J6iizNptndeP4BDyvEB1O5Eho0koWp/Z6WzD3SUuijLc91khpdkX5Cbha1ONX5pk4TlHq9sSo
2FFAVZ1rPwVUsSnl2psHw/Knm4vqTevedHO4xe5sEw9Em8Qna4rQUx06CClLniX3Tpmdba8gU2N1
wduENjVO7GNcJPJgKcTLb8Nhc29E5mK1BlMe0fCg0nfo7t1iGsElfT3JCHV6EBpur0WvD1eV0EYM
qkOSHAvnvzZslSiM7Gppr+jYjsTwZVfV8uNBmrrBlCnsUmjv8MDXDtxH2IUnumvU5hgkzEP9soHT
r6RgRYiZYZgg4TReuLAAFPOrEp7d9wkDUvoB7vUtwYBA+6KhpvSPDSe45V9WZAL7YZKWN8UHiqgD
wZPmG/cCG7pXbu657KNSu2gMI4VK3+8qPIwgRSl82tNFHpIEUmPbPJh44C/pnrBZlZe7q9XNiPR5
5t1mJ2nomISpJkgYM4PYr9fg6uaI68fCuXJzsdoXRjQxc3bcYO7ZLTDJnEHoKDg/hh8NFWkEM7h+
KGqLhUgDlmWdOjhcKlEDOA5vTJdO1AiGVmElCo3eTTPSp3L3KDkhTYgJuItyaQ6BExAPLqv6hrPA
rIodXjsb3vR9UZ9XBmHR6+UCdbu617YODx0xMokZY0guL6gl79oH7m/huhXwbQ4H90LyI1pQcCtq
cPFZbk6zD/Ma2Macs/qNME8miU3AfF40CfbTnXRcW+kIzV6cZ7axOaXdpJXkEDzcNoLSNZ/l54YU
wJYnSQS052bZbTwRtlSVDjnKxK4khhJ1kbtyNa+x9c/soD+GXLusFnh+fNxDknth44uCAqpuQNWH
f2O85VUh05AdZxU2kCE2iptTCNrSwW+g3KNEnxjvBY8QifYKaOsCJ31kLR/eN87EnqRYsgEO3NsE
Nk6c69uz/8ouMSqBVE7opXlCOFfIAgiHMPAkC6BaaV2wBQJWauJG33JEA2odQhrknQ1K+cyWj9u0
KgRq1EgbHHoS2hllEZUEu7uiLlEWXmG+ASA7D8dfDKVnCDdFW33jFBQUyRAM+Ja8w0EG2lZ4aKIp
1jlvdB4FmnVwUKCltt2RCyQUYLjL8qKtM2aw5zshGfONgrOtsCrfFlm/eVtux9b4sJ8WgODwr2fb
5XkeA32/hRvL5fkYEgnuarRNg/rv3r/+LzHyMbTw7sPr//e/+OSTO9nzf371T8++mz168afHz/7y
/Nsnr57Mnv2557boJNtvyh0RShbKQOqH6I2SQJz05BSxeDaD+NmQfvJ0AEzz4AzjCkt4PghMMJih
RcJsNphkdzBmwX5Dil0Kpwv6UUPBB6xBPVnzpAccW/gD3HiVl2Z7sKkT2Pf1Ic5TP7MZXKEFSuIN
5n9IrTFWrRhaUPhkaLbnrDpf3pidtH5iWPacpTji+4dAfKAD6BtDi2MDZmF7moFhAxJpHuqOsu/h
EELr6VEGZA5sB6z5JltoeK3Y0M0zCbI86/WCmjkkRoPrz2q7x9xRPIB7zl4QhG2Iac4zGQKnYEhZ
79316/+a419z6Ld3N6//n3+goNfmLQ4BVnt7syrPJwYyW3NOynrjxSjk/6q2qEOAqOumUbxy5duB
PeZU3pbbt5dgjAokYDX/282JmIU0+3NR9fdQXwhycyHcNQzJ1DwB40lANcM88ArL8d70yKCSLJLW
8+2W2HHDwot9FGs25htk7B1MeqpVjHdhMQFvNHr5Ypj9U7UCzPpzXbwtVjhfbMQs1sP79784eXj/
wReM6HZ5DLIPHoy/GD/83aAnAbntKhIkAIFE+2GAjLTVrJa4aTRkp1ku3qIdqA77DeFGGVynA6k6
ACdObnr8aFXOG7ai7ksJcG2ENZYfA6pnMFmqMaRzFzkT7KymPw64gNmd/PQTOnGYARkC1Ex/5G1I
RlVsYQs89GYJVN4gjV1eKDhodkvT1GA8Mw8T+DFKNrCtmvIaDp1NBRH7G8ELaoRGj83g44RejIgk
D8zWMfz7IMMCM7iTgPNoQm+pvwEvFzSyvZmo5RuMzGAgRuEHGD+KmLvy3EiWu5seD5X20MnD8X20
a5hnF6u5ss0dAV0AHf4clsKbPm7Tt0WxzSoM/H5hsBQXXPpZ0YkzQAqX4eDwceRey41p62eiIv5n
7p0OKkjQjXrOanuC6fu89YLkvtQcs2/Q0o9WgQfeVNXCwMlqtgHKXHQiH51jwMCMB7RGyfJj/jiR
QqreWyPgDrQGXdeDj/A8wVKq1jeGrhVLCKU+iGtd4EdUw6tyVPsnwR7Gd3/SvD0G0iIVmshrNYBH
25J238Ar6V4H3Zkm6KAIeny6KR/zezcRW3jiPqu+0eoVT5dBqo76HAwCKN5Ry2zKpda4eb/5sBiE
awVkFL9MXr7f/PXxY7LCew59+XX3tVppr675ApVbqqIrb7JbCon3LfwbVjLNPdrDdNvHit99EN2R
K1xDpLck234GguQJ328Ck/Ho+VMCJ3w4AE7pGIomdw35tyTLj8lTcMJlvHov8dMg66rHZVStxzjg
LF0Lh4gl9D6DG9xBVw0qoapYLhY04INUFb+EqvoK+FYgyoO23lwJVQ0iazfkqTRoA4Yu41et5x8W
Gi5Bj6qEqrcFwwRxWW8GiXpBCVV3v4lqB3WjEqr2zPfOG3g92yijk6CUbqAubAjSmTmfBukGwlIt
LQxCoCVbCGrbMHMdtb1iunocH32QbiBRMNzsgEnA49Altui1GlAGCexpq/OvY4gnF03tdpumeZAo
7z6qGmDpCFbLg1QP9qMmsyiiBPtHKvBHVXy+uYmJiBSHj7qsf1AHZf3zubGYkRqGjxCGef5bsWl8
XJKy7qOq8ce5OeCEiAyCGv5HVUvlhSl3YS3/o8Y3w4sX1y0A5Y+aMEBShFlLcf7obwaUQpPraz/q
Crv6hgSFQaKC+6ixDlwhBy1rQR91B+cGfqQ2HcQdqI/eoCoIjt+yD/ijLl8258CypmctH/0KHR3w
R10e3FrW4A6agpL7GFQBlhGkxUGqiv0YVNKHR1QpPDe8EyOskCL3sD4XikOIFg8/ao7C7EQQJ5MV
7Ec9pNaViJZBr4FXUsHfEddys93vTqr9zvxBrYQYCA3K6jDbJGxtlSKky/32ImCbbPkxGSkVEymk
GQwzzqfPUiyQqseFNL0BQIT1wmpSSHNPXz+mj4OOeq6Q5u92y7hqWFMVSlb95uvB4aqmkAcgusT5
a12ao3vgV5Ybng/4cRKU9U6VppwhsUuMPmhFlfX5Mnct9aFcIiPf0kKirD6J5iB9b+tBau3k48SW
CpG4WYOeAp0Yivkmu16vPrvarVeZkwcIpc2HI3Aa+zVFTe0UWkPLgyyBnVwFv+vVml8Oso7y8F0z
E/MPncXhuyr+nWg6Buni7rumV3i1PWjtg78HgumqCuRiZ9RvpK0sBx3Fcr8w3M4A12KAPhxr/L2A
JH5ouw5OfhIDcHjEQpguUqsAsjzc9g0S5cd8DTixhbRczoNMVoTObAGfU5LJDJKVdAGPLynwejda
HVWPvgfnVWely0QllKxTaGOnFYrer75+9v2rQXsFLuBXefLiRXcVKKCr3DSINu1VqIBDtZ+GvXd/
e/3fqiAMfBP57sfX/9d/TzEYlmWzqN7zfTvcUXCRhrMk7MVTyLkH4k0vKfNnF3ugqZDVjRM3njfV
ar8rZnwRwa93Isb2vFuJkU3uKAMUbQSV+oa8W18U7/ZYGvRa/A5E6SAvJF6Ac02h1S/wLhD+/cYM
/Vu+rD7GBf6yrvYYLZxcrg2i4Ju8z3pKjsuBL5WPeP/khCF4wtBTSQPJJ3bap7yxu3oPOQvZOZay
sbmywEBM++GKAHGGC4e47aUpMu1zWfl8cIwQnqFtgGpsfSh8b7y7hoSIoHl+P6+nfSO098MB28Gi
i4T1rOCgSbZFMormFtNzwKENb+WaxVdTU/GXot9i1YDKMMx3BUFvtjd9L5gcu4mzj7kPx2QYua+p
yF8Sbis9m5cmt52axcsHMF/MV183uwEFWuKhig0eGK6VO6iFVYdDuMP5QYcEvBovIKJYfrFB6E6d
8zveC+U+/OI8Kjxu4PpiOEqOJtwx83LFhXO9n4aTtsRKwFGIQQhF1mnCNEu6CEa9dz/9gpK2zdmW
QYcgW9FIuMvdhyDDC5qAlRvVSBAO6gO+xdRrw/axjVVPpg+bu4rgAYaxuTKSbQcJOdYQdEcZ2ctO
KYPPEqprjwkcyB4SNKtuKA/w0F5k5rrFcALYPIAVH/yP2J35hn9dzCm2t8XhRotF5r2QYdA1obLi
oLmjJPmT3D3aSkLdQzNW6i3HZeEuT4VxzLnk2AAB7JY5CtEx8fmk5vcbMJ0AHsnl7gwClHFRl3hS
BoObKIgBNgefBACdV2nMH6LAZ3FJb+aIqWbncTgtHLH8DjMd4EdS48apG/2AWqYpLydUOi+UV163
/qnM07144KfQIlMzBardzbawOQnDGHx6l4fnby7TlfBbI2k9SN0ChA4RWlb2GUrXj+l9kB7rxZPn
z168mn3/9dNvvlFV9OtoDYTKGCbCXbDY4Q3HEAuE8kQu6un9YZwV0o9ZLinE7YKBHUx60UpTcz2/
ztUajPDFfQFKdpI9uD8EI4jXr1//IWpAEzs7ldNyQpXP0ilc/EyWd+9/vpTAb+WnD6jjIPaxG++n
0wAj2rHLdjF48voRGNxk3z57/OjV02ffZd9/9+fvnv31uxGCprkisxUMUIVsQ0X57AUZB/FgyCoY
zBG++uqrQSdYBKPpTsTL+nkEeAZ/+MMfKAPCAAGE/XbDyA5tPB4PQqxIk7s0tRu2wBUWgXfFmDRN
s2V5wfftOc+3nVQGZOmyMvPR+2NIMKKIjkesdQnnw4wnplLreDdeuUdabXbfYfsMT/vff/fk9fMn
j189+Tp78vrxk+eAOpwXtfMcwFif3qg4+OFZe29WOBnbGxQefX7vmKEzXxWyTClmqCthHwcnbjv/
vbNWTlnvKEbjYwPu4CT3gjQ3xO0RA9I/ZXw4YyKAZfAY8tkdyyt642L+Bwi74n9a+IkWLuBO1hgA
NRc32Rtf2HuDYSuMiITe9ONxkKzTCIIc3vxHl0jiYi1xFIWbnnF8JLY0VYLGBRr6hXQA3B39nqQF
RvCLNTDa6h2acE45r6hpcQr/3DaW8GLVME8oE+Pc3G5q1OOsJuhAyjwPXATutuIwi9WKX+qUx5II
2zC76sREEgHL7Qeb3NW5QqLhiIXQGTlZvS9CCZayEwH6X6zmZkrS/JNvv336/OXTl6OAyzL7EGQW
U7BcYEJ7HvA0nA1fThg40d4exbErZ9WGbKimYFY9Ek8COMJ91BbxLYnLEsThOFyG/NMOPmO4OiIW
bwq5vinAki92cozQ/crGUmZhzganGlNf8tNbgA5i4jfKA9ryXVk+/DjbL7VfAo1Nnhu0OT3DgNU/
F7fFmp4D4QPe3hJf1PTRjtOZxLO/gQ0BQfFm1sXiar4pm7UaMgQcUFtGTm98rwBsJTJbEKzuX+Db
3CLlqHOf+ElgyHp2w0MYw59cYi5wJAhr7HiQDOlN54M1IIwiijCH0YBRM9hwZ8V6u7uxKqqow5uy
WC09qRmbId6flQYIDIqIPOy9+59f/wNb7lKSPmvgUK2W7356/X8/+eQTq0NUJqQjsWAipSDbENXO
9rVuCgxPDOcWtHxZbAL9oUpZS5WS4aFHqYz1vR6cHBxpsbgmTogvC8d0aS50hxm8doWBn7wYN5U5
7kcc1hkTy9Pz7hw2+E1j2ZI8UAdAU5AeESIOce//qSw+5OQB7Gzq4WUG5IrMUdkF4OlF9hhEGHCk
O6fahjmHsuiGt8ke59dDjm2CMYrq6vpGQoFQFETrc8tvr9n+eb7Y7SHqnDSKoWCxOotMjynyPVw5
cYIx9DbM7slQ7kG1xwZ+pKQhdyiKqUAB7FbVhzEF4HxflRTDdd+IkukDxYDJ3sPEaRQYCy0eT+7P
/jEkghAwDHsSVo+nl2jpmoFpk1k0fLeG3qg41wuMFwkOZbZXsJOe73cVJDNfoMsGxqQ0mHlNlPgZ
mnxTZjc04EGviqbYkf266gwsrm1sskp1ApuBcVDDsMQQlQIWXi9ZPkQHG8cXd49tj7O+YwOzmakx
m7mBqLiZCuYUhIkdOeOlnM2grGlmt9/aWwgOYkr00uAQFXpb3JhyBFUz5j/eiJ6ZPEZttCfVOYXA
4Bhn4JgtMc689QYv5kYNZU2ep/Eq844xUnuzX1y5RtBF1SywDGRem69ouAvxANCzBEGI6jyamkIm
9Bj4BuIUsWRnNh0wNENyrWWnV7B+Jg806Vaceh9TD3b40wziy1Km0FGG8WwRf8HBx5D4EpiaArI0
4/EC7r09StPMPVTooZtqsQTRGBocyTptMvzAMWSFhzAwAiuumx1eCwD/qWH5GNDHkC4JWwfR7sjv
8JxiWPCyyq5a4cFNHkwE4SR6savEskYHPXCcZKeJOR0ZDC1NqfA82eGuuwgWewQtwJ1ZXaKvh0JB
miPk9DalrRJD4hVDuJWKgrJEkZ3wARtazI2A7zgrDWmdpX0GoeRneV1VOxwaQnqU3aNgpvfuQXqs
4ByBZJA4vXFUOzg5ZANjhfCTrYQF7K+eSvFm49k70OSH4tjbjJS2soXGqWnhrBcnHcYD+aimCBdk
e0Dyt2FKQNbw5cQr9mAO8g/d0WQ2ouMbYBFW2VVpKLTZ8TcIJqLAcHToVuoC9xf6tEl1WqZBA3Sf
z8lwuJIXRq8XD1LPwsG/Qy3A1R3cdAt4y+SaGMH6jiyl8W54gIQQFzYuGwfpSZgBEJh2KWjmsa4r
f0VigYYr+YjAjpcANPjektjo9DEWRJbCjRrGynUfj2WPnaUTDafygisH9hCkp2cKfAHiWQh6Vz52
UFMMyZCDaax7m8f7bujLBMHcItY9SJTlJjyiszWhbXRlgPCoGiptxVVVLnDQPECFKSGOhNlauG57
fi5vugoxQLtebHKuPwTh+kGyFS5xev+sA60gIu/6HINGQCRhjuQmdVPNwp1HPvjDgCFnB0Iirdo2
qK9r23UDSOhXDwc2rZU3XSfAedtzyEkZAuxYWGENUAE/oKmA+WNquoJAgv2rUvPN8SmbsF1qKUAN
kudUxZ57uwAnSZYznoolPHCKOXLLVtx4RNKYOQaKAoUTYinm2YU5ulEiFf4YLU3ijDV4zWVB7VLp
8e7ChCgxzIMkc5ivKl3hjuFLVqsTUAkjnPabtxsIKuRCr+Cwm/YDDUpy5pcntlLuL2ZYfqzuBAdf
wvCC2w2PVB8qvCB1PEu6ahTg8WKNv8HFvB4CDYbX+TBNQbtT+3mSbyKNTFC+K9GMr+jhO3keREBr
WS7WaBG9gjCVLgOYam7oXX/fEgvQXgz9go9HAtYyko4XXrzcrXf5qV7Rs+EhlDBD7V5k6uX4BfYz
B338hbVA35R+5rwWKplQtOThIg8tycGUVB7d4RbVQSapqzzqQXe+aSSwufQGlI1PsmyKosJm4xx4
1yeHMmYS5E3fYAOWq/3FGfh+5bVIEkDTO43+yKn/5z3V7iPCm2uQUDXIs+qBIY8JtFNtjv203/+W
AdRxGNKF5Hd2VkMaHBUL2Tqctt1SYVNddFvfOj7mAGpHbGYuetRMmB77rImMp/bS49XJSaIGRHRu
GC6gfmhNzaptzHdfzRsro43AuQ0On0GC275Doa8We1LgAfd2RemiVbi3+PadgO4jkD7m0kYO50ao
fBudNQ8j0ODb+FB6mIRNy9r20X5CbEx68f0DgNqLUhfAd5xMPteaIhfi7EGH6hUG3YPubdOH00P6
rEe0J29xvxuMbYozppZ/Xis4nSnBJpXU9DCRiAlFG7E4xKGpXYUDkg38aLM8ZvOaYsduXJsi3be3
whDuMADZhciSJdmwGLcT/FYbZqeSv4e4m9gLFoPUqvcO7mBVOLGD/d2b2HKDfJB9mrHfCCV+93LM
w8fhQJbqWX3MSj2r//+F+iiLZMDStUa9O6Dg+J5i+bjbnum0B9FW5isIYotwRvV/I5pg8yRZig28
f+SrGcP6GlwjrxHAuiBRKDuOQLmnm/cQatSUy/81KDUU/5IxJfIZcj44wiYc6aMakgKnsCrGLNIh
eHY5IX7p6Uzd4/AI5Ekc7gcxKLFYrlOQJCGJUD5IA+92/3Uj5u0OJjfGn3esYKpYjdS/+aHSIz0v
o7VsXodRQ94NfywT2+E4/H+0XDL+5yHP8Gl0xg7Vhni5P2+reNJZ8S/7VVvFe50Vvy7ft1X8rLvH
qnWOdzsrPq8+FHXLUNvHmqYDtEZ/F0KAA04SAvgyjMq2EgKcZrolgkBc+jZERe3Ygxs2SXZg8IMR
T7idjBzdHs5gMJKZqPb+nnQJmWZcp1/ONNPM/m3RN7VTnCoL4umD5dxREjCX9bUdVXVYraNuhBSo
2MIIWhgOfqny4nanYjiKqZZl/85qELalShADNNjyyiXJQDtv/H5eoyWl3owXEMIP26Lp/5RYP694
PvB47blltMEeIFJMqKWfk0L6z+RHnuBl2cMc0G0cuShRnGn4BGM5wmPAtRY5K80j+M79XTpvpa9m
kqImV1C5uwQdHVwXAoj9GvDmlKud4QTSXL+Mt9X5itfj06kdhOHdR4OUqiOSTObtZDsNPdfZ4G4z
vduMUAnJYxzJCIZHdU4tBA200H0xbYYg8rMYo+zr9A6xn4fpWrdcVqg36FxM13JiURUM74EQ1r5s
SahhHTX01AIKuJYt8FoeANiyBWLLnwsyMAbqBtnyaJj9LKBhpeUBsKX1h/ndZhhrD4nOas0h+lXH
orS/KuQIZsbUwJ1zbgYf6qeFvNLD6eTkwVkvAYaus/GQ9tDw0z5B+tg3qaxmQpipuxBCn6yqPd09
8g4p1X1tJxPfpx5gdg2t+WHz411Ad3j6CalObXjNUZa40CMm6E9s4HQED8RFf5tbgOQBjKWJmtKp
a4bTfT12EEmOEs5/kzv4aC15pnmsvvcmrx0tOKuEy3K/qNaQBlNsiIUfGZEFMka4Ltk1g8JyxwuQ
D+SCJYDVCO4EwCtkNuvT/d0gwYjyvWa4ilIzWsuOqzyYxkymYJdT2OLYR/V2q/3rLnc4Vsw74ek5
1fe/EwVARc+L4sQG1GSDDvDaozjcO874OJ3aSwd00zjq3gFLHmMDwt78CWJB3iq6XJJY3MkaSGhY
XtxkA07ghDIHR3Kn52nmuQtD2BB2trEwGVOCZjq0sJaBptSmSCJRGAKvvjvyAPhhNISwuMQGUK9O
H/xucvLwTM0Mg5hJSjUgY/Mms7P8UlVVVis+1aMccgcNe6RN4CHCYfU6r1JUB+GMU1oMooRJt5/f
Xl1gsbq83ByJ1abkMVj9y4/Ag3cmqVU0SA5/MNKEf26kbK5OIPQRI9dOjPDnOMe1uEqQ6rLxAOCE
clIEo71PrK9XRHVdLbvNtEwXZ375LsOsI4yyTAspm6zEsaINtP7OLIH4zZbNYl4fdb/LRf/tomSE
hzxHXPYjJgjljpkdGtuasl23n/g9goB5OYyKYUAenj+ZBHMKNBsegPsOZovdjiPju2HPBqiwL5N3
uiCPgV/MJOvTJvb2r6+xCKqRGS84Kza7ZbXfjTGWZW7EO4jqABwBingYYkGZPxfi8OirJXDOFAWr
YWjrOEpyWRsrbeg1eU1ijFv4nT8YRgUkkAVGe9e4xoiKFszQ+QCQkaOU8DGllGvEjPpmwFQ2qU9E
fHT6RI8eJBWL3Xvd7XMu5/me4vvIg/RUibsRViXWOXnsKsVlKF4TBhR1LRjgvG4pGMEkE2xowVbA
+Ts//z/DW0KIzc+yJxsD32xbGSYGkp/9/AYRGy2najl6vrOi/NUIRA7qMOl5Lvo+CjBicRsYDnkw
VDhxh7iuPmRFoyb64vfdi5W7PAaDzQboGK+pIZR+ZR6Hk+PR3kNF9l1TVOiX4Jh4M4VodivUjoIz
uIiT7nSVMC8UltxUdKRPrVJkhpz38xBJR+iPDOnXwGcSc6AVyLAApe9H/F0feyTLPvByLZYl5LNC
2gae67tsWS5pH5nmx1n2cn95CVJvtTH0MdEeuLeDEB3lhJWUvirzLZium8P85GRTreeX5WLYT+3j
2iWRJYQGv6zmMsh652MXf4udiI4MCMQYzUiKQZum0O/uHCP07c51gS7svMOjd5sQGpBzmE5o9Ja3
kcsEF05Fvef0frva9Dy2IuZwDPHft4wq1+i5Fm51Uz6x29FRt5d2wbqGhKzanZA4LtiWKG7kfdsL
L00BCLI54VStd2s8L69HzJcyAK7t2v1CVgCgwIevGqREPPMEUwwYZaRKFaqLQpHdPwNNaT/LvvxS
DEDlPB+28AnQDOlwVbykHcbszJhXSPMJoToZ41Vc7zyx2RfoJrI/Bp68f01y6fXu9MHvJ2catvCS
uS1g9H5jvqP7uEidFB+RZIdsQVf2Q44rzq7QLuzFRR47fPzOfr1MfP3cfr3KrxM+dRtwLCc5jHjD
vukjuwdtwZh+x3SPvyG1zYfxy/yCbf6hniGe94MyF9Tcpa0Lkfy+0CVK+B61DfeQ5iVWvu9/UoTh
4aeff/qFwa1VNd9BAz2OIpP3kfT49a5lXq4UIzXPzuBFVW2bAVejEubwGmUL09qDUfYw/YUGr7uC
yISn0KKZ9xnO4Qt/LIOrYrWqBqfwHVHgyut1cLl/S/exVwgF8+3dv77+7zjUS5A37d3/8vof/isb
5wUSBMrj/pyLukDSFMXFfREO4Hm1LTaj7PnT509YYKDGc/M3zum7h3RihiXn9A0VSEPFYo+HNuR6
G2D0ZSO9mR2HuSh5xD2VWZM7GCN9wfqc85SV3wtK98U8xNhGR1HZVCGqg2n6fbmEzI6YEnDgKc03
kPVPtOFYGTqTrBNWnc5djXVGZwUipiYQ7AFRxXYJT9vCDH5joGG6+UzgwgnhrRhniBjHYpCi+dDm
ezf8hmGRSg7laHsprkuIvzP4/tU3J/84cLEPMPIaj2yqhjnGJYT1goPVoBcF8/LYITM+SAo0X80k
D32iEMmcU92ywQo4gkESCd8T1poKIwAuEGXBTgNUgIdZf3VSGmBIlkWglXDsfZl9DgGSIEPtzecQ
bcZAKwsgOaLPDwEy7RoghvFMgMyEOQn9O9m/UPR0yOgN8L6avzdn0S7UDNlYNInwCS39sdBu34LZ
Mq2irW2+m8I8yRyhJ3BL1A77UQwBglzaMb+8dkBwPNwOpYFXK7etVivN3MDnkPd6Qvu92nyD2zOn
UqNM/iIaClZ4XJd5KZqjsBGbBhVh3R4m2naGyXnTXboBW2567DUTaMMsFOgh+Kg6okW2P/2CZgzm
u/k3UKThMpl//deEBQgQN9Nm1+4J3w8gZli8ZYYqAGT1cjWTkR3QyI6BkhZfk8jnkdI9RmjDWBzA
WkFW2eCUoTUZ9HyyPQ3XsGc3ZfABIh5QBERMjht3MGipJXxSNuAR8MbM/Z05cuIAww3DU777X1//
Nyqlw8KIe+/+t9cPfi/JjfHO6jmmUP4Lhb0cQXCvcoFph8v5qvwbsdsYp8cIQmbbUFprPjirphef
vBJdzR27dwxhh0hBXkYHTHRLEbfKerFfzSUCW9PrMWuCSZltXloJODvuD08nD8+yryCn9gOw5fhi
cDbKQA4yq79a0ZANgA3XuxaZGLzLII5TVUHiWYg/v64MyQOJd3tZz5e47AOUGPxeh1ad/Wp+iTkW
65atGYZim+3mlw9hnROxDyk/T2jaRnYVEhn4vtsYl8VOBYmPt4Yd2sv9ORek4O9D5UrNLtIcPluC
4s/hfEH7IT84izL7Mx9B+B1h9MTImQKCMKBFW3OqDWDYwqiRb551TGyyJu0YQKnrkc2OQNHP+iTr
redbQ21rHrEyWxGgGRmybxh5BcleJJOeej33Qamcnd5tzlCXaIM3c++jrD/hzgFWqs+znqe1J29e
NkfY0IScxXtHZO0G3pHEyrptGYJqchgFIaJsB3Z1maAct7oaB1XocUr1GNmgCdwooKePRaqH+F6f
esgHnqB+MLiTbJtTaPss2UNnXKetdQ8AuDTFjodBIKEf4V61+5EevOoM2ar2gGrftu4bKufrbnDa
c9wVFBQGfghhm/SHXYGN4jsD6oFUar0uIDr+pqojcmZoRnuajgrifiPihwkfKgwEDH/8DzK9+WWj
WoXYhLbVewFG2gbHCSwehwjAy7K+CRcmtSSu6Wglx+FCHk1u/chhue1jpCDwqaLABHDvoPUjayYg
D4nK4RxtorQf2Dm0/XCLLaZC60JsKJAFgwhxrgBVDczL6RPn8Up9Q2UoGAIJ7uR4OZ33qTmKnd0f
JvuaLctmx7reqM/mam84/w+bxDfM2D7N/sn8eVGs5je5hQyc3hAheLueBlFDltWMwiO7bCP02wva
0pLjXh0/EGDZa2voUYZZXVxChs0a0Ruak874p+pOaVt8X/++bUVqRQAMCwCXzY86rDsVks2AQCcM
5ojh27qAI8e7W9IHgkYqXFY6fxD/QE9zkgyf5a4dSS+Jf8zKSNQ5GchAuNkBEsW8XPKnYcJ6vGxk
OsUy11MZpqwP/hOckKwMp12WzVcQB/Umc83A+Tp10sIBdxNOncM9R+BRQ77jdkbel+7M/umPvEaG
7Rv4VCyr6aeiNpd0y9GNLbxCPS8Y3WV7ShHz0QdpeEPLeJI6m5lsCPMSTi0+tPxq5v9geHc/Boow
smBhaFF6v0kiNaPzJtCp8wyYKCa9MCzBxGEZyNILRPTpJrYSsQMncSGab4Lvw6h88XZiRjCOlU0V
p7L4scXXsli1oYyD1Hy5tDQ0dykGomNDysgCYiFlDL1pIBq615Jq48MVZGrzWwrMkEx7LpKmdLat
tmE8KcyocPyZpEG5WBXzOqrsnXi2jPJ0UwSlhUImSKKPIsOuTeXwV2OHWV5ryuPNgm508xghBEen
ULc1nqS/Uczy46koKiJzOiK5vyivp33OiNcPkQFqjGeuqq5Ef4aeBMrL06KdUXjPBfVZ+bbclhfr
soEg+WnWSocFMgekhnoAIq1E8Hc3qTmgM+FIIBoThNClntFUxN3z4v2Q7aqL4bNOkiFCpCDUPr/k
hV94pX7Ij7KNFhwlG7W1QVycKIrAZkud/ZLwF4xhAKcNNHbmBDckRG4X7jgutti8t6gAZLEO4AAb
YppVzdgULOuKeuZiiZMM98+BTWfKyN3yaBAdKZoKSKBoGGWIx2h5Z/aA264ylf7zf3715OWr2fNv
v//T0+9e9lNxqggrZ4IBpp1Ut4Zn3G93Bpka07hZYDIpCkeS9BnItm8vzdlPdgT25gqEVWpqRm2N
wOqSVINltfmu2n1jo+sr5HiKtdvxA/OIGbg3e7joYYHAj9CCAYij7vMBodCDB6G3FHOHxXacSl4H
7/QNfIpoJfjM0/8wiQLxcg9JqpvxeZ38GPcEKqByE+T4S5qDW8bC9L6q5ss86TSeWpoje01KUnKK
56bXJV6YRdypMnhlTso79gJqaBEVWECyF6CA1r68TtHNdg9G5h8Ma/Y3Q3Epmjjq+h5MzuJzDSpg
gpuTbb8lKoDrHsdo2sqhh+QAbQk7Qp/YmhceQm2qST+Nkqbk6ecxHt3u/LdzUDyrYlM36TgPSSng
5EEHPx0PDGbe4XMcEyeokIColziI5GWX1zbBGtlp+iV5wv7L8QwT8c5mKdJpR0Blg/ZSQ+WCNFCw
kFditmEBldBkPhqZiYkJ8yVGYMp9kROKHoqEDWVMxRVuM1K5JWgTd0+lA5ZXnS9xh8cdJMFHmX9w
xMaKQi5D2sgOJlfaOnTkth9V5CFB4wwaTXAo59W8XqKZUr1POhYcfXCZuXA/xx4jmhXy4coNweEy
vKU3RLc5VDf7yjgn2iHJez4AfnZgfW+1UeVIs7zOcHJ4jIORr+uztxHQ0rZYZpZ9ZkO63OFQMV43
l8PhQanfEgjchBEWtO5/2NAO5UWuU6kVtFqladUANF0qABeKnn8HKSly2w8EWCNVvRVrDnHoShTy
pFJMSXCbSwx7AeOLxJgwxHvzAZjxok5E0hNtyEYmfoRvG4RUVzRUGIbYufVIS5FWpkbFPYU+Da6D
KMqTSQU/lXkKukKtMBhG3KiZJKa/SbUIsLx1a1Ap2ZhScKUaS+bATfe+gkzw8Br6SrwWSHSqQRDh
JFPEKr5BVEcrXM94x4qsOgyLNhzoyj02kJv5C+RCgNhCOcfin3JCdd64rgXZsqf0cDaMg/5QJ1P6
M6K0ReQeQkbSY7K1Q018zx2JPHQ2IIBDk4bJLJ57n7R3xciwgeyrgo/x6TAjyhSeH4baE+FqTs3f
s96h00q1p4bb1awrdiZ3SRbmzjqRAWPkNUxYZqSxyqWp2JK5F6jd0FXgM0mb4DIKUcN/NSL5Jdy1
/JUQLE+dKtDdHG+oZEc6y73zAm3JFlVdF4vd6mZs7fZaLrowC6pN6u1vkBbrpbBSfKawHhL+xB9d
enJ+6rVdwjGUBPd9fAyvUbhtStfGm45/BruUWjNl6CHlmZjSgqrOYW7u1+FkItZ6q393yT7YZjZ3
Kb0cGcqD0bEewHCU2VcyjQhN+19aTDR8gsBperf+Ci2ytDEWkw+lbOadHAx1YyickRY2YH0UHWnR
eaVU0jzI9HmGm1wXSyilHf1RNwT0ghdymAqsrc8HFDfUIdZvCaxd7hD7oEIOCbhiAwoNDnUYl2CQ
Au+ia5+QlYR6rWJhV+uQ8esa4mF09XCrXtxNHUI1ojEMuYMR4PoRtWErZ0ONMLFMPz3YGs1XYKx5
O5AVzlm9iiwpfE8kck+f6K7HgwtMeaO6tQohYZDB1UXTuvghuWhfGpeeKiWLHG5HS1E8QJ36FjJS
TiIxWMB6QZxpDd4YTbHMg2VI7J0kv8p95algZngOv9xV26c7DgKbru/x20es76+JzYLEa3LHm28w
eaIgtHfDwNRIn04KSpZ46UznEOKPQ3CVG1HIW4J2hDhArXpB0xw9t69TQkFatHEKOpvzejZDToWM
hPppkKeHER5IfKIia2jnqq5ClR+HKbin5DwwDJTxOCCisEkji6mc0rFG3grcqSTzLWSMNJhQXQDf
RSGFISpRXxiJvpw+XppKcmj4xm4FSmzZhwn1wWHcDEiZ5pWbxWq/hKx/mxU4PtxUe8rWOsdbLc4t
a76eY15B7hAzZT1Hvu/zLDfn5OIKjirKO7pFjpKTFelqTFOEZxP7ITkgiO9yIZNcem4PdSSxGgoh
/ZksRN/jjmPssJswkR0wuM6O70gcx2eZxdu4M7jbIoujjWUc7LVu0qPdDVUQh0aL5pR+AT4Rgsmo
8fuAHojh3IDv81ODS7fOzkYfFhQ9QLwFTfv8UhXzgAnOY1zEVLFrdyrNT9RHs4UWBnl23rq2Qfma
8pJGHcZYYgpe6xatcXsCVZw7K+1Rayd22LIOLMc6r8ljLbCqC/x9sIbOMs0wrfZHaMvglQo/btew
WOu02d12TY9k0IhXttBY3zdIkfaB3Nu+UlY59upfSgkwdHbCwLRAA4/KTtqmJ4yKfeGGaRotAHUp
qG5sTSM0E8+rRrWQNqhOXCBGg3OMghai7D0B0/mB+mz2HIl4URtXCzZIBLnHGifR0LWEqJ6HCW2/
1mtS5atFXE5g5ZmWWOMzivGQ9yWh96b4gGvUb7HtwtYO2dDZEAOQ2RlavMtYlaFKOGk+RwDXCDJ0
UaodrDp3ZQ37NgHIlOUKljVgsc9BQnt3SXysEJ3ccn5/9Pro3J39L93EM5GJbeYvLweobx99L9K1
Ob1CMCJDMcaejjzIJeZZfCwh2ZbV01mVnvIiwO9aA9j8GsNxqr9IiX678UmJpHomRBOyyVSDCMUw
V4ooKnuRfDrN3BXsGvZ6pOJMa4RCBOu8KkABcaH0mbFg2HJFyLamZneWzVV/pJPJ9U9OvuqDd5Oa
ZVIMa5n6iZ66kg177/731/+jciWzUU4sr/Xu/3i9/x/IsYzZS7p/MLULCmiyNAQP3NoyuBgDJxL0
d6Y4KfDdttl4PmbbG+1jhqYwMop1tXlb3Gwhl7Dcq6lXflkXl4VL7nflinXIfPQb2lihtpFiJYgb
wWVd7bcSyKmGYxffGDJZnO8vyTacj2D8MHbtDE5OqN+WpC9z5Ban/WZnRL9+uszSDG3Kzvprw3e1
FOPswlOwQUYnFoMG5pFig8DzdmVkzP5o2NYLesdOXe1kMYP+c3MkT/t/efb1k5YyV8VqOzULCGJt
Xa3cumYIsEtYa7SBGmfZAEc16JSVjYgM+IKSS6ItaEXm2d2QlDJ43cgluYoPOr/YkRvoDbvddza2
q5wrvB2Uiq6jkHzc2dCAIT7IchACeSGGGb+WkXa2oWYBaeHBu5IvDzKH8LuqG8zebNqmApJhK65v
qlugO8b/6ndjI/JfXdtiU1GHnaj49ZPnL548fvTqyddZ8W5fvp+vgNSZBZTdOaW90TEvDIcErqY4
Sf710ed42+H78W3NMF4CXljFB/5CquxiPqGrLJmA2pugBHdGVA5v8eMrD5Iw4U+ah2JHmlB0sQPo
D3067NxtPKcd7odbM02gCXWuSaP1248KWURBQ4rEZ1xTLfxxd0w3e87IhFRWQic7VBK87+C2Prtj
qNe7+dHmLNK5JeCdmen/WO2usv+JrvhA5/CY1D7Zw/Hvx/cptsOjl68yQzAbirOxnr/FwF9BOw4l
aHZAqfGWbz1fiWpjHPIpOVxVGiDtgEh4UZT+xRwVaLiS1mmH0TAmn2M0mPzhKPv9KLufMq9Kg0WW
5d/b1VK3rWCNyZYlTb72ErCsEWstv6BYMMaPGbgRbIAa8BX9eL9ZquzX67EIcdubMYfKl78QaMkP
I5UkGOpMGvvFhx7SsBectRDvRkUuzR9cwy9kWUEuyYcRWuKts5nSzADkShkzQHs0bTAYms3Pq/1u
xkb1M4s1CrwCQPqGx5MZj0+Xco+otFUT/z/401qGOWJLOFj7iRAySwTEksOgwSuSTBXBMaidJjnc
d9uwZCWwWOhHTS83PkwnLaBm/yKCc0AMjfS44ITmFBVUAmXtrkyf0ne1Wd2gdhomD9EGPS4A1MXc
EvM1d7Km4lybGG5LklbD0bCeg92WDbRTyWBOrsHKmKuzSlldxWHzXr9DDUUe/vi20PTRGrbcjJuy
EPFBVu83+Bet7XPQGDnl/0t4h5MUph9HgWL8gjJEE6phhVcQE6T44ALswcLyrrKwxVBGEDcRJIox
agSkpZLuJ0oOaATxJ7DkYm/YgzWHRwMq+76cW3rcPbAxjMpApwEmFqcCNwvYHm8PKFs21cae9Cj/
eGp9Mfk5LzcYnr/ajjA5HKd080098HL/MMQQNmQ3XqhQ7y5IAg7bzJ4uO1BMlnDwNhiVbWgHKEzu
aI13Ul0YNKvMily6Md7LnpmDxKC/+fe8ago/gRYMZ1lXaNWIFyjZyfv3GIgTYk+NVStP1ucFKtsk
PhTWLZrFHOqapQN+jFaZoyDEARHFqswgjG766QWDxKIOegLBCLJCur17sriaG0q2A7MH6Jpz72lB
hMSOHWw07MTQNBrLDmOzKGDbWPuLOcT6lGKmn/Ob9klAsZ7S6e12N5mEJB1rrPA25cwq1GC3jUtE
1FZ08ZWHRA6mWFFOhGo7FZScKrycBtkUUWFcfMAZiLZ7lrqpRksFKpe8i8yb/RoNYLZD0lpAa1ID
3Rmyr7J/vH/vH1slKMBZNYUxCQ9jwcgvs4ctliC6F7gQUJzEbFdBJMl2V+XB1ymVipE5NhB4bDlC
WtM36N4HvGmuqg+D4VmvbQTQO6C9TfnY/2EDioMfzB8CC8ZdVZA566XtHhJzMG39a39IcUCketIe
QgMxzeO38DzxMMy/bi53YSp37yYsRAITiJiKTzWtTB81u2Jeo8esOm1SDVHQXN0CH2GkzAuO+F/7
4CRx3htFgj+2Z+W387+VhqpaMcYJB0B6IGzCshgLKbhcVefzlVC4kWZse9aHrVUZp0p7TGk3m3uo
TSK20h5N+DADO4mvKHU+Fu+q048TG99ORvfFHbNBrnBbLMoLNIRAM3QnJDcZ8VuAR8KqaQ6v3+u2
NeF4i+XlpjL8cb/Tet8fRF8UtNBfgyEDiXGBLC5sg9DveYytH7f9r49efPf0uz9RLgCv8U/bB903
o13MgYAlNHUMA9adL7sm3zenHRy9GORtdQPzYOnYYlxRdzaQQ3dgc7GHmYop7cmzPwx/INXFnezJ
9RaoPPJipE0ZoIvmCgdMZFm4sQN8Z/vnKVGU+Evv3f+5H/9/Ve8cJw==
"""
import sys
import base64
import zlib
class DictImporter(object):
    """In-memory module importer backed by a ``{name: source}`` mapping.

    Instances are installed on ``sys.meta_path`` so that the bundled
    source files can be imported without ever touching the filesystem
    (legacy PEP 302 finder/loader protocol).
    """

    def __init__(self, sources):
        # Mapping of dotted module name (or "<pkg>.__init__" for packages)
        # to its source text.
        self.sources = sources

    def find_module(self, fullname, path=None):
        """Return ``self`` when *fullname* is served from the bundle, else ``None``."""
        if fullname == "argparse" and sys.version_info >= (2, 7):
            # The bundle was generated with <python2.7 (which vendors
            # argparse), but the running stdlib already provides it, so
            # defer to the real one.
            return None
        if fullname in self.sources or fullname + '.__init__' in self.sources:
            return self
        return None

    def load_module(self, fullname):
        """Compile and execute the bundled source, registering the module."""
        from types import ModuleType
        # A package is stored under "<name>.__init__"; a plain module under
        # its own name. Missing names surface as KeyError, as before.
        is_pkg = fullname not in self.sources
        key = fullname + '.__init__' if is_pkg else fullname
        code = compile(self.sources[key], fullname, 'exec')
        module = sys.modules.setdefault(fullname, ModuleType(fullname))
        module.__file__ = "%s/%s" % (__file__, fullname)
        module.__loader__ = self
        if is_pkg:
            module.__path__ = [fullname]
        do_exec(code, module.__dict__)  # noqa
        return sys.modules[fullname]

    def get_source(self, name):
        """Return the bundled source for *name* (module or package), or ``None``."""
        src = self.sources.get(name)
        if src is None:
            src = self.sources.get(name + '.__init__')
        return src
if __name__ == "__main__":
    # Bootstrap: unpack the embedded, base64-encoded, zlib-compressed pickle
    # of source files (the `sources` string defined above), install an
    # in-memory importer for them, then hand control to pytest.
    if sys.version_info >= (3, 0):
        # On py3, exec() is a function; define the helper with py3 spelling.
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii") # ensure bytes
        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
    else:
        import cPickle as pickle
        # On py2, exec is a statement; it must be hidden inside exec() so the
        # py3 parser does not reject this file.
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
    importer = DictImporter(sources)
    # Insert at the front so bundled modules shadow same-named ones on disk.
    sys.meta_path.insert(0, importer)
    entry = "import pytest; raise SystemExit(pytest.cmdline.main())"
    do_exec(entry, locals()) # noqa
| 231,272 | 74.976675 | 77 | py |
tinysegmenter | tinysegmenter-master/tests/test_tinysegmenter.py | # coding: utf-8
#
# Usage: py.test -v test_tinysegmenter.py
#
# `pip install -r requirements.txt` is required.
from __future__ import unicode_literals
import io
import subprocess
import tinysegmenter
import pytest
def test_ctypes():
    """Spot-check one character from each segmenter character class."""
    expected = [
        ('一', 'M'),  # kanji numeral
        ('〆', 'H'),  # kanji
        ('名', 'H'),
        ('あ', 'I'),  # hiragana
        ('ア', 'K'),  # katakana
        ('Z', 'A'),   # Latin letter
        ('9', 'N'),   # digit
    ]
    for char, cls in expected:
        assert tinysegmenter._ctype(char) == cls
def test_tokenize():
    """tokenize() segments Japanese text, mixed ASCII, and the empty string."""
    plain = tinysegmenter.tokenize("私の名前は中野です")
    assert plain == ["私", "の", "名前", "は", "中野", "です"]
    mixed = tinysegmenter.tokenize("TinySegmenterは25kBで書かれています。")
    assert mixed == ["TinySegmenter", "は", "2", "5", "kB", "で", "書か", "れ", "て", "い", "ます", "。"]
    assert tinysegmenter.tokenize("") == []
def test_timemachine(tmpdir):
    """Tokenize the bundled Time Machine text and diff against the golden file."""
    with io.open('tests/timemachineu8j.txt', encoding='utf-8') as fh:
        tokens = tinysegmenter.tokenize(fh.read())
    result_file = tmpdir.join("tokenized.txt")
    result_file.write_text(' | '.join(tokens), encoding='utf-8')
    # pytest shows captured stdout only on failure, which makes the path of
    # the freshly tokenized output easy to inspect.
    print(str(result_file))
    cmd = [
        "diff", "-u",
        "tests/timemachineu8j.tokenized.txt",
        str(result_file)]
    assert subprocess.call(cmd) == 0
| 1,220 | 25.543478 | 129 | py |
tinysegmenter | tinysegmenter-master/tinysegmenter/tinysegmenter.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# TinySegmenter 0.1 -- Super compact Japanese tokenizer in Javascript
# (c) 2008 Taku Kudo <taku@chasen.org>
# TinySegmenter is freely distributable under the terms of a new BSD licence.
# For details, see http://lilyx.net/pages/tinysegmenter_licence.txt
# "TinySegmenter in Python" is written by Masato Hagiwara
# See http://lilyx.net/pages/tinysegmenterp.html
# "TinySegmenter distribution is created by Tatsuro Yasukawa"
# See https://github.com/SamuraiT/tinysegmenter
from __future__ import unicode_literals
import sys
if sys.version_info[0] > 2:
    # Python 3: alias the removed Python 2 builtins so the module body below
    # can use unichr/unicode/xrange uniformly on both major versions.
    unichr = chr
    unicode = str
    xrange = range
# Public API: only tokenize() is exported; everything else is internal.
__all__ = ["tokenize"]
# Character -> class-code table driving the segmenter's features:
#   H = kanji, M = kanji numerals, I = hiragana, K = katakana,
#   A = Latin letters, N = digits; anything unlisted is 'O' (see _ctype).
_CHARDICT = {}
# (pattern, class) pairs. A pattern is either a string of individual
# characters or a (first, last) inclusive codepoint range. Entries are
# applied in order, so later patterns overwrite earlier ones (e.g. the
# kanji-numeral line re-classes part of the broad kanji range above it).
_CHARPATTERNS = [
    (("一", "龠"), 'H'), # Some chars in this pattern is replaced by next pattern
    ("一二三四五六七八九十百千万億兆", 'M'),
    ("々〆ヵヶ", 'H'),
    (('ぁ','ん'), 'I'),
    (('ァ','ヴ'), 'K'),
    ("ーー\uff9e", 'K'),
    # NOTE(review): upstream tinysegmenter uses halfwidth katakana and
    # fullwidth Latin/digit ranges for the entries below; they render here
    # identically to the ASCII ranges above — confirm the original
    # codepoints survived any re-encoding of this file.
    (('ア', 'ン'), 'K'),
    (('a', 'z'), 'A'),
    (('A', 'Z'), 'A'),
    (('a', 'z'), 'A'),
    (('A', 'Z'), 'A'),
    (('0', '9'), 'N'),
    (('0', '9'), 'N')]
for pat, cat in _CHARPATTERNS:
    if isinstance(pat, unicode):
        # Explicit character list: map each character individually.
        for c in pat:
            _CHARDICT[c] = cat
    else: # tuple
        # Inclusive codepoint range.
        for c in xrange(ord(pat[0]), ord(pat[1])+1):
            _CHARDICT[unichr(c)] = cat
_BIAS = -332
_BC1 = {"HH":6,"II":2461,"KH":406,"OH":-1378}
_BC2 = {"AA":-3267,"AI":2744,"AN":-878,"HH":-4070,"HM":-1711,"HN":4012,"HO":3761,"IA":1327,"IH":-1184,"II":-1332,"IK":1721,"IO":5492,"KI":3831,"KK":-8741,"MH":-3132,"MK":3334,"OO":-2920}
_BC3 = {"HH":996,"HI":626,"HK":-721,"HN":-1307,"HO":-836,"IH":-301,"KK":2762,"MK":1079,"MM":4034,"OA":-1652,"OH":266}
_BP1 = {"BB":295,"OB":304,"OO":-125,"UB":352}
_BP2 = {"BO":60,"OO":-1762}
_BQ1 = {"BHH":1150,"BHM":1521,"BII":-1158,"BIM":886,"BMH":1208,"BNH":449,"BOH":-91,"BOO":-2597,"OHI":451,"OIH":-296,"OKA":1851,"OKH":-1020,"OKK":904,"OOO":2965}
_BQ2 = {"BHH":118,"BHI":-1159,"BHM":466,"BIH":-919,"BKK":-1720,"BKO":864,"OHH":-1139,"OHM":-181,"OIH":153,"UHI":-1146}
_BQ3 = {"BHH":-792,"BHI":2664,"BII":-299,"BKI":419,"BMH":937,"BMM":8335,"BNN":998,"BOH":775,"OHH":2174,"OHM":439,"OII":280,"OKH":1798,"OKI":-793,"OKO":-2242,"OMH":-2402,"OOO":11699}
_BQ4 = {"BHH":-3895,"BIH":3761,"BII":-4654,"BIK":1348,"BKK":-1806,"BMI":-3385,"BOO":-12396,"OAH":926,"OHH":266,"OHK":-2036,"ONN":-973}
_BW1 = {",と":660,",同":727,"B1あ":1404,"B1同":542,"、と":660,"、同":727,"」と":1682,"あっ":1505,"いう":1743,"いっ":-2055,"いる":672,"うし":-4817,"うん":665,"から":3472,"がら":600,"こう":-790,"こと":2083,"こん":-1262,"さら":-4143,"さん":4573,"した":2641,"して":1104,"すで":-3399,"そこ":1977,"それ":-871,"たち":1122,"ため":601,"った":3463,"つい":-802,"てい":805,"てき":1249,"でき":1127,"です":3445,"では":844,"とい":-4915,"とみ":1922,"どこ":3887,"ない":5713,"なっ":3015,"など":7379,"なん":-1113,"にし":2468,"には":1498,"にも":1671,"に対":-912,"の一":-501,"の中":741,"ませ":2448,"まで":1711,"まま":2600,"まる":-2155,"やむ":-1947,"よっ":-2565,"れた":2369,"れで":-913,"をし":1860,"を見":731,"亡く":-1886,"京都":2558,"取り":-2784,"大き":-2604,"大阪":1497,"平方":-2314,"引き":-1336,"日本":-195,"本当":-2423,"毎日":-2113,"目指":-724,"B1あ":1404,"B1同":542,"」と":1682}
_BW2 = {"..":-11822,"11":-669,"――":-5730,"−−":-13175,"いう":-1609,"うか":2490,"かし":-1350,"かも":-602,"から":-7194,"かれ":4612,"がい":853,"がら":-3198,"きた":1941,"くな":-1597,"こと":-8392,"この":-4193,"させ":4533,"され":13168,"さん":-3977,"しい":-1819,"しか":-545,"した":5078,"して":972,"しな":939,"その":-3744,"たい":-1253,"たた":-662,"ただ":-3857,"たち":-786,"たと":1224,"たは":-939,"った":4589,"って":1647,"っと":-2094,"てい":6144,"てき":3640,"てく":2551,"ては":-3110,"ても":-3065,"でい":2666,"でき":-1528,"でし":-3828,"です":-4761,"でも":-4203,"とい":1890,"とこ":-1746,"とと":-2279,"との":720,"とみ":5168,"とも":-3941,"ない":-2488,"なが":-1313,"など":-6509,"なの":2614,"なん":3099,"にお":-1615,"にし":2748,"にな":2454,"によ":-7236,"に対":-14943,"に従":-4688,"に関":-11388,"のか":2093,"ので":-7059,"のに":-6041,"のの":-6125,"はい":1073,"はが":-1033,"はず":-2532,"ばれ":1813,"まし":-1316,"まで":-6621,"まれ":5409,"めて":-3153,"もい":2230,"もの":-10713,"らか":-944,"らし":-1611,"らに":-1897,"りし":651,"りま":1620,"れた":4270,"れて":849,"れば":4114,"ろう":6067,"われ":7901,"を通":-11877,"んだ":728,"んな":-4115,"一人":602,"一方":-1375,"一日":970,"一部":-1051,"上が":-4479,"会社":-1116,"出て":2163,"分の":-7758,"同党":970,"同日":-913,"大阪":-2471,"委員":-1250,"少な":-1050,"年度":-8669,"年間":-1626,"府県":-2363,"手権":-1982,"新聞":-4066,"日新":-722,"日本":-7068,"日米":3372,"曜日":-601,"朝鮮":-2355,"本人":-2697,"東京":-1543,"然と":-1384,"社会":-1276,"立て":-990,"第に":-1612,"米国":-4268,"11":-669,"グ":1319};
_BW3 = {"あた":-2194,"あり":719,"ある":3846,"い.":-1185,"い。":-1185,"いい":5308,"いえ":2079,"いく":3029,"いた":2056,"いっ":1883,"いる":5600,"いわ":1527,"うち":1117,"うと":4798,"えと":1454,"か.":2857,"か。":2857,"かけ":-743,"かっ":-4098,"かに":-669,"から":6520,"かり":-2670,"が,":1816,"が、":1816,"がき":-4855,"がけ":-1127,"がっ":-913,"がら":-4977,"がり":-2064,"きた":1645,"けど":1374,"こと":7397,"この":1542,"ころ":-2757,"さい":-714,"さを":976,"し,":1557,"し、":1557,"しい":-3714,"した":3562,"して":1449,"しな":2608,"しま":1200,"す.":-1310,"す。":-1310,"する":6521,"ず,":3426,"ず、":3426,"ずに":841,"そう":428,"た.":8875,"た。":8875,"たい":-594,"たの":812,"たり":-1183,"たる":-853,"だ.":4098,"だ。":4098,"だっ":1004,"った":-4748,"って":300,"てい":6240,"てお":855,"ても":302,"です":1437,"でに":-1482,"では":2295,"とう":-1387,"とし":2266,"との":541,"とも":-3543,"どう":4664,"ない":1796,"なく":-903,"など":2135,"に,":-1021,"に、":-1021,"にし":1771,"にな":1906,"には":2644,"の,":-724,"の、":-724,"の子":-1000,"は,":1337,"は、":1337,"べき":2181,"まし":1113,"ます":6943,"まっ":-1549,"まで":6154,"まれ":-793,"らし":1479,"られ":6820,"るる":3818,"れ,":854,"れ、":854,"れた":1850,"れて":1375,"れば":-3246,"れる":1091,"われ":-605,"んだ":606,"んで":798,"カ月":990,"会議":860,"入り":1232,"大会":2217,"始め":1681,"市":965,"新聞":-5055,"日,":974,"日、":974,"社会":2024,"カ月":990};
_TC1 = {"AAA":1093,"HHH":1029,"HHM":580,"HII":998,"HOH":-390,"HOM":-331,"IHI":1169,"IOH":-142,"IOI":-1015,"IOM":467,"MMH":187,"OOI":-1832};
_TC2 = {"HHO":2088,"HII":-1023,"HMM":-1154,"IHI":-1965,"KKH":703,"OII":-2649};
_TC3 = {"AAA":-294,"HHH":346,"HHI":-341,"HII":-1088,"HIK":731,"HOH":-1486,"IHH":128,"IHI":-3041,"IHO":-1935,"IIH":-825,"IIM":-1035,"IOI":-542,"KHH":-1216,"KKA":491,"KKH":-1217,"KOK":-1009,"MHH":-2694,"MHM":-457,"MHO":123,"MMH":-471,"NNH":-1689,"NNO":662,"OHO":-3393};
_TC4 = {"HHH":-203,"HHI":1344,"HHK":365,"HHM":-122,"HHN":182,"HHO":669,"HIH":804,"HII":679,"HOH":446,"IHH":695,"IHO":-2324,"IIH":321,"III":1497,"IIO":656,"IOO":54,"KAK":4845,"KKA":3386,"KKK":3065,"MHH":-405,"MHI":201,"MMH":-241,"MMM":661,"MOM":841};
_TQ1 = {"BHHH":-227,"BHHI":316,"BHIH":-132,"BIHH":60,"BIII":1595,"BNHH":-744,"BOHH":225,"BOOO":-908,"OAKK":482,"OHHH":281,"OHIH":249,"OIHI":200,"OIIH":-68};
_TQ2 = {"BIHH":-1401,"BIII":-1033,"BKAK":-543,"BOOO":-5591};
_TQ3 = {"BHHH":478,"BHHM":-1073,"BHIH":222,"BHII":-504,"BIIH":-116,"BIII":-105,"BMHI":-863,"BMHM":-464,"BOMH":620,"OHHH":346,"OHHI":1729,"OHII":997,"OHMH":481,"OIHH":623,"OIIH":1344,"OKAK":2792,"OKHH":587,"OKKA":679,"OOHH":110,"OOII":-685};
_TQ4 = {"BHHH":-721,"BHHM":-3604,"BHII":-966,"BIIH":-607,"BIII":-2181,"OAAA":-2763,"OAKK":180,"OHHH":-294,"OHHI":2446,"OHHO":480,"OHIH":-1573,"OIHH":1935,"OIHI":-493,"OIIH":626,"OIII":-4007,"OKAK":-8156};
_TW1 = {"につい":-4681,"東京都":2026};
_TW2 = {"ある程":-2049,"いった":-1256,"ころが":-2434,"しょう":3873,"その後":-4430,"だって":-1049,"ていた":1833,"として":-4657,"ともに":-4517,"もので":1882,"一気に":-792,"初めて":-1512,"同時に":-8097,"大きな":-1255,"対して":-2721,"社会党":-3216};
_TW3 = {"いただ":-1734,"してい":1314,"として":-4314,"につい":-5483,"にとっ":-5989,"に当た":-6247,"ので,":-727,"ので、":-727,"のもの":-600,"れから":-3752,"十二月":-2287};
_TW4 = {"いう.":8576,"いう。":8576,"からな":-2348,"してい":2958,"たが,":1516,"たが、":1516,"ている":1538,"という":1349,"ました":5543,"ません":1097,"ようと":-4258,"よると":5865};
_UC1 = {"A":484,"K":93,"M":645,"O":-505};
_UC2 = {"A":819,"H":1059,"I":409,"M":3987,"N":5775,"O":646};
_UC3 = {"A":-1370,"I":2311};
_UC4 = {"A":-2643,"H":1809,"I":-1032,"K":-3450,"M":3565,"N":3876,"O":6646};
_UC5 = {"H":313,"I":-1238,"K":-799,"M":539,"O":-831};
_UC6 = {"H":-506,"I":-253,"K":87,"M":247,"O":-387};
_UP1 = {"O":-214};
_UP2 = {"B":69,"O":935};
_UP3 = {"B":189};
_UQ1 = {"BH":21,"BI":-12,"BK":-99,"BN":142,"BO":-56,"OH":-95,"OI":477,"OK":410,"OO":-2422};
_UQ2 = {"BH":216,"BI":113,"OK":1759};
_UQ3 = {"BA":-479,"BH":42,"BI":1913,"BK":-7198,"BM":3160,"BN":6427,"BO":14761,"OI":-827,"ON":-3212};
_UW1 = {",":156,"、":156,"「":-463,"あ":-941,"う":-127,"が":-553,"き":121,"こ":505,"で":-201,"と":-547,"ど":-123,"に":-789,"の":-185,"は":-847,"も":-466,"や":-470,"よ":182,"ら":-292,"り":208,"れ":169,"を":-446,"ん":-137,"・":-135,"主":-402,"京":-268,"区":-912,"午":871,"国":-460,"大":561,"委":729,"市":-411,"日":-141,"理":361,"生":-408,"県":-386,"都":-718,"「":-463,"・":-135};
_UW2 = {",":-829,"、":-829,"〇":892,"「":-645,"」":3145,"あ":-538,"い":505,"う":134,"お":-502,"か":1454,"が":-856,"く":-412,"こ":1141,"さ":878,"ざ":540,"し":1529,"す":-675,"せ":300,"そ":-1011,"た":188,"だ":1837,"つ":-949,"て":-291,"で":-268,"と":-981,"ど":1273,"な":1063,"に":-1764,"の":130,"は":-409,"ひ":-1273,"べ":1261,"ま":600,"も":-1263,"や":-402,"よ":1639,"り":-579,"る":-694,"れ":571,"を":-2516,"ん":2095,"ア":-587,"カ":306,"キ":568,"ッ":831,"三":-758,"不":-2150,"世":-302,"中":-968,"主":-861,"事":492,"人":-123,"会":978,"保":362,"入":548,"初":-3025,"副":-1566,"北":-3414,"区":-422,"大":-1769,"天":-865,"太":-483,"子":-1519,"学":760,"実":1023,"小":-2009,"市":-813,"年":-1060,"強":1067,"手":-1519,"揺":-1033,"政":1522,"文":-1355,"新":-1682,"日":-1815,"明":-1462,"最":-630,"朝":-1843,"本":-1650,"東":-931,"果":-665,"次":-2378,"民":-180,"気":-1740,"理":752,"発":529,"目":-1584,"相":-242,"県":-1165,"立":-763,"第":810,"米":509,"自":-1353,"行":838,"西":-744,"見":-3874,"調":1010,"議":1198,"込":3041,"開":1758,"間":-1257,"「":-645,"」":3145,"ッ":831,"ア":-587,"カ":306,"キ":568};
_UW3 = {",":4889,"1":-800,"−":-1723,"、":4889,"々":-2311,"〇":5827,"」":2670,"〓":-3573,"あ":-2696,"い":1006,"う":2342,"え":1983,"お":-4864,"か":-1163,"が":3271,"く":1004,"け":388,"げ":401,"こ":-3552,"ご":-3116,"さ":-1058,"し":-395,"す":584,"せ":3685,"そ":-5228,"た":842,"ち":-521,"っ":-1444,"つ":-1081,"て":6167,"で":2318,"と":1691,"ど":-899,"な":-2788,"に":2745,"の":4056,"は":4555,"ひ":-2171,"ふ":-1798,"へ":1199,"ほ":-5516,"ま":-4384,"み":-120,"め":1205,"も":2323,"や":-788,"よ":-202,"ら":727,"り":649,"る":5905,"れ":2773,"わ":-1207,"を":6620,"ん":-518,"ア":551,"グ":1319,"ス":874,"ッ":-1350,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278,"・":-3794,"一":-1619,"下":-1759,"世":-2087,"両":3815,"中":653,"主":-758,"予":-1193,"二":974,"人":2742,"今":792,"他":1889,"以":-1368,"低":811,"何":4265,"作":-361,"保":-2439,"元":4858,"党":3593,"全":1574,"公":-3030,"六":755,"共":-1880,"円":5807,"再":3095,"分":457,"初":2475,"別":1129,"前":2286,"副":4437,"力":365,"動":-949,"務":-1872,"化":1327,"北":-1038,"区":4646,"千":-2309,"午":-783,"協":-1006,"口":483,"右":1233,"各":3588,"合":-241,"同":3906,"和":-837,"員":4513,"国":642,"型":1389,"場":1219,"外":-241,"妻":2016,"学":-1356,"安":-423,"実":-1008,"家":1078,"小":-513,"少":-3102,"州":1155,"市":3197,"平":-1804,"年":2416,"広":-1030,"府":1605,"度":1452,"建":-2352,"当":-3885,"得":1905,"思":-1291,"性":1822,"戸":-488,"指":-3973,"政":-2013,"教":-1479,"数":3222,"文":-1489,"新":1764,"日":2099,"旧":5792,"昨":-661,"時":-1248,"曜":-951,"最":-937,"月":4125,"期":360,"李":3094,"村":364,"東":-805,"核":5156,"森":2438,"業":484,"氏":2613,"民":-1694,"決":-1073,"法":1868,"海":-495,"無":979,"物":461,"特":-3850,"生":-273,"用":914,"町":1215,"的":7313,"直":-1835,"省":792,"県":6293,"知":-1528,"私":4231,"税":401,"立":-960,"第":1201,"米":7767,"系":3066,"約":3663,"級":1384,"統":-4229,"総":1163,"線":1255,"者":6457,"能":725,"自":-2869,"英":785,"見":1044,"調":-562,"財":-733,"費":1777,"車":1835,"軍":1375,"込":-1504,"通":-1136,"選":-681,"郎":1026,"郡":4404,"部":1200,"金":2163,"長":421,"開":-1432,"間":1302,"関":-1282,"雨":2009,"電":-1045,"非":2066,"駅":1620,"1":-800,"」":2670,"・":-3794,"ッ":-1350,"ア":551,"ス":874,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278};
_UW4 = {",":3930,".":3508,"―":-4841,"、":3930,"。":3508,"〇":4999,"「":1895,"」":3798,"〓":-5156,"あ":4752,"い":-3435,"う":-640,"え":-2514,"お":2405,"か":530,"が":6006,"き":-4482,"ぎ":-3821,"く":-3788,"け":-4376,"げ":-4734,"こ":2255,"ご":1979,"さ":2864,"し":-843,"じ":-2506,"す":-731,"ず":1251,"せ":181,"そ":4091,"た":5034,"だ":5408,"ち":-3654,"っ":-5882,"つ":-1659,"て":3994,"で":7410,"と":4547,"な":5433,"に":6499,"ぬ":1853,"ね":1413,"の":7396,"は":8578,"ば":1940,"ひ":4249,"び":-4134,"ふ":1345,"へ":6665,"べ":-744,"ほ":1464,"ま":1051,"み":-2082,"む":-882,"め":-5046,"も":4169,"ゃ":-2666,"や":2795,"ょ":-1544,"よ":3351,"ら":-2922,"り":-9726,"る":-14896,"れ":-2613,"ろ":-4570,"わ":-1783,"を":13150,"ん":-2352,"カ":2145,"コ":1789,"セ":1287,"ッ":-724,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637,"・":-4371,"ー":-11870,"一":-2069,"中":2210,"予":782,"事":-190,"井":-1768,"人":1036,"以":544,"会":950,"体":-1286,"作":530,"側":4292,"先":601,"党":-2006,"共":-1212,"内":584,"円":788,"初":1347,"前":1623,"副":3879,"力":-302,"動":-740,"務":-2715,"化":776,"区":4517,"協":1013,"参":1555,"合":-1834,"和":-681,"員":-910,"器":-851,"回":1500,"国":-619,"園":-1200,"地":866,"場":-1410,"塁":-2094,"士":-1413,"多":1067,"大":571,"子":-4802,"学":-1397,"定":-1057,"寺":-809,"小":1910,"屋":-1328,"山":-1500,"島":-2056,"川":-2667,"市":2771,"年":374,"庁":-4556,"後":456,"性":553,"感":916,"所":-1566,"支":856,"改":787,"政":2182,"教":704,"文":522,"方":-856,"日":1798,"時":1829,"最":845,"月":-9066,"木":-485,"来":-442,"校":-360,"業":-1043,"氏":5388,"民":-2716,"気":-910,"沢":-939,"済":-543,"物":-735,"率":672,"球":-1267,"生":-1286,"産":-1101,"田":-2900,"町":1826,"的":2586,"目":922,"省":-3485,"県":2997,"空":-867,"立":-2112,"第":788,"米":2937,"系":786,"約":2171,"経":1146,"統":-1169,"総":940,"線":-994,"署":749,"者":2145,"能":-730,"般":-852,"行":-792,"規":792,"警":-1184,"議":-244,"谷":-1000,"賞":730,"車":-1481,"軍":1158,"輪":-1433,"込":-3370,"近":929,"道":-1291,"選":2596,"郎":-4866,"都":1192,"野":-1100,"銀":-2213,"長":357,"間":-2344,"院":-2297,"際":-2604,"電":-878,"領":-1659,"題":-792,"館":-1984,"首":1749,"高":2120,"「":1895,"」":3798,"・":-4371,"ッ":-724,"ー":-11870,"カ":2145,"コ":1789,"セ":1287,"ト":-403,"メ":-1635
,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637};
_UW5 = {",":465,".":-299,"1":-514,"E2":-32768,"]":-2762,"、":465,"。":-299,"「":363,"あ":1655,"い":331,"う":-503,"え":1199,"お":527,"か":647,"が":-421,"き":1624,"ぎ":1971,"く":312,"げ":-983,"さ":-1537,"し":-1371,"す":-852,"だ":-1186,"ち":1093,"っ":52,"つ":921,"て":-18,"で":-850,"と":-127,"ど":1682,"な":-787,"に":-1224,"の":-635,"は":-578,"べ":1001,"み":502,"め":865,"ゃ":3350,"ょ":854,"り":-208,"る":429,"れ":504,"わ":419,"を":-1264,"ん":327,"イ":241,"ル":451,"ン":-343,"中":-871,"京":722,"会":-1153,"党":-654,"務":3519,"区":-901,"告":848,"員":2104,"大":-1296,"学":-548,"定":1785,"嵐":-1304,"市":-2991,"席":921,"年":1763,"思":872,"所":-814,"挙":1618,"新":-1682,"日":218,"月":-4353,"査":932,"格":1356,"機":-1508,"氏":-1347,"田":240,"町":-3912,"的":-3149,"相":1319,"省":-1052,"県":-4003,"研":-997,"社":-278,"空":-813,"統":1955,"者":-2233,"表":663,"語":-1073,"議":1219,"選":-1018,"郎":-368,"長":786,"間":1191,"題":2368,"館":-689,"1":-514,"E2":-32768,"「":363,"イ":241,"ル":451,"ン":-343};
_UW6 = {",":227,".":808,"1":-270,"E1":306,"、":227,"。":808,"あ":-307,"う":189,"か":241,"が":-73,"く":-121,"こ":-200,"じ":1782,"す":383,"た":-428,"っ":573,"て":-1014,"で":101,"と":-105,"な":-253,"に":-149,"の":-417,"は":-236,"も":-206,"り":187,"る":-135,"を":195,"ル":-673,"ン":-496,"一":-277,"中":201,"件":-800,"会":624,"前":302,"区":1792,"員":-1212,"委":798,"学":-960,"市":887,"広":-695,"後":535,"業":-697,"相":753,"社":-507,"福":974,"空":-822,"者":1811,"連":463,"郎":1082,"1":-270,"E1":306,"ル":-673,"ン":-496};
def _ctype(c):
    """Return the character-type class of `c`, or 'O' (other) when unknown."""
    if c in _CHARDICT:
        return _CHARDICT[c]
    return 'O'
def tokenize(text):
    """Split Japanese `text` into a list of word tokens.

    Implements the TinySegmenter algorithm: every candidate boundary is
    scored with the pre-trained character / character-type n-gram weight
    tables defined above (_UP*/_BP*/_UW*/_BW*/_TW*/_UC*/_BC*/_TC*/_UQ*/
    _BQ*/_TQ*), and a new token is started whenever the accumulated score
    is positive.
    """
    if not text: return []
    result = []
    # pad with begin/end sentinels so the 6-char sliding window is always full
    seg = ["B3","B2","B1"]
    ctype = ["O","O","O"]
    seg.extend(text)
    ctype.extend(map(_ctype, text))
    seg.append("E1")
    seg.append("E2")
    seg.append("E3")
    ctype.append("O")
    ctype.append("O")
    ctype.append("O")
    # initial values
    word = seg[3]
    p1 = "U"
    p2 = "U"
    p3 = "U"
    w1 = seg[0]
    w2 = seg[1]
    w3 = seg[2]
    w4 = seg[3]
    w5 = seg[4]
    w6 = seg[5]
    w2w3 = w2 + w3
    w3w4 = w3 + w4
    w4w5 = w4 + w5
    w1w2w3 = w1 + w2 + w3
    w2w3w4 = w2 + w3 + w4
    w3w4w5 = w3 + w4 + w5
    w4w5w6 = w4 + w5 + w6
    c1 = ctype[0]
    c2 = ctype[1]
    c3 = ctype[2]
    c4 = ctype[3]
    c5 = ctype[4]
    c6 = ctype[5]
    c2c3 = c2 + c3
    c3c4 = c3 + c4
    c4c5 = c4 + c5
    c1c2c3 = c1 + c2 + c3
    c2c3c4 = c2 + c3 + c4
    c3c4c5 = c3 + c4 + c5
    c4c5c6 = c4 + c5 + c6
    # method cache: bind each table's dict.get once to avoid repeated
    # attribute lookups inside the hot scoring loop
    up1 = _UP1.get
    up2 = _UP2.get
    up3 = _UP3.get
    bp1 = _BP1.get
    bp2 = _BP2.get
    uw1 = _UW1.get
    uw2 = _UW2.get
    uw3 = _UW3.get
    uw4 = _UW4.get
    uw5 = _UW5.get
    uw6 = _UW6.get
    bw1 = _BW1.get
    bw2 = _BW2.get
    bw3 = _BW3.get
    tw1 = _TW1.get
    tw2 = _TW2.get
    tw3 = _TW3.get
    tw4 = _TW4.get
    uc1 = _UC1.get
    uc2 = _UC2.get
    uc3 = _UC3.get
    uc4 = _UC4.get
    uc5 = _UC5.get
    uc6 = _UC6.get
    bc1 = _BC1.get
    bc2 = _BC2.get
    bc3 = _BC3.get
    tc1 = _TC1.get
    tc2 = _TC2.get
    tc3 = _TC3.get
    tc4 = _TC4.get
    # tc5 = _TC5.get
    uq1 = _UQ1.get
    uq2 = _UQ2.get
    uq3 = _UQ3.get
    bq1 = _BQ1.get
    bq2 = _BQ2.get
    bq3 = _BQ3.get
    bq4 = _BQ4.get
    tq1 = _TQ1.get
    tq2 = _TQ2.get
    tq3 = _TQ3.get
    tq4 = _TQ4.get
    # bugfix: `range` instead of py2-only `xrange` (NameError on Python 3);
    # semantics are identical here
    for i in range(4, len(seg) - 3):
        score = _BIAS
        # shift the 6-character window one position to the right
        w1 = w2
        w2 = w3
        w3 = w4
        w4 = w5
        w5 = w6
        w6 = seg[i+2]
        c1 = c2
        c2 = c3
        c3 = c4
        c4 = c5
        c5 = c6
        c6 = ctype[i+2]
        w2w3 = w3w4
        w3w4 = w4w5
        w4w5 = w4 + w5
        w1w2w3 = w2w3w4
        w2w3w4 = w3w4w5
        w3w4w5 = w4w5w6
        w4w5w6 = w4w5 + w6
        c2c3 = c3c4
        c3c4 = c4c5
        c4c5 = c4 + c5
        c1c2c3 = c2c3c4
        c2c3c4 = c3c4c5
        c3c4c5 = c4c5c6
        c4c5c6 = c4c5 + c6
        # accumulate the weights of every firing feature (missing -> 0)
        score += up1(p1, 0)
        score += up2(p2, 0)
        score += up3(p3, 0)
        score += bp1(p1 + p2, 0)
        score += bp2(p2 + p3, 0)
        score += uw1(w1, 0)
        score += uw2(w2, 0)
        score += uw3(w3, 0)
        score += uw4(w4, 0)
        score += uw5(w5, 0)
        score += uw6(w6, 0)
        score += bw1(w2w3, 0)
        score += bw2(w3w4, 0)
        score += bw3(w4w5, 0)
        score += tw1(w1w2w3, 0)
        score += tw2(w2w3w4, 0)
        score += tw3(w3w4w5, 0)
        score += tw4(w4w5w6, 0)
        score += uc1(c1, 0)
        score += uc2(c2, 0)
        score += uc3(c3, 0)
        score += uc4(c4, 0)
        score += uc5(c5, 0)
        score += uc6(c6, 0)
        score += bc1(c2c3, 0)
        score += bc2(c3c4, 0)
        score += bc3(c4c5, 0)
        score += tc1(c1c2c3, 0)
        score += tc2(c2c3c4, 0)
        score += tc3(c3c4c5, 0)
        score += tc4(c4c5c6, 0)
        # score += tc5(c4c5c6, 0)
        score += uq1(p1 + c1, 0)
        score += uq2(p2 + c2, 0)
        score += uq3(p3 + c3, 0)
        score += bq1(p2 + c2c3, 0)
        score += bq2(p2 + c3c4, 0)
        score += bq3(p3 + c2c3, 0)
        score += bq4(p3 + c3c4, 0)
        score += tq1(p2 + c1c2c3, 0)
        score += tq2(p2 + c2c3c4, 0)
        score += tq3(p3 + c1c2c3, 0)
        score += tq4(p3 + c2c3c4, 0)
        p = "O"
        if score > 0:
            # positive score: close the current token, start a new one
            result.append(word)
            word = ""
            p = "B"
        p1 = p2
        p2 = p3
        p3 = p
        word += seg[i]
    result.append(word)
    return result
def demo():
    """Print a sample segmentation to demonstrate the tokenizer."""
    tokens = tokenize("私の名前は中野です")
    print(tokens)


if __name__ == '__main__':
    demo()
| 18,779 | 63.315068 | 2,039 | py |
tinysegmenter | tinysegmenter-master/tinysegmenter/__init__.py | from .tinysegmenter import tokenize, _ctype
| 44 | 21.5 | 43 | py |
introd | introd-main/cfvqa/engine.py | import os
import math
import time
import torch
import datetime
import threading
import numpy as np
from bootstrap.lib import utils
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class Engine(object):
"""Contains training and evaluation procedures
"""
    def __init__(self):
        # registry of callback lists indexed by hook name (see register_hook/hook)
        self.hooks = {}
        # index of the current training epoch
        self.epoch = 0
        # the following collaborators are wired in by the factory functions in run.py
        self.dataset = None
        self.model = None
        self.optimizer = None
        self.view = None
        # best value observed so far for each saving criteria (see is_best)
        self.best_out = {}
        # generate_view will be executed at the end of each
        # training and evaluation epoch
        self.register_hook('train_on_flush', self.generate_view)
        self.register_hook('eval_on_flush', self.generate_view)
def generate_view(self):
""" Generate a view.html via an asynchronous call to `self.view.generate()`
"""
if self.view is not None:
threading.Thread(target=self.view.generate).start()
# path_opts = os.path.join(Options()['exp']['dir'], 'options.yaml')
# os.system('python -m bootstrap.views.view --path_opts {}'.format(path_opts))
def load_state_dict(self, state):
"""
"""
self.epoch = state['epoch']
self.best_out = state['best_out']
def state_dict(self):
"""
"""
state = {}
state['epoch'] = self.epoch
state['best_out'] = self.best_out
return state
def hook(self, name):
""" Run all the callback functions that have been registered
for a hook.
Args:
name: the name of the hook
"""
if name in self.hooks:
for func in self.hooks[name]:
func()
def register_hook(self, name, func):
""" Register a callback function to be triggered when the hook
is called.
Args:
name: the name of the hook
func: the callback function (no argument)
Example usage:
.. code-block:: python
def func():
print('hooked!')
engine.register_hook('train_on_start_batch', func)
"""
if name not in self.hooks:
self.hooks[name] = []
self.hooks[name].append(func)
    def resume(self, map_location=None):
        """ Resume a checkpoint using the `bootstrap.lib.options.Options`

        Args:
            map_location: forwarded to torch.load (e.g. to remap GPU tensors to CPU)
        """
        Logger()('Loading {} checkpoint'.format(Options()['exp']['resume']))
        self.load(Options()['exp']['dir'],
            Options()['exp']['resume'],
            self.model, self.optimizer,
            map_location=map_location)
        # self.epoch += 1
        if self.epoch > 0:
            # restart training at the epoch after the one stored in the checkpoint
            self.epoch += 1
    def eval(self):
        """ Launch evaluation procedures: a single eval_epoch on the
        evaluation split (when one is configured in the options).
        """
        Logger()('Launching evaluation procedures')
        if Options()['dataset']['eval_split']:
            # self.epoch-1 to be equal to the same resumed epoch
            # or to be equal to -1 when not resumed
            self.eval_epoch(self.model, self.dataset['eval'], self.epoch-1, logs_json=True)
        Logger()('Ending evaluation procedures')
    def train(self):
        """ Launch training procedures: loop over epochs, optionally
        evaluating after each one and saving 'best_*' / 'last' checkpoints.

        List of the hooks:

        - train_on_start: before the full training procedure
        """
        Logger()('Launching training procedures')
        self.hook('train_on_start')
        while self.epoch < Options()['engine']['nb_epochs']:
            self.train_epoch(self.model, self.dataset['train'], self.optimizer, self.epoch)
            if Options()['dataset']['eval_split']:
                out = self.eval_epoch(self.model, self.dataset['eval'], self.epoch)
                # save one "best" checkpoint per saving criteria (e.g. 'loss:min')
                if 'saving_criteria' in Options()['engine'] and Options()['engine']['saving_criteria'] is not None:
                    for saving_criteria in Options()['engine']['saving_criteria']:
                        if self.is_best(out, saving_criteria):
                            name = saving_criteria.split(':')[0]
                            Logger()('Saving best checkpoint for strategy {}'.format(name))
                            self.save(Options()['exp']['dir'], 'best_{}'.format(name), self.model, self.optimizer)
            Logger()('Saving last checkpoint')
            self.save(Options()['exp']['dir'], 'last', self.model, self.optimizer)
            self.epoch += 1
        Logger()('Ending training procedures')
    def train_epoch(self, model, dataset, optimizer, epoch, mode='train'):
        """ Launch training procedures for one epoch.

        Args:
            model: callable taking a batch and returning a dict with a 'loss' key
            dataset: dataset providing `make_batch_loader()` and `split`
            optimizer: optimizer whose `zero_grad`/`step` drive the update
            epoch: index of the current epoch (also reseeds the RNGs)
            mode: prefix used for hook names and log keys

        List of the hooks:

        - train_on_start_epoch: before the training procedure for an epoch
        - train_on_start_batch: before the training procedure for a batch
        - train_on_forward: after the forward of the model
        - train_on_backward: after the backward of the loss
        - train_on_update: after the optimization step
        - train_on_print: after the print to the terminal
        - train_on_end_batch: end of the training procedure for a batch
        - train_on_end_epoch: before saving the logs in logs.json
        - train_on_flush: end of the training procedure for an epoch
        """
        utils.set_random_seed(Options()['misc']['seed'] + epoch) # to be able to reproduce exps on reload
        Logger()('Training model on {}set for epoch {}'.format(dataset.split, epoch))
        model.train()
        timer = {
            'begin': time.time(),
            'elapsed': time.time(),
            'process': None,
            'load': None,
            'run_avg': 0
        }
        out_epoch = {}
        batch_loader = dataset.make_batch_loader()
        self.hook(f'{mode}_on_start_epoch')
        for i, batch in enumerate(batch_loader):
            timer['load'] = time.time() - timer['elapsed']
            self.hook(f'{mode}_on_start_batch')
            optimizer.zero_grad()
            out = model(batch)
            self.hook(f'{mode}_on_forward')
            # skip the backward pass when the loss diverged to NaN
            if not torch.isnan(out['loss']):
                out['loss'].backward()
            else:
                Logger()('NaN detected')
            #torch.cuda.synchronize()
            self.hook(f'{mode}_on_backward')
            optimizer.step()
            #torch.cuda.synchronize()
            self.hook(f'{mode}_on_update')
            timer['process'] = time.time() - timer['elapsed']
            if i == 0:
                timer['run_avg'] = timer['process']
            else:
                # exponential moving average of the per-batch processing time
                timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2
            Logger().log_value(f'{mode}_batch.epoch', epoch, should_print=False)
            Logger().log_value(f'{mode}_batch.batch', i, should_print=False)
            Logger().log_value(f'{mode}_batch.timer.process', timer['process'], should_print=False)
            Logger().log_value(f'{mode}_batch.timer.load', timer['load'], should_print=False)
            # accumulate every scalar output of the model for epoch-level averaging
            for key, value in out.items():
                if torch.is_tensor(value):
                    if value.dim() <= 1:
                        value = value.item() # get number from a torch scalar
                    else:
                        continue
                if type(value) == list:
                    continue
                if type(value) == dict:
                    continue
                if key not in out_epoch:
                    out_epoch[key] = []
                out_epoch[key].append(value)
                Logger().log_value(f'{mode}_batch.'+key, value, should_print=False)
            if i % Options()['engine']['print_freq'] == 0:
                Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
                Logger()("{} elapsed: {} | left: {}".format(' '*len(mode),
                    datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
                    datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
                Logger()("{} process: {:.5f} | load: {:.5f}".format(' '*len(mode), timer['process'], timer['load']))
                Logger()("{} loss: {:.5f}".format(' '*len(mode), out['loss'].data.item()))
                self.hook(f'{mode}_on_print')
            timer['elapsed'] = time.time()
            self.hook(f'{mode}_on_end_batch')
            # debug mode: only run a few batches per epoch
            if Options()['engine']['debug']:
                if i > 2:
                    break
        Logger().log_value(f'{mode}_epoch.epoch', epoch, should_print=True)
        for key, value in out_epoch.items():
            # Logger().log_value(f'{mode}_epoch.'+key, sum(value)/len(value), should_print=True)
            Logger().log_value(f'{mode}_epoch.'+key, np.asarray(value).mean(), should_print=True)
        self.hook(f'{mode}_on_end_epoch')
        Logger().flush()
        self.hook(f'{mode}_on_flush')
    def eval_epoch(self, model, dataset, epoch, mode='eval', logs_json=True):
        """ Launch evaluation procedures for one epoch.

        List of the hooks (``mode='eval'`` by default):

        - mode_on_start_epoch: before the evaluation procedure for an epoch
        - mode_on_start_batch: before the evaluation procedure for a batch
        - mode_on_forward: after the forward of the model
        - mode_on_print: after the print to the terminal
        - mode_on_end_batch: end of the evaluation procedure for a batch
        - mode_on_end_epoch: before saving the logs in logs.json
        - mode_on_flush: end of the evaluation procedure for an epoch

        Returns:
            out(dict): mean of all the scalar outputs of the model, indexed by output name, for this epoch
        """
        utils.set_random_seed(Options()['misc']['seed'] + epoch) # to be able to reproduce exps on reload
        Logger()('Evaluating model on {}set for epoch {}'.format(dataset.split, epoch))
        model.eval()
        timer = {
            'begin': time.time(),
            'elapsed': time.time(),
            'process': None,
            'load': None,
            'run_avg': 0
        }
        out_epoch = {}
        batch_loader = dataset.make_batch_loader()
        self.hook('{}_on_start_epoch'.format(mode))
        for i, batch in enumerate(batch_loader):
            timer['load'] = time.time() - timer['elapsed']
            self.hook('{}_on_start_batch'.format(mode))
            # no gradients needed at evaluation time
            with torch.no_grad():
                out = model(batch)
            #torch.cuda.synchronize()
            self.hook('{}_on_forward'.format(mode))
            timer['process'] = time.time() - timer['elapsed']
            if i == 0:
                timer['run_avg'] = timer['process']
            else:
                # exponential moving average of the per-batch processing time
                timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2
            Logger().log_value('{}_batch.batch'.format(mode), i, should_print=False)
            Logger().log_value('{}_batch.epoch'.format(mode), epoch, should_print=False)
            Logger().log_value('{}_batch.timer.process'.format(mode), timer['process'], should_print=False)
            Logger().log_value('{}_batch.timer.load'.format(mode), timer['load'], should_print=False)
            # accumulate every scalar output of the model for epoch-level averaging
            for key, value in out.items():
                if torch.is_tensor(value):
                    if value.dim() <= 1:
                        value = value.item() # get number from a torch scalar
                    else:
                        continue
                if type(value) == list:
                    continue
                if type(value) == dict:
                    continue
                if key not in out_epoch:
                    out_epoch[key] = []
                out_epoch[key].append(value)
                Logger().log_value('{}_batch.{}'.format(mode, key), value, should_print=False)
            if i % Options()['engine']['print_freq'] == 0:
                Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
                Logger()("{} elapsed: {} | left: {}".format(' '*len(mode),
                    datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
                    datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
                Logger()("{} process: {:.5f} | load: {:.5f}".format(' '*len(mode), timer['process'], timer['load']))
                self.hook('{}_on_print'.format(mode))
            timer['elapsed'] = time.time()
            self.hook('{}_on_end_batch'.format(mode))
            # debug mode: only run a few batches per epoch
            if Options()['engine']['debug']:
                if i > 10:
                    break
        out = {}
        for key, value in out_epoch.items():
            try:
                # out[key] = sum(value)/len(value)
                out[key] = np.asarray(value).mean()
            except:
                # NOTE(review): debugger breakpoint left in the except path;
                # ipdb may not be installed and this hangs non-interactive
                # runs -- consider logging and re-raising instead
                import ipdb; ipdb.set_trace()
        Logger().log_value('{}_epoch.epoch'.format(mode), epoch, should_print=True)
        for key, value in out.items():
            Logger().log_value('{}_epoch.{}'.format(mode, key), value, should_print=True)
        self.hook('{}_on_end_epoch'.format(mode))
        if logs_json:
            Logger().flush()
        self.hook('{}_on_flush'.format(mode))
        return out
def is_best(self, out, saving_criteria):
""" Verify if the last model is the best for a specific saving criteria
Args:
out(dict): mean of all the scalar outputs of model indexed by output name
saving_criteria(str):
Returns:
is_best(bool)
Example usage:
.. code-block:: python
out = {
'loss': 0.2,
'acctop1': 87.02
}
engine.is_best(out, 'loss:min')
"""
if ':min' in saving_criteria:
name = saving_criteria.replace(':min', '')
order = '<'
elif ':max' in saving_criteria:
name = saving_criteria.replace(':max', '')
order = '>'
else:
error_msg = """'--engine.saving_criteria' named '{}' does not specify order,
you need to chose between '{}' or '{}' to specify if the criteria needs to be minimize or maximize""".format(
saving_criteria, saving_criteria+':min', saving_criteria+':max')
raise ValueError(error_msg)
if name not in out:
raise KeyError("'--engine.saving_criteria' named '{}' not in outputs '{}'".format(name, list(out.keys())))
if name not in self.best_out:
self.best_out[name] = out[name]
else:
if eval('{} {} {}'.format(out[name], order, self.best_out[name])):
self.best_out[name] = out[name]
return True
return False
    def load(self, dir_logs, name, model, optimizer, map_location=None):
        """ Load a checkpoint (model, optimizer and engine state files)

        Args:
            dir_logs: directory of the checkpoint
            name: name of the checkpoint (e.g. 'last' or 'best_loss')
            model: model associated to the checkpoint
            optimizer: optimizer associated to the checkpoint
            map_location: forwarded to torch.load (e.g. to remap GPU tensors to CPU)
        """
        path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')
        Logger()('Loading model...')
        model_state = torch.load(path_template.format(name, 'model'), map_location=map_location)
        model.load_state_dict(model_state)
        # the optimizer state is only needed when training will continue
        if Options()['dataset']['train_split'] is not None:
            if os.path.isfile(path_template.format(name, 'optimizer')):
                Logger()('Loading optimizer...')
                optimizer_state = torch.load(path_template.format(name, 'optimizer'), map_location=map_location)
                optimizer.load_state_dict(optimizer_state)
            else:
                Logger()('No optimizer checkpoint', log_level=Logger.WARNING)
        if os.path.isfile(path_template.format(name, 'engine')):
            Logger()('Loading engine...')
            engine_state = torch.load(path_template.format(name, 'engine'), map_location=map_location)
            self.load_state_dict(engine_state)
        else:
            Logger()('No engine checkpoint', log_level=Logger.WARNING)
    def save(self, dir_logs, name, model, optimizer):
        """ Save a checkpoint as three files: ckpt_<name>_model.pth.tar,
        ckpt_<name>_optimizer.pth.tar and ckpt_<name>_engine.pth.tar

        Args:
            dir_logs: directory of the checkpoint
            name: name of the checkpoint (e.g. 'last' or 'best_loss')
            model: model associated to the checkpoint
            optimizer: optimizer associated to the checkpoint
        """
        path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')
        Logger()('Saving model...')
        model_state = model.state_dict()
        torch.save(model_state, path_template.format(name, 'model'))
        Logger()('Saving optimizer...')
        optimizer_state = optimizer.state_dict()
        torch.save(optimizer_state, path_template.format(name, 'optimizer'))
        Logger()('Saving engine...')
        engine_state = self.state_dict()
        torch.save(engine_state, path_template.format(name, 'engine'))
| 17,179 | 38.313501 | 121 | py |
introd | introd-main/cfvqa/run.py | import os
import click
import traceback
import torch
import torch.backends.cudnn as cudnn
from bootstrap.lib import utils
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from cfvqa import engines
from bootstrap import datasets
from bootstrap import models
from bootstrap import optimizers
from bootstrap import views
def init_experiment_directory(exp_dir, resume=None):
    """Create the experiment directory, asking for confirmation before
    erasing an existing one (unless the run is resuming a checkpoint).

    Args:
        exp_dir: path of the experiment directory
        resume: name of the checkpoint being resumed, or None for a fresh run
    """
    if not os.path.isdir(exp_dir):
        os.system('mkdir -p ' + exp_dir)
    else:
        if resume is None:
            # bugfix: `default=False` used to be passed to str.format (where
            # it was silently ignored) instead of to click.confirm
            if click.confirm('Exp directory already exists in {}. Erase?'.format(exp_dir), default=False):
                os.system('rm -r ' + exp_dir)
                os.system('mkdir -p ' + exp_dir)
            else:
                os._exit(1)
def init_logs_options_files(exp_dir, resume=None):
    """Choose the logs/options file names for this run and create them
    (options yaml is only written when it does not already exist)."""
    # get the logs name which is used for the txt, json and yaml files
    # default is `logs.txt`, `logs.json` and `options.yaml`
    if 'logs_name' in Options()['misc'] and Options()['misc']['logs_name'] is not None:
        logs_name = 'logs_{}'.format(Options()['misc']['logs_name'])
        path_yaml = os.path.join(exp_dir, 'options_{}.yaml'.format(logs_name))
    elif resume and Options()['dataset']['train_split'] is None:
        # eval-only resumed run: use split-specific file names
        eval_split = Options()['dataset']['eval_split']
        path_yaml = os.path.join(exp_dir, 'options_eval_{}.yaml'.format(eval_split))
        logs_name = 'logs_eval_{}'.format(eval_split)
    else:
        path_yaml = os.path.join(exp_dir, 'options.yaml')
        logs_name = 'logs'
    # create the options.yaml file
    if not os.path.isfile(path_yaml):
        Options().save(path_yaml)
    # create the logs.txt and logs.json files
    Logger(exp_dir, name=logs_name)
def run(path_opts=None):
    """Full experiment entry point: build engine, datasets, model, optimizer
    and view; optionally resume a checkpoint; then evaluate and/or train
    according to the options.
    """
    # first call to Options() load the options yaml file from --path_opts command line argument if path_opts=None
    Options(path_opts)
    # initialiaze seeds to be able to reproduce experiment on reload
    utils.set_random_seed(Options()['misc']['seed'])
    init_experiment_directory(Options()['exp']['dir'], Options()['exp']['resume'])
    init_logs_options_files(Options()['exp']['dir'], Options()['exp']['resume'])
    Logger().log_dict('options', Options(), should_print=True) # display options
    Logger()(os.uname()) # display server name
    if torch.cuda.is_available():
        cudnn.benchmark = True
        Logger()('Available GPUs: {}'.format(utils.available_gpu_ids()))
    # engine can train, eval, optimize the model
    # engine can save and load the model and optimizer
    engine = engines.factory()
    # dataset is a dictionary that contains all the needed datasets indexed by modes
    # (example: dataset.keys() -> ['train','eval'])
    engine.dataset = datasets.factory(engine)
    # model includes a network, a criterion and a metric
    # model can register engine hooks (begin epoch, end batch, end batch, etc.)
    # (example: "calculate mAP at the end of the evaluation epoch")
    # note: model can access to datasets using engine.dataset
    engine.model = models.factory(engine)
    # optimizer can register engine hooks
    engine.optimizer = optimizers.factory(engine.model, engine)
    # view will save a view.html in the experiment directory
    # with some nice plots and curves to monitor training
    engine.view = views.factory(engine)
    # load the model and optimizer from a checkpoint
    if Options()['exp']['resume']:
        engine.resume()
    # if no training split, evaluate the model on the evaluation split
    # (example: $ python main.py --dataset.train_split --dataset.eval_split test)
    if not Options()['dataset']['train_split']:
        engine.eval()
    # optimize the model on the training split for several epochs
    # (example: $ python main.py --dataset.train_split train)
    # if evaluation split, evaluate the model after each epochs
    # (example: $ python main.py --dataset.train_split train --dataset.eval_split val)
    if Options()['dataset']['train_split']:
        engine.train()
    # with torch.autograd.profiler.profile(use_cuda=Options()['misc.cuda']) as prof:
    #     engine.train()
    # path_tracing = 'tracing_1.0_cuda,{}_all.html'.format(Options()['misc.cuda'])
    # prof.export_chrome_trace(path_tracing)
def main(path_opts=None, run=None):
    """Wrapper around `run` that routes any uncaught exception to the
    experiment's logs.txt instead of the console before exiting."""
    try:
        run(path_opts=path_opts)
    # to avoid traceback for -h flag in arguments line
    except SystemExit:
        pass
    except:
        # to be able to write the error trace to exp_dir/logs.txt
        try:
            Logger()(traceback.format_exc(), Logger.ERROR)
        except:
            # best-effort: logging itself may fail before Logger is initialized
            pass
if __name__ == '__main__':
    main(run=run)
| 4,750 | 36.117188 | 113 | py |
introd | introd-main/cfvqa/cfvqa/__version__.py | __version__ = '0.0.0'
| 22 | 10.5 | 21 | py |
introd | introd-main/cfvqa/cfvqa/run.py | import os
import click
import traceback
import torch
import torch.backends.cudnn as cudnn
from bootstrap.lib import utils
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from cfvqa import engines
from bootstrap import datasets
from bootstrap import models
from bootstrap import optimizers
from bootstrap import views
def init_experiment_directory(exp_dir, resume=None):
    """Create the experiment directory, asking for confirmation before
    erasing an existing one (unless the run is resuming a checkpoint).

    Args:
        exp_dir: path of the experiment directory
        resume: name of the checkpoint being resumed, or None for a fresh run
    """
    if not os.path.isdir(exp_dir):
        os.system('mkdir -p ' + exp_dir)
    else:
        if resume is None:
            # bugfix: `default=False` used to be passed to str.format (where
            # it was silently ignored) instead of to click.confirm
            if click.confirm('Exp directory already exists in {}. Erase?'.format(exp_dir), default=False):
                os.system('rm -r ' + exp_dir)
                os.system('mkdir -p ' + exp_dir)
            else:
                os._exit(1)
def init_logs_options_files(exp_dir, resume=None):
    """Choose the logs/options file names for this run and create them
    (options yaml is only written when it does not already exist)."""
    # get the logs name which is used for the txt, json and yaml files
    # default is `logs.txt`, `logs.json` and `options.yaml`
    if 'logs_name' in Options()['misc'] and Options()['misc']['logs_name'] is not None:
        logs_name = 'logs_{}'.format(Options()['misc']['logs_name'])
        path_yaml = os.path.join(exp_dir, 'options_{}.yaml'.format(logs_name))
    elif resume and Options()['dataset']['train_split'] is None:
        # eval-only resumed run: use split-specific file names
        eval_split = Options()['dataset']['eval_split']
        path_yaml = os.path.join(exp_dir, 'options_eval_{}.yaml'.format(eval_split))
        logs_name = 'logs_eval_{}'.format(eval_split)
    else:
        path_yaml = os.path.join(exp_dir, 'options.yaml')
        logs_name = 'logs'
    # create the options.yaml file
    if not os.path.isfile(path_yaml):
        Options().save(path_yaml)
    # create the logs.txt and logs.json files
    Logger(exp_dir, name=logs_name)
def run(path_opts=None):
    """Full experiment entry point: build engine, datasets, model, optimizer
    and view; optionally resume a checkpoint; then evaluate and/or train
    according to the options.
    """
    # first call to Options() load the options yaml file from --path_opts command line argument if path_opts=None
    Options(path_opts)
    # initialiaze seeds to be able to reproduce experiment on reload
    utils.set_random_seed(Options()['misc']['seed'])
    init_experiment_directory(Options()['exp']['dir'], Options()['exp']['resume'])
    init_logs_options_files(Options()['exp']['dir'], Options()['exp']['resume'])
    Logger().log_dict('options', Options(), should_print=True) # display options
    Logger()(os.uname()) # display server name
    if torch.cuda.is_available():
        cudnn.benchmark = True
        Logger()('Available GPUs: {}'.format(utils.available_gpu_ids()))
    # engine can train, eval, optimize the model
    # engine can save and load the model and optimizer
    engine = engines.factory()
    # dataset is a dictionary that contains all the needed datasets indexed by modes
    # (example: dataset.keys() -> ['train','eval'])
    engine.dataset = datasets.factory(engine)
    # model includes a network, a criterion and a metric
    # model can register engine hooks (begin epoch, end batch, end batch, etc.)
    # (example: "calculate mAP at the end of the evaluation epoch")
    # note: model can access to datasets using engine.dataset
    engine.model = models.factory(engine)
    # optimizer can register engine hooks
    engine.optimizer = optimizers.factory(engine.model, engine)
    # view will save a view.html in the experiment directory
    # with some nice plots and curves to monitor training
    engine.view = views.factory(engine)
    # load the model and optimizer from a checkpoint
    if Options()['exp']['resume']:
        engine.resume()
    # if no training split, evaluate the model on the evaluation split
    # (example: $ python main.py --dataset.train_split --dataset.eval_split test)
    if not Options()['dataset']['train_split']:
        engine.eval()
    # optimize the model on the training split for several epochs
    # (example: $ python main.py --dataset.train_split train)
    # if evaluation split, evaluate the model after each epochs
    # (example: $ python main.py --dataset.train_split train --dataset.eval_split val)
    if Options()['dataset']['train_split']:
        engine.train()
    # with torch.autograd.profiler.profile(use_cuda=Options()['misc.cuda']) as prof:
    #     engine.train()
    # path_tracing = 'tracing_1.0_cuda,{}_all.html'.format(Options()['misc.cuda'])
    # prof.export_chrome_trace(path_tracing)
def main(path_opts=None, run=None):
    """Wrapper around `run` that routes any uncaught exception to the
    experiment's logs.txt instead of the console before exiting."""
    try:
        run(path_opts=path_opts)
    # to avoid traceback for -h flag in arguments line
    except SystemExit:
        pass
    except:
        # to be able to write the error trace to exp_dir/logs.txt
        try:
            Logger()(traceback.format_exc(), Logger.ERROR)
        except:
            # best-effort: logging itself may fail before Logger is initialized
            pass
if __name__ == '__main__':
    main(run=run)
| 4,750 | 36.117188 | 113 | py |
introd | introd-main/cfvqa/cfvqa/__init__.py | 0 | 0 | 0 | py | |
introd | introd-main/cfvqa/cfvqa/models/networks/rubi.py | import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
class RUBiNet(nn.Module):
    """
    Wraps another VQA model (RUBi question-bias reduction).
    The wrapped model must return a dictionnary containing the 'logits' key
    (predictions before softmax) and a 'q_emb' question embedding.
    Returns:
        - logits: the original predictions of the model
        - logits_q: the predictions from the question-only branch
        - logits_all: the predictions of the model masked by the
          sigmoid of the question-only branch (RUBi fusion)
    => Use `logits_all` and `logits_q` for the loss
    """
    def __init__(self, model, output_size, classif, end_classif=True):
        super().__init__()
        self.net = model
        # question-only classifier branch
        self.c_1 = MLP(**classif)
        self.end_classif = end_classif
        if self.end_classif:
            # extra linear head applied to the question-only logits
            self.c_2 = nn.Linear(output_size, output_size)
    def forward(self, batch):
        out = {}
        # model prediction
        net_out = self.net(batch)
        logits = net_out['logits']
        q_embedding = net_out['q_emb'] # N * q_emb
        q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate through question encoder
        q_pred = self.c_1(q_embedding)
        # mask the base logits with the sigmoid of the question-only prediction
        fusion_pred = logits * torch.sigmoid(q_pred)
        if self.end_classif:
            q_out = self.c_2(q_pred)
        else:
            q_out = q_pred
        out['logits'] = net_out['logits']
        out['logits_all'] = fusion_pred
        out['logits_q'] = q_out
        return out
    def process_answers(self, out, key=''):
        # decode answer ids/strings for each logits variant
        out = self.net.process_answers(out)
        out = self.net.process_answers(out, key='_all')
        out = self.net.process_answers(out, key='_q')
        return out
| 1,733 | 33 | 105 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/smrl_net.py | from copy import deepcopy
import itertools
import os
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
import block
from block.models.networks.vqa_net import factory_text_enc
from block.models.networks.mlp import MLP
from .utils import mask_softmax
class SMRLNet(nn.Module):
    """Simplified MUREL (SMRL) VQA network: fuses the question embedding with
    each visual region, aggregates over regions, and classifies the answer.

    Args:
        txt_enc: options of the text encoder (see factory_text_enc)
        self_q_att: if True, use a learned self-attention over question words;
            otherwise keep the last RNN state
        agg: region aggregation; agg['type'] must be 'max' or 'mean'
        classif: options of the final MLP classifier (classif['mlp'])
        wid_to_word / word_to_wid: question vocabulary mappings
        aid_to_ans / ans_to_aid: answer vocabulary mappings
        fusion: options of the block fusion module
        residual: if True, add a residual connection around the fusion
        q_single: if True, build a second text encoder used only to produce
            the exported 'q_emb' output
    """
    def __init__(self,
            txt_enc={},
            self_q_att=False,
            agg={},
            classif={},
            wid_to_word={},
            word_to_wid={},
            aid_to_ans=[],
            ans_to_aid={},
            fusion={},
            residual=False,
            q_single=False,
        ):
        super().__init__()
        self.self_q_att = self_q_att
        self.agg = agg
        assert self.agg['type'] in ['max', 'mean']
        self.classif = classif
        self.wid_to_word = wid_to_word
        self.word_to_wid = word_to_wid
        self.aid_to_ans = aid_to_ans
        self.ans_to_aid = ans_to_aid
        self.fusion = fusion
        self.residual = residual
        # Modules
        self.txt_enc = self.get_text_enc(self.wid_to_word, txt_enc)
        if self.self_q_att:
            self.q_att_linear0 = nn.Linear(2400, 512)
            self.q_att_linear1 = nn.Linear(512, 2)
        if q_single:
            # independent text encoder used only for the exported q_emb
            self.txt_enc_single = self.get_text_enc(self.wid_to_word, txt_enc)
            if self.self_q_att:
                self.q_att_linear0_single = nn.Linear(2400, 512)
                self.q_att_linear1_single = nn.Linear(512, 2)
        else:
            self.txt_enc_single = None
        self.fusion_module = block.factory_fusion(self.fusion)
        if self.classif['mlp']['dimensions'][-1] != len(self.aid_to_ans):
            Logger()(f"Warning, the classif_mm output dimension ({self.classif['mlp']['dimensions'][-1]})"
                     f"doesn't match the number of answers ({len(self.aid_to_ans)}). Modifying the output dimension.")
            self.classif['mlp']['dimensions'][-1] = len(self.aid_to_ans)
        self.classif_module = MLP(**self.classif['mlp'])
        Logger().log_value('nparams',
            sum(p.numel() for p in self.parameters() if p.requires_grad),
            should_print=True)
        Logger().log_value('nparams_txt_enc',
            self.get_nparams_txt_enc(),
            should_print=True)
    def get_text_enc(self, vocab_words, options):
        """
        returns the text encoding network.
        NOTE(review): `vocab_words` is ignored and `self.wid_to_word` is used
        instead -- confirm this is intended.
        """
        return factory_text_enc(self.wid_to_word, options)
    def get_nparams_txt_enc(self):
        """Number of trainable parameters of the text encoding branch."""
        params = [p.numel() for p in self.txt_enc.parameters() if p.requires_grad]
        if self.self_q_att:
            params += [p.numel() for p in self.q_att_linear0.parameters() if p.requires_grad]
            params += [p.numel() for p in self.q_att_linear1.parameters() if p.requires_grad]
        return sum(params)
    def process_fusion(self, q, mm):
        """Fuse an expanded question embedding with every region feature."""
        bsize = mm.shape[0]
        n_regions = mm.shape[1]
        # flatten regions so the fusion module sees one (q, region) pair per row
        mm = mm.contiguous().view(bsize*n_regions, -1)
        mm = self.fusion_module([q, mm])
        mm = mm.view(bsize, n_regions, -1)
        return mm
    def forward(self, batch):
        """Compute answer logits for a batch.

        Reads batch keys 'visual', 'question', 'lengths', 'norm_coord';
        returns a dict with 'q_emb', 'v_emb', 'mm', 'mm_argmax', 'logits'.
        """
        v = batch['visual']
        q = batch['question']
        l = batch['lengths'].data
        c = batch['norm_coord']
        nb_regions = batch.get('nb_regions')
        bsize = v.shape[0]
        n_regions = v.shape[1]
        out = {}
        q = self.process_question(q, l,)
        out['q_emb'] = q
        # replicate the question vector once per region before fusion
        q_expand = q[:,None,:].expand(bsize, n_regions, q.shape[1])
        q_expand = q_expand.contiguous().view(bsize*n_regions, -1)
        # single txt encoder
        if self.txt_enc_single is not None:
            # NOTE(review): `q` is already the processed question vector here,
            # not the raw token ids -- confirm this double encoding is intended
            out['q_emb'] = self.process_question(q, l, self.txt_enc_single, self.q_att_linear0_single, self.q_att_linear1_single)
        mm = self.process_fusion(q_expand, v,)
        if self.residual:
            mm = v + mm
        if self.agg['type'] == 'max':
            mm, mm_argmax = torch.max(mm, 1)
        elif self.agg['type'] == 'mean':
            mm = mm.mean(1)
            # bugfix: mm_argmax was previously unbound on this branch,
            # raising NameError below when agg['type'] == 'mean'
            mm_argmax = None
        out['v_emb'] = v.mean(1)
        out['mm'] = mm
        out['mm_argmax'] = mm_argmax
        logits = self.classif_module(mm)
        out['logits'] = logits
        return out
    def process_question(self, q, l, txt_enc=None, q_att_linear0=None, q_att_linear1=None):
        """Encode the question tokens into a single vector (self-attention
        pooling when self_q_att is set, otherwise the last RNN state)."""
        if txt_enc is None:
            txt_enc = self.txt_enc
        q_emb = txt_enc.embedding(q)
        q, _ = txt_enc.rnn(q_emb)
        if self.self_q_att:
            # bugfix: resolve the default attention layers only when
            # self-attention is enabled -- self.q_att_linear0/1 do not
            # exist when self_q_att is False
            if q_att_linear0 is None:
                q_att_linear0 = self.q_att_linear0
            if q_att_linear1 is None:
                q_att_linear1 = self.q_att_linear1
            q_att = q_att_linear0(q)
            q_att = F.relu(q_att)
            q_att = q_att_linear1(q_att)
            q_att = mask_softmax(q_att, l)
            #self.q_att_coeffs = q_att
            if q_att.size(2) > 1:
                # several attention glimpses: pool per glimpse then concatenate
                q_atts = torch.unbind(q_att, dim=2)
                q_outs = []
                for q_att in q_atts:
                    q_att = q_att.unsqueeze(2)
                    q_att = q_att.expand_as(q)
                    q_out = q_att*q
                    q_out = q_out.sum(1)
                    q_outs.append(q_out)
                q = torch.cat(q_outs, dim=1)
            else:
                q_att = q_att.expand_as(q)
                q = q_att * q
                q = q.sum(1)
        else:
            # l contains the number of words for each question
            # in case of multi-gpus it must be a Tensor
            # thus we convert it into a list during the forward pass
            l = list(l.data[:,0])
            q = txt_enc._select_last(q, l)
        return q
    def process_answers(self, out, key=''):
        """Decode the argmax of `logits{key}` into answer strings and ids."""
        batch_size = out[f'logits{key}'].shape[0]
        _, pred = out[f'logits{key}'].data.max(1)
        pred.squeeze_()
        if batch_size != 1:
            out[f'answers{key}'] = [self.aid_to_ans[pred[i].item()] for i in range(batch_size)]
            out[f'answer_ids{key}'] = [pred[i].item() for i in range(batch_size)]
        else:
            out[f'answers{key}'] = [self.aid_to_ans[pred.item()]]
            out[f'answer_ids{key}'] = [pred.item()]
        return out
| 6,297 | 32.679144 | 129 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/cfvqaintrod.py | import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
eps = 1e-12
class CFVQAIntroD(nn.Module):
"""
Wraps another model
The original model must return a dictionnary containing the 'logits' key (predictions before softmax)
Returns:
- logits_vq: the original predictions of the model, i.e., NIE
- logits_q: the predictions from the question-only branch
- logits_v: the predictions from the vision-only branch
- logits_all: the predictions from the ensemble model
- logits_cfvqa: the predictions based on CF-VQA, i.e., TIE
=> Use `logits_all`, `logits_q` and `logits_v` for the loss
"""
def __init__(self, model, model_teacher, output_size, classif_q, classif_v, fusion_mode, end_classif=True, is_va=True):
    """Build the IntroD wrapper around a student and a frozen CF-VQA teacher.

    Args:
        model: student baseline network (trained).
        model_teacher: teacher baseline network (kept in eval mode).
        output_size: number of answer classes.
        classif_q: MLP kwargs for the question-only (Q->A) branch.
        classif_v: MLP kwargs for the vision-only (V->A) branch.
        fusion_mode: one of 'rubi' / 'hm' / 'sum'.
        end_classif: add a final Linear on top of each single-modal branch.
        is_va: include the V->A branch (forced off for 'rubi').
    """
    super().__init__()
    self.net_student = model
    self.net = model_teacher
    self.end_classif = end_classif
    assert fusion_mode in ['rubi', 'hm', 'sum'], "Fusion mode should be rubi/hm/sum."
    self.fusion_mode = fusion_mode
    self.is_va = is_va and (not fusion_mode=='rubi') # RUBi does not consider V->A
    # Q->A branch
    self.q_1 = MLP(**classif_q)
    if self.end_classif: # default: True (following RUBi)
        self.q_2 = nn.Linear(output_size, output_size)
    # V->A branch
    if self.is_va: # default: True (containing V->A)
        self.v_1 = MLP(**classif_v)
        if self.end_classif: # default: True (following RUBi)
            self.v_2 = nn.Linear(output_size, output_size)
    # Learned constant used by transform() to fill counterfactual branches.
    self.constant = nn.Parameter(torch.tensor(0.0))
    self.constant.requires_grad = True
    # Teacher-side modules stay in eval mode; only the student is trained.
    self.net.eval()
    self.q_1.eval()
    if self.end_classif:
        self.q_2.eval()
    if self.is_va:
        self.v_1.eval()
        if self.end_classif:
            self.v_2.eval()
def forward(self, batch):
    """Run the CF-VQA teacher ensemble and the student on ``batch``.

    Returns a dict with the teacher's ensemble logits ('logits_all'),
    branch logits ('logits_vq', 'logits_q', and 'logits_v' when is_va),
    the debiased TIE logits ('logits_cfvqa'), and the student's logits
    ('logits_stu').
    """
    out = {}
    # model prediction
    net_out = self.net(batch)
    logits = net_out['logits']
    # Q->A branch
    q_embedding = net_out['q_emb'] # N * q_emb
    q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate
    q_pred = self.q_1(q_embedding)
    # V->A branch
    if self.is_va:
        v_embedding = net_out['v_emb'] # N * v_emb
        v_embedding = grad_mul_const(v_embedding, 0.0) # don't backpropagate
        v_pred = self.v_1(v_embedding)
    else:
        v_pred = None
    # both q, k and v are the facts
    z_qkv = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=True, v_fact=True) # te
    # q is the fact while k and v are the counterfactuals
    z_q = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=False, v_fact=False) # nie
    # TIE = total effect - natural indirect effect
    logits_cfvqa = z_qkv - z_q
    if self.end_classif:
        q_out = self.q_2(q_pred)
        if self.is_va:
            v_out = self.v_2(v_pred)
    else:
        q_out = q_pred
        if self.is_va:
            v_out = v_pred
    out['logits_all'] = z_qkv # for optimization
    out['logits_vq'] = logits # predictions of the original VQ branch, i.e., NIE
    out['logits_cfvqa'] = logits_cfvqa # predictions of CFVQA, i.e., TIE
    out['logits_q'] = q_out # for optimization
    if self.is_va:
        out['logits_v'] = v_out # for optimization
    # student model
    logits_stu = self.net_student(batch)
    out['logits_stu'] = logits_stu['logits']
    return out
def process_answers(self, out, key=''):
    """Decode every logits head produced by forward() into answers."""
    suffixes = ['_all', '_vq', '_cfvqa', '_q']
    if self.is_va:
        suffixes.append('_v')
    suffixes.append('_stu')  # student model predictions
    for suffix in suffixes:
        out = self.net.process_answers(out, key=suffix)
    return out
def fusion(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
    """Fuse branch scores after replacing counterfactual branches.

    z_k: VQ-branch logits; z_q: question-only logits; z_v: vision-only
    logits (may be None when is_va is False). The *_fact flags mark which
    branches are factual; the others are replaced by the learned constant
    in transform().
    """
    z_k, z_q, z_v = self.transform(z_k, z_q, z_v, q_fact, k_fact, v_fact)
    if self.fusion_mode == 'rubi':
        z = z_k * torch.sigmoid(z_q)
    elif self.fusion_mode == 'hm':
        # harmonic fusion on sigmoid scores, returned in log-odds form
        if self.is_va:
            z = z_k * z_q * z_v
        else:
            z = z_k * z_q
        z = torch.log(z + eps) - torch.log1p(z)
    elif self.fusion_mode == 'sum':
        # additive fusion, returned as log-sigmoid
        if self.is_va:
            z = z_k + z_q + z_v
        else:
            z = z_k + z_q
        z = torch.log(torch.sigmoid(z) + eps)
    return z
def transform(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
    """Replace counterfactual branches by the learned constant and, for the
    harmonic fusion mode, squash factual scores through a sigmoid."""
    def neutralize(z):
        # Counterfactual input: uniform tensor scaled by the learned constant.
        return self.constant * torch.ones_like(z).cuda()

    if not k_fact:
        z_k = neutralize(z_k)
    if not q_fact:
        z_q = neutralize(z_q)
    if self.is_va and not v_fact:
        z_v = neutralize(z_v)
    if self.fusion_mode == 'hm':
        z_k, z_q = torch.sigmoid(z_k), torch.sigmoid(z_q)
        if self.is_va:
            z_v = torch.sigmoid(z_v)
    return z_k, z_q, z_v
introd | introd-main/cfvqa/cfvqa/models/networks/utils.py | import torch
def mask_softmax(x, lengths):
    """Softmax over dim 1 of ``x`` restricted to the first ``lengths`` positions.

    ``x`` is (batch, seq, k) scores and ``lengths`` is (batch, 1) valid
    sequence lengths; positions at or beyond a row's length get probability 0.
    """
    # Build a 0/1 validity mask: position t is valid iff t < length.
    positions = torch.arange(x.size(1)).to(device=x.device, non_blocking=True)
    positions = positions[None, :, None].expand_as(x)
    limits = lengths[:, :, None].expand_as(x)
    valid = torch.zeros_like(x).to(device=x.device, non_blocking=True)
    valid[positions < limits] = 1
    # exp(x - max(x)) is the standard numerical-stability trick; masked
    # entries are zeroed before normalization, and a small epsilon keeps
    # the denominator away from zero for fully-masked rows.
    scores = torch.exp(x - torch.max(x)) * valid
    denom = torch.sum(scores, dim=1, keepdim=True) + 1e-5
    return scores / denom.expand_as(scores)
class GradReverseMask(torch.autograd.Function):
    """
    This layer is used to create an adversarial loss.

    Identity in the forward pass; in the backward pass, gradients of rows
    whose mask entry is 1 are multiplied by -weight, while rows with mask 0
    pass through unchanged.
    """
    @staticmethod
    def forward(ctx, x, mask, weight):
        """
        The mask should be composed of 0 or 1.
        The '1' will get their gradient reversed..
        """
        ctx.save_for_backward(mask)
        ctx.weight = weight
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        mask, = ctx.saved_tensors
        # Per-row multiplier: +1 where mask==0, -weight where mask==1.
        mask_c = mask.clone().detach().float()
        mask_c[mask == 0] = 1.0
        mask_c[mask == 1] = - float(ctx.weight)
        # None, None: no gradients for the mask and weight inputs.
        return grad_output * mask_c[:, None].float(), None, None
def grad_reverse_mask(x, mask, weight=1):
    """Functional wrapper around GradReverseMask (masked gradient reversal)."""
    return GradReverseMask.apply(x, mask, weight)
class GradReverse(torch.autograd.Function):
    """
    This layer is used to create an adversarial loss.

    Identity in the forward pass; negates all gradients in the backward pass.
    """
    @staticmethod
    def forward(ctx, x):
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.neg()
def grad_reverse(x):
    """Functional wrapper around GradReverse (full gradient reversal)."""
    return GradReverse.apply(x)
class GradMulConst(torch.autograd.Function):
    """
    Identity in the forward pass; scales the gradient by ``const`` in the
    backward pass. With const=0.0 this blocks backpropagation entirely
    (used to detach branch inputs without detaching the tensor itself).
    """
    @staticmethod
    def forward(ctx, x, const):
        ctx.const = const
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # None: no gradient for the `const` input.
        return grad_output * ctx.const, None
def grad_mul_const(x, const):
    """Functional wrapper around GradMulConst (gradient scaling by const)."""
    return GradMulConst.apply(x, const)
| 2,326 | 27.036145 | 98 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/cfvqa.py | import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
eps = 1e-12
class CFVQA(nn.Module):
"""
Wraps another model
The original model must return a dictionnary containing the 'logits' key (predictions before softmax)
Returns:
- logits_vq: the original predictions of the model, i.e., NIE
- logits_q: the predictions from the question-only branch
- logits_v: the predictions from the vision-only branch
- logits_all: the predictions from the ensemble model
- logits_cfvqa: the predictions based on CF-VQA, i.e., TIE
=> Use `logits_all`, `logits_q` and `logits_v` for the loss
"""
def __init__(self, model, output_size, classif_q, classif_v, fusion_mode, end_classif=True, is_va=True):
    """Build the CF-VQA causal wrapper around a baseline VQA model.

    Args:
        model: wrapped baseline network; must return 'logits', 'q_emb'
            and (when is_va) 'v_emb' in its output dict.
        output_size: number of answer classes.
        classif_q: MLP kwargs for the question-only (Q->A) branch.
        classif_v: MLP kwargs for the vision-only (V->A) branch.
        fusion_mode: one of 'rubi' / 'hm' / 'sum'.
        end_classif: add a final Linear on top of each single-modal branch.
        is_va: include the V->A branch (forced off for 'rubi').
    """
    super().__init__()
    self.net = model
    self.end_classif = end_classif
    assert fusion_mode in ['rubi', 'hm', 'sum'], "Fusion mode should be rubi/hm/sum."
    self.fusion_mode = fusion_mode
    self.is_va = is_va and (not fusion_mode=='rubi') # RUBi does not consider V->A
    # Q->A branch
    self.q_1 = MLP(**classif_q)
    if self.end_classif: # default: True (following RUBi)
        self.q_2 = nn.Linear(output_size, output_size)
    # V->A branch
    if self.is_va: # default: True (containing V->A)
        self.v_1 = MLP(**classif_v)
        if self.end_classif: # default: True (following RUBi)
            self.v_2 = nn.Linear(output_size, output_size)
    # Learned constant used by transform() to fill counterfactual branches.
    self.constant = nn.Parameter(torch.tensor(0.0))
def forward(self, batch):
    """Compute the CF-VQA ensemble and the debiased (TIE) predictions.

    Returns a dict with ensemble logits ('logits_all'), branch logits
    ('logits_vq', 'logits_q', and 'logits_v' when is_va), debiased logits
    ('logits_cfvqa') and a detached NDE term ('z_nde') used by the loss.
    """
    out = {}
    # model prediction
    net_out = self.net(batch)
    logits = net_out['logits']
    # Q->A branch
    q_embedding = net_out['q_emb'] # N * q_emb
    q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate
    q_pred = self.q_1(q_embedding)
    # V->A branch
    if self.is_va:
        v_embedding = net_out['v_emb'] # N * v_emb
        v_embedding = grad_mul_const(v_embedding, 0.0) # don't backpropagate
        v_pred = self.v_1(v_embedding)
    else:
        v_pred = None
    # both q, k and v are the facts
    z_qkv = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=True, v_fact=True) # te
    # q is the fact while k and v are the counterfactuals
    z_q = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=False, v_fact=False) # nie
    # TIE = total effect - natural indirect effect
    logits_cfvqa = z_qkv - z_q
    if self.end_classif:
        q_out = self.q_2(q_pred)
        if self.is_va:
            v_out = self.v_2(v_pred)
    else:
        q_out = q_pred
        if self.is_va:
            v_out = v_pred
    out['logits_all'] = z_qkv # for optimization
    out['logits_vq'] = logits # predictions of the original VQ branch, i.e., NIE
    out['logits_cfvqa'] = logits_cfvqa # predictions of CFVQA, i.e., TIE
    out['logits_q'] = q_out # for optimization
    if self.is_va:
        out['logits_v'] = v_out # for optimization
    # Detached counterfactual term for the loss (no gradient to branches).
    if self.is_va:
        out['z_nde'] = self.fusion(logits.clone().detach(), q_pred.clone().detach(), v_pred.clone().detach(), q_fact=True, k_fact=False, v_fact=False) # tie
    else:
        out['z_nde'] = self.fusion(logits.clone().detach(), q_pred.clone().detach(), None, q_fact=True, k_fact=False, v_fact=False) # tie
    return out
def process_answers(self, out, key=''):
    """Decode every logits head produced by forward() into answers."""
    suffixes = ['_all', '_vq', '_cfvqa', '_q']
    if self.is_va:
        suffixes.append('_v')
    for suffix in suffixes:
        out = self.net.process_answers(out, key=suffix)
    return out
def fusion(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
    """Fuse branch scores after replacing counterfactual branches.

    z_k: VQ-branch logits; z_q: question-only logits; z_v: vision-only
    logits (may be None when is_va is False). The *_fact flags mark which
    branches are factual; the others are replaced by the learned constant
    in transform().
    """
    z_k, z_q, z_v = self.transform(z_k, z_q, z_v, q_fact, k_fact, v_fact)
    if self.fusion_mode == 'rubi':
        z = z_k * torch.sigmoid(z_q)
    elif self.fusion_mode == 'hm':
        # harmonic fusion on sigmoid scores, returned in log-odds form
        if self.is_va:
            z = z_k * z_q * z_v
        else:
            z = z_k * z_q
        z = torch.log(z + eps) - torch.log1p(z)
    elif self.fusion_mode == 'sum':
        # additive fusion, returned as log-sigmoid
        if self.is_va:
            z = z_k + z_q + z_v
        else:
            z = z_k + z_q
        z = torch.log(torch.sigmoid(z) + eps)
    return z
def transform(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
    """Replace counterfactual branches by the learned constant and, for the
    harmonic fusion mode, squash factual scores through a sigmoid."""
    def neutralize(z):
        # Counterfactual input: uniform tensor scaled by the learned constant.
        return self.constant * torch.ones_like(z).cuda()

    if not k_fact:
        z_k = neutralize(z_k)
    if not q_fact:
        z_q = neutralize(z_q)
    if self.is_va and not v_fact:
        z_v = neutralize(z_v)
    if self.fusion_mode == 'hm':
        z_k, z_q = torch.sigmoid(z_k), torch.sigmoid(z_q)
        if self.is_va:
            z_v = torch.sigmoid(z_v)
    return z_k, z_q, z_v
introd | introd-main/cfvqa/cfvqa/models/networks/factory.py | import sys
import copy
import torch
import torch.nn as nn
import os
import json
from bootstrap.lib.options import Options
from bootstrap.models.networks.data_parallel import DataParallel
from block.models.networks.vqa_net import VQANet as AttentionNet
from bootstrap.lib.logger import Logger
from .rubi import RUBiNet
from .cfvqa import CFVQA
from .cfvqaintrod import CFVQAIntroD
from .rubiintrod import RUBiIntroD
def factory(engine):
    """Build the network requested by Options()['model.network'].

    The 'base' option selects the baseline architecture (smrl/updn/san);
    the 'name' option selects the wrapper (baseline, rubi, cfvqa,
    cfvqasimple, cfvqaintrod, cfvqasimpleintrod, rubiintrod). The IntroD
    variants need a second, identically-configured baseline as teacher.

    Raises:
        ValueError: unknown 'base' or 'name' option.
    """
    mode = list(engine.dataset.keys())[0]
    dataset = engine.dataset[mode]
    opt = Options()['model.network']
    if opt['base'] == 'smrl':
        from .smrl_net import SMRLNet as BaselineNet
    elif opt['base'] == 'updn':
        from .updn_net import UpDnNet as BaselineNet
    elif opt['base'] == 'san':
        from .san_net import SANNet as BaselineNet
    else:
        raise ValueError(opt['base'])

    def build_baseline():
        # Student and teacher baselines share exactly the same configuration;
        # this helper replaces three duplicated constructor calls.
        return BaselineNet(
            txt_enc=opt['txt_enc'],
            self_q_att=opt['self_q_att'],
            agg=opt['agg'],
            classif=opt['classif'],
            wid_to_word=dataset.wid_to_word,
            word_to_wid=dataset.word_to_wid,
            aid_to_ans=dataset.aid_to_ans,
            ans_to_aid=dataset.ans_to_aid,
            fusion=opt['fusion'],
            residual=opt['residual'],
            q_single=opt['q_single'],
        )

    orig_net = build_baseline()
    if opt['name'] == 'baseline':
        net = orig_net
    elif opt['name'] == 'rubi':
        net = RUBiNet(
            model=orig_net,
            output_size=len(dataset.aid_to_ans),
            classif=opt['rubi_params']['mlp_q']
        )
    elif opt['name'] == 'cfvqa':
        net = CFVQA(
            model=orig_net,
            output_size=len(dataset.aid_to_ans),
            classif_q=opt['cfvqa_params']['mlp_q'],
            classif_v=opt['cfvqa_params']['mlp_v'],
            fusion_mode=opt['fusion_mode'],
            is_va=True
        )
    elif opt['name'] == 'cfvqasimple':
        net = CFVQA(
            model=orig_net,
            output_size=len(dataset.aid_to_ans),
            classif_q=opt['cfvqa_params']['mlp_q'],
            classif_v=None,
            fusion_mode=opt['fusion_mode'],
            is_va=False
        )
    elif opt['name'] == 'cfvqaintrod':
        net = CFVQAIntroD(
            model=orig_net,
            model_teacher=build_baseline(),
            output_size=len(dataset.aid_to_ans),
            classif_q=opt['cfvqa_params']['mlp_q'],
            classif_v=opt['cfvqa_params']['mlp_v'],
            fusion_mode=opt['fusion_mode']
        )
    elif opt['name'] == 'cfvqasimpleintrod':
        net = CFVQAIntroD(
            model=orig_net,
            model_teacher=build_baseline(),
            output_size=len(dataset.aid_to_ans),
            classif_q=opt['cfvqa_params']['mlp_q'],
            classif_v=None,
            fusion_mode=opt['fusion_mode'],
            is_va=False
        )
    elif opt['name'] == 'rubiintrod':
        net = RUBiIntroD(
            model=orig_net,
            model_teacher=build_baseline(),
            output_size=len(dataset.aid_to_ans),
            classif=opt['rubi_params']['mlp_q']
        )
    else:
        raise ValueError(opt['name'])
    if Options()['misc.cuda'] and torch.cuda.device_count() > 1:
        net = DataParallel(net)
    return net
| 4,667 | 29.913907 | 64 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/updn_net.py | from copy import deepcopy
import itertools
import os
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
import block
from block.models.networks.vqa_net import factory_text_enc
from block.models.networks.mlp import MLP
from .utils import mask_softmax
from torch.nn.utils.weight_norm import weight_norm
class UpDnNet(nn.Module):
def __init__(self,
        txt_enc={},
        self_q_att=False,
        agg={},
        classif={},
        wid_to_word={},
        word_to_wid={},
        aid_to_ans=[],
        ans_to_aid={},
        fusion={},
        residual=False,
        q_single=False,
        ):
    # NOTE(review): mutable default arguments ({} / []) are shared across
    # calls; in practice every argument appears to be supplied by the
    # factory — confirm before relying on defaults.
    """Bottom-Up/Top-Down (UpDn) VQA network.

    txt_enc: options for the text encoder; self_q_att: use self-attention
    over question words; agg: {'type': 'max'|'mean'}; classif: MLP options
    for the answer classifier; fusion: provides input/output dims for the
    attention and projection nets; q_single: build a second, separate text
    encoder exposed via 'q_emb'.
    """
    super().__init__()
    self.self_q_att = self_q_att
    self.agg = agg
    assert self.agg['type'] in ['max', 'mean']
    self.classif = classif
    self.wid_to_word = wid_to_word
    self.word_to_wid = word_to_wid
    self.aid_to_ans = aid_to_ans
    self.ans_to_aid = ans_to_aid
    self.fusion = fusion
    self.residual = residual
    # Modules
    self.txt_enc = self.get_text_enc(self.wid_to_word, txt_enc)
    if self.self_q_att:
        self.q_att_linear0 = nn.Linear(2400, 512)
        self.q_att_linear1 = nn.Linear(512, 2)
    if q_single:
        # Separate question encoder used only for the exported 'q_emb'.
        self.txt_enc_single = self.get_text_enc(self.wid_to_word, txt_enc)
        if self.self_q_att:
            self.q_att_linear0_single = nn.Linear(2400, 512)
            self.q_att_linear1_single = nn.Linear(512, 2)
    else:
        self.txt_enc_single = None
    # Force the classifier's output layer to match the answer vocabulary.
    if self.classif['mlp']['dimensions'][-1] != len(self.aid_to_ans):
        Logger()(f"Warning, the classif_mm output dimension ({self.classif['mlp']['dimensions'][-1]})"
                f"doesn't match the number of answers ({len(self.aid_to_ans)}). Modifying the output dimension.")
        self.classif['mlp']['dimensions'][-1] = len(self.aid_to_ans)
    self.classif_module = MLP(**self.classif['mlp'])
    # UpDn
    q_dim = self.fusion['input_dims'][0]
    v_dim = self.fusion['input_dims'][1]
    output_dim = self.fusion['output_dim']
    self.v_att = Attention(v_dim, q_dim, output_dim)
    self.q_net = FCNet([q_dim, output_dim])
    self.v_net = FCNet([v_dim, output_dim])
    Logger().log_value('nparams',
        sum(p.numel() for p in self.parameters() if p.requires_grad),
        should_print=True)
    Logger().log_value('nparams_txt_enc',
        self.get_nparams_txt_enc(),
        should_print=True)
def get_text_enc(self, vocab_words, options):
    """
    returns the text encoding network.

    Note: ``vocab_words`` is ignored; the encoder is always built from
    ``self.wid_to_word``.
    """
    return factory_text_enc(self.wid_to_word, options)
def get_nparams_txt_enc(self):
    """Number of trainable parameters in the question-encoding path."""
    modules = [self.txt_enc]
    if self.self_q_att:
        # The two self-attention projections belong to the text path too.
        modules.extend([self.q_att_linear0, self.q_att_linear1])
    total = 0
    for module in modules:
        for p in module.parameters():
            if p.requires_grad:
                total += p.numel()
    return total
def forward(self, batch):
    """UpDn forward pass.

    batch: dict with 'visual' (region features), 'question' (word ids),
    'lengths' and 'norm_coord'. Returns a dict with 'logits', the mean
    region feature 'v_emb', and the question embedding 'q_emb'.
    """
    v = batch['visual']
    q = batch['question']
    l = batch['lengths'].data
    c = batch['norm_coord']  # NOTE(review): unused below — confirm intent
    nb_regions = batch.get('nb_regions')  # NOTE(review): unused below
    out = {}
    q_emb = self.process_question(q, l,)
    out['v_emb'] = v.mean(1)
    out['q_emb'] = q_emb
    # single txt encoder
    if self.txt_enc_single is not None:
        # Export 'q_emb' from the dedicated encoder instead.
        out['q_emb'] = self.process_question(q, l, self.txt_enc_single, self.q_att_linear0_single, self.q_att_linear1_single)
    # New
    # Question-guided attention over regions, then elementwise fusion.
    att = self.v_att(v, q_emb)
    v_emb = (att * v).sum(1)
    q_repr = self.q_net(q_emb)
    v_repr = self.v_net(v_emb)
    joint_repr = q_repr * v_repr
    logits = self.classif_module(joint_repr)
    out['logits'] = logits
    return out
def process_question(self, q, l, txt_enc=None, q_att_linear0=None, q_att_linear1=None):
    """Encode a padded question into a fixed-size embedding.

    q: (batch, seq) word ids; l: per-question lengths. Optional encoder /
    attention layers default to the module's own; alternates are passed in
    for the 'single' encoder path.
    """
    if txt_enc is None:
        txt_enc = self.txt_enc
    if q_att_linear0 is None:
        q_att_linear0 = self.q_att_linear0
    if q_att_linear1 is None:
        q_att_linear1 = self.q_att_linear1
    q_emb = txt_enc.embedding(q)
    q, _ = txt_enc.rnn(q_emb)
    if self.self_q_att:
        # Self-attention over word positions, masked to each question's length.
        q_att = q_att_linear0(q)
        q_att = F.relu(q_att)
        q_att = q_att_linear1(q_att)
        q_att = mask_softmax(q_att, l)
        #self.q_att_coeffs = q_att
        if q_att.size(2) > 1:
            # Multiple attention glimpses: concatenate their pooled outputs.
            q_atts = torch.unbind(q_att, dim=2)
            q_outs = []
            for q_att in q_atts:
                q_att = q_att.unsqueeze(2)
                q_att = q_att.expand_as(q)
                q_out = q_att*q
                q_out = q_out.sum(1)
                q_outs.append(q_out)
            q = torch.cat(q_outs, dim=1)
        else:
            q_att = q_att.expand_as(q)
            q = q_att * q
            q = q.sum(1)
    else:
        # l contains the number of words for each question
        # in case of multi-gpus it must be a Tensor
        # thus we convert it into a list during the forward pass
        l = list(l.data[:,0])
        q = txt_enc._select_last(q, l)
    return q
def process_answers(self, out, key=''):
    """Decode argmax predictions in ``out[f'logits{key}']`` into answer strings
    and answer ids, stored back into ``out`` under matching suffixed keys."""
    logits = out[f'logits{key}']
    n = logits.shape[0]
    _, pred = logits.data.max(1)
    pred.squeeze_()
    # A batch of one collapses to a 0-d tensor after squeeze_, so index-free
    # .item() must be used for that case.
    if n == 1:
        ids = [pred.item()]
    else:
        ids = [pred[i].item() for i in range(n)]
    out[f'answers{key}'] = [self.aid_to_ans[i] for i in ids]
    out[f'answer_ids{key}'] = ids
    return out
class Attention(nn.Module):
    """UpDn question-guided attention over image regions.

    Projects regions and question into a joint space, scores each region,
    and returns softmax weights over regions.
    """
    def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
        super(Attention, self).__init__()
        self.v_proj = FCNet([v_dim, num_hid])
        self.q_proj = FCNet([q_dim, num_hid])
        self.dropout = nn.Dropout(dropout)
        self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)

    def forward(self, v, q):
        """
        v: [batch, k, vdim]
        q: [batch, qdim]
        """
        logits = self.logits(v, q)
        # softmax over the k regions
        w = nn.functional.softmax(logits, 1)
        return w

    def logits(self, v, q):
        """Unnormalized per-region scores, shape [batch, k, 1]."""
        batch, k, _ = v.size()
        v_proj = self.v_proj(v) # [batch, k, qdim]
        q_proj = self.q_proj(q).unsqueeze(1).repeat(1, k, 1)
        joint_repr = v_proj * q_proj
        joint_repr = self.dropout(joint_repr)
        logits = self.linear(joint_repr)
        return logits
class FCNet(nn.Module):
    """Fully connected network: a weight-normalized Linear followed by ReLU
    for every consecutive pair in ``dims`` (e.g. [in, hidden, out])."""

    def __init__(self, dims):
        super(FCNet, self).__init__()
        layers = []
        # zip over consecutive (in_dim, out_dim) pairs covers every layer,
        # including the final one.
        for in_dim, out_dim in zip(dims[:-1], dims[1:]):
            layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
            layers.append(nn.ReLU())
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        return self.main(x)
introd | introd-main/cfvqa/cfvqa/models/networks/rubiintrod.py | import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
class RUBiIntroD(nn.Module):
"""
Wraps another model
The original model must return a dictionnary containing the 'logits' key (predictions before softmax)
Returns:
- logits: the original predictions of the model
- logits_q: the predictions from the question-only branch
- logits_rubi: the updated predictions from the model by the mask.
=> Use `logits_rubi` and `logits_q` for the loss
"""
def __init__(self, model, model_teacher, output_size, classif, end_classif=True):
    """Build the RUBi-IntroD wrapper.

    Args:
        model: student baseline network (trained).
        model_teacher: teacher baseline network (kept in eval mode).
        output_size: number of answer classes.
        classif: MLP kwargs for the question-only classifier.
        end_classif: add a final Linear on top of the question branch.
    """
    super().__init__()
    self.net_student = model   # student, trained via distillation
    self.net = model_teacher   # frozen RUBi teacher
    self.c_1 = MLP(**classif)  # question-only classifier
    self.end_classif = end_classif
    if self.end_classif:
        self.c_2 = nn.Linear(output_size, output_size)
    # Teacher-side modules are kept in eval mode. Guard c_2: it only exists
    # when end_classif is True (the unconditional call previously raised
    # AttributeError for end_classif=False).
    self.net.eval()
    self.c_1.eval()
    if self.end_classif:
        self.c_2.eval()
def forward(self, batch):
    """Run the RUBi teacher and the student on ``batch``.

    Returns a dict with the teacher's raw logits ('logits'), the RUBi
    fused logits ('logits_all'), the question-only logits ('logits_q')
    and the student's logits ('logits_stu').
    """
    out = {}
    # model prediction
    net_out = self.net(batch)
    logits = net_out['logits']
    q_embedding = net_out['q_emb']  # N * q_emb
    q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate through question encoder
    q_pred = self.c_1(q_embedding)
    # RUBi masking: modulate logits by the question-only sigmoid scores.
    fusion_pred = logits * torch.sigmoid(q_pred)
    if self.end_classif:
        q_out = self.c_2(q_pred)
    else:
        q_out = q_pred
    out['logits'] = net_out['logits']
    out['logits_all'] = fusion_pred
    out['logits_q'] = q_out
    # student model
    logits_stu = self.net_student(batch)
    out['logits_stu'] = logits_stu['logits']
    return out
def process_answers(self, out, key=''):
    """Decode teacher, fused, question-only and student heads into answers."""
    for suffix in ('', '_all', '_q', '_stu'):
        out = self.net.process_answers(out, key=suffix)
    return out
| 2,042 | 31.951613 | 105 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/__init__.py | 0 | 0 | 0 | py | |
introd | introd-main/cfvqa/cfvqa/models/networks/san_net.py | from copy import deepcopy
import itertools
import os
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
import block
from block.models.networks.vqa_net import factory_text_enc
from block.models.networks.mlp import MLP
from .utils import mask_softmax
from torch.nn.utils.weight_norm import weight_norm
from torch.autograd import Variable
class SANNet(nn.Module):
def __init__(self,
        txt_enc={},
        self_q_att=False,
        agg={},
        classif={},
        wid_to_word={},
        word_to_wid={},
        aid_to_ans=[],
        ans_to_aid={},
        fusion={},
        residual=False,
        q_single=False
        ):
    """Stacked Attention Network (SAN) for VQA.

    Options mirror UpDnNet: txt_enc configures the text encoder (its RNN
    is replaced by QuestionEmbedding below), classif the answer MLP,
    fusion provides the q/v dimensions, and q_single optionally builds a
    second text encoder for the exported question embedding.
    """
    super().__init__()
    self.self_q_att = self_q_att
    self.agg = agg
    assert self.agg['type'] in ['max', 'mean']
    self.classif = classif
    self.wid_to_word = wid_to_word
    self.word_to_wid = word_to_wid
    self.aid_to_ans = aid_to_ans
    self.ans_to_aid = ans_to_aid
    self.fusion = fusion
    self.residual = residual
    # Modules
    self.txt_enc = self.get_text_enc(self.wid_to_word, txt_enc)
    if self.self_q_att:
        self.q_att_linear0 = nn.Linear(2400, 512)
        self.q_att_linear1 = nn.Linear(512, 2)
    if q_single:
        self.txt_enc_single = self.get_text_enc(self.wid_to_word, txt_enc)
        if self.self_q_att:
            self.q_att_linear0_single = nn.Linear(2400, 512)
            self.q_att_linear1_single = nn.Linear(512, 2)
    else:
        # Fix: previously this attribute was never set when q_single was
        # False, so forward()'s `if self.txt_enc_single is not None` raised
        # AttributeError (UpDnNet already had this else branch).
        self.txt_enc_single = None
    # Force the classifier's output layer to match the answer vocabulary.
    if self.classif['mlp']['dimensions'][-1] != len(self.aid_to_ans):
        Logger()(f"Warning, the classif_mm output dimension ({self.classif['mlp']['dimensions'][-1]})"
                f"doesn't match the number of answers ({len(self.aid_to_ans)}). Modifying the output dimension.")
        self.classif['mlp']['dimensions'][-1] = len(self.aid_to_ans)
    self.classif_module = MLP(**self.classif['mlp'])
    # UpDn
    q_dim = self.fusion['input_dims'][0]
    v_dim = self.fusion['input_dims'][1]
    output_dim = self.fusion['output_dim']
    att_size = 512
    self.v_att = Attention(v_dim, v_dim, att_size, 36, output_dim, drop_ratio=0.5)
    # Replace the factory-built RNN with SAN's GRU question embedding.
    self.txt_enc.rnn = QuestionEmbedding(620, q_dim, 1, False, 0.0)
    self.q_net = FCNet([q_dim, output_dim])
    # self.v_net = FCNet([v_dim, output_dim])
    Logger().log_value('nparams',
        sum(p.numel() for p in self.parameters() if p.requires_grad),
        should_print=True)
    Logger().log_value('nparams_txt_enc',
        self.get_nparams_txt_enc(),
        should_print=True)
def get_text_enc(self, vocab_words, options):
    """
    returns the text encoding network.

    Note: ``vocab_words`` is ignored; the encoder is always built from
    ``self.wid_to_word``.
    """
    return factory_text_enc(self.wid_to_word, options)
def get_nparams_txt_enc(self):
    """Number of trainable parameters in the question-encoding path."""
    modules = [self.txt_enc]
    if self.self_q_att:
        # The two self-attention projections belong to the text path too.
        modules.extend([self.q_att_linear0, self.q_att_linear1])
    total = 0
    for module in modules:
        for p in module.parameters():
            if p.requires_grad:
                total += p.numel()
    return total
def forward(self, batch):
    """SAN forward pass.

    batch: dict with 'visual', 'question', 'lengths', 'norm_coord'.
    Returns a dict with 'logits', the mean region feature 'v_emb', and
    the question embedding 'q_emb'.

    NOTE(review): this accesses self.txt_enc_single, which is only
    assigned in __init__ when q_single is True — verify configuration.
    """
    v = batch['visual']
    q = batch['question']
    l = batch['lengths'].data
    c = batch['norm_coord']  # NOTE(review): unused below — confirm intent
    nb_regions = batch.get('nb_regions')  # NOTE(review): unused below
    out = {}
    q_emb = self.process_question(q, l,)
    out['v_emb'] = v.mean(1)
    out['q_emb'] = q_emb
    # single txt encoder
    if self.txt_enc_single is not None:
        out['q_emb'] = self.process_question(q, l, self.txt_enc_single, self.q_att_linear0_single, self.q_att_linear1_single)
    # New
    # Stacked attention produces the joint representation directly.
    q_repr = self.q_net(q_emb)
    joint_repr = self.v_att(q_repr, v)
    logits = self.classif_module(joint_repr)
    out['logits'] = logits
    return out
def process_question(self, q, l, txt_enc=None, q_att_linear0=None, q_att_linear1=None):
    """Embed word ids and run the GRU question encoder.

    ``l``, ``q_att_linear0`` and ``q_att_linear1`` are unused here; they are
    kept so the signature matches the other baseline networks' method.
    """
    if txt_enc is None:
        txt_enc = self.txt_enc
    q_emb = txt_enc.embedding(q)
    q = txt_enc.rnn(q_emb)
    return q
def process_answers(self, out, key=''):
    """Decode argmax predictions in ``out[f'logits{key}']`` into answer strings
    and answer ids, stored back into ``out`` under matching suffixed keys."""
    logits = out[f'logits{key}']
    n = logits.shape[0]
    _, pred = logits.data.max(1)
    pred.squeeze_()
    # A batch of one collapses to a 0-d tensor after squeeze_, so index-free
    # .item() must be used for that case.
    if n == 1:
        ids = [pred.item()]
    else:
        ids = [pred[i].item() for i in range(n)]
    out[f'answers{key}'] = [self.aid_to_ans[i] for i in ids]
    out[f'answer_ids{key}'] = ids
    return out
class Attention(nn.Module): # Extend PyTorch's Module class
    """Two-hop stacked attention (SAN): each hop scores the image regions
    against the current query and adds the attended visual feature back
    into the query."""
    def __init__(self, v_dim, q_dim, att_size, img_seq_size, output_size, drop_ratio):
        super(Attention, self).__init__() # Must call super __init__()
        self.v_dim = v_dim
        self.q_dim = q_dim
        self.att_size = att_size
        self.img_seq_size = img_seq_size
        self.output_size = output_size
        self.drop_ratio = drop_ratio
        self.tan = nn.Tanh()
        self.dp = nn.Dropout(drop_ratio)
        # NOTE(review): nn.Softmax without dim relies on the .view(-1, m)
        # flattening below; softmax is applied over img_seq_size.
        self.sf = nn.Softmax()
        self.fc11 = nn.Linear(q_dim, 768, bias=True)
        # self.fc111 = nn.Linear(768, 640, bias=True)
        self.fc111 = nn.Linear(768, att_size, bias=True)
        self.fc12 = nn.Linear(v_dim, 768, bias=False)
        # self.fc121 = nn.Linear(768, 640, bias=False)
        self.fc121 = nn.Linear(768, att_size, bias=False)
        self.linear_second = nn.Linear(att_size, att_size, bias=False)
        # self.linear_second = nn.Linear(att_size, img_seq_size, bias=False)
        self.fc13 = nn.Linear(att_size, 1, bias=True)
        self.fc21 = nn.Linear(q_dim, att_size, bias=True)
        self.fc22 = nn.Linear(v_dim, att_size, bias=False)
        self.fc23 = nn.Linear(att_size, 1, bias=True)
        self.fc = nn.Linear(v_dim, output_size, bias=True)

    # d = input_size | m = img_seq_size | k = att_size
    def forward(self, ques_feat, img_feat): # ques_feat -- [batch, d] | img_feat -- [batch_size, m, d]
        # print(img_feat.size(), ques_feat.size())
        # print(self.v_dim, self.q_dim)
        # print("=======================================================================")
        B = ques_feat.size(0)
        # Stack 1: score regions against the raw question feature.
        ques_emb_1 = self.fc11(ques_feat)
        ques_emb_1 = self.fc111(ques_emb_1) # [batch_size, att_size]
        img_emb_1 = self.fc12(img_feat)
        img_emb_1 = self.fc121(img_emb_1)
        # print(ques_emb_1.size(), img_emb_1.size())
        # print("=======================================================================")
        # h1 = self.tan(ques_emb_1.view(B, 1, self.att_size) + img_emb_1)
        h1 = self.tan(ques_emb_1.view(B, 1, self.att_size) + img_emb_1)
        h1_emb = self.linear_second(h1)
        h1_emb = self.fc13(h1_emb)
        p1 = self.sf(h1_emb.view(-1, self.img_seq_size)).view(B, 1, self.img_seq_size)
        # Weighted sum
        img_att1 = p1.matmul(img_feat)
        # Residual update of the query with the attended visual feature.
        u1 = ques_feat + img_att1.view(-1, self.v_dim)
        # Stack 2: repeat with the refined query u1.
        ques_emb_2 = self.fc21(u1) # [batch_size, att_size]
        img_emb_2 = self.fc22(img_feat)
        h2 = self.tan(ques_emb_2.view(B, 1, self.att_size) + img_emb_2)
        h2_emb = self.fc23(self.dp(h2))
        p2 = self.sf(h2_emb.view(-1, self.img_seq_size)).view(B, 1, self.img_seq_size)
        # Weighted sum
        img_att2 = p2.matmul(img_feat)
        u2 = u1 + img_att2.view(-1, self.v_dim)
        return u2
class FCNet(nn.Module):
    """Fully connected network: a weight-normalized Linear followed by ReLU
    for every consecutive pair in ``dims`` (e.g. [in, hidden, out])."""

    def __init__(self, dims):
        super(FCNet, self).__init__()
        layers = []
        # zip over consecutive (in_dim, out_dim) pairs covers every layer,
        # including the final one.
        for in_dim, out_dim in zip(dims[:-1], dims[1:]):
            layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
            layers.append(nn.ReLU())
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        return self.main(x)
class QuestionEmbedding(nn.Module):
    def __init__(self, in_dim, num_hid, nlayers, bidirect, dropout, rnn_type='GRU'):
        """Module for question embedding

        Wraps a GRU/LSTM over word embeddings; forward() returns the last
        hidden state (concatenating both directions when bidirectional).
        """
        super(QuestionEmbedding, self).__init__()
        assert rnn_type == 'LSTM' or rnn_type == 'GRU'
        rnn_cls = nn.LSTM if rnn_type == 'LSTM' else nn.GRU
        self.rnn = rnn_cls(
            in_dim, num_hid, nlayers,
            bidirectional=bidirect,
            dropout=dropout,
            batch_first=True)
        self.in_dim = in_dim
        self.num_hid = num_hid
        self.nlayers = nlayers
        self.rnn_type = rnn_type
        self.ndirections = 1 + int(bidirect)

    def init_hidden(self, batch):
        # just to get the type of tensor
        weight = next(self.parameters()).data
        hid_shape = (self.nlayers * self.ndirections, batch, self.num_hid)
        if self.rnn_type == 'LSTM':
            # LSTM needs both hidden and cell states.
            return (Variable(weight.new(*hid_shape).zero_()),
                    Variable(weight.new(*hid_shape).zero_()))
        else:
            return Variable(weight.new(*hid_shape).zero_())

    def forward(self, x):
        # x: [batch, sequence, in_dim]
        batch = x.size(0)
        hidden = self.init_hidden(batch)
        self.rnn.flatten_parameters()
        output, hidden = self.rnn(x, hidden)
        if self.ndirections == 1:
            # last timestep's output
            return output[:, -1]
        # Bidirectional: concat forward's last step with backward's first.
        forward_ = output[:, -1, :self.num_hid]
        backward = output[:, 0, self.num_hid:]
        return torch.cat((forward_, backward), dim=1)

    def forward_all(self, x):
        """Return the per-timestep outputs, shape [batch, sequence, hid*dirs]."""
        # x: [batch, sequence, in_dim]
        batch = x.size(0)
        hidden = self.init_hidden(batch)
        self.rnn.flatten_parameters()
        output, hidden = self.rnn(x, hidden)
        return output
| 10,169 | 34.190311 | 129 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/rubiintrod_criterion.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class RUBiIntroDCriterion(nn.Module):
    """IntroD loss for RUBi: blends knowledge distillation from the teacher
    with plain cross-entropy on the student, gated per-sample by a hard 0/1
    weight derived from the two teacher heads' losses."""
    def __init__(self):
        super().__init__()
        # Per-sample losses are needed for the sample-wise weighting below.
        self.cls_loss = nn.CrossEntropyLoss(reduction='none')

    def forward(self, net_out, batch):
        out = {}
        logits_all = net_out['logits_all']
        class_id = batch['class_id'].squeeze(1)
        # KD
        logits_t = net_out['logits']
        logits_s = net_out['logits_stu']
        # Soft cross-entropy against the detached teacher distribution.
        p_t = torch.nn.functional.softmax(logits_t, -1).clone().detach()
        kd_loss = - p_t*F.log_softmax(logits_s, -1)
        kd_loss = kd_loss.sum(1)
        cls_loss = self.cls_loss(logits_s, class_id)
        # weight estimation
        cls_loss_ood = self.cls_loss(logits_t, class_id)
        cls_loss_id = self.cls_loss(logits_all, class_id)
        weight = cls_loss_ood/(cls_loss_ood+cls_loss_id)
        # Hard gating: round to {0, 1} so each sample uses either the KD
        # term or the cross-entropy term, never a blend.
        weight = torch.round(weight)
        weight = weight.detach()
        loss = (weight*kd_loss).mean() + ((1-weight)*cls_loss).mean()
        out['loss'] = loss
        return out
| 1,165 | 28.897436 | 72 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/cfvqaintrod_criterion.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class CFVQAIntroDCriterion(nn.Module):
    """IntroD loss for CF-VQA: blends knowledge distillation from the TIE
    (debiased) teacher head with cross-entropy on the student, weighted
    per-sample by a soft (not rounded) ratio of the teacher heads' losses."""
    def __init__(self):
        super().__init__()
        # Per-sample losses are needed for the sample-wise weighting below.
        self.cls_loss = nn.CrossEntropyLoss(reduction='none')

    def forward(self, net_out, batch):
        out = {}
        logits_all = net_out['logits_all']
        class_id = batch['class_id'].squeeze(1)
        # KD
        logits_t = net_out['logits_cfvqa']
        logits_s = net_out['logits_stu']
        # Soft cross-entropy against the detached teacher distribution.
        p_t = torch.nn.functional.softmax(logits_t, -1).clone().detach()
        kd_loss = - p_t*F.log_softmax(logits_s, -1)
        kd_loss = kd_loss.sum(1)
        cls_loss = self.cls_loss(logits_s, class_id)
        # weight estimation
        cls_loss_ood = self.cls_loss(logits_t, class_id)
        cls_loss_id = self.cls_loss(logits_all, class_id)
        # Soft blend in [0, 1] (no rounding here).
        weight = cls_loss_ood/(cls_loss_ood+cls_loss_id)
        weight = weight.detach()
        loss = (weight*kd_loss).mean() + ((1-weight)*cls_loss).mean()
        out['loss'] = loss
        return out
| 1,135 | 28.894737 | 72 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/factory.py | from bootstrap.lib.options import Options
from block.models.criterions.vqa_cross_entropy import VQACrossEntropyLoss
from .rubi_criterion import RUBiCriterion
from .cfvqa_criterion import CFVQACriterion
from .cfvqaintrod_criterion import CFVQAIntroDCriterion
from .rubiintrod_criterion import RUBiIntroDCriterion
def factory(engine, mode):
    """Build the criterion configured under ``model.criterion``.

    Args:
        engine: bootstrap engine; used to look up the dataset split for ``mode``.
        mode: dataset mode (e.g. 'train' or 'eval') whose split is inspected.

    Returns:
        The criterion module, or ``None`` on the (non-TDIUC) test split,
        where no annotated answers are available to compute a loss.

    Raises:
        ValueError: if the configured criterion name is unknown.
    """
    name = Options()['model.criterion.name']
    split = engine.dataset[mode].split
    opt = Options()['model.criterion']

    # No ground-truth answers on the VQA test split -> nothing to optimize.
    if split == "test" and 'tdiuc' not in Options()['dataset.name']:
        return None

    if name == 'vqa_cross_entropy':
        criterion = VQACrossEntropyLoss()
    elif name == "rubi_criterion":
        criterion = RUBiCriterion(
            question_loss_weight=opt['question_loss_weight']
        )
    elif name == "cfvqa_criterion":
        # Full CFVQA: both question-only and vision-only branch losses.
        criterion = CFVQACriterion(
            question_loss_weight=opt['question_loss_weight'],
            vision_loss_weight=opt['vision_loss_weight'],
            is_va=True,
        )
    elif name == "cfvqasimple_criterion":
        # Simplified CFVQA: question-only branch loss, no vision branch.
        criterion = CFVQACriterion(
            question_loss_weight=opt['question_loss_weight'],
            is_va=False,
        )
    elif name == "cfvqaintrod_criterion":
        criterion = CFVQAIntroDCriterion()
    elif name == "rubiintrod_criterion":
        criterion = RUBiIntroDCriterion()
    else:
        raise ValueError(name)
    return criterion
| 1,453 | 35.35 | 73 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/__init__.py | 0 | 0 | 0 | py | |
introd | introd-main/cfvqa/cfvqa/models/criterions/rubi_criterion.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class RUBiCriterion(nn.Module):
def __init__(self, question_loss_weight=1.0):
super().__init__()
Logger()(f'RUBiCriterion, with question_loss_weight = ({question_loss_weight})')
self.question_loss_weight = question_loss_weight
self.fusion_loss = nn.CrossEntropyLoss()
self.question_loss = nn.CrossEntropyLoss()
def forward(self, net_out, batch):
out = {}
# logits = net_out['logits']
logits_q = net_out['logits_q']
logits_rubi = net_out['logits_all']
class_id = batch['class_id'].squeeze(1)
fusion_loss = self.fusion_loss(logits_rubi, class_id)
question_loss = self.question_loss(logits_q, class_id)
loss = fusion_loss + self.question_loss_weight * question_loss
out['loss'] = loss
out['loss_mm_q'] = fusion_loss
out['loss_q'] = question_loss
return out
| 1,058 | 32.09375 | 88 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/cfvqa_criterion.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class CFVQACriterion(nn.Module):
def __init__(self, question_loss_weight=1.0, vision_loss_weight=1.0, is_va=True):
super().__init__()
self.is_va = is_va
Logger()(f'CFVQACriterion, with question_loss_weight = ({question_loss_weight})')
if self.is_va:
Logger()(f'CFVQACriterion, with vision_loss_weight = ({vision_loss_weight})')
self.fusion_loss = nn.CrossEntropyLoss()
self.question_loss = nn.CrossEntropyLoss()
self.question_loss_weight = question_loss_weight
if self.is_va:
self.vision_loss = nn.CrossEntropyLoss()
self.vision_loss_weight = vision_loss_weight
def forward(self, net_out, batch):
out = {}
class_id = batch['class_id'].squeeze(1)
logits_rubi = net_out['logits_all']
fusion_loss = self.fusion_loss(logits_rubi, class_id)
logits_q = net_out['logits_q']
question_loss = self.question_loss(logits_q, class_id)
if self.is_va:
logits_v = net_out['logits_v']
vision_loss = self.vision_loss(logits_v, class_id)
nde = net_out['z_nde']
p_te = torch.nn.functional.softmax(logits_rubi, -1).clone().detach()
p_nde = torch.nn.functional.softmax(nde, -1)
kl_loss = - p_te*p_nde.log()
kl_loss = kl_loss.sum(1).mean()
loss = fusion_loss \
+ self.question_loss_weight * question_loss \
+ kl_loss
if self.is_va:
loss += self.vision_loss_weight * vision_loss
out['loss'] = loss
out['loss_mm_q'] = fusion_loss
out['loss_q'] = question_loss
if self.is_va:
out['loss_v'] = vision_loss
return out
| 1,918 | 33.267857 | 89 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_rubi_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class VQAAccuracy(nn.Module):
    """Top-k accuracy over each RUBi prediction head.

    Heads: '' (fused), '_all' (ensemble), '_q' (question-only).
    """

    def __init__(self, topk=None):
        super().__init__()
        # None sentinel instead of a shared mutable default argument.
        self.topk = [1, 5] if topk is None else topk

    def forward(self, cri_out, net_out, batch):
        """Return a dict mapping 'accuracy{head}_top{k}' to the top-k accuracy."""
        out = {}
        class_id = batch['class_id'].data.cpu()
        for key in ['', '_all', '_q']:
            logits = net_out[f'logits{key}'].data.cpu()
            acc_out = accuracy(logits, class_id, topk=self.topk)
            for i, k in enumerate(self.topk):
                out[f'accuracy{key}_top{k}'] = acc_out[i]
        return out
class VQARUBiMetrics(VQAAccuracies):
    """VQA metrics for the RUBi network.

    Accumulates, per prediction head ('' = fused, '_all' = ensemble,
    '_q' = question-only), open-ended VQA result files and/or TDIUC
    per-question-type statistics, depending on the dataset flags set by
    the base class (``self.open_ended`` / ``self.tdiuc``).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.accuracy = VQAAccuracy()
        # Forwarded as --rm to the compute_oe_accuracy subprocess below.
        self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0

    def forward(self, cri_out, net_out, batch):
        """Update per-head OE/TDIUC accumulators for one batch; return batch accuracies."""
        out = {}
        if self.accuracy is not None:
            out = self.accuracy(cri_out, net_out, batch)
        # add answers and answer_ids keys to net_out
        net_out = self.engine.model.network.process_answers(net_out)
        batch_size = len(batch['index'])
        for i in range(batch_size):
            # Open Ended Accuracy (VQA-VQA2)
            if self.open_ended:
                for key in ['', '_all', '_q']:
                    pred_item = {
                        'question_id': batch['question_id'][i],
                        'answer': net_out[f'answers{key}'][i]
                    }
                    self.results[key].append(pred_item)
                # NOTE(review): in this branch `logits` is undefined and
                # self.logits / self.results_testdev are never initialized
                # (the corresponding setup in reset_oe is commented out) —
                # this test-split export appears dead/broken; confirm before use.
                if self.dataset.split == 'test':
                    pred_item = {
                        'question_id': batch['question_id'][i],
                        'answer': net_out[f'answers'][i]
                    }
                    if 'is_testdev' in batch and batch['is_testdev'][i]:
                        self.results_testdev.append(pred_item)
                    if self.logits['tensor'] is None:
                        self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
                    self.logits['tensor'][self.idx] = logits[i]
                    self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
                    self.idx += 1
            # TDIUC metrics
            if self.tdiuc:
                gt_aid = batch['answer_id'][i]
                gt_ans = batch['answer'][i]
                gt_type = batch['question_type'][i]
                self.gt_types.append(gt_type)
                # Ground-truth answers outside the answer vocabulary are
                # recorded as -1 (always counted as wrong).
                if gt_ans in self.ans_to_aid:
                    self.gt_aids.append(gt_aid)
                else:
                    self.gt_aids.append(-1)
                    self.gt_aid_not_found += 1
                for key in ['', '_all', '_q']:
                    qid = batch['question_id'][i]
                    pred_aid = net_out[f'answer_ids{key}'][i]
                    self.pred_aids[key].append(pred_aid)
                    # Bucket predictions by question type: _pred/_gt plus
                    # _t (correct) / _f (incorrect) lists.
                    self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
                    if gt_ans in self.ans_to_aid:
                        self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
                        if gt_aid == pred_aid:
                            self.res_by_type[key][gt_type+'_t'].append(pred_aid)
                        else:
                            self.res_by_type[key][gt_type+'_f'].append(pred_aid)
                    else:
                        self.res_by_type[key][gt_type+'_gt'].append(-1)
                        self.res_by_type[key][gt_type+'_f'].append(pred_aid)
        return out

    def reset_oe(self):
        """Reset open-ended result buffers and (re)create per-head result dirs."""
        self.results = dict()
        self.dir_rslt = dict()
        self.path_rslt = dict()
        for key in ['', '_q', '_all']:
            self.results[key] = []
            self.dir_rslt[key] = os.path.join(
                self.dir_exp,
                f'results{key}',
                self.dataset.split,
                'epoch,{}'.format(self.engine.epoch))
            os.system('mkdir -p '+self.dir_rslt[key])
            self.path_rslt[key] = os.path.join(
                self.dir_rslt[key],
                'OpenEnded_mscoco_{}_model_results.json'.format(
                    self.dataset.get_subtype()))
        # Test-split logits/testdev export is currently disabled.
        if self.dataset.split == 'test':
            pass
            # self.results_testdev = []
            # self.path_rslt_testdev = os.path.join(
            #     self.dir_rslt,
            #     'OpenEnded_mscoco_{}_model_results.json'.format(
            #         self.dataset.get_subtype(testdev=True)))
            # self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
            # os.system('mkdir -p '+os.path.dirname(self.path_logits))
            # self.logits = {}
            # self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
            # self.logits['qid_to_idx'] = {}
            # self.logits['tensor'] = None
            # self.idx = 0
            # path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
            # with open(path_aid_to_ans, 'w') as f:
            #     json.dump(self.engine.model.network.aid_to_ans, f)

    def reset_tdiuc(self):
        """Clear all accumulated TDIUC prediction/ground-truth state."""
        self.pred_aids = defaultdict(list)
        self.gt_aids = []
        self.gt_types = []
        self.gt_aid_not_found = 0
        self.res_by_type = {key: defaultdict(list) for key in ['', '_all', '_q']}

    def compute_oe_accuracy(self):
        """Dump per-head result JSONs and launch the async OE accuracy script."""
        logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
        for key in ['', '_all', '_q']:
            logs_name = (logs_name_prefix + key) or "logs"
            with open(self.path_rslt[key], 'w') as f:
                json.dump(self.results[key], f)
            # if self.dataset.split == 'test':
            #     with open(self.path_rslt_testdev, 'w') as f:
            #         json.dump(self.results_testdev, f)
            if 'test' not in self.dataset.split:
                # Launched in the background ('&'); results land in the logs.
                call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
                    + '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
                    .format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
                Logger()('`'+call_to_prog+'`')
                os.system(call_to_prog)

    def compute_tdiuc_metrics(self):
        """Log overall, per-type, and (harmonic/arithmetic) MPT accuracies per head."""
        Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
        for key in ['', '_all', '_q']:
            Logger()(f'Computing TDIUC metrics for logits{key}')
            accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
            Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
            Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
            types = list(set(self.gt_types))
            sum_acc = []
            # eps keeps the harmonic mean defined when a per-type accuracy is 0
            eps = 1e-10
            Logger()('---------------------------------------')
            Logger()('Not using per-answer normalization...')
            for tp in types:
                acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
                sum_acc.append(acc+eps)
                Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
            Logger()('---------------------------------------')
            Logger()('Using per-answer normalization...')
            # NOTE(review): sum_acc is NOT reset here, so the normalized MPT
            # averages below also include the unnormalized per-type values
            # from the loop above — confirm this is intended.
            for tp in types:
                per_ans_stat = defaultdict(int)
                for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
                    per_ans_stat[str(g)+'_gt']+=1
                    if g==p:
                        per_ans_stat[str(g)]+=1
                unq_acc = 0
                for unq_ans in set(self.res_by_type[key][tp+'_gt']):
                    acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
                    unq_acc +=acc_curr_ans
                acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
                sum_acc.append(acc+eps)
                Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,030 | 43.384956 | 143 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_cfvqasimple_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class VQAAccuracy(nn.Module):
    """Top-k accuracy over each CF-VQA (simple variant) prediction head.

    Heads: '_all' (total effect), '_vq' (V+Q branch), '_cfvqa'
    (counterfactual debiased), '_q' (question-only).
    """

    def __init__(self, topk=None):
        super().__init__()
        # None sentinel instead of a shared mutable default argument.
        self.topk = [1, 5] if topk is None else topk

    def forward(self, cri_out, net_out, batch):
        """Return a dict mapping 'accuracy{head}_top{k}' to the top-k accuracy."""
        out = {}
        class_id = batch['class_id'].data.cpu()
        for key in ['_all', '_vq', '_cfvqa', '_q']:
            logits = net_out[f'logits{key}'].data.cpu()
            acc_out = accuracy(logits, class_id, topk=self.topk)
            for i, k in enumerate(self.topk):
                out[f'accuracy{key}_top{k}'] = acc_out[i]
        return out
class VQACFVQASimpleMetrics(VQAAccuracies):
    """VQA metrics for the simplified CF-VQA network (no vision branch).

    Accumulates, per prediction head ('_all', '_vq', '_cfvqa', '_q'),
    open-ended VQA result files and/or TDIUC per-question-type statistics,
    depending on the dataset flags set by the base class.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # No ground-truth answers on the test split, so top-k accuracy is skipped.
        if Options()['dataset.eval_split'] == 'test': # 0430
            self.accuracy = None
        else:
            self.accuracy = VQAAccuracy()
        # Forwarded as --rm to the compute_oe_accuracy subprocess below.
        self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0

    def forward(self, cri_out, net_out, batch):
        """Update per-head OE/TDIUC accumulators for one batch; return batch accuracies."""
        out = {}
        if self.accuracy is not None:
            out = self.accuracy(cri_out, net_out, batch)
        # add answers and answer_ids keys to net_out
        net_out = self.engine.model.network.process_answers(net_out)
        batch_size = len(batch['index'])
        for i in range(batch_size):
            # Open Ended Accuracy (VQA-VQA2)
            if self.open_ended:
                for key in ['_all', '_vq', '_cfvqa', '_q']:
                    pred_item = {
                        'question_id': batch['question_id'][i],
                        'answer': net_out[f'answers{key}'][i]
                    }
                    self.results[key].append(pred_item)
                # if self.dataset.split == 'test': # 0430
                #     pred_item = {
                #         'question_id': batch['question_id'][i],
                #         'answer': net_out[f'answers{key}'][i]
                #         # 'answer': net_out[f'answers'][i]
                #     }
                #     # if 'is_testdev' in batch and batch['is_testdev'][i]: # 0430
                #     #     self.results_testdev.append(pred_item)
                #     if self.logits['tensor'] is None:
                #         self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
                #     self.logits['tensor'][self.idx] = logits[i]
                #     self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
                #     self.idx += 1
            # TDIUC metrics
            if self.tdiuc:
                gt_aid = batch['answer_id'][i]
                gt_ans = batch['answer'][i]
                gt_type = batch['question_type'][i]
                self.gt_types.append(gt_type)
                # Ground-truth answers outside the answer vocabulary are
                # recorded as -1 (always counted as wrong).
                if gt_ans in self.ans_to_aid:
                    self.gt_aids.append(gt_aid)
                else:
                    self.gt_aids.append(-1)
                    self.gt_aid_not_found += 1
                for key in ['_all', '_vq', '_cfvqa', '_q']:
                    qid = batch['question_id'][i]
                    pred_aid = net_out[f'answer_ids{key}'][i]
                    self.pred_aids[key].append(pred_aid)
                    # Bucket predictions by question type: _pred/_gt plus
                    # _t (correct) / _f (incorrect) lists.
                    self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
                    if gt_ans in self.ans_to_aid:
                        self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
                        if gt_aid == pred_aid:
                            self.res_by_type[key][gt_type+'_t'].append(pred_aid)
                        else:
                            self.res_by_type[key][gt_type+'_f'].append(pred_aid)
                    else:
                        self.res_by_type[key][gt_type+'_gt'].append(-1)
                        self.res_by_type[key][gt_type+'_f'].append(pred_aid)
        return out

    def reset_oe(self):
        """Reset open-ended result buffers and (re)create per-head result dirs."""
        self.results = dict()
        self.dir_rslt = dict()
        self.path_rslt = dict()
        for key in ['_all', '_vq', '_cfvqa', '_q']:
            self.results[key] = []
            self.dir_rslt[key] = os.path.join(
                self.dir_exp,
                f'results{key}',
                self.dataset.split,
                'epoch,{}'.format(self.engine.epoch))
            os.system('mkdir -p '+self.dir_rslt[key])
            self.path_rslt[key] = os.path.join(
                self.dir_rslt[key],
                'OpenEnded_mscoco_{}_model_results.json'.format(
                    self.dataset.get_subtype()))
        # Test-split logits/testdev export is currently disabled.
        if self.dataset.split == 'test':
            pass
            # self.results_testdev = []
            # self.path_rslt_testdev = os.path.join(
            #     self.dir_rslt,
            #     'OpenEnded_mscoco_{}_model_results.json'.format(
            #         self.dataset.get_subtype(testdev=True)))
            # self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
            # os.system('mkdir -p '+os.path.dirname(self.path_logits))
            # self.logits = {}
            # self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
            # self.logits['qid_to_idx'] = {}
            # self.logits['tensor'] = None
            # self.idx = 0
            # path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
            # with open(path_aid_to_ans, 'w') as f:
            #     json.dump(self.engine.model.network.aid_to_ans, f)

    def reset_tdiuc(self):
        """Clear all accumulated TDIUC prediction/ground-truth state."""
        self.pred_aids = defaultdict(list)
        self.gt_aids = []
        self.gt_types = []
        self.gt_aid_not_found = 0
        self.res_by_type = {key: defaultdict(list) for key in ['_all', '_vq', '_cfvqa', '_q']}

    def compute_oe_accuracy(self):
        """Dump per-head result JSONs and launch the async OE accuracy script."""
        logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
        for key in ['_all', '_vq', '_cfvqa', '_q']:
            logs_name = (logs_name_prefix + key) or "logs"
            with open(self.path_rslt[key], 'w') as f:
                json.dump(self.results[key], f)
            # if self.dataset.split == 'test':
            #     with open(self.path_rslt_testdev, 'w') as f:
            #         json.dump(self.results_testdev, f)
            if 'test' not in self.dataset.split:
                # Launched in the background ('&'); results land in the logs.
                call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
                    + '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
                    .format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
                Logger()('`'+call_to_prog+'`')
                os.system(call_to_prog)

    def compute_tdiuc_metrics(self):
        """Log overall, per-type, and (harmonic/arithmetic) MPT accuracies per head."""
        Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
        for key in ['_all', '_vq', '_cfvqa', '_q']:
            Logger()(f'Computing TDIUC metrics for logits{key}')
            accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
            Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
            Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
            types = list(set(self.gt_types))
            sum_acc = []
            # eps keeps the harmonic mean defined when a per-type accuracy is 0
            eps = 1e-10
            Logger()('---------------------------------------')
            Logger()('Not using per-answer normalization...')
            for tp in types:
                acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
                sum_acc.append(acc+eps)
                Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
            Logger()('---------------------------------------')
            Logger()('Using per-answer normalization...')
            # NOTE(review): sum_acc is NOT reset here, so the normalized MPT
            # averages below also include the unnormalized per-type values
            # from the loop above — confirm this is intended.
            for tp in types:
                per_ans_stat = defaultdict(int)
                for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
                    per_ans_stat[str(g)+'_gt']+=1
                    if g==p:
                        per_ans_stat[str(g)]+=1
                unq_acc = 0
                for unq_ans in set(self.res_by_type[key][tp+'_gt']):
                    acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
                    unq_acc +=acc_curr_ans
                acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
                sum_acc.append(acc+eps)
                Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,348 | 43.995652 | 143 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_rubiintrod_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class VQAAccuracy(nn.Module):
    """Top-k accuracy over the RUBi-IntroD prediction heads.

    Heads: '' (teacher/ensemble) and '_stu' (distilled student).
    """

    def __init__(self, topk=None):
        super().__init__()
        # None sentinel instead of a shared mutable default argument.
        self.topk = [1, 5] if topk is None else topk

    def forward(self, cri_out, net_out, batch):
        """Return a dict mapping 'accuracy{head}_top{k}' to the top-k accuracy."""
        out = {}
        class_id = batch['class_id'].data.cpu()
        for key in ['', '_stu']:
            logits = net_out[f'logits{key}'].data.cpu()
            acc_out = accuracy(logits, class_id, topk=self.topk)
            for i, k in enumerate(self.topk):
                out[f'accuracy{key}_top{k}'] = acc_out[i]
        return out
class VQARUBiIntroDMetrics(VQAAccuracies):
    """VQA metrics for the RUBi-IntroD network.

    Accumulates, per prediction head ('' = teacher/ensemble, '_stu' =
    distilled student), open-ended VQA result files and/or TDIUC
    per-question-type statistics, depending on the dataset flags set by
    the base class (``self.open_ended`` / ``self.tdiuc``).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.accuracy = VQAAccuracy()
        # Forwarded as --rm to the compute_oe_accuracy subprocess below.
        self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0

    def forward(self, cri_out, net_out, batch):
        """Update per-head OE/TDIUC accumulators for one batch; return batch accuracies."""
        out = {}
        if self.accuracy is not None:
            out = self.accuracy(cri_out, net_out, batch)
        # add answers and answer_ids keys to net_out
        net_out = self.engine.model.network.process_answers(net_out)
        batch_size = len(batch['index'])
        for i in range(batch_size):
            # Open Ended Accuracy (VQA-VQA2)
            if self.open_ended:
                for key in ['', '_stu']:
                    pred_item = {
                        'question_id': batch['question_id'][i],
                        'answer': net_out[f'answers{key}'][i]
                    }
                    self.results[key].append(pred_item)
                # NOTE(review): in this branch `logits` is undefined and
                # self.logits / self.results_testdev are never initialized
                # (the corresponding setup in reset_oe is commented out) —
                # this test-split export appears dead/broken; confirm before use.
                if self.dataset.split == 'test':
                    pred_item = {
                        'question_id': batch['question_id'][i],
                        'answer': net_out[f'answers'][i]
                    }
                    if 'is_testdev' in batch and batch['is_testdev'][i]:
                        self.results_testdev.append(pred_item)
                    if self.logits['tensor'] is None:
                        self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
                    self.logits['tensor'][self.idx] = logits[i]
                    self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
                    self.idx += 1
            # TDIUC metrics
            if self.tdiuc:
                gt_aid = batch['answer_id'][i]
                gt_ans = batch['answer'][i]
                gt_type = batch['question_type'][i]
                self.gt_types.append(gt_type)
                # Ground-truth answers outside the answer vocabulary are
                # recorded as -1 (always counted as wrong).
                if gt_ans in self.ans_to_aid:
                    self.gt_aids.append(gt_aid)
                else:
                    self.gt_aids.append(-1)
                    self.gt_aid_not_found += 1
                for key in ['', '_stu']:
                    qid = batch['question_id'][i]
                    pred_aid = net_out[f'answer_ids{key}'][i]
                    self.pred_aids[key].append(pred_aid)
                    # Bucket predictions by question type: _pred/_gt plus
                    # _t (correct) / _f (incorrect) lists.
                    self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
                    if gt_ans in self.ans_to_aid:
                        self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
                        if gt_aid == pred_aid:
                            self.res_by_type[key][gt_type+'_t'].append(pred_aid)
                        else:
                            self.res_by_type[key][gt_type+'_f'].append(pred_aid)
                    else:
                        self.res_by_type[key][gt_type+'_gt'].append(-1)
                        self.res_by_type[key][gt_type+'_f'].append(pred_aid)
        return out

    def reset_oe(self):
        """Reset open-ended result buffers and (re)create per-head result dirs."""
        self.results = dict()
        self.dir_rslt = dict()
        self.path_rslt = dict()
        # BUGFIX: this class's heads are '' and '_stu'; the previous key list
        # ['', '_q', '_all'] was copied from VQARUBiMetrics, so forward() and
        # compute_oe_accuracy() raised KeyError on the '_stu' head.
        for key in ['', '_stu']:
            self.results[key] = []
            self.dir_rslt[key] = os.path.join(
                self.dir_exp,
                f'results{key}',
                self.dataset.split,
                'epoch,{}'.format(self.engine.epoch))
            os.system('mkdir -p '+self.dir_rslt[key])
            self.path_rslt[key] = os.path.join(
                self.dir_rslt[key],
                'OpenEnded_mscoco_{}_model_results.json'.format(
                    self.dataset.get_subtype()))
        # Test-split logits/testdev export is currently disabled.
        if self.dataset.split == 'test':
            pass
            # self.results_testdev = []
            # self.path_rslt_testdev = os.path.join(
            #     self.dir_rslt,
            #     'OpenEnded_mscoco_{}_model_results.json'.format(
            #         self.dataset.get_subtype(testdev=True)))
            # self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
            # os.system('mkdir -p '+os.path.dirname(self.path_logits))
            # self.logits = {}
            # self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
            # self.logits['qid_to_idx'] = {}
            # self.logits['tensor'] = None
            # self.idx = 0
            # path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
            # with open(path_aid_to_ans, 'w') as f:
            #     json.dump(self.engine.model.network.aid_to_ans, f)

    def reset_tdiuc(self):
        """Clear all accumulated TDIUC prediction/ground-truth state."""
        self.pred_aids = defaultdict(list)
        self.gt_aids = []
        self.gt_types = []
        self.gt_aid_not_found = 0
        self.res_by_type = {key: defaultdict(list) for key in ['', '_stu']}

    def compute_oe_accuracy(self):
        """Dump per-head result JSONs and launch the async OE accuracy script."""
        logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
        for key in ['', '_stu']:
            logs_name = (logs_name_prefix + key) or "logs"
            with open(self.path_rslt[key], 'w') as f:
                json.dump(self.results[key], f)
            # if self.dataset.split == 'test':
            #     with open(self.path_rslt_testdev, 'w') as f:
            #         json.dump(self.results_testdev, f)
            if 'test' not in self.dataset.split:
                # Launched in the background ('&'); results land in the logs.
                call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
                    + '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
                    .format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
                Logger()('`'+call_to_prog+'`')
                os.system(call_to_prog)

    def compute_tdiuc_metrics(self):
        """Log overall, per-type, and (harmonic/arithmetic) MPT accuracies per head."""
        Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
        for key in ['', '_stu']:
            Logger()(f'Computing TDIUC metrics for logits{key}')
            accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
            Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
            Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
            types = list(set(self.gt_types))
            sum_acc = []
            # eps keeps the harmonic mean defined when a per-type accuracy is 0
            eps = 1e-10
            Logger()('---------------------------------------')
            Logger()('Not using per-answer normalization...')
            for tp in types:
                acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
                sum_acc.append(acc+eps)
                Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
            Logger()('---------------------------------------')
            Logger()('Using per-answer normalization...')
            # NOTE(review): sum_acc is NOT reset here, so the normalized MPT
            # averages below also include the unnormalized per-type values
            # from the loop above — confirm this is intended.
            for tp in types:
                per_ans_stat = defaultdict(int)
                for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
                    per_ans_stat[str(g)+'_gt']+=1
                    if g==p:
                        per_ans_stat[str(g)]+=1
                unq_acc = 0
                for unq_ans in set(self.res_by_type[key][tp+'_gt']):
                    acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
                    unq_acc +=acc_curr_ans
                acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
                sum_acc.append(acc+eps)
                Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,000 | 43.252212 | 143 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_cfvqa_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class VQAAccuracy(nn.Module):
    """Top-k accuracy over each CF-VQA prediction head.

    Heads: '_all' (total effect), '_vq' (V+Q branch), '_cfvqa'
    (counterfactual debiased), '_q' (question-only), '_v' (vision-only).
    """

    def __init__(self, topk=None):
        super().__init__()
        # None sentinel instead of a shared mutable default argument.
        self.topk = [1, 5] if topk is None else topk

    def forward(self, cri_out, net_out, batch):
        """Return a dict mapping 'accuracy{head}_top{k}' to the top-k accuracy."""
        out = {}
        class_id = batch['class_id'].data.cpu()
        for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
            logits = net_out[f'logits{key}'].data.cpu()
            acc_out = accuracy(logits, class_id, topk=self.topk)
            for i, k in enumerate(self.topk):
                out[f'accuracy{key}_top{k}'] = acc_out[i]
        return out
class VQACFVQAMetrics(VQAAccuracies):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if Options()['dataset.eval_split'] == 'test': # 0430
self.accuracy = None
else:
self.accuracy = VQAAccuracy()
self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0
    def forward(self, cri_out, net_out, batch):
        """Update per-head OE/TDIUC accumulators for one batch; return batch accuracies.

        Heads tracked: '_all', '_vq', '_cfvqa', '_q', '_v'.
        """
        out = {}
        if self.accuracy is not None:
            out = self.accuracy(cri_out, net_out, batch)
        # add answers and answer_ids keys to net_out
        net_out = self.engine.model.network.process_answers(net_out)
        batch_size = len(batch['index'])
        for i in range(batch_size):
            # Open Ended Accuracy (VQA-VQA2)
            if self.open_ended:
                for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
                    pred_item = {
                        'question_id': batch['question_id'][i],
                        'answer': net_out[f'answers{key}'][i]
                    }
                    self.results[key].append(pred_item)
                # if self.dataset.split == 'test': # 0430
                #     pred_item = {
                #         'question_id': batch['question_id'][i],
                #         'answer': net_out[f'answers{key}'][i]
                #         # 'answer': net_out[f'answers'][i]
                #     }
                #     # if 'is_testdev' in batch and batch['is_testdev'][i]: # 0430
                #     #     self.results_testdev.append(pred_item)
                #     if self.logits['tensor'] is None:
                #         self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
                #     self.logits['tensor'][self.idx] = logits[i]
                #     self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
                #     self.idx += 1
            # TDIUC metrics
            if self.tdiuc:
                gt_aid = batch['answer_id'][i]
                gt_ans = batch['answer'][i]
                gt_type = batch['question_type'][i]
                self.gt_types.append(gt_type)
                # Ground-truth answers outside the answer vocabulary are
                # recorded as -1 (always counted as wrong).
                if gt_ans in self.ans_to_aid:
                    self.gt_aids.append(gt_aid)
                else:
                    self.gt_aids.append(-1)
                    self.gt_aid_not_found += 1
                for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
                    qid = batch['question_id'][i]
                    pred_aid = net_out[f'answer_ids{key}'][i]
                    self.pred_aids[key].append(pred_aid)
                    # Bucket predictions by question type: _pred/_gt plus
                    # _t (correct) / _f (incorrect) lists.
                    self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
                    if gt_ans in self.ans_to_aid:
                        self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
                        if gt_aid == pred_aid:
                            self.res_by_type[key][gt_type+'_t'].append(pred_aid)
                        else:
                            self.res_by_type[key][gt_type+'_f'].append(pred_aid)
                    else:
                        self.res_by_type[key][gt_type+'_gt'].append(-1)
                        self.res_by_type[key][gt_type+'_f'].append(pred_aid)
        return out
    def reset_oe(self):
        """Reset open-ended result buffers and (re)create per-head result dirs."""
        self.results = dict()
        self.dir_rslt = dict()
        self.path_rslt = dict()
        for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
            self.results[key] = []
            self.dir_rslt[key] = os.path.join(
                self.dir_exp,
                f'results{key}',
                self.dataset.split,
                'epoch,{}'.format(self.engine.epoch))
            os.system('mkdir -p '+self.dir_rslt[key])
            self.path_rslt[key] = os.path.join(
                self.dir_rslt[key],
                'OpenEnded_mscoco_{}_model_results.json'.format(
                    self.dataset.get_subtype()))
        # Test-split logits/testdev export is currently disabled.
        if self.dataset.split == 'test':
            pass
            # self.results_testdev = []
            # self.path_rslt_testdev = os.path.join(
            #     self.dir_rslt,
            #     'OpenEnded_mscoco_{}_model_results.json'.format(
            #         self.dataset.get_subtype(testdev=True)))
            # self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
            # os.system('mkdir -p '+os.path.dirname(self.path_logits))
            # self.logits = {}
            # self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
            # self.logits['qid_to_idx'] = {}
            # self.logits['tensor'] = None
            # self.idx = 0
            # path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
            # with open(path_aid_to_ans, 'w') as f:
            #     json.dump(self.engine.model.network.aid_to_ans, f)
def reset_tdiuc(self):
self.pred_aids = defaultdict(list)
self.gt_aids = []
self.gt_types = []
self.gt_aid_not_found = 0
self.res_by_type = {key: defaultdict(list) for key in ['_all', '_vq', '_cfvqa', '_q', '_v']}
    def compute_oe_accuracy(self):
        """Dump per-head result JSONs and launch the async OE accuracy script."""
        logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
        for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
            logs_name = (logs_name_prefix + key) or "logs"
            with open(self.path_rslt[key], 'w') as f:
                json.dump(self.results[key], f)
            # if self.dataset.split == 'test':
            #     with open(self.path_rslt_testdev, 'w') as f:
            #         json.dump(self.results_testdev, f)
            if 'test' not in self.dataset.split:
                # Launched in the background ('&'); results land in the logs.
                call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
                    + '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
                    .format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
                Logger()('`'+call_to_prog+'`')
                os.system(call_to_prog)
    def compute_tdiuc_metrics(self):
        """Compute and log TDIUC-style metrics for every logits head.

        For each head this reports overall accuracy, per-question-type
        accuracy, and arithmetic/harmonic mean-per-type (MPT) accuracy,
        both without and with per-answer normalization.
        """
        Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
        for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
            Logger()(f'Computing TDIUC metrics for logits{key}')
            # Overall accuracy: fraction of predictions matching ground truth.
            accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
            Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
            Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
            types = list(set(self.gt_types))
            sum_acc = []
            # Small epsilon keeps the harmonic mean defined when acc == 0.
            eps = 1e-10
            Logger()('---------------------------------------')
            Logger()('Not using per-answer normalization...')
            for tp in types:
                # true / (true + false) per question type.
                acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
                sum_acc.append(acc+eps)
                Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
            Logger()('---------------------------------------')
            Logger()('Using per-answer normalization...')
            # NOTE(review): sum_acc is NOT reset here, so the "normalized" MPT
            # values below also include the unnormalized accuracies appended
            # above. This mirrors the upstream TDIUC evaluation code — confirm
            # before changing.
            for tp in types:
                per_ans_stat = defaultdict(int)
                # Count, per ground-truth answer, occurrences and correct hits.
                for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
                    per_ans_stat[str(g)+'_gt']+=1
                    if g==p:
                        per_ans_stat[str(g)]+=1
                unq_acc = 0
                # Average the per-answer accuracies over the unique answers.
                for unq_ans in set(self.res_by_type[key][tp+'_gt']):
                    acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
                    unq_acc +=acc_curr_ans
                acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
                sum_acc.append(acc+eps)
                Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,384 | 44.152174 | 143 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/factory.py | from bootstrap.lib.options import Options
from block.models.metrics.vqa_accuracies import VQAAccuracies
from .vqa_rubi_metrics import VQARUBiMetrics
from .vqa_cfvqa_metrics import VQACFVQAMetrics
from .vqa_cfvqasimple_metrics import VQACFVQASimpleMetrics
from .vqa_cfvqaintrod_metrics import VQACFVQAIntroDMetrics
from .vqa_rubiintrod_metrics import VQARUBiIntroDMetrics
def factory(engine, mode):
    """Build the metric object selected by ``Options()['model.metric.name']``.

    Args:
        engine: the bootstrap engine (provides datasets and the model).
        mode: 'train' or 'eval'.

    Returns:
        A metric instance, or ``None`` when no metric applies (trainval
        split, or an unrecognized mode for 'vqa_accuracies').

    Raises:
        ValueError: for an unknown metric name or train split.
    """
    name = Options()['model.metric.name']

    def _open_ended():
        # TDIUC and GQA are scored in closed form, not with the open-ended
        # VQA protocol.
        dataset_name = Options()['dataset.name']
        return 'tdiuc' not in dataset_name and 'gqa' not in dataset_name

    if name == 'vqa_accuracies':
        if mode == 'train':
            split = engine.dataset['train'].split
            if split == 'train':
                return VQAAccuracies(engine,
                    mode='train',
                    open_ended=_open_ended(),
                    tdiuc=True,
                    dir_exp=Options()['exp.dir'],
                    dir_vqa=Options()['dataset.dir'])
            if split == 'trainval':
                # No metric on the merged train+val split.
                return None
            raise ValueError(split)
        if mode == 'eval':
            return VQAAccuracies(engine,
                mode='eval',
                open_ended=_open_ended(),
                tdiuc=('tdiuc' in Options()['dataset.name'] or Options()['dataset.eval_split'] != 'test'),
                dir_exp=Options()['exp.dir'],
                dir_vqa=Options()['dataset.dir'])
        return None

    # The remaining metrics all share the same constructor signature, so the
    # previous five copy-pasted elif branches collapse to a dispatch table.
    metric_classes = {
        'vqa_rubi_metrics': VQARUBiMetrics,
        'vqa_cfvqa_metrics': VQACFVQAMetrics,
        'vqa_cfvqasimple_metrics': VQACFVQASimpleMetrics,
        'vqa_cfvqaintrod_metrics': VQACFVQAIntroDMetrics,
        'vqa_rubiintrod_metrics': VQARUBiIntroDMetrics,
    }
    if name not in metric_classes:
        raise ValueError(name)
    return metric_classes[name](engine,
        mode=mode,
        open_ended=_open_ended(),
        tdiuc=True,
        dir_exp=Options()['exp.dir'],
        dir_vqa=Options()['dataset.dir'])
| 3,402 | 36.395604 | 106 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/__init__.py | 0 | 0 | 0 | py | |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_cfvqaintrod_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class VQAAccuracy(nn.Module):
    """Top-k accuracy computed over the '_all', '_cfvqa' and '_stu' logits
    heads found in the network output.

    Args:
        topk: iterable of k values to report (default ``(1, 5)``).
    """
    def __init__(self, topk=(1, 5)):
        super().__init__()
        # Stored as a tuple: the original mutable default list ([1, 5]) would
        # be shared across instances, and an immutable default avoids that.
        self.topk = tuple(topk)

    def forward(self, cri_out, net_out, batch):
        """Return ``{'accuracy<head>_top<k>': value}`` for each head/k pair.

        `batch['class_id']` holds the ground-truth answer ids; logits are
        moved to CPU before scoring.
        """
        out = {}
        class_id = batch['class_id'].data.cpu()
        for key in ['_all', '_cfvqa', '_stu']:
            logits = net_out[f'logits{key}'].data.cpu()
            acc_out = accuracy(logits, class_id, topk=self.topk)
            for i, k in enumerate(self.topk):
                out[f'accuracy{key}_top{k}'] = acc_out[i]
        return out
class VQACFVQAIntroDMetrics(VQAAccuracies):
    """VQA metrics for the CFVQA+IntroD model.

    Accumulates open-ended results and TDIUC statistics separately for the
    three logits heads emitted by the network ('_all', '_cfvqa', '_stu'),
    then reports each head's scores.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # No ground truth on the test split, so skip top-k accuracy there.
        if Options()['dataset.eval_split'] == 'test': # 0430
            self.accuracy = None
        else:
            self.accuracy = VQAAccuracy()
        # Whether the result directory is removed after scoring (passed as
        # --rm to the external accuracy script).
        self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0
    def forward(self, cri_out, net_out, batch):
        """Score one batch and accumulate per-head OE/TDIUC statistics.

        Returns the top-k accuracy dict (empty on the test split).
        """
        out = {}
        if self.accuracy is not None:
            out = self.accuracy(cri_out, net_out, batch)
        # add answers and answer_ids keys to net_out
        net_out = self.engine.model.network.process_answers(net_out)
        batch_size = len(batch['index'])
        for i in range(batch_size):
            # Open Ended Accuracy (VQA-VQA2)
            if self.open_ended:
                # One prediction record per head, consumed later by
                # compute_oe_accuracy().
                for key in ['_all', '_cfvqa', '_stu']:
                    pred_item = {
                        'question_id': batch['question_id'][i],
                        'answer': net_out[f'answers{key}'][i]
                    }
                    self.results[key].append(pred_item)
                # if self.dataset.split == 'test': # 0430
                #     pred_item = {
                #         'question_id': batch['question_id'][i],
                #         'answer': net_out[f'answers{key}'][i]
                #         # 'answer': net_out[f'answers'][i]
                #     }
                #     # if 'is_testdev' in batch and batch['is_testdev'][i]: # 0430
                #     #     self.results_testdev.append(pred_item)
                # if self.logits['tensor'] is None:
                #     self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
                # self.logits['tensor'][self.idx] = logits[i]
                # self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
                # self.idx += 1
            # TDIUC metrics
            if self.tdiuc:
                gt_aid = batch['answer_id'][i]
                gt_ans = batch['answer'][i]
                gt_type = batch['question_type'][i]
                self.gt_types.append(gt_type)
                # Ground-truth answers missing from the vocabulary count as -1
                # (never matched) and are tallied in gt_aid_not_found.
                if gt_ans in self.ans_to_aid:
                    self.gt_aids.append(gt_aid)
                else:
                    self.gt_aids.append(-1)
                    self.gt_aid_not_found += 1
                for key in ['_all', '_cfvqa', '_stu']:
                    qid = batch['question_id'][i]
                    pred_aid = net_out[f'answer_ids{key}'][i]
                    self.pred_aids[key].append(pred_aid)
                    self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
                    if gt_ans in self.ans_to_aid:
                        self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
                        if gt_aid == pred_aid:
                            self.res_by_type[key][gt_type+'_t'].append(pred_aid)
                        else:
                            self.res_by_type[key][gt_type+'_f'].append(pred_aid)
                    else:
                        self.res_by_type[key][gt_type+'_gt'].append(-1)
                        self.res_by_type[key][gt_type+'_f'].append(pred_aid)
        return out
    def reset_oe(self):
        """Reset open-ended result buffers and (re)create per-head result
        directories/paths for the current epoch."""
        self.results = dict()
        self.dir_rslt = dict()
        self.path_rslt = dict()
        for key in ['_all', '_cfvqa', '_stu']:
            self.results[key] = []
            self.dir_rslt[key] = os.path.join(
                self.dir_exp,
                f'results{key}',
                self.dataset.split,
                'epoch,{}'.format(self.engine.epoch))
            os.system('mkdir -p '+self.dir_rslt[key])
            self.path_rslt[key] = os.path.join(
                self.dir_rslt[key],
                'OpenEnded_mscoco_{}_model_results.json'.format(
                    self.dataset.get_subtype()))
        if self.dataset.split == 'test':
            pass
            # self.results_testdev = []
            # self.path_rslt_testdev = os.path.join(
            #     self.dir_rslt,
            #     'OpenEnded_mscoco_{}_model_results.json'.format(
            #         self.dataset.get_subtype(testdev=True)))
            # self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
            # os.system('mkdir -p '+os.path.dirname(self.path_logits))
            # self.logits = {}
            # self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
            # self.logits['qid_to_idx'] = {}
            # self.logits['tensor'] = None
            # self.idx = 0
            # path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
            # with open(path_aid_to_ans, 'w') as f:
            #     json.dump(self.engine.model.network.aid_to_ans, f)
    def reset_tdiuc(self):
        """Clear all accumulated TDIUC statistics."""
        self.pred_aids = defaultdict(list)
        self.gt_aids = []
        self.gt_types = []
        self.gt_aid_not_found = 0
        self.res_by_type = {key: defaultdict(list) for key in ['_all', '_cfvqa', '_stu']}
    def compute_oe_accuracy(self):
        """Dump each head's predictions to JSON and launch the external
        open-ended accuracy script in the background (non-test splits)."""
        logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
        for key in ['_all', '_cfvqa', '_stu']:
            logs_name = (logs_name_prefix + key) or "logs"
            with open(self.path_rslt[key], 'w') as f:
                json.dump(self.results[key], f)
            # if self.dataset.split == 'test':
            #     with open(self.path_rslt_testdev, 'w') as f:
            #         json.dump(self.results_testdev, f)
            if 'test' not in self.dataset.split:
                # Fire-and-forget subprocess; the "&" backgrounds it.
                call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
                    + '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
                    .format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
                Logger()('`'+call_to_prog+'`')
                os.system(call_to_prog)
    def compute_tdiuc_metrics(self):
        """Log overall, per-type and arithmetic/harmonic MPT accuracy for each
        head, without and with per-answer normalization."""
        Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
        for key in ['_all', '_cfvqa', '_stu']:
            Logger()(f'Computing TDIUC metrics for logits{key}')
            accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
            Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
            Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
            types = list(set(self.gt_types))
            sum_acc = []
            # Epsilon keeps the harmonic mean defined when an accuracy is 0.
            eps = 1e-10
            Logger()('---------------------------------------')
            Logger()('Not using per-answer normalization...')
            for tp in types:
                acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
                sum_acc.append(acc+eps)
                Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
            Logger()('---------------------------------------')
            Logger()('Using per-answer normalization...')
            # NOTE(review): sum_acc is not reset before this section, so the
            # normalized MPT values also include the unnormalized per-type
            # accuracies appended above — confirm against upstream TDIUC code.
            for tp in types:
                per_ans_stat = defaultdict(int)
                for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
                    per_ans_stat[str(g)+'_gt']+=1
                    if g==p:
                        per_ans_stat[str(g)]+=1
                unq_acc = 0
                for unq_ans in set(self.res_by_type[key][tp+'_gt']):
                    acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
                    unq_acc +=acc_curr_ans
                acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
                sum_acc.append(acc+eps)
                Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
                Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
            acc_mpt_a = float(np.mean(np.array(sum_acc)))
            Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
            acc_mpt_h = float(stats.hmean(sum_acc))
            Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
            Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,313 | 43.843478 | 143 | py |
introd | introd-main/cfvqa/cfvqa/datasets/vqacp.py | import os
import csv
import copy
import json
import torch
import numpy as np
from tqdm import tqdm
from os import path as osp
from bootstrap.lib.logger import Logger
from block.datasets.vqa_utils import AbstractVQA
from copy import deepcopy
import random
import h5py
class VQACP(AbstractVQA):
    """VQA-CP v1 dataset (changing-priors splits of VQA).

    Loads questions/annotations downloaded by :meth:`download` and attaches
    one of three kinds of precomputed image features per item: Faster R-CNN
    region features (``dir_rcnn``), CNN grid features from HDF5
    (``dir_cnn``), or VGG16 feature maps (``dir_vgg16``).
    """
    def __init__(self,
            dir_data='data/vqa/vqacp2',
            split='train',
            batch_size=80,
            nb_threads=4,
            pin_memory=False,
            shuffle=False,
            nans=1000,
            minwcount=10,
            nlp='mcb',
            proc_split='train',
            samplingans=False,
            dir_rcnn='data/coco/extract_rcnn',
            dir_cnn=None,
            dir_vgg16=None,
            has_testdevset=False,
            ):
        # NOTE(review): the default dir_data points at 'vqacp2' although this
        # class downloads the v1 files — callers appear to pass dir_data
        # explicitly; confirm before relying on the default.
        super(VQACP, self).__init__(
            dir_data=dir_data,
            split=split,
            batch_size=batch_size,
            nb_threads=nb_threads,
            pin_memory=pin_memory,
            shuffle=shuffle,
            nans=nans,
            minwcount=minwcount,
            nlp=nlp,
            proc_split=proc_split,
            samplingans=samplingans,
            has_valset=True,
            has_testset=False,
            has_testdevset=has_testdevset,
            has_testset_anno=False,
            has_answers_occurence=True,
            do_tokenize_answers=False)
        self.dir_rcnn = dir_rcnn
        self.dir_cnn = dir_cnn
        self.dir_vgg16 = dir_vgg16
        self.load_image_features()
        # Toggled manually (e.g. in notebooks) to keep raw question/annotation
        # dicts inside each item.
        self.load_original_annotation = False
    def add_rcnn_to_item(self, item):
        """Attach Faster R-CNN region features stored as one .pth per image."""
        path_rcnn = os.path.join(self.dir_rcnn, '{}.pth'.format(item['image_name']))
        item_rcnn = torch.load(path_rcnn)
        item['visual'] = item_rcnn['pooled_feat']
        item['coord'] = item_rcnn['rois']
        item['norm_coord'] = item_rcnn['norm_rois']
        item['nb_regions'] = item['visual'].size(0)
        return item
    def load_image_features(self):
        """Open the HDF5 CNN feature files (dir_cnn) or list the VGG16
        feature files (dir_vgg16), depending on configuration."""
        if self.dir_cnn:
            filename_train = os.path.join(self.dir_cnn, 'trainset.hdf5')
            filename_val = os.path.join(self.dir_cnn, 'valset.hdf5')
            Logger()(f"Opening file {filename_train}, {filename_val}")
            self.image_features_train = h5py.File(filename_train, 'r', swmr=True)
            self.image_features_val = h5py.File(filename_val, 'r', swmr=True)
            # Map image name -> row index in the HDF5 'att' dataset.
            with open(os.path.join(self.dir_cnn, 'trainset.txt'.format(self.split)), 'r') as f:
                self.image_names_to_index_train = {}
                for i, line in enumerate(f):
                    self.image_names_to_index_train[line.strip()] = i
            with open(os.path.join(self.dir_cnn, 'valset.txt'.format(self.split)), 'r') as f:
                self.image_names_to_index_val = {}
                for i, line in enumerate(f):
                    self.image_names_to_index_val[line.strip()] = i
        elif self.dir_vgg16:
            # list filenames
            self.filenames_train = os.listdir(os.path.join(self.dir_vgg16, 'train'))
            self.filenames_val = os.listdir(os.path.join(self.dir_vgg16, 'val'))
    def add_vgg_to_item(self, item):
        """Attach a VGG16 feature map, flattened to (14*14, 512).

        NOTE(review): if the filename is in neither the train nor the val
        list, `path` is unbound and this raises NameError; also .view() on
        the non-contiguous result of .permute() raises RuntimeError in
        PyTorch — confirm this code path is exercised.
        """
        image_name = item['image_name']
        filename = image_name + '.pth'
        if filename in self.filenames_train:
            path = os.path.join(self.dir_vgg16, 'train', filename)
        elif filename in self.filenames_val:
            path = os.path.join(self.dir_vgg16, 'val', filename)
        visual = torch.load(path)
        visual = visual.permute(1, 2, 0).view(14*14, 512)
        item['visual'] = visual
        return item
    def add_cnn_to_item(self, item):
        """Attach a CNN grid feature map from HDF5, flattened to (196, 2048).

        NOTE(review): an unknown image leaves `image` unbound (NameError),
        and .view() after .permute() requires a contiguous tensor — see
        add_vgg_to_item.
        """
        image_name = item['image_name']
        if image_name in self.image_names_to_index_train:
            index = self.image_names_to_index_train[image_name]
            image = torch.tensor(self.image_features_train['att'][index])
        elif image_name in self.image_names_to_index_val:
            index = self.image_names_to_index_val[image_name]
            image = torch.tensor(self.image_features_val['att'][index])
        image = image.permute(1, 2, 0).view(196, 2048)
        item['visual'] = image
        return item
    def __getitem__(self, index):
        """Build one sample dict: tokenized question, image features and
        (when annotations exist) the answer target."""
        item = {}
        item['index'] = index
        # Process Question (word token)
        question = self.dataset['questions'][index]
        if self.load_original_annotation:
            item['original_question'] = question
        item['question_id'] = question['question_id']
        item['question'] = torch.LongTensor(question['question_wids'])
        item['lengths'] = torch.LongTensor([len(question['question_wids'])])
        item['image_name'] = question['image_name']
        # Process Object, Attribut and Relational features
        if self.dir_rcnn:
            item = self.add_rcnn_to_item(item)
        elif self.dir_cnn:
            item = self.add_cnn_to_item(item)
        elif self.dir_vgg16:
            item = self.add_vgg_to_item(item)
        # Process Answer if exists
        if 'annotations' in self.dataset:
            annotation = self.dataset['annotations'][index]
            if self.load_original_annotation:
                item['original_annotation'] = annotation
            # During training, optionally sample the target answer according
            # to its annotator frequency instead of taking the majority one.
            if 'train' in self.split and self.samplingans:
                proba = annotation['answers_count']
                proba = proba / np.sum(proba)
                item['answer_id'] = int(np.random.choice(annotation['answers_id'], p=proba))
            else:
                item['answer_id'] = annotation['answer_id']
            item['class_id'] = torch.LongTensor([item['answer_id']])
            item['answer'] = annotation['answer']
            item['question_type'] = annotation['question_type']
        return item
    def download(self):
        """Download the VQA-CP v1 question/annotation files and rewrite them
        into the VQA-style layout expected by AbstractVQA."""
        dir_ann = osp.join(self.dir_raw, 'annotations')
        os.system('mkdir -p '+dir_ann)
        os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v1_train_questions.json -P' + dir_ann)
        os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v1_test_questions.json -P' + dir_ann)
        os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v1_train_annotations.json -P' + dir_ann)
        os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v1_test_annotations.json -P' + dir_ann)
        train_q = {"questions":json.load(open(osp.join(dir_ann, "vqacp_v1_train_questions.json")))}
        val_q = {"questions":json.load(open(osp.join(dir_ann, "vqacp_v1_test_questions.json")))}
        train_ann = {"annotations":json.load(open(osp.join(dir_ann, "vqacp_v1_train_annotations.json")))}
        val_ann = {"annotations":json.load(open(osp.join(dir_ann, "vqacp_v1_test_annotations.json")))}
        # Fill in the VQA metadata fields the upstream loader expects.
        train_q['info'] = {}
        train_q['data_type'] = 'mscoco'
        train_q['data_subtype'] = "train2014cp"
        train_q['task_type'] = "Open-Ended"
        train_q['license'] = {}
        val_q['info'] = {}
        val_q['data_type'] = 'mscoco'
        val_q['data_subtype'] = "val2014cp"
        val_q['task_type'] = "Open-Ended"
        val_q['license'] = {}
        for k in ["info", 'data_type','data_subtype', 'license']:
            train_ann[k] = train_q[k]
            val_ann[k] = val_q[k]
        with open(osp.join(dir_ann, "OpenEnded_mscoco_train2014_questions.json"), 'w') as F:
            F.write(json.dumps(train_q))
        with open(osp.join(dir_ann, "OpenEnded_mscoco_val2014_questions.json"), 'w') as F:
            F.write(json.dumps(val_q))
        with open(osp.join(dir_ann, "mscoco_train2014_annotations.json"), 'w') as F:
            F.write(json.dumps(train_ann))
        with open(osp.join(dir_ann, "mscoco_val2014_annotations.json"), 'w') as F:
            F.write(json.dumps(val_ann))
    def add_image_names(self, dataset):
        """Derive each question's COCO image filename from its split and id."""
        for q in dataset['questions']:
            q['image_name'] = 'COCO_%s_%012d.jpg'%(q['coco_split'],q['image_id'])
        return dataset
| 7,952 | 41.079365 | 111 | py |
introd | introd-main/cfvqa/cfvqa/datasets/vqacp2.py | import os
import csv
import copy
import json
import torch
import numpy as np
from tqdm import tqdm
from os import path as osp
from bootstrap.lib.logger import Logger
from block.datasets.vqa_utils import AbstractVQA
from copy import deepcopy
import random
import h5py
class VQACP2(AbstractVQA):
    """VQA-CP v2 dataset (changing-priors splits of VQA v2).

    Loads questions/annotations downloaded by :meth:`download` and attaches
    one of three kinds of precomputed image features per item: Faster R-CNN
    region features (``dir_rcnn``), CNN grid features from HDF5
    (``dir_cnn``), or VGG16 feature maps (``dir_vgg16``).
    """
    def __init__(self,
            dir_data='data/vqa/vqacp2',
            split='train',
            batch_size=80,
            nb_threads=4,
            pin_memory=False,
            shuffle=False,
            nans=1000,
            minwcount=10,
            nlp='mcb',
            proc_split='train',
            samplingans=False,
            dir_rcnn='data/coco/extract_rcnn',
            dir_cnn=None,
            dir_vgg16=None,
            has_testdevset=False,
            ):
        super(VQACP2, self).__init__(
            dir_data=dir_data,
            split=split,
            batch_size=batch_size,
            nb_threads=nb_threads,
            pin_memory=pin_memory,
            shuffle=shuffle,
            nans=nans,
            minwcount=minwcount,
            nlp=nlp,
            proc_split=proc_split,
            samplingans=samplingans,
            has_valset=True,
            has_testset=False,
            has_testdevset=has_testdevset,
            has_testset_anno=False,
            has_answers_occurence=True,
            do_tokenize_answers=False)
        self.dir_rcnn = dir_rcnn
        self.dir_cnn = dir_cnn
        self.dir_vgg16 = dir_vgg16
        self.load_image_features()
        # Toggled manually (e.g. in notebooks) to keep raw question/annotation
        # dicts inside each item.
        self.load_original_annotation = False
    def add_rcnn_to_item(self, item):
        """Attach Faster R-CNN region features stored as one .pth per image."""
        path_rcnn = os.path.join(self.dir_rcnn, '{}.pth'.format(item['image_name']))
        item_rcnn = torch.load(path_rcnn)
        item['visual'] = item_rcnn['pooled_feat']
        item['coord'] = item_rcnn['rois']
        item['norm_coord'] = item_rcnn['norm_rois']
        item['nb_regions'] = item['visual'].size(0)
        return item
    def load_image_features(self):
        """Open the HDF5 CNN feature files (dir_cnn) or list the VGG16
        feature files (dir_vgg16), depending on configuration."""
        if self.dir_cnn:
            filename_train = os.path.join(self.dir_cnn, 'trainset.hdf5')
            filename_val = os.path.join(self.dir_cnn, 'valset.hdf5')
            Logger()(f"Opening file {filename_train}, {filename_val}")
            self.image_features_train = h5py.File(filename_train, 'r', swmr=True)
            self.image_features_val = h5py.File(filename_val, 'r', swmr=True)
            # Map image name -> row index in the HDF5 'att' dataset.
            with open(os.path.join(self.dir_cnn, 'trainset.txt'.format(self.split)), 'r') as f:
                self.image_names_to_index_train = {}
                for i, line in enumerate(f):
                    self.image_names_to_index_train[line.strip()] = i
            with open(os.path.join(self.dir_cnn, 'valset.txt'.format(self.split)), 'r') as f:
                self.image_names_to_index_val = {}
                for i, line in enumerate(f):
                    self.image_names_to_index_val[line.strip()] = i
        elif self.dir_vgg16:
            # list filenames
            self.filenames_train = os.listdir(os.path.join(self.dir_vgg16, 'train'))
            self.filenames_val = os.listdir(os.path.join(self.dir_vgg16, 'val'))
    def add_vgg_to_item(self, item):
        """Attach a VGG16 feature map, flattened to (14*14, 512).

        NOTE(review): if the filename is in neither the train nor the val
        list, `path` is unbound and this raises NameError; also .view() on
        the non-contiguous result of .permute() raises RuntimeError in
        PyTorch — confirm this code path is exercised.
        """
        image_name = item['image_name']
        filename = image_name + '.pth'
        if filename in self.filenames_train:
            path = os.path.join(self.dir_vgg16, 'train', filename)
        elif filename in self.filenames_val:
            path = os.path.join(self.dir_vgg16, 'val', filename)
        visual = torch.load(path)
        visual = visual.permute(1, 2, 0).view(14*14, 512)
        item['visual'] = visual
        return item
    def add_cnn_to_item(self, item):
        """Attach a CNN grid feature map from HDF5, flattened to (196, 2048).

        NOTE(review): an unknown image leaves `image` unbound (NameError),
        and .view() after .permute() requires a contiguous tensor — see
        add_vgg_to_item.
        """
        image_name = item['image_name']
        if image_name in self.image_names_to_index_train:
            index = self.image_names_to_index_train[image_name]
            image = torch.tensor(self.image_features_train['att'][index])
        elif image_name in self.image_names_to_index_val:
            index = self.image_names_to_index_val[image_name]
            image = torch.tensor(self.image_features_val['att'][index])
        image = image.permute(1, 2, 0).view(196, 2048)
        item['visual'] = image
        return item
    def __getitem__(self, index):
        """Build one sample dict: tokenized question, image features and
        (when annotations exist) the answer target."""
        item = {}
        item['index'] = index
        # Process Question (word token)
        question = self.dataset['questions'][index]
        if self.load_original_annotation:
            item['original_question'] = question
        item['question_id'] = question['question_id']
        item['question'] = torch.LongTensor(question['question_wids'])
        item['lengths'] = torch.LongTensor([len(question['question_wids'])])
        item['image_name'] = question['image_name']
        # Process Object, Attribut and Relational features
        if self.dir_rcnn:
            item = self.add_rcnn_to_item(item)
        elif self.dir_cnn:
            item = self.add_cnn_to_item(item)
        elif self.dir_vgg16:
            item = self.add_vgg_to_item(item)
        # Process Answer if exists
        if 'annotations' in self.dataset:
            annotation = self.dataset['annotations'][index]
            if self.load_original_annotation:
                item['original_annotation'] = annotation
            # During training, optionally sample the target answer according
            # to its annotator frequency instead of taking the majority one.
            if 'train' in self.split and self.samplingans:
                proba = annotation['answers_count']
                proba = proba / np.sum(proba)
                item['answer_id'] = int(np.random.choice(annotation['answers_id'], p=proba))
            else:
                item['answer_id'] = annotation['answer_id']
            item['class_id'] = torch.LongTensor([item['answer_id']])
            item['answer'] = annotation['answer']
            item['question_type'] = annotation['question_type']
        return item
    def download(self):
        """Download the VQA-CP v2 question/annotation files and rewrite them
        into the VQA-style layout expected by AbstractVQA."""
        dir_ann = osp.join(self.dir_raw, 'annotations')
        os.system('mkdir -p '+dir_ann)
        os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_train_questions.json -P' + dir_ann)
        os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_test_questions.json -P' + dir_ann)
        os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_train_annotations.json -P' + dir_ann)
        os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_test_annotations.json -P' + dir_ann)
        train_q = {"questions":json.load(open(osp.join(dir_ann, "vqacp_v2_train_questions.json")))}
        val_q = {"questions":json.load(open(osp.join(dir_ann, "vqacp_v2_test_questions.json")))}
        train_ann = {"annotations":json.load(open(osp.join(dir_ann, "vqacp_v2_train_annotations.json")))}
        val_ann = {"annotations":json.load(open(osp.join(dir_ann, "vqacp_v2_test_annotations.json")))}
        # Fill in the VQA metadata fields the upstream loader expects.
        train_q['info'] = {}
        train_q['data_type'] = 'mscoco'
        train_q['data_subtype'] = "train2014cp"
        train_q['task_type'] = "Open-Ended"
        train_q['license'] = {}
        val_q['info'] = {}
        val_q['data_type'] = 'mscoco'
        val_q['data_subtype'] = "val2014cp"
        val_q['task_type'] = "Open-Ended"
        val_q['license'] = {}
        for k in ["info", 'data_type','data_subtype', 'license']:
            train_ann[k] = train_q[k]
            val_ann[k] = val_q[k]
        with open(osp.join(dir_ann, "OpenEnded_mscoco_train2014_questions.json"), 'w') as F:
            F.write(json.dumps(train_q))
        with open(osp.join(dir_ann, "OpenEnded_mscoco_val2014_questions.json"), 'w') as F:
            F.write(json.dumps(val_q))
        with open(osp.join(dir_ann, "mscoco_train2014_annotations.json"), 'w') as F:
            F.write(json.dumps(train_ann))
        with open(osp.join(dir_ann, "mscoco_val2014_annotations.json"), 'w') as F:
            F.write(json.dumps(val_ann))
    def add_image_names(self, dataset):
        """Derive each question's COCO image filename from its split and id."""
        for q in dataset['questions']:
            q['image_name'] = 'COCO_%s_%012d.jpg'%(q['coco_split'],q['image_id'])
        return dataset
| 7,954 | 41.089947 | 111 | py |
introd | introd-main/cfvqa/cfvqa/datasets/factory.py | from bootstrap.lib.options import Options
from block.datasets.tdiuc import TDIUC
from block.datasets.vrd import VRD
from block.datasets.vg import VG
from block.datasets.vqa_utils import ListVQADatasets
from .vqa2 import VQA2
from .vqacp2 import VQACP2
from .vqacp import VQACP
def factory(engine=None):
    """Build the dict of datasets requested by the global dataset options.

    Returns a dict with a 'train' and/or 'eval' entry, each produced by
    :func:`factory_split` from the corresponding split option. An entry is
    omitted when its split option is unset or empty.
    """
    options = Options()['dataset']
    datasets = {}
    for mode, split_key in (('train', 'train_split'), ('eval', 'eval_split')):
        split = options.get(split_key, None)
        if split:
            datasets[mode] = factory_split(split)
    return datasets
def factory_split(split):
    """Instantiate the dataset object for a single split.

    Args:
        split: 'train', 'val' or 'test' (shuffling is enabled when 'train'
            appears in the split name).

    Returns:
        A dataset instance matching ``Options()['dataset.name']``.

    Raises:
        ValueError: for an unknown dataset name (previously this crashed
            later with a NameError on the unbound ``dataset`` variable).
    """
    opt = Options()['dataset']
    shuffle = ('train' in split)
    name = opt['name']
    if name in ('vqacp', 'vqacp2', 'vqacpv2-with-testdev'):
        assert(split in ['train', 'val', 'test'])
        samplingans = (opt['samplingans'] and split == 'train')
        # The three VQA-CP variants previously had three copy-pasted branches
        # sharing every constructor argument; build the kwargs once.
        dataset_cls = VQACP if name == 'vqacp' else VQACP2
        kwargs = dict(
            dir_data=opt['dir'],
            split=split,
            batch_size=opt['batch_size'],
            nb_threads=opt['nb_threads'],
            pin_memory=Options()['misc']['cuda'],
            shuffle=shuffle,
            nans=opt['nans'],
            minwcount=opt['minwcount'],
            nlp=opt['nlp'],
            proc_split=opt['proc_split'],
            samplingans=samplingans,
            dir_rcnn=opt['dir_rcnn'],
            dir_cnn=opt.get('dir_cnn', None),
            dir_vgg16=opt.get('dir_vgg16', None),
        )
        if name == 'vqacpv2-with-testdev':
            kwargs['has_testdevset'] = True
        dataset = dataset_cls(**kwargs)
    elif name == 'vqa2':
        assert(split in ['train', 'val', 'test'])
        samplingans = (opt['samplingans'] and split == 'train')
        if opt['vg']:
            # VQA2 merged with Visual Genome QA pairs for extra supervision.
            assert(opt['proc_split'] == 'trainval')
            vqa2 = VQA2(
                dir_data=opt['dir'],
                split='train',
                nans=opt['nans'],
                minwcount=opt['minwcount'],
                nlp=opt['nlp'],
                proc_split=opt['proc_split'],
                samplingans=samplingans,
                dir_rcnn=opt['dir_rcnn'])
            vg = VG(
                dir_data=opt['dir_vg'],
                split='train',
                nans=10000,
                minwcount=0,
                nlp=opt['nlp'],
                dir_rcnn=opt['dir_rcnn_vg'])
            vqa2vg = ListVQADatasets(
                [vqa2,vg],
                split='train',
                batch_size=opt['batch_size'],
                nb_threads=opt['nb_threads'],
                pin_memory=Options()['misc.cuda'],
                shuffle=shuffle)
            if split == 'train':
                dataset = vqa2vg
            else:
                # Eval split: plain VQA2 synchronized with the merged
                # train-time vocabulary.
                dataset = VQA2(
                    dir_data=opt['dir'],
                    split=split,
                    batch_size=opt['batch_size'],
                    nb_threads=opt['nb_threads'],
                    pin_memory=Options()['misc.cuda'],
                    shuffle=False,
                    nans=opt['nans'],
                    minwcount=opt['minwcount'],
                    nlp=opt['nlp'],
                    proc_split=opt['proc_split'],
                    samplingans=samplingans,
                    dir_rcnn=opt['dir_rcnn'])
                dataset.sync_from(vqa2vg)
        else:
            dataset = VQA2(
                dir_data=opt['dir'],
                split=split,
                batch_size=opt['batch_size'],
                nb_threads=opt['nb_threads'],
                pin_memory=Options()['misc.cuda'],
                shuffle=shuffle,
                nans=opt['nans'],
                minwcount=opt['minwcount'],
                nlp=opt['nlp'],
                proc_split=opt['proc_split'],
                samplingans=samplingans,
                dir_rcnn=opt['dir_rcnn'],
                dir_cnn=opt.get('dir_cnn', None),
            )
    else:
        raise ValueError(name)
    return dataset
| 5,245 | 32.202532 | 63 | py |
introd | introd-main/cfvqa/cfvqa/datasets/__init__.py | 0 | 0 | 0 | py | |
introd | introd-main/cfvqa/cfvqa/datasets/vqa2.py | import os
import csv
import copy
import json
import torch
import numpy as np
from os import path as osp
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from block.datasets.vqa_utils import AbstractVQA
from copy import deepcopy
import random
import tqdm
import h5py
class VQA2(AbstractVQA):
    def __init__(self,
            dir_data='data/vqa2',
            split='train',
            batch_size=10,
            nb_threads=4,
            pin_memory=False,
            shuffle=False,
            nans=1000,
            minwcount=10,
            nlp='mcb',
            proc_split='train',
            samplingans=False,
            dir_rcnn='data/coco/extract_rcnn',
            adversarial=False,
            dir_cnn=None
            ):
        """VQA v2 dataset with precomputed Faster R-CNN (dir_rcnn) or CNN
        grid (dir_cnn) image features.

        The `adversarial` flag is accepted but not stored or used here.
        """
        super(VQA2, self).__init__(
            dir_data=dir_data,
            split=split,
            batch_size=batch_size,
            nb_threads=nb_threads,
            pin_memory=pin_memory,
            shuffle=shuffle,
            nans=nans,
            minwcount=minwcount,
            nlp=nlp,
            proc_split=proc_split,
            samplingans=samplingans,
            has_valset=True,
            has_testset=True,
            has_answers_occurence=True,
            do_tokenize_answers=False)
        self.dir_rcnn = dir_rcnn
        self.dir_cnn = dir_cnn
        self.load_image_features()
        # to activate manually in visualization context (notebook)
        self.load_original_annotation = False
def add_rcnn_to_item(self, item):
path_rcnn = os.path.join(self.dir_rcnn, '{}.pth'.format(item['image_name']))
item_rcnn = torch.load(path_rcnn)
item['visual'] = item_rcnn['pooled_feat']
item['coord'] = item_rcnn['rois']
item['norm_coord'] = item_rcnn.get('norm_rois', None)
item['nb_regions'] = item['visual'].size(0)
return item
def add_cnn_to_item(self, item):
image_name = item['image_name']
if image_name in self.image_names_to_index_train:
index = self.image_names_to_index_train[image_name]
image = torch.tensor(self.image_features_train['att'][index])
elif image_name in self.image_names_to_index_val:
index = self.image_names_to_index_val[image_name]
image = torch.tensor(self.image_features_val['att'][index])
image = image.permute(1, 2, 0).view(196, 2048)
item['visual'] = image
return item
    def load_image_features(self):
        """Open the HDF5 CNN feature stores and build name->row-index maps.

        Only does work when ``self.dir_cnn`` is set; otherwise image
        features come from ``dir_rcnn`` at item time.
        """
        if self.dir_cnn:
            filename_train = os.path.join(self.dir_cnn, 'trainset.hdf5')
            filename_val = os.path.join(self.dir_cnn, 'valset.hdf5')
            Logger()(f"Opening file {filename_train}, {filename_val}")
            # SWMR mode allows concurrent read access from worker processes.
            self.image_features_train = h5py.File(filename_train, 'r', swmr=True)
            self.image_features_val = h5py.File(filename_val, 'r', swmr=True)
            # Map image name -> row index in the HDF5 'att' dataset.
            # NOTE(review): 'trainset.txt'.format(self.split) is a no-op
            # .format call on a string without placeholders.
            with open(os.path.join(self.dir_cnn, 'trainset.txt'.format(self.split)), 'r') as f:
                self.image_names_to_index_train = {}
                for i, line in enumerate(f):
                    self.image_names_to_index_train[line.strip()] = i
            with open(os.path.join(self.dir_cnn, 'valset.txt'.format(self.split)), 'r') as f:
                self.image_names_to_index_val = {}
                for i, line in enumerate(f):
                    self.image_names_to_index_val[line.strip()] = i
def __getitem__(self, index):
    """Return one training/eval example: the tokenized question, the image
    features (R-CNN regions or CNN grid), and, when annotations exist, the
    answer target (sampled or argmax).
    """
    item = {}
    item['index'] = index

    # Process Question (word token)
    question = self.dataset['questions'][index]
    if self.load_original_annotation:
        item['original_question'] = question

    item['question_id'] = question['question_id']
    item['question'] = torch.tensor(question['question_wids'], dtype=torch.long)
    item['lengths'] = torch.tensor([len(question['question_wids'])], dtype=torch.long)
    item['image_name'] = question['image_name']

    # Process Object, Attribut and Relational features
    # (R-CNN region features take precedence over grid CNN features.)
    if self.dir_rcnn:
        item = self.add_rcnn_to_item(item)
    elif self.dir_cnn:
        item = self.add_cnn_to_item(item)

    # Process Answer if exists
    if 'annotations' in self.dataset:
        annotation = self.dataset['annotations'][index]
        if self.load_original_annotation:
            item['original_annotation'] = annotation
        if 'train' in self.split and self.samplingans:
            # Sample one ground-truth answer with probability proportional to
            # its annotator count, instead of always taking the top answer.
            proba = annotation['answers_count']
            proba = proba / np.sum(proba)
            item['answer_id'] = int(np.random.choice(annotation['answers_id'], p=proba))
        else:
            item['answer_id'] = annotation['answer_id']
        item['class_id'] = torch.tensor([item['answer_id']], dtype=torch.long)
        item['answer'] = annotation['answer']
        item['question_type'] = annotation['question_type']
    else:
        # Test split: no answers available; flag test-dev membership instead.
        if item['question_id'] in self.is_qid_testdev:
            item['is_testdev'] = True
        else:
            item['is_testdev'] = False

    # Optional relation mask (kept for reference, used by xmn_net):
    # if Options()['model.network.name'] == 'xmn_net':
    #     num_feat = 36
    #     relation_mask = np.zeros((num_feat, num_feat))
    #     boxes = item['coord']
    #     for i in range(num_feat):
    #         for j in range(i+1, num_feat):
    #             # if there is no overlap between two bounding box
    #             if boxes[0,i]>boxes[2,j] or boxes[0,j]>boxes[2,i] or boxes[1,i]>boxes[3,j] or boxes[1,j]>boxes[3,i]:
    #                 pass
    #             else:
    #                 relation_mask[i,j] = relation_mask[j,i] = 1
    #     relation_mask = torch.from_numpy(relation_mask).byte()
    #     item['relation_mask'] = relation_mask

    return item
def download(self):
    """Download the VQA v2 question/annotation archives, unpack them, and
    rename the extracted JSON files to the (prefix-less) names the rest of
    the pipeline expects. Shells out to mkdir/wget/unzip/mv via os.system.
    """
    dir_zip = osp.join(self.dir_raw, 'zip')
    os.system('mkdir -p '+dir_zip)
    dir_ann = osp.join(self.dir_raw, 'annotations')
    os.system('mkdir -p '+dir_ann)

    url_prefix = 'http://visualqa.org/data/mscoco/vqa/'
    archives = [
        'v2_Questions_Train_mscoco.zip',
        'v2_Questions_Val_mscoco.zip',
        'v2_Questions_Test_mscoco.zip',
        'v2_Annotations_Train_mscoco.zip',
        'v2_Annotations_Val_mscoco.zip',
    ]
    for archive in archives:
        os.system('wget '+url_prefix+archive+' -P '+dir_zip)
    for archive in archives:
        os.system('unzip '+osp.join(dir_zip, archive)+' -d '+dir_ann)

    # Strip the 'v2_' prefix so downstream loaders find the expected names.
    targets = [
        'mscoco_train2014_annotations.json',
        'mscoco_val2014_annotations.json',
        'OpenEnded_mscoco_train2014_questions.json',
        'OpenEnded_mscoco_val2014_questions.json',
        'OpenEnded_mscoco_test2015_questions.json',
        'OpenEnded_mscoco_test-dev2015_questions.json',
    ]
    for target in targets:
        os.system('mv '+osp.join(dir_ann, 'v2_'+target)+' '+osp.join(dir_ann, target))
| 8,260 | 44.640884 | 122 | py |
introd | introd-main/cfvqa/cfvqa/engines/engine.py | import os
import math
import time
import torch
import datetime
import threading
import numpy as np
from bootstrap.lib import utils
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class Engine(object):
    """Contains training and evaluation procedures
    """

    def __init__(self):
        self.hooks = {}
        self.epoch = 0
        self.dataset = None
        self.model = None
        self.optimizer = None
        self.view = None
        self.best_out = {}

        # generate_view will be executed at the end of each
        # training and evaluation epoch
        self.register_hook('train_on_flush', self.generate_view)
        self.register_hook('eval_on_flush', self.generate_view)

    def generate_view(self):
        """ Generate a view.html via an asynchronous call to `self.view.generate()`
        """
        if self.view is not None:
            threading.Thread(target=self.view.generate).start()
        # path_opts = os.path.join(Options()['exp']['dir'], 'options.yaml')
        # os.system('python -m bootstrap.views.view --path_opts {}'.format(path_opts))

    def load_state_dict(self, state):
        """ Restore the engine state: epoch counter and best scores per criteria.
        """
        self.epoch = state['epoch']
        self.best_out = state['best_out']

    def state_dict(self):
        """ Return the engine state: epoch counter and best scores per criteria.
        """
        state = {}
        state['epoch'] = self.epoch
        state['best_out'] = self.best_out
        return state

    def hook(self, name):
        """ Run all the callback functions that have been registered
            for a hook.

            Args:
                name: the name of the hook
        """
        if name in self.hooks:
            for func in self.hooks[name]:
                func()

    def register_hook(self, name, func):
        """ Register a callback function to be triggered when the hook
            is called.

            Args:
                name: the name of the hook
                func: the callback function (no argument)

            Example usage:

            .. code-block:: python

                def func():
                    print('hooked!')

                engine.register_hook('train_on_start_batch', func)
        """
        if name not in self.hooks:
            self.hooks[name] = []
        self.hooks[name].append(func)

    def resume(self, map_location=None):
        """ Resume a checkpoint using the `bootstrap.lib.options.Options`
        """
        Logger()('Loading {} checkpoint'.format(Options()['exp']['resume']))
        self.load(Options()['exp']['dir'],
                  Options()['exp']['resume'],
                  self.model, self.optimizer,
                  map_location=map_location)
        # Restart at the epoch after the resumed one.
        if self.epoch > 0:
            self.epoch += 1

    def eval(self):
        """ Launch evaluation procedures
        """
        Logger()('Launching evaluation procedures')

        if Options()['dataset']['eval_split']:
            # self.epoch-1 to be equal to the same resumed epoch
            # or to be equal to -1 when not resumed
            self.eval_epoch(self.model, self.dataset['eval'], self.epoch - 1, logs_json=True)

        Logger()('Ending evaluation procedures')

    def train(self):
        """ Launch training procedures

            List of the hooks:

            - train_on_start: before the full training procedure
        """
        Logger()('Launching training procedures')

        self.hook('train_on_start')
        while self.epoch < Options()['engine']['nb_epochs']:
            self.train_epoch(self.model, self.dataset['train'], self.optimizer, self.epoch)

            if Options()['dataset']['eval_split']:
                out = self.eval_epoch(self.model, self.dataset['eval'], self.epoch)

                if 'saving_criteria' in Options()['engine'] and Options()['engine']['saving_criteria'] is not None:
                    for saving_criteria in Options()['engine']['saving_criteria']:
                        if self.is_best(out, saving_criteria):
                            name = saving_criteria.split(':')[0]
                            Logger()('Saving best checkpoint for strategy {}'.format(name))
                            self.save(Options()['exp']['dir'], 'best_{}'.format(name), self.model, self.optimizer)

            Logger()('Saving last checkpoint')
            self.save(Options()['exp']['dir'], 'last', self.model, self.optimizer)
            self.epoch += 1

        Logger()('Ending training procedures')

    def train_epoch(self, model, dataset, optimizer, epoch, mode='train'):
        """ Launch training procedures for one epoch

            List of the hooks:

            - train_on_start_epoch: before the training procedure for an epoch
            - train_on_start_batch: before the training precedure for a batch
            - train_on_forward: after the forward of the model
            - train_on_bachward: after the backward of the loss
            - train_on_update: after the optimization step
            - train_on_print: after the print to the terminal
            - train_on_end_batch: end of the training procedure for a batch
            - train_on_end_epoch: before saving the logs in logs.json
            - train_on_flush: end of the training procedure for an epoch
        """
        utils.set_random_seed(Options()['misc']['seed'] + epoch)  # to be able to reproduce exps on reload
        Logger()('Training model on {}set for epoch {}'.format(dataset.split, epoch))
        model.train()

        timer = {
            'begin': time.time(),
            'elapsed': time.time(),
            'process': None,
            'load': None,
            'run_avg': 0
        }
        out_epoch = {}
        batch_loader = dataset.make_batch_loader()

        self.hook(f'{mode}_on_start_epoch')
        for i, batch in enumerate(batch_loader):
            timer['load'] = time.time() - timer['elapsed']
            self.hook(f'{mode}_on_start_batch')

            optimizer.zero_grad()
            out = model(batch)
            self.hook(f'{mode}_on_forward')

            if not torch.isnan(out['loss']):
                out['loss'].backward()
            else:
                Logger()('NaN detected')
            # torch.cuda.synchronize()
            self.hook(f'{mode}_on_backward')

            optimizer.step()
            # torch.cuda.synchronize()
            self.hook(f'{mode}_on_update')

            timer['process'] = time.time() - timer['elapsed']
            if i == 0:
                timer['run_avg'] = timer['process']
            else:
                # Exponential moving average of the per-batch processing time.
                timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2

            Logger().log_value(f'{mode}_batch.epoch', epoch, should_print=False)
            Logger().log_value(f'{mode}_batch.batch', i, should_print=False)
            Logger().log_value(f'{mode}_batch.timer.process', timer['process'], should_print=False)
            Logger().log_value(f'{mode}_batch.timer.load', timer['load'], should_print=False)

            # Accumulate every scalar output of the model for the epoch summary.
            for key, value in out.items():
                if torch.is_tensor(value):
                    if value.dim() <= 1:
                        value = value.item()  # get number from a torch scalar
                    else:
                        continue
                if type(value) == list:
                    continue
                if type(value) == dict:
                    continue
                if key not in out_epoch:
                    out_epoch[key] = []
                out_epoch[key].append(value)
                Logger().log_value(f'{mode}_batch.'+key, value, should_print=False)

            if i % Options()['engine']['print_freq'] == 0:
                Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
                Logger()("{}  elapsed: {} | left: {}".format(' '*len(mode),
                    datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
                    datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
                Logger()("{}  process: {:.5f} | load: {:.5f}".format(' '*len(mode), timer['process'], timer['load']))
                Logger()("{}  loss: {:.5f}".format(' '*len(mode), out['loss'].data.item()))
                self.hook(f'{mode}_on_print')

            timer['elapsed'] = time.time()
            self.hook(f'{mode}_on_end_batch')

            if Options()['engine']['debug']:
                if i > 2:
                    break

        Logger().log_value(f'{mode}_epoch.epoch', epoch, should_print=True)
        for key, value in out_epoch.items():
            Logger().log_value(f'{mode}_epoch.'+key, np.asarray(value).mean(), should_print=True)

        self.hook(f'{mode}_on_end_epoch')
        Logger().flush()
        self.hook(f'{mode}_on_flush')

    def eval_epoch(self, model, dataset, epoch, mode='eval', logs_json=True):
        """ Launch evaluation procedures for one epoch

            List of the hooks (``mode='eval'`` by default):

            - mode_on_start_epoch: before the evaluation procedure for an epoch
            - mode_on_start_batch: before the evaluation precedure for a batch
            - mode_on_forward: after the forward of the model
            - mode_on_print: after the print to the terminal
            - mode_on_end_batch: end of the evaluation procedure for a batch
            - mode_on_end_epoch: before saving the logs in logs.json
            - mode_on_flush: end of the evaluation procedure for an epoch

            Returns:
                out(dict): mean of all the scalar outputs of the model, indexed by output name, for this epoch
        """
        utils.set_random_seed(Options()['misc']['seed'] + epoch)  # to be able to reproduce exps on reload
        Logger()('Evaluating model on {}set for epoch {}'.format(dataset.split, epoch))
        model.eval()

        timer = {
            'begin': time.time(),
            'elapsed': time.time(),
            'process': None,
            'load': None,
            'run_avg': 0
        }
        out_epoch = {}
        batch_loader = dataset.make_batch_loader()

        self.hook('{}_on_start_epoch'.format(mode))
        for i, batch in enumerate(batch_loader):
            timer['load'] = time.time() - timer['elapsed']
            self.hook('{}_on_start_batch'.format(mode))

            with torch.no_grad():
                out = model(batch)
            # torch.cuda.synchronize()
            self.hook('{}_on_forward'.format(mode))

            timer['process'] = time.time() - timer['elapsed']
            if i == 0:
                timer['run_avg'] = timer['process']
            else:
                timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2

            Logger().log_value('{}_batch.batch'.format(mode), i, should_print=False)
            Logger().log_value('{}_batch.epoch'.format(mode), epoch, should_print=False)
            Logger().log_value('{}_batch.timer.process'.format(mode), timer['process'], should_print=False)
            Logger().log_value('{}_batch.timer.load'.format(mode), timer['load'], should_print=False)

            for key, value in out.items():
                if torch.is_tensor(value):
                    if value.dim() <= 1:
                        value = value.item()  # get number from a torch scalar
                    else:
                        continue
                if type(value) == list:
                    continue
                if type(value) == dict:
                    continue
                if key not in out_epoch:
                    out_epoch[key] = []
                out_epoch[key].append(value)
                Logger().log_value('{}_batch.{}'.format(mode, key), value, should_print=False)

            if i % Options()['engine']['print_freq'] == 0:
                Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
                Logger()("{}  elapsed: {} | left: {}".format(' '*len(mode),
                    datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
                    datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
                Logger()("{}  process: {:.5f} | load: {:.5f}".format(' '*len(mode), timer['process'], timer['load']))
                self.hook('{}_on_print'.format(mode))

            timer['elapsed'] = time.time()
            self.hook('{}_on_end_batch'.format(mode))

            if Options()['engine']['debug']:
                if i > 10:
                    break

        out = {}
        for key, value in out_epoch.items():
            try:
                out[key] = np.asarray(value).mean()
            except Exception:  # narrowed from a bare except: keep Ctrl-C working
                import ipdb; ipdb.set_trace()

        Logger().log_value('{}_epoch.epoch'.format(mode), epoch, should_print=True)
        for key, value in out.items():
            Logger().log_value('{}_epoch.{}'.format(mode, key), value, should_print=True)

        self.hook('{}_on_end_epoch'.format(mode))
        if logs_json:
            Logger().flush()

        self.hook('{}_on_flush'.format(mode))
        return out

    def is_best(self, out, saving_criteria):
        """ Verify if the last model is the best for a specific saving criteria

            Args:
                out(dict): mean of all the scalar outputs of model indexed by output name
                saving_criteria(str):

            Returns:
                is_best(bool)

            Example usage:

            .. code-block:: python

                out = {
                    'loss': 0.2,
                    'acctop1': 87.02
                }

                engine.is_best(out, 'loss:min')
        """
        if ':min' in saving_criteria:
            name = saving_criteria.replace(':min', '')
            minimize = True
        elif ':max' in saving_criteria:
            name = saving_criteria.replace(':max', '')
            minimize = False
        else:
            error_msg = """'--engine.saving_criteria' named '{}' does not specify order,
            you need to chose between '{}' or '{}' to specify if the criteria needs to be minimize or maximize""".format(
                saving_criteria, saving_criteria+':min', saving_criteria+':max')
            raise ValueError(error_msg)

        if name not in out:
            raise KeyError("'--engine.saving_criteria' named '{}' not in outputs '{}'".format(name, list(out.keys())))

        if name not in self.best_out:
            self.best_out[name] = out[name]
        else:
            # Compare the values directly instead of formatting them into a
            # string and eval()-ing it: eval is unsafe and float -> str
            # formatting can lose precision.
            if minimize:
                improved = out[name] < self.best_out[name]
            else:
                improved = out[name] > self.best_out[name]
            if improved:
                self.best_out[name] = out[name]
                return True

        return False

    def load(self, dir_logs, name, model, optimizer, map_location=None):
        """ Load a checkpoint

            Args:
                dir_logs: directory of the checkpoint
                name: name of the checkpoint
                model: model associated to the checkpoint
                optimizer: optimizer associated to the checkpoint
        """
        path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')

        Logger()('Loading model...')
        model_state = torch.load(path_template.format(name, 'model'), map_location=map_location)
        model.load_state_dict(model_state, strict=False)

        if Options()['dataset']['train_split'] is not None:
            if os.path.isfile(path_template.format(name, 'optimizer')):
                Logger()('Loading optimizer...')
                optimizer_state = torch.load(path_template.format(name, 'optimizer'), map_location=map_location)
                optimizer.load_state_dict(optimizer_state)
            else:
                Logger()('No optimizer checkpoint', log_level=Logger.WARNING)

        if os.path.isfile(path_template.format(name, 'engine')):
            Logger()('Loading engine...')
            engine_state = torch.load(path_template.format(name, 'engine'), map_location=map_location)
            self.load_state_dict(engine_state)
        else:
            Logger()('No engine checkpoint', log_level=Logger.WARNING)

    def save(self, dir_logs, name, model, optimizer):
        """ Save a checkpoint

            Args:
                dir_logs: directory of the checkpoint
                name: name of the checkpoint
                model: model associated to the checkpoint
                optimizer: optimizer associated to the checkpoint
        """
        path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')

        Logger()('Saving model...')
        model_state = model.state_dict()
        torch.save(model_state, path_template.format(name, 'model'))

        Logger()('Saving optimizer...')
        optimizer_state = optimizer.state_dict()
        torch.save(optimizer_state, path_template.format(name, 'optimizer'))

        Logger()('Saving engine...')
        engine_state = self.state_dict()
        torch.save(engine_state, path_template.format(name, 'engine'))
| 17,193 | 38.345538 | 121 | py |
introd | introd-main/cfvqa/cfvqa/engines/logger.py | from bootstrap.lib.logger import Logger
from .engine import Engine
class LoggerEngine(Engine):
    """ LoggerEngine is similar to Engine. The only difference is a more powerful is_best method.
        It is able to look into the logger dictionary that contains the list of all the logged variables
        indexed by name.

        Example usage:

        .. code-block:: python

            out = {
                'loss': 0.2,
                'acctop1': 87.02
            }

            engine.is_best(out, 'loss:min')

            # Logger().values['eval_epoch.recall_at_1'] contains a list
            # of all the recall at 1 values for each evaluation epoch
            engine.is_best(out, 'eval_epoch.recall_at_1')
    """

    def __init__(self):
        super(LoggerEngine, self).__init__()

    def is_best(self, out, saving_criteria):
        """ Return True when the tracked value improves on the best seen so far.

        The value is looked up first in `out`, then in the Logger history.
        """
        if ':min' in saving_criteria:
            name = saving_criteria.replace(':min', '')
            minimize = True
        elif ':max' in saving_criteria:
            name = saving_criteria.replace(':max', '')
            minimize = False
        else:
            error_msg = """'--engine.saving_criteria' named '{}' does not specify order,
            you need to chose between '{}' or '{}' to specify if the criteria needs to be minimize or maximize""".format(
                saving_criteria, saving_criteria + ':min', saving_criteria + ':max')
            raise ValueError(error_msg)

        if name in out:
            new_value = out[name]
        elif name in Logger().values:
            new_value = Logger().values[name][-1]
        else:
            raise ValueError("name '{}' not in outputs '{}' and not in logger '{}'".format(
                name, list(out.keys()), list(Logger().values.keys())))

        if name not in self.best_out:
            self.best_out[name] = new_value
        else:
            # Compare the values directly instead of formatting them into a
            # string and eval()-ing it: eval is unsafe and float -> str
            # formatting can lose precision.
            if minimize:
                improved = new_value < self.best_out[name]
            else:
                improved = new_value > self.best_out[name]
            if improved:
                self.best_out[name] = new_value
                return True

        return False
| 2,067 | 35.280702 | 121 | py |
introd | introd-main/cfvqa/cfvqa/engines/factory.py | import importlib
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
from .engine import Engine
from .logger import LoggerEngine
def factory():
    """Instantiate the engine selected in the global Options.

    Uses a custom module's factory when `engine.import` is set, otherwise one
    of the built-in engines chosen by `engine.name`.

    Raises:
        ValueError: if `engine.name` is not a known engine.
    """
    Logger()('Creating engine...')

    if Options()['engine'].get('import', False):
        # import usually is "yourmodule.engine.factory"
        module = importlib.import_module(Options()['engine']['import'])
        engine = module.factory()

    elif Options()['engine']['name'] == 'default':
        engine = Engine()

    elif Options()['engine']['name'] == 'logger':
        engine = LoggerEngine()

    else:
        # Name the offending value instead of raising a bare ValueError.
        raise ValueError("Unknown engine name: {}".format(Options()['engine']['name']))

    return engine
| 639 | 23.615385 | 71 | py |
introd | introd-main/cfvqa/cfvqa/engines/__init__.py | from .factory import factory | 28 | 28 | 28 | py |
introd | introd-main/cfvqa/cfvqa/optimizers/factory.py | import torch.nn as nn
from bootstrap.lib.options import Options
from bootstrap.optimizers.factory import factory_optimizer
from block.optimizers.lr_scheduler import ReduceLROnPlateau
from block.optimizers.lr_scheduler import BanOptimizer
def factory(model, engine):
    """Build the optimizer (and optional LR scheduler) described in Options.

    Args:
        model: the model whose network parameters may be (re-)initialized.
        engine: the training engine the optimizer hooks into.

    Returns:
        The configured BanOptimizer, possibly wrapped in ReduceLROnPlateau.
    """
    opt = Options()['optimizer']
    # Consistently read through the `opt` alias (it was previously defined but
    # the code kept re-reading Options()['optimizer'] directly).
    optimizer = BanOptimizer(
        engine,
        name=opt.get('name', 'Adamax'),
        lr=opt['lr'],
        gradual_warmup_steps=opt.get('gradual_warmup_steps', [0.5, 2.0, 4]),
        lr_decay_epochs=opt.get('lr_decay_epochs', [10, 20, 2]),
        lr_decay_rate=opt.get('lr_decay_rate', .25))

    if opt.get('lr_scheduler', None):
        optimizer = ReduceLROnPlateau(optimizer, engine,
                                      **opt['lr_scheduler'])

    if opt.get('init', None) == 'glorot':
        # Glorot/Xavier init for weight matrices, zeros for bias vectors.
        for p in model.network.parameters():
            if p.dim() == 1:
                p.data.fill_(0)
            elif p.dim() >= 2:
                nn.init.xavier_uniform_(p.data)
            else:
                raise ValueError(p.dim())

    return optimizer
| 1,127 | 35.387097 | 95 | py |
introd | introd-main/cfvqa/cfvqa/optimizers/__init__.py | 0 | 0 | 0 | py | |
introd | introd-main/css/fc.py | from __future__ import print_function
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
class FCNet(nn.Module):
"""Simple class for non-linear fully connect network
"""
def __init__(self, dims):
super(FCNet, self).__init__()
layers = []
for i in range(len(dims)-2):
in_dim = dims[i]
out_dim = dims[i+1]
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
layers.append(nn.ReLU())
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
layers.append(nn.ReLU())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
# Quick smoke test: print the layer layout of a 2-layer and a 1-layer MLP.
if __name__ == '__main__':
    fc1 = FCNet([10, 20, 10])
    print(fc1)

    print('============')
    fc2 = FCNet([10, 20])
    print(fc2)
| 853 | 24.117647 | 76 | py |
introd | introd-main/css/main.py | import argparse
import json
import cPickle as pickle
from collections import defaultdict, Counter
from os.path import dirname, join
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
from dataset import Dictionary, VQAFeatureDataset
import base_model
from train import train
import utils
import click
from vqa_debias_loss_functions import *
def parse_args():
    """Parse the command line options for training the BottomUpTopDown model
    with a de-biasing method and return the argparse namespace.
    """
    ap = argparse.ArgumentParser(
        "Train the BottomUpTopDown model with a de-biasing method")
    add = ap.add_argument

    # Arguments we added
    add('--cache_features', default=True,
        help="Cache image features in RAM. Makes things much faster, "
             "especially if the filesystem is slow, but requires at least 48gb of RAM")
    add('--dataset', default='cpv2',
        choices=["v2", "cpv2", "cpv1", "cpv2val"],
        help="Run on VQA-2.0 instead of VQA-CP 2.0")
    add('-p', "--entropy_penalty", default=0.36, type=float,
        help="Entropy regularizer weight for the learned_mixin model")
    add('--mode', default="updn",
        choices=["updn", "q_debias", "v_debias", "q_v_debias"],
        help="Kind of ensemble loss to use")
    add('--debias', default="learned_mixin",
        choices=["learned_mixin_rw2", "learned_mixin_rw", "learned_mixin", "reweight", "bias_product", "none", 'focal'],
        help="Kind of ensemble loss to use")
    add('--topq', type=int, default=1,
        choices=[1, 2, 3],
        help="num of words to be masked in questio")
    add('--keep_qtype', default=True,
        help="keep qtype or not")
    add('--topv', type=int, default=1,
        choices=[1, 3, 5, -1],
        help="num of object bbox to be masked in image")
    add('--top_hint', type=int, default=9,
        choices=[9, 18, 27, 36],
        help="num of hint")
    add('--qvp', type=int, default=0,
        choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        help="ratio of q_bias and v_bias")
    add('--eval_each_epoch', default=True,
        help="Evaluate every epoch, instead of at the end")

    # Arguments from the original model; defaults kept, except --epochs which
    # is 15 since the model maxes out its performance well before then.
    add('--epochs', type=int, default=15)
    add('--num_hid', type=int, default=1024)
    add('--model', type=str, default='baseline0_newatt')
    add('--output', type=str, default='logs/exp0')
    add('--batch_size', type=int, default=512)
    add('--seed', type=int, default=1111, help='random seed')
    add('--feature', type=str, default='css')

    return ap.parse_args()
def get_bias(train_dset, eval_dset):
    """Attach a "bias" vector to every entry of both datasets.

    The bias of an entry is the expected ground-truth score of each answer
    over all *training* questions sharing the entry's question type.
    """
    num_answers = train_dset.num_ans_candidates
    qtype_scores = defaultdict(Counter)  # question_type -> answer label -> total score
    qtype_counts = Counter()             # question_type -> num occurrences

    for entry in train_dset.entries:
        answer = entry["answer"]
        qtype = answer["question_type"]
        qtype_counts[qtype] += 1
        if answer["labels"] is not None:
            for label, score in zip(answer["labels"], answer["scores"]):
                qtype_scores[qtype][label] += score

    qtype_to_probs = {}
    for qtype, count in qtype_counts.items():
        probs = np.zeros(num_answers, np.float32)
        for label, total_score in qtype_scores[qtype].items():
            probs[label] += total_score
        probs /= count
        qtype_to_probs[qtype] = probs

    for dset in (train_dset, eval_dset):
        for entry in dset.entries:
            entry["bias"] = qtype_to_probs[entry["answer"]["question_type"]]
def main():
    """Entry point: build the datasets, model and debias loss function from
    the command line options, then launch training.
    """
    args = parse_args()
    dataset = args.dataset

    if not os.path.isdir(args.output):
        utils.create_dir(args.output)
    else:
        # BUGFIX: `default=False` used to be passed to str.format (where it
        # was silently ignored) instead of to click.confirm.
        if click.confirm('Exp directory already exists in {}. Erase?'
                         .format(args.output), default=False):
            os.system('rm -r ' + args.output)
            utils.create_dir(args.output)
        else:
            os._exit(1)

    if dataset == 'cpv1':
        dictionary = Dictionary.load_from_file('data/dictionary_v1.pkl')
    elif dataset == 'cpv2' or dataset == 'v2' or dataset == 'cpv2val':
        dictionary = Dictionary.load_from_file('data/dictionary.pkl')

    print("Building train dataset...")
    train_dset = VQAFeatureDataset('train', dictionary, dataset=dataset,
                                   cache_image_features=args.cache_features)

    print("Building test dataset...")
    eval_dset = VQAFeatureDataset('val', dictionary, dataset=dataset,
                                  cache_image_features=args.cache_features)

    # Compute the per-question-type answer prior used by the debias losses.
    get_bias(train_dset, eval_dset)

    # Build the model using the original constructor
    constructor = 'build_%s' % args.model
    model = getattr(base_model, constructor)(train_dset, args.num_hid).cuda()
    if dataset == 'cpv1':
        model.w_emb.init_embedding('data/glove6b_init_300d_v1.npy')
    elif dataset == 'cpv2' or dataset == 'v2' or dataset == 'cpv2val':
        model.w_emb.init_embedding('data/glove6b_init_300d.npy')

    # Add the loss_fn based our arguments
    if args.debias == "bias_product":
        model.debias_loss_fn = BiasProduct()
    elif args.debias == "none":
        model.debias_loss_fn = Plain()
    elif args.debias == "reweight":
        model.debias_loss_fn = ReweightByInvBias()
    elif args.debias == "learned_mixin":
        model.debias_loss_fn = LearnedMixin(args.entropy_penalty)
    elif args.debias == "focal":
        model.debias_loss_fn = Focal()
    else:
        # BUGFIX: used to raise RuntimeError(args.mode), reporting the wrong
        # option; the unrecognized value is args.debias.
        raise RuntimeError(args.debias)

    with open('util/qid2type_%s.json' % args.dataset, 'r') as f:
        qid2type = json.load(f)

    model = model.cuda()
    batch_size = args.batch_size

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = True

    train_loader = DataLoader(train_dset, batch_size, shuffle=True, num_workers=0)
    eval_loader = DataLoader(eval_dset, batch_size, shuffle=False, num_workers=0)

    print("Starting training...")
    train(model, train_loader, eval_loader, args, qid2type)
# Script entry point.
if __name__ == '__main__':
    main()
| 6,824 | 34.732984 | 119 | py |
introd | introd-main/css/vqa_debias_loss_functions.py | from collections import OrderedDict, defaultdict, Counter
from torch import nn
from torch.nn import functional as F
import numpy as np
import torch
import inspect
def convert_sigmoid_logits_to_binary_logprobs(logits):
    """Return the pair (log(sigmoid(logits)), log(1 - sigmoid(logits))),
    computed stably through softplus.
    """
    log_p = -F.softplus(-logits)          # log(sigmoid(x)) = -softplus(-x)
    log_not_p = log_p - logits            # log(1 - sigmoid(x)) = log_p - x
    return log_p, log_not_p
def elementwise_logsumexp(a, b):
    """Elementwise, numerically stable log(exp(a) + exp(b))."""
    hi = torch.max(a, b)
    lo = torch.min(a, b)
    return hi + torch.log1p(torch.exp(lo - hi))
def renormalize_binary_logits(a, b):
    """Shift the log-space pair (a, b) so that exp(a) + exp(b) == 1."""
    log_total = elementwise_logsumexp(a, b)
    return a - log_total, b - log_total
class DebiasLossFn(nn.Module):
    """General API for our loss functions"""

    def forward(self, hidden, logits, bias, labels):
        """
        :param hidden: [batch, n_hidden] hidden features from the last layer in the model
        :param logits: [batch, n_answers_options] sigmoid logits for each answer option
        :param bias: [batch, n_answers_options]
          bias probabilities for each answer option between 0 and 1
        :param labels: [batch, n_answers_options]
          scores for each answer option, between 0 and 1
        :return: Scalar loss
        """
        raise NotImplementedError()

    def to_json(self):
        """Get a json representation of this loss function.

        We construct this by looking up the __init__ args
        """
        cls = self.__class__
        init = cls.__init__
        if init is object.__init__:
            return []  # No init args

        # inspect.getargspec was removed in Python 3.11; use getfullargspec
        # when available and fall back only on interpreters without it.
        if hasattr(inspect, 'getfullargspec'):
            init_signature = inspect.getfullargspec(init)
            varkw = init_signature.varkw
        else:
            init_signature = inspect.getargspec(init)
            varkw = init_signature.keywords
        if init_signature.varargs is not None:
            raise NotImplementedError("varags not supported")
        if varkw is not None:
            raise NotImplementedError("keywords not supported")
        args = [x for x in init_signature.args if x != "self"]

        out = OrderedDict()
        out["name"] = cls.__name__
        for key in args:
            out[key] = getattr(self, key)
        return out
class Plain(DebiasLossFn):
    """Plain binary cross-entropy baseline: no debiasing at all."""

    def forward(self, hidden, logits, bias, labels):
        # Mean BCE over all elements, rescaled by the number of answer
        # options so the loss behaves like a per-example sum over answers.
        mean_bce = F.binary_cross_entropy_with_logits(logits, labels)
        return mean_bce * labels.size(1)
class PlainKD(DebiasLossFn):
    """Plain BCE kept per-example (for knowledge-distillation pipelines).

    Returns the pair (None, per-example loss) instead of a scalar.
    """

    def forward(self, hidden, logits, bias, labels):
        per_answer = F.binary_cross_entropy_with_logits(
            logits, labels, reduction='none')
        per_example = per_answer.sum(1)
        return None, per_example
class Focal(DebiasLossFn):
    """Focal-style debiasing: down-weight answers the bias is confident about
    via a (1 - softmax(bias))^2 factor on the model's log-probabilities.
    """

    def forward(self, hidden, logits, bias, labels):
        bias_weight = 1 - F.softmax(bias, dim=1)
        log_probs = torch.log(F.softmax(logits, dim=1) + 1e-5)
        focal_logits = log_probs * (bias_weight * bias_weight)
        loss = F.binary_cross_entropy_with_logits(focal_logits, labels)
        return loss * labels.size(1)
class ReweightByInvBias(DebiasLossFn):
    """Reweight each element's BCE by (1 - bias): biased answers count less."""

    def forward(self, hidden, logits, bias, labels):
        # Manual binary cross entropy, kept per-element so it can be reweighted.
        log_p, log_not_p = convert_sigmoid_logits_to_binary_logprobs(logits)
        bce = -(log_p * labels + (1 - labels) * log_not_p)
        inv_bias = (1 - bias)
        weighted = bce * inv_bias
        return weighted.sum() / inv_bias.sum()
class BiasProduct(DebiasLossFn):
    """Product-of-experts ensemble: add the (smoothed) bias to the model's
    predictions in log space, renormalize, and take the BCE."""

    def __init__(self, smooth=True, smooth_init=-1, constant_smooth=0.0):
        """
        :param smooth: Add a learned sigmoid(a) factor to the bias to smooth it
        :param smooth_init: How to initialize `a`
        :param constant_smooth: Constant to add to the bias to smooth it
        """
        super(BiasProduct, self).__init__()
        self.constant_smooth = constant_smooth
        self.smooth_init = smooth_init
        self.smooth = smooth
        if smooth:
            self.smooth_param = torch.nn.Parameter(
                torch.from_numpy(np.full((1,), smooth_init, dtype=np.float32)))
        else:
            self.smooth_param = None

    def forward(self, hidden, logits, bias, labels):
        smooth = self.constant_smooth
        if self.smooth:
            # torch.sigmoid replaces the deprecated F.sigmoid alias.
            smooth = smooth + torch.sigmoid(self.smooth_param)

        # Convert the bias into log-space, with a factor for both the
        # binary outputs for each answer option
        bias_lp = torch.log(bias + smooth)
        bias_l_inv = torch.log1p(-bias + smooth)

        # Convert the the logits into log-space with the same format
        log_prob, log_one_minus_prob = convert_sigmoid_logits_to_binary_logprobs(logits)

        # Add the bias
        log_prob += bias_lp
        log_one_minus_prob += bias_l_inv

        # Re-normalize the factors in logspace
        log_prob, log_one_minus_prob = renormalize_binary_logits(log_prob, log_one_minus_prob)

        # Compute the binary cross entropy
        loss = -(log_prob * labels + (1 - labels) * log_one_minus_prob).sum(1).mean(0)
        return loss
class LearnedMixin(DebiasLossFn):
    """Learned-Mixin (+H) ensemble: learn per example how strongly to mix in
    the bias, with an entropy penalty weighted by `w`."""

    def __init__(self, w, smooth=True, smooth_init=-1, constant_smooth=0.0):
        """
        :param w: Weight of the entropy penalty
        :param smooth: Add a learned sigmoid(a) factor to the bias to smooth it
        :param smooth_init: How to initialize `a`
        :param constant_smooth: Constant to add to the bias to smooth it
        """
        super(LearnedMixin, self).__init__()
        self.w = w
        self.smooth_init = smooth_init
        self.constant_smooth = constant_smooth
        self.bias_lin = torch.nn.Linear(1024, 1)
        self.smooth = smooth
        if self.smooth:
            self.smooth_param = torch.nn.Parameter(
                torch.from_numpy(np.full((1,), smooth_init, dtype=np.float32)))
        else:
            self.smooth_param = None

    def forward(self, hidden, logits, bias, labels):
        # Per-example confidence in the bias, derived from the hidden state.
        factor = self.bias_lin.forward(hidden)  # [batch, 1]
        factor = F.softplus(factor)

        bias = torch.stack([bias, 1 - bias], 2)  # [batch, n_answers, 2]

        # Smooth
        bias += self.constant_smooth
        if self.smooth:
            # torch.sigmoid replaces the deprecated F.sigmoid alias.
            soften_factor = torch.sigmoid(self.smooth_param)
            bias = bias + soften_factor.unsqueeze(1)

        bias = torch.log(bias)  # Convert to logspace

        # Scale by the factor
        # [batch, n_answers, 2] * [batch, 1, 1] -> [batch, n_answers, 2]
        bias = bias * factor.unsqueeze(1)

        log_prob, log_one_minus_prob = convert_sigmoid_logits_to_binary_logprobs(logits)
        log_probs = torch.stack([log_prob, log_one_minus_prob], 2)

        # Add the bias in
        logits = bias + log_probs

        # Renormalize to get log probabilities
        log_prob, log_one_minus_prob = renormalize_binary_logits(logits[:, :, 0], logits[:, :, 1])

        # Compute loss
        loss = -(log_prob * labels + (1 - labels) * log_one_minus_prob).sum(1).mean(0)

        # Re-normalized version of the bias
        bias_norm = elementwise_logsumexp(bias[:, :, 0], bias[:, :, 1])
        bias_logprob = bias - bias_norm.unsqueeze(2)

        # Compute and add the entropy penalty
        entropy = -(torch.exp(bias_logprob) * bias_logprob).sum(2).mean()
        return loss + self.w * entropy
class LearnedMixinKD(DebiasLossFn):
    """Learned-Mixin variant that also emits distillation logits.

    Same ensembling as LearnedMixin (no entropy penalty weight), but in
    addition to the loss it returns `logits_all`, the log-odds of the
    renormalized ensemble probabilities, for use as a teacher signal.
    """

    def __init__(self, smooth=True, smooth_init=-1, constant_smooth=0.0):
        """
        :param smooth: Add a learned sigmoid(a) factor to the bias to smooth it
        :param smooth_init: How to initialize `a`
        :param constant_smooth: Constant to add to the bias to smooth it
        """
        super(LearnedMixinKD, self).__init__()
        self.smooth_init = smooth_init
        self.constant_smooth = constant_smooth
        # Maps the joint representation (assumed 1024-dim -- TODO confirm) to
        # a scalar mixing coefficient per example.
        self.bias_lin = torch.nn.Linear(1024, 1)
        self.smooth = smooth
        if self.smooth:
            self.smooth_param = torch.nn.Parameter(
                torch.from_numpy(np.full((1,), smooth_init, dtype=np.float32)))
        else:
            self.smooth_param = None

    def forward(self, hidden, logits, bias, labels):
        """Return (logits_all, loss): ensemble log-odds and the ensemble BCE."""
        factor = self.bias_lin.forward(hidden)  # [batch, 1]
        factor = F.softplus(factor)  # non-negative mixing coefficient
        bias = torch.stack([bias, 1 - bias], 2)  # [batch, n_answers, 2]
        # Smooth
        bias += self.constant_smooth
        if self.smooth:
            soften_factor = F.sigmoid(self.smooth_param)
            bias = bias + soften_factor.unsqueeze(1)
        bias = torch.log(bias)  # Convert to logspace
        # Scale by the factor
        # [batch, n_answers, 2] * [batch, 1, 1] -> [batch, n_answers, 2]
        bias = bias * factor.unsqueeze(1)
        log_prob, log_one_minus_prob = convert_sigmoid_logits_to_binary_logprobs(logits)
        log_probs = torch.stack([log_prob, log_one_minus_prob], 2)
        # Add the bias in
        logits = bias + log_probs
        # Renormalize to get log probabilities
        log_prob, log_one_minus_prob = renormalize_binary_logits(logits[:, :, 0], logits[:, :, 1])
        # Compute loss
        loss = -(log_prob * labels + (1 - labels) * log_one_minus_prob).sum(1).mean(0)
        # Re-normalized version of the bias (computed but unused here)
        bias_norm = elementwise_logsumexp(bias[:, :, 0], bias[:, :, 1])
        bias_logprob = bias - bias_norm.unsqueeze(2)
        # Convert ensemble probabilities back to logits: log(p / (1 - p)),
        # clamped to avoid log(0) / division by zero.
        prob_all = torch.exp(log_prob)
        p = torch.clamp(1-prob_all, min=1e-12)
        p = torch.clamp(prob_all/p, min=1e-12)
        logits_all = torch.log(p)
        # NOTE(review): the trailing "| 9581 | ..." tokens on the next line look
        # like dataset-dump residue fused onto the return, not original source.
        return logits_all, loss | 9581 | 35.022556 | 116 | py
introd | introd-main/css/base_model.py | import torch
import torch.nn as nn
from attention import Attention, NewAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
import numpy as np
def mask_softmax(x, mask):
    """Softmax over dim 1 of `x`, restricted to positions where `mask` is nonzero.

    `mask` is unsqueezed to broadcast over x's trailing dim; a small epsilon in
    the denominator keeps the division finite when everything is masked out.
    """
    m = mask.unsqueeze(2).float()
    exps = torch.exp(x - torch.max(x))
    masked = exps * m
    epsilon = 1e-5
    denom = torch.sum(masked, dim=1, keepdim=True) + epsilon
    return masked / denom.expand_as(masked)
class BaseModel(nn.Module):
    """BottomUpTopDown VQA model with a pluggable debiasing loss.

    `debias_loss_fn` must be assigned by the caller before training; forward
    calls it with (joint_repr, logits, bias, labels) when labels are given.
    """

    def __init__(self, w_emb, q_emb, v_att, q_net, v_net, classifier):
        super(BaseModel, self).__init__()
        self.w_emb = w_emb          # word embedding
        self.q_emb = q_emb          # question (sequence) encoder
        self.v_att = v_att          # question-guided visual attention
        self.q_net = q_net          # question projection
        self.v_net = v_net          # visual projection
        self.classifier = classifier
        self.debias_loss_fn = None  # set externally (e.g. LearnedMixin)
        self.bias_lin = torch.nn.Linear(1024, 1)

    def forward(self, v, q, labels, bias, v_mask):
        """Forward
        v: [batch, num_objs, obj_dim]
        q: [batch_size, seq_length]
        v_mask: optional [batch, num_objs] mask for the attention softmax
        return: (logits, loss, w_emb) -- logits, not probs; loss is None when
        labels is None
        """
        w_emb = self.w_emb(q)
        q_emb = self.q_emb(w_emb)  # [batch, q_dim]
        att = self.v_att(v, q_emb)
        if v_mask is None:
            att = nn.functional.softmax(att, 1)
        else:
            att = mask_softmax(att, v_mask)
        v_emb = (att * v).sum(1)  # [batch, v_dim]
        q_repr = self.q_net(q_emb)
        v_repr = self.v_net(v_emb)
        joint_repr = q_repr * v_repr  # elementwise fusion
        logits = self.classifier(joint_repr)
        if labels is not None:
            loss = self.debias_loss_fn(joint_repr, logits, bias, labels)
        else:
            loss = None
        return logits, loss, w_emb
def build_baseline0(dataset, num_hid):
    """Assemble the BottomUpTopDown model with the basic Attention module."""
    word_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
    ques_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0)
    attention = Attention(dataset.v_dim, ques_emb.num_hid, num_hid)
    ques_net = FCNet([num_hid, num_hid])
    vis_net = FCNet([dataset.v_dim, num_hid])
    answer_clf = SimpleClassifier(
        num_hid, 2 * num_hid, dataset.num_ans_candidates, 0.5)
    return BaseModel(word_emb, ques_emb, attention, ques_net, vis_net, answer_clf)
def build_baseline0_newatt(dataset, num_hid):
    # Same assembly as build_baseline0, but with the NewAttention module.
    w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
    q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0)
    v_att = NewAttention(dataset.v_dim, q_emb.num_hid, num_hid)
    q_net = FCNet([q_emb.num_hid, num_hid])
    v_net = FCNet([dataset.v_dim, num_hid])
    classifier = SimpleClassifier(
        num_hid, num_hid * 2, dataset.num_ans_candidates, 0.5)
    # NOTE(review): the trailing "| 2765 | ..." tokens look like dataset-dump
    # residue fused onto this line, not part of the original source.
    return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier) | 2765 | 32.325301 | 102 | py
introd | introd-main/css/base_model_introd.py | import torch
import torch.nn as nn
from attention import Attention, NewAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
import numpy as np
def mask_softmax(x, mask):
    """Masked softmax along dim 1; positions with mask == 0 get probability 0."""
    weights = torch.exp(x - torch.max(x)) * mask.unsqueeze(2).float()
    total = weights.sum(dim=1, keepdim=True) + 1e-5
    return weights / total.expand_as(weights)
class BaseModel(nn.Module):
    """IntroD variant of the BottomUpTopDown model.

    Identical to the base version except forward also returns `logits_all`,
    the extra output of a KD-style `debias_loss_fn` (e.g. ensemble logits).
    """

    def __init__(self, w_emb, q_emb, v_att, q_net, v_net, classifier):
        super(BaseModel, self).__init__()
        self.w_emb = w_emb          # word embedding
        self.q_emb = q_emb          # question (sequence) encoder
        self.v_att = v_att          # question-guided visual attention
        self.q_net = q_net          # question projection
        self.v_net = v_net          # visual projection
        self.classifier = classifier
        self.debias_loss_fn = None  # set externally; must return (logits_all, loss)
        self.bias_lin = torch.nn.Linear(1024, 1)

    def forward(self, v, q, labels, bias, v_mask):
        """Forward
        v: [batch, num_objs, obj_dim]
        q: [batch_size, seq_length]
        v_mask: optional [batch, num_objs] mask for the attention softmax
        return: (logits, logits_all, loss, w_emb); logits_all and loss are
        None when labels is None
        """
        w_emb = self.w_emb(q)
        q_emb = self.q_emb(w_emb)  # [batch, q_dim]
        att = self.v_att(v, q_emb)
        if v_mask is None:
            att = nn.functional.softmax(att, 1)
        else:
            att = mask_softmax(att, v_mask)
        v_emb = (att * v).sum(1)  # [batch, v_dim]
        q_repr = self.q_net(q_emb)
        v_repr = self.v_net(v_emb)
        joint_repr = q_repr * v_repr  # elementwise fusion
        logits = self.classifier(joint_repr)
        if labels is not None:
            logits_all, loss = self.debias_loss_fn(joint_repr, logits, bias, labels)
        else:
            logits_all = None
            loss = None
        return logits, logits_all, loss, w_emb
def build_baseline0(dataset, num_hid):
    """Build the baseline BottomUpTopDown model (basic attention variant)."""
    w_embedding = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
    q_embedding = QuestionEmbedding(300, num_hid, 1, False, 0.0)
    att_module = Attention(dataset.v_dim, q_embedding.num_hid, num_hid)
    q_mlp = FCNet([num_hid, num_hid])
    v_mlp = FCNet([dataset.v_dim, num_hid])
    clf = SimpleClassifier(num_hid, 2 * num_hid, dataset.num_ans_candidates, 0.5)
    return BaseModel(w_embedding, q_embedding, att_module, q_mlp, v_mlp, clf)
def build_baseline0_newatt(dataset, num_hid):
    # Same assembly as build_baseline0, but with the NewAttention module.
    w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
    q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0)
    v_att = NewAttention(dataset.v_dim, q_emb.num_hid, num_hid)
    q_net = FCNet([q_emb.num_hid, num_hid])
    v_net = FCNet([dataset.v_dim, num_hid])
    classifier = SimpleClassifier(
        num_hid, num_hid * 2, dataset.num_ans_candidates, 0.5)
    # NOTE(review): the trailing "| 2820 | ..." tokens look like dataset-dump
    # residue fused onto this line, not part of the original source.
    return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier) | 2820 | 32.583333 | 106 | py
introd | introd-main/css/train_introd.py | import json
import os
import pickle
import time
from os.path import join
import torch
import torch.nn as nn
import utils
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
import random
import copy
from torch.nn import functional as F
def compute_score_with_logits(logits, labels):
    """Soft VQA accuracy per example.

    One-hot encodes the argmax prediction and multiplies it by the soft target
    scores, so each row keeps only the score of the predicted answer.

    Fix: use torch.zeros_like(labels) instead of torch.zeros(...).cuda() so the
    scratch tensor follows labels' device and dtype -- the original hard-coded
    .cuda() call crashed on CPU-only runs and on CPU-resident labels.

    :param logits: [batch, n_answers] prediction logits
    :param labels: [batch, n_answers] soft target scores
    :return: [batch, n_answers] tensor, nonzero only at the predicted answer
    """
    pred = torch.argmax(logits, 1)
    one_hots = torch.zeros_like(labels)
    one_hots.scatter_(1, pred.view(-1, 1), 1)
    return one_hots * labels
def train(model_teacher, model, train_loader, eval_loader, args, qid2type):
    """Train the student `model` by introspective distillation from `model_teacher`.

    For every batch the teacher produces two logit outputs (its plain
    classifier logits and its bias-ensembled logits -- see the IntroD
    BaseModel/LearnedMixinKD). Their per-sample BCE losses are turned into a
    blending weight w = loss_nie / (loss_te + loss_nie); the student is then
    trained with w * KL(teacher || student) + (1 - w) * its own CE loss.
    The best checkpoint by eval score is saved to `<output>/model.pth`.
    """
    dataset = args.dataset
    num_epochs = args.epochs
    mode = args.mode
    run_eval = args.eval_each_epoch
    output = args.output
    optim = torch.optim.Adamax(model.parameters())
    logger = utils.Logger(os.path.join(output, 'log.txt'))
    total_step = 0
    best_eval_score = 0
    logsigmoid = torch.nn.LogSigmoid()
    KLDivLoss = torch.nn.KLDivLoss(reduction='none')  # NOTE(review): unused
    for epoch in range(num_epochs):
        total_loss = 0
        train_score = 0
        t = time.time()
        for i, (v, q, a, b, _, _, _, _) in tqdm(enumerate(train_loader), ncols=100,
                                                desc="Epoch %d" % (epoch + 1), total=len(train_loader)):
            total_step += 1
            v = Variable(v).cuda().requires_grad_()
            q = Variable(q).cuda()
            a = Variable(a).cuda()
            b = Variable(b).cuda()
            # Teacher: plain logits (pred_nie) and bias-ensembled logits (pred_te).
            pred_nie, pred_te, _, _ = model_teacher(v, q, a, b, None)
            pred, _, loss_ce, _ = model(v, q, a, b, None)
            # Normalize targets to a distribution (guard against all-zero rows).
            aa = a/torch.clamp(a.sum(1, keepdim=True), min=1e-24)
            # Per-sample sigmoid BCE of each teacher head.
            loss_te = -(aa*logsigmoid(pred_te) + (1-aa)*logsigmoid(-pred_te)).sum(1)
            loss_nie = -(aa*logsigmoid(pred_nie) + (1-aa)*logsigmoid(-pred_nie)).sum(1)
            loss_te = torch.clamp(loss_te, min=1e-12)
            loss_nie = torch.clamp(loss_nie, min=1e-12)
            # Introspective weight: trust distillation more where the plain
            # head is worse relative to the ensembled head. Detached: the
            # weight itself carries no gradient.
            w = loss_nie/(loss_te+loss_nie)
            w = w.clone().detach()
            # KL(teacher plain distribution || student), cross-entropy form.
            prob_nie = F.softmax(pred_nie, -1).clone().detach()
            loss_kl = - prob_nie*F.log_softmax(pred, -1)
            loss_kl = loss_kl.sum(1)
            loss = (w*loss_kl + (1-w)*loss_ce).mean()
            if (loss != loss).any():  # NaN check (NaN != NaN)
                raise ValueError("NaN loss")
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 0.25)
            optim.step()
            optim.zero_grad()
            total_loss += loss.item() * q.size(0)
            batch_score = compute_score_with_logits(pred, a.data).sum()
            train_score += batch_score
        total_loss /= len(train_loader.dataset)
        train_score = 100 * train_score / len(train_loader.dataset)
        if run_eval:
            model.train(False)
            results = evaluate(model, eval_loader, qid2type)
            results["epoch"] = epoch + 1
            results["step"] = total_step
            results["train_loss"] = total_loss
            results["train_score"] = train_score
            model.train(True)
            eval_score = results["score"]
            bound = results["upper_bound"]
            yn = results['score_yesno']
            other = results['score_other']
            num = results['score_number']
        logger.write('epoch %d, time: %.2f' % (epoch + 1, time.time() - t))
        logger.write('\ttrain_loss: %.2f, score: %.2f' % (total_loss, train_score))
        if run_eval:
            logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))
            logger.write('\tyn score: %.2f other score: %.2f num score: %.2f' % (100 * yn, 100 * other, 100 * num))
            if eval_score > best_eval_score:
                model_path = os.path.join(output, 'model.pth')
                torch.save(model.state_dict(), model_path)
                best_eval_score = eval_score
def evaluate(model, dataloader, qid2type):
    """Evaluate on `dataloader` and break scores down by question type.

    qid2type maps str(question_id) -> 'yes/no' | 'other' | 'number'.
    Returns a dict with the overall soft accuracy, the upper bound (best
    achievable given the soft labels), and the per-type accuracies.
    """
    score = 0
    upper_bound = 0
    score_yesno = 0
    score_number = 0
    score_other = 0
    total_yesno = 0
    total_number = 0
    total_other = 0
    for v, q, a, b, qids, _ in tqdm(dataloader, ncols=100, total=len(dataloader), desc="eval"):
        v = Variable(v, requires_grad=False).cuda()
        q = Variable(q, requires_grad=False).cuda()
        pred, _, _, _ = model(v, q, None, None, None)
        # Per-example soft accuracy of the argmax prediction.
        batch_score = compute_score_with_logits(pred, a.cuda()).cpu().numpy().sum(1)
        score += batch_score.sum()
        upper_bound += (a.max(1)[0]).sum()
        qids = qids.detach().cpu().int().numpy()
        for j in range(len(qids)):
            qid = qids[j]
            typ = qid2type[str(qid)]
            if typ == 'yes/no':
                score_yesno += batch_score[j]
                total_yesno += 1
            elif typ == 'other':
                score_other += batch_score[j]
                total_other += 1
            elif typ == 'number':
                score_number += batch_score[j]
                total_number += 1
            else:
                # Marker for an unexpected question type in qid2type.
                print('Hahahahahahahahahahaha')
    score = score / len(dataloader.dataset)
    upper_bound = upper_bound / len(dataloader.dataset)
    score_yesno /= total_yesno
    score_other /= total_other
    score_number /= total_number
    results = dict(
        score=score,
        upper_bound=upper_bound,
        score_yesno=score_yesno,
        score_other=score_other,
        score_number=score_number,
    )
    return results
| 5,773 | 32.569767 | 115 | py |
introd | introd-main/css/main_introd.py | import argparse
import json
import cPickle as pickle
from collections import defaultdict, Counter
from os.path import dirname, join
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
from dataset import Dictionary, VQAFeatureDataset
import base_model_introd as base_model
from train_introd import train
import utils
import click
from vqa_debias_loss_functions import *
def parse_args():
    """Parse command-line arguments for IntroD training on VQA(-CP).

    NOTE(review): --cache_features, --keep_qtype and --eval_each_epoch default
    to True and declare no type=, so any value passed on the command line
    arrives as a non-empty (truthy) string -- they cannot be disabled from the
    CLI as written.
    """
    parser = argparse.ArgumentParser("Train the BottomUpTopDown model with a de-biasing method")
    # Arguments we added
    parser.add_argument(
        '--cache_features', default=True,
        help="Cache image features in RAM. Makes things much faster, "
             "especially if the filesystem is slow, but requires at least 48gb of RAM")
    parser.add_argument(
        '--dataset', default='cpv2',
        choices=["v2", "cpv2", "cpv1", "cpv2val"],
        help="Run on VQA-2.0 instead of VQA-CP 2.0"
    )
    parser.add_argument(
        '-p', "--entropy_penalty", default=0.36, type=float,
        help="Entropy regularizer weight for the learned_mixin model")
    parser.add_argument(
        '--mode', default="updn",
        choices=["updn", "q_debias", "v_debias", "q_v_debias"],
        help="Kind of ensemble loss to use")
    parser.add_argument(
        '--debias', default="learned_mixin",
        choices=["learned_mixin_rw2", "learned_mixin_rw", "learned_mixin", "reweight", "bias_product", "none", 'focal'],
        help="Kind of ensemble loss to use")
    parser.add_argument(
        '--topq', type=int, default=1,
        choices=[1, 2, 3],
        help="num of words to be masked in questio")
    parser.add_argument(
        '--keep_qtype', default=True,
        help="keep qtype or not")
    parser.add_argument(
        '--topv', type=int, default=1,
        choices=[1, 3, 5, -1],
        help="num of object bbox to be masked in image")
    parser.add_argument(
        '--top_hint', type=int, default=9,
        choices=[9, 18, 27, 36],
        help="num of hint")
    parser.add_argument(
        '--qvp', type=int, default=0,
        choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        help="ratio of q_bias and v_bias")
    parser.add_argument(
        '--eval_each_epoch', default=True,
        help="Evaluate every epoch, instead of at the end")
    # Arguments from the original model, we leave this default, except we
    # set --epochs to 30 since the model maxes out its performance on VQA 2.0 well before then
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--num_hid', type=int, default=1024)
    parser.add_argument('--model', type=str, default='baseline0_newatt')
    parser.add_argument('--source', type=str, default='./logs/vqacp2/css/')
    parser.add_argument('--output', type=str, default='logs/exp0')
    parser.add_argument('--batch_size', type=int, default=512)
    parser.add_argument('--seed', type=int, default=1111, help='random seed')
    args = parser.parse_args()
    return args
def get_bias(train_dset, eval_dset):
    """Attach a per-question-type answer-prior vector to every entry.

    The "bias" of a question type is the expected soft score of each answer
    among the training questions of that type. The same prior array object is
    shared by all entries (train and eval) of that type, stored under
    entry["bias"].
    """
    n_answers = train_dset.num_ans_candidates
    # question_type -> answer label -> summed score
    type_scores = defaultdict(Counter)
    # question_type -> number of training questions
    type_counts = Counter()
    for entry in train_dset.entries:
        answer = entry["answer"]
        qtype = answer["question_type"]
        type_counts[qtype] += 1
        if answer["labels"] is not None:
            for label, score in zip(answer["labels"], answer["scores"]):
                type_scores[qtype][label] += score
    priors = {}
    for qtype, count in type_counts.items():
        arr = np.zeros(n_answers, np.float32)
        for label, total in type_scores[qtype].items():
            arr[label] += total
        arr /= count
        priors[qtype] = arr
    for dset in (train_dset, eval_dset):
        for entry in dset.entries:
            entry["bias"] = priors[entry["answer"]["question_type"]]
def main():
    """Entry point: train an IntroD student from a pretrained CSS teacher.

    Loads the teacher checkpoint from --source, attaches the LearnedMixinKD
    loss head (so the teacher also emits bias-ensembled logits), freezes it in
    eval mode, and trains a fresh student with the PlainKD loss.
    """
    args = parse_args()
    dataset = args.dataset
    if not os.path.isdir(args.output):
        utils.create_dir(args.output)
    else:
        # BUG FIX: `default=False` was previously passed to str.format()
        # (where it was silently ignored) instead of click.confirm(), so the
        # prompt had no explicit default answer.
        if click.confirm('Exp directory already exists in {}. Erase?'
                         .format(args.output), default=False):
            os.system('rm -r ' + args.output)
            utils.create_dir(args.output)
        else:
            os._exit(1)
    if dataset == 'cpv1':
        dictionary = Dictionary.load_from_file('data/dictionary_v1.pkl')
    elif dataset == 'cpv2' or dataset == 'v2' or dataset == 'cpv2val':
        dictionary = Dictionary.load_from_file('data/dictionary.pkl')
    print("Building train dataset...")
    train_dset = VQAFeatureDataset('train', dictionary, dataset=dataset,
                                   cache_image_features=args.cache_features)
    print("Building test dataset...")
    eval_dset = VQAFeatureDataset('val', dictionary, dataset=dataset,
                                  cache_image_features=args.cache_features)
    get_bias(train_dset, eval_dset)
    # Build teacher and student with the original constructor.
    constructor = 'build_%s' % args.model
    model = getattr(base_model, constructor)(train_dset, args.num_hid).cuda()
    if dataset == 'cpv1':
        model.w_emb.init_embedding('data/glove6b_init_300d_v1.npy')
    elif dataset == 'cpv2' or dataset == 'v2' or dataset == 'cpv2val':
        model.w_emb.init_embedding('data/glove6b_init_300d.npy')
    model_student = getattr(base_model, constructor)(train_dset, args.num_hid).cuda()
    if dataset == 'cpv1':
        model_student.w_emb.init_embedding('data/glove6b_init_300d_v1.npy')
    elif dataset == 'cpv2' or dataset == 'v2' or dataset == 'cpv2val':
        model_student.w_emb.init_embedding('data/glove6b_init_300d.npy')
    # Teacher: load the CSS checkpoint (strict=False tolerates the extra KD
    # head parameters) and keep it in eval mode.
    state_dict = torch.load(join(args.source, "model.pth"))
    model.debias_loss_fn = LearnedMixinKD()
    model.load_state_dict(state_dict, strict=False)
    model_student.debias_loss_fn = PlainKD()
    model.train(False)
    with open('util/qid2type_%s.json' % args.dataset, 'r') as f:
        qid2type = json.load(f)
    model = model.cuda()
    model_student = model_student.cuda()
    batch_size = args.batch_size
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = True
    train_loader = DataLoader(train_dset, batch_size, shuffle=True, num_workers=0)
    eval_loader = DataLoader(eval_dset, batch_size, shuffle=False, num_workers=0)
    print("Starting training...")
    train(model, model_student, train_loader, eval_loader, args, qid2type)


if __name__ == '__main__':
    main()
| 6,902 | 35.718085 | 119 | py |
introd | introd-main/css/utils.py | from __future__ import print_function
import errno
import os
import numpy as np
# from PIL import Image
import torch
import torch.nn as nn
EPS = 1e-7
def assert_eq(real, expected):
    # NOTE(review): the intended check (kept commented out below) is disabled.
    # `real == real` only fails for NaN-like values, so mismatched inputs pass
    # silently even though the message still formats real vs expected.
    # Presumably deliberate -- confirm with the dataset pipeline before
    # re-enabling.
    # assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
    assert real == real, '%s (true) vs %s (expected)' % (real, expected)
def assert_array_eq(real, expected):
    """Assert that every element of `real` is within EPS of `expected`."""
    all_close = (np.abs(real - expected) < EPS).all()
    assert all_close, '%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
    """Return sorted full paths of files in `folder` whose names end with `suffix`."""
    return [os.path.join(folder, name)
            for name in sorted(os.listdir(folder))
            if name.endswith(suffix)]
# def load_imageid(folder):
# images = load_folder(folder, 'jpg')
# img_ids = set()
# for img in images:
# img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
# img_ids.add(img_id)
# return img_ids
# def pil_loader(path):
# with open(path, 'rb') as f:
# with Image.open(f) as img:
# return img.convert('RGB')
def weights_init(m):
    """custom weights initialization."""
    cls = m.__class__
    if cls in (nn.Linear, nn.Conv2d, nn.ConvTranspose2d):
        # Small-variance normal init for affine/conv layers.
        m.weight.data.normal_(0.0, 0.02)
    elif cls == nn.BatchNorm2d:
        # BatchNorm: scale around 1, bias at 0.
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    else:
        print('%s is not initialized.' % cls)
def init_net(net, net_file):
    """Load weights from `net_file` if given, otherwise apply weights_init."""
    if not net_file:
        net.apply(weights_init)
    else:
        net.load_state_dict(torch.load(net_file))
def create_dir(path):
    """Create `path` (including parents); a concurrent creation is not an error."""
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as exc:
        # Tolerate a race where another process created the directory first.
        if exc.errno != errno.EEXIST:
            raise
class Logger(object):
    """Minimal file logger: buffered numeric stats plus immediate writes.

    `append` accumulates values per key; `log` writes one line per key with
    the mean of its values and clears the buffer; `write` appends a line to
    the file and echoes it to stdout.

    Fixes: use os.makedirs guarded on a non-empty dirname (os.mkdir failed
    for nested directories and crashed when output_name had no directory
    component), and iterate with items() instead of the Python-2-only
    iteritems() -- equivalent on Py2, required on Py3.
    """

    def __init__(self, output_name):
        dirname = os.path.dirname(output_name)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        self.log_file = open(output_name, 'w')
        self.infos = {}

    def append(self, key, val):
        """Buffer `val` under `key` for the next log() call."""
        self.infos.setdefault(key, []).append(val)

    def log(self, extra_msg=''):
        """Flush buffered stats (as per-key means) to the file; return the message."""
        msgs = [extra_msg]
        for key, vals in self.infos.items():
            msgs.append('%s %.6f' % (key, np.mean(vals)))
        msg = '\n'.join(msgs)
        self.log_file.write(msg + '\n')
        self.log_file.flush()
        self.infos = {}
        return msg

    def write(self, msg):
        """Append `msg` to the log file and echo it to stdout."""
        self.log_file.write(msg + '\n')
        self.log_file.flush()
        print(msg)
| 2,535 | 23.862745 | 79 | py |
introd | introd-main/css/classifier.py | import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
class SimpleClassifier(nn.Module):
    """Two-layer MLP head: weight-normed Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, in_dim, hid_dim, out_dim, dropout):
        super(SimpleClassifier, self).__init__()
        self.main = nn.Sequential(
            weight_norm(nn.Linear(in_dim, hid_dim), dim=None),
            nn.ReLU(),
            nn.Dropout(dropout, inplace=True),
            weight_norm(nn.Linear(hid_dim, out_dim), dim=None),
        )

    def forward(self, x):
        """Return raw logits for input `x`."""
        return self.main(x)
| 565 | 28.789474 | 62 | py |
introd | introd-main/css/dataset.py | from __future__ import print_function
from __future__ import unicode_literals
import os
import json
import cPickle
from collections import Counter
import numpy as np
import utils
import h5py
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from random import choice
class Dictionary(object):
    """Bidirectional word <-> index vocabulary with VQA-style tokenization."""

    def __init__(self, word2idx=None, idx2word=None):
        self.word2idx = {} if word2idx is None else word2idx
        self.idx2word = [] if idx2word is None else idx2word

    @property
    def ntoken(self):
        return len(self.word2idx)

    @property
    def padding_idx(self):
        # Padding uses the first index past the known vocabulary.
        return len(self.word2idx)

    def tokenize(self, sentence, add_word):
        """Normalize `sentence` and map its words to indices.

        With add_word=True unseen words are added to the vocabulary; otherwise
        they map to len(word2idx) (the out-of-vocabulary index).
        """
        cleaned = sentence.lower()
        cleaned = cleaned.replace(',', '').replace('?', '').replace('\'s', ' \'s').replace('-', ' ').replace('.', '').replace('"', '').replace('n\'t', ' not').replace('$', ' dollar ')
        words = cleaned.split()
        if add_word:
            return [self.add_word(w) for w in words]
        unk = len(self.word2idx)
        return [self.word2idx.get(w, unk) for w in words]

    def dump_to_file(self, path):
        """Pickle (word2idx, idx2word) to `path`."""
        cPickle.dump([self.word2idx, self.idx2word], open(path, 'wb'))
        print('dictionary dumped to %s' % path)

    @classmethod
    def load_from_file(cls, path):
        """Build a Dictionary from a file written by dump_to_file."""
        print('loading dictionary from %s' % path)
        word2idx, idx2word = cPickle.load(open(path, 'rb'))
        return cls(word2idx, idx2word)

    def add_word(self, word):
        """Return the index of `word`, inserting it if unseen."""
        if word not in self.word2idx:
            self.idx2word.append(word)
            self.word2idx[word] = len(self.idx2word) - 1
        return self.word2idx[word]

    def __len__(self):
        return len(self.idx2word)
def _create_entry(img_idx, question, answer):
answer.pop('image_id')
answer.pop('question_id')
entry = {
'question_id' : question['question_id'],
'image_id' : question['image_id'],
'image_idx' : img_idx,
'question' : question['question'],
'answer' : answer
}
return entry
def _load_dataset(dataroot, name, img_id2val, dataset):
    """Load entries
    img_id2val: dict {img_id -> val} val can be used to retrieve image or features
    dataroot: root path of dataset
    name: 'train', 'val'
    dataset: one of 'cpv2', 'cpv1', 'v2', 'cpv2val' -- selects which question
    json / target pickle layout to read. For the CP variants the 'val' split
    is stored under the name 'test'.
    """
    if dataset == 'cpv2':
        answer_path = os.path.join(dataroot, 'cp-cache', '%s_target.pkl' % name)
        name = "train" if name == "train" else "test"
        question_path = os.path.join(dataroot, 'vqacp_v2_%s_questions.json' % name)
        with open(question_path) as f:
            questions = json.load(f)
    elif dataset == 'cpv1':
        answer_path = os.path.join(dataroot, 'cp-v1-cache', '%s_target.pkl' % name)
        name = "train" if name == "train" else "test"
        question_path = os.path.join(dataroot, 'vqacp_v1_%s_questions.json' % name)
        with open(question_path) as f:
            questions = json.load(f)
    elif dataset == 'v2':
        answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
        question_path = os.path.join(dataroot, 'v2_OpenEnded_mscoco_%s2014_questions.json' % name)
        with open(question_path) as f:
            questions = json.load(f)["questions"]
    elif dataset == 'cpv2val':
        answer_path = os.path.join(dataroot, 'cpval-cache', '%s_target.pkl' % name)
        name = "train" if name == "train" else "test"
        question_path = os.path.join(dataroot, 'vqacp2val', 'vqacp_v2_%s_questions.json' % name)
        with open(question_path) as f:
            questions = json.load(f)
    with open(answer_path, 'rb') as f:
        answers = cPickle.load(f)
    # Align questions and answers by question_id before zipping them.
    questions.sort(key=lambda x: x['question_id'])
    answers.sort(key=lambda x: x['question_id'])
    # NOTE(review): utils.assert_eq is effectively a no-op in this repo, so
    # these consistency checks do not actually fail on mismatch.
    utils.assert_eq(len(questions), len(answers))
    entries = []
    for question, answer in zip(questions, answers):
        if answer["labels"] is None:
            raise ValueError()
        utils.assert_eq(question['question_id'], answer['question_id'])
        utils.assert_eq(question['image_id'], answer['image_id'])
        img_id = question['image_id']
        img_idx = None
        if img_id2val:
            img_idx = img_id2val[img_id]
        entries.append(_create_entry(img_idx, question, answer))
    return entries
class VQAFeatureDataset(Dataset):
    """VQA(-CP) dataset serving image features, tokenized questions and targets.

    Image features come from per-image .pth files (or an hdf5 file when
    use_hdf5=True), optionally cached in RAM. __getitem__ returns different
    tuple layouts for the 'train' and 'val' splits -- see that method.
    """

    def __init__(self, name, dictionary, dataroot='data', dataset='cpv2',
                 use_hdf5=False, cache_image_features=False):
        super(VQAFeatureDataset, self).__init__()
        self.name = name
        # Per-dataset auxiliary files: hint scores and question-type masks.
        # (Attribute name `test_hintsocre` is a historical typo kept for
        # compatibility with existing callers.)
        if dataset == 'cpv2':
            with open('data/train_cpv2_hintscore.json', 'r') as f:
                self.train_hintscore = json.load(f)
            with open('data/test_cpv2_hintscore.json', 'r') as f:
                self.test_hintsocre = json.load(f)
            with open('util/cpv2_type_mask.json', 'r') as f:
                self.type_mask = json.load(f)
            with open('util/cpv2_notype_mask.json', 'r') as f:
                self.notype_mask = json.load(f)
        elif dataset == 'cpv1':
            with open('data/train_cpv1_hintscore.json', 'r') as f:
                self.train_hintscore = json.load(f)
            with open('data/test_cpv1_hintscore.json', 'r') as f:
                self.test_hintsocre = json.load(f)
            with open('util/cpv1_type_mask.json', 'r') as f:
                self.type_mask = json.load(f)
            with open('util/cpv1_notype_mask.json', 'r') as f:
                self.notype_mask = json.load(f)
        elif dataset == 'v2':
            with open('data/train_v2_hintscore.json', 'r') as f:
                self.train_hintscore = json.load(f)
            with open('data/test_v2_hintscore.json', 'r') as f:
                self.test_hintsocre = json.load(f)
            with open('util/v2_type_mask.json', 'r') as f:
                self.type_mask = json.load(f)
            with open('util/v2_notype_mask.json', 'r') as f:
                self.notype_mask = json.load(f)
        elif dataset == 'cpv2val':
            # cpv2val reuses the cpv2 train hint scores for both splits.
            with open('data/train_cpv2_hintscore.json', 'r') as f:
                self.train_hintscore = json.load(f)
            with open('data/train_cpv2_hintscore.json', 'r') as f:
                self.test_hintsocre = json.load(f)
            with open('util/cpv2_type_mask.json', 'r') as f:
                self.type_mask = json.load(f)
            with open('util/cpv2_notype_mask.json', 'r') as f:
                self.notype_mask = json.load(f)
        assert name in ['train', 'val']
        # Answer vocabulary (label <-> answer string) per dataset variant.
        if dataset == 'cpv2':
            ans2label_path = os.path.join(dataroot, 'cp-cache', 'trainval_ans2label.pkl')
            label2ans_path = os.path.join(dataroot, 'cp-cache', 'trainval_label2ans.pkl')
        elif dataset == 'cpv1':
            ans2label_path = os.path.join(dataroot, 'cp-v1-cache', 'trainval_ans2label.pkl')
            label2ans_path = os.path.join(dataroot, 'cp-v1-cache', 'trainval_label2ans.pkl')
        elif dataset == 'v2':
            ans2label_path = os.path.join(dataroot, 'cache', 'trainval_ans2label.pkl')
            label2ans_path = os.path.join(dataroot, 'cache', 'trainval_label2ans.pkl')
        elif dataset == 'cpv2val':
            ans2label_path = os.path.join(dataroot, 'cpval-cache', 'trainval_ans2label.pkl')
            label2ans_path = os.path.join(dataroot, 'cpval-cache', 'trainval_label2ans.pkl')
        self.ans2label = cPickle.load(open(ans2label_path, 'rb'))
        self.label2ans = cPickle.load(open(label2ans_path, 'rb'))
        self.num_ans_candidates = len(self.ans2label)
        self.dictionary = dictionary
        self.use_hdf5 = use_hdf5
        if use_hdf5:
            h5_path = os.path.join(dataroot, '%s36.hdf5' % name)
            self.hf = h5py.File(h5_path, 'r')
            self.features = self.hf.get('image_features')
            with open("util/%s36_imgid2img.pkl" % name, "rb") as f:
                imgid2idx = cPickle.load(f)
        else:
            imgid2idx = None
        self.entries = _load_dataset(dataroot, name, imgid2idx, dataset=dataset)
        if cache_image_features:
            # Preload each distinct image's features once into RAM.
            image_to_fe = {}
            for entry in tqdm(self.entries, ncols=100, desc="caching-features"):
                img_id = entry["image_id"]
                if img_id not in image_to_fe:
                    if use_hdf5:
                        fe = np.array(self.features[imgid2idx[img_id]])
                    else:
                        fe = torch.load('data/css_features/' + str(img_id) + '.pth')['image_feature']
                    image_to_fe[img_id] = fe
            self.image_to_fe = image_to_fe
            if use_hdf5:
                self.hf.close()
        else:
            self.image_to_fe = None
        self.tokenize()
        self.tensorize()
        self.v_dim = 2048  # per-object feature dimensionality

    def tokenize(self, max_length=14):
        """Tokenizes the questions.

        This will add q_token in each entry of the dataset. Questions are
        truncated/left-padded to max_length with the dictionary's padding
        index (q_token_mask uses padding_idx - 1 instead).
        """
        for entry in tqdm(self.entries, ncols=100, desc="tokenize"):
            tokens = self.dictionary.tokenize(entry['question'], False)
            tokens = tokens[:max_length]
            if len(tokens) < max_length:
                # Note here we pad in front of the sentence
                padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
                padding_mask = [self.dictionary.padding_idx - 1] * (max_length - len(tokens))
                tokens_mask = padding_mask + tokens
                tokens = padding + tokens
            utils.assert_eq(len(tokens), max_length)
            entry['q_token'] = tokens
            entry['q_token_mask'] = tokens_mask

    def tensorize(self):
        """Convert token lists and answer labels/scores to torch tensors in place."""
        for entry in tqdm(self.entries, ncols=100, desc="tensorize"):
            question = torch.from_numpy(np.array(entry['q_token']))
            question_mask = torch.from_numpy(np.array(entry['q_token_mask']))
            entry['q_token'] = question
            entry['q_token_mask'] = question_mask
            answer = entry['answer']
            labels = np.array(answer['labels'])
            scores = np.array(answer['scores'], dtype=np.float32)
            if len(labels):
                labels = torch.from_numpy(labels)
                scores = torch.from_numpy(scores)
                entry['answer']['labels'] = labels
                entry['answer']['scores'] = scores
            else:
                entry['answer']['labels'] = None
                entry['answer']['scores'] = None

    def __getitem__(self, index):
        """Return one example.

        train split: (features, ques, target, bias, train_hint, type_mask,
        notype_mask, ques_mask) -- or a short 5-tuple when the entry has no
        "bias" (i.e. get_bias was not run).
        val split: (features, ques, target, bias-or-0, q_id, test_hint).
        """
        entry = self.entries[index]
        if self.image_to_fe is not None:
            features = self.image_to_fe[entry["image_id"]]
        elif self.use_hdf5:
            features = np.array(self.features[entry['image_idx']])
            features = torch.from_numpy(features).view(36, 2048)
        else:
            features = torch.load('data/css_features/' + str(entry["image_id"]) + '.pth')['image_feature']
        q_id = entry['question_id']
        ques = entry['q_token']
        ques_mask = entry['q_token_mask']
        answer = entry['answer']
        labels = answer['labels']
        scores = answer['scores']
        # Soft target vector over all answer candidates.
        target = torch.zeros(self.num_ans_candidates)
        if labels is not None:
            target.scatter_(0, labels, scores)
        if self.name == 'train':
            train_hint = torch.tensor(self.train_hintscore[str(q_id)])
            type_mask = torch.tensor(self.type_mask[str(q_id)])
            notype_mask = torch.tensor(self.notype_mask[str(q_id)])
            if "bias" in entry:
                return features, ques, target, entry["bias"], train_hint, type_mask, notype_mask, ques_mask
            else:
                return features, ques, target, 0, train_hint
        else:
            test_hint = torch.tensor(self.test_hintsocre[str(q_id)])
            if "bias" in entry:
                return features, ques, target, entry["bias"], q_id, test_hint
            else:
                return features, ques, target, 0, q_id, test_hint

    def __len__(self):
        return len(self.entries)
| 12,287 | 38.009524 | 106 | py |
introd | introd-main/css/eval.py | import argparse
import json
import cPickle
from collections import defaultdict, Counter
from os.path import dirname, join
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import os
# from new_dataset import Dictionary, VQAFeatureDataset
from dataset import Dictionary, VQAFeatureDataset
import base_model
from train import train
import utils
from vqa_debias_loss_functions import *
from tqdm import tqdm
from torch.autograd import Variable
def parse_args():
    """Build and parse the command-line options for evaluating the
    BottomUpTopDown model trained with a de-biasing method."""
    ap = argparse.ArgumentParser("Train the BottomUpTopDown model with a de-biasing method")
    # Arguments we added.
    ap.add_argument('--cache_features', default=True,
                    help="Cache image features in RAM. Makes things much faster, "
                         "especially if the filesystem is slow, but requires at least 48gb of RAM")
    ap.add_argument('--dataset', default='cpv2',
                    help="Run on VQA-2.0 instead of VQA-CP 2.0")
    ap.add_argument('-p', "--entropy_penalty", default=0.36, type=float,
                    help="Entropy regularizer weight for the learned_mixin model")
    ap.add_argument('--debias', default="learned_mixin",
                    choices=["learned_mixin", "reweight", "bias_product", "none"],
                    help="Kind of ensemble loss to use")
    # Arguments from the original model (defaults left untouched).
    ap.add_argument('--num_hid', type=int, default=1024)
    ap.add_argument('--model', type=str, default='baseline0_newatt')
    ap.add_argument('--batch_size', type=int, default=512)
    ap.add_argument('--seed', type=int, default=1111, help='random seed')
    ap.add_argument('--model_state', type=str, default='logs/exp0/model.pth')
    return ap.parse_args()
def compute_score_with_logits(logits, labels):
# logits = torch.max(logits, 1)[1].data # argmax
logits = torch.argmax(logits,1)
one_hots = torch.zeros(*labels.size()).cuda()
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
def evaluate(model,dataloader,qid2type):
    """Evaluate ``model`` on ``dataloader`` and print overall and
    per-question-type (yes/no, number, other) soft accuracy.

    qid2type maps str(question_id) -> question type. Requires CUDA
    (inputs are moved with .cuda()). Prints results; returns nothing.
    """
    score = 0
    upper_bound = 0
    score_yesno = 0
    score_number = 0
    score_other = 0
    total_yesno = 0
    total_number = 0
    total_other = 0
    # Switch off dropout/batch-norm updates for evaluation.
    model.train(False)
    # import pdb;pdb.set_trace()
    for v, q, a, b,qids,hintscore in tqdm(dataloader, ncols=100, total=len(dataloader), desc="eval"):
        v = Variable(v, requires_grad=False).cuda()
        q = Variable(q, requires_grad=False).cuda()
        pred, _ ,_= model(v, q, None, None,None)
        # Per-example soft accuracy (sum over answer dimension).
        batch_score= compute_score_with_logits(pred, a.cuda()).cpu().numpy().sum(1)
        score += batch_score.sum()
        # Best achievable score given the soft labels.
        upper_bound += (a.max(1)[0]).sum()
        qids = qids.detach().cpu().int().numpy()
        # Bucket each example's score by its question type.
        for j in range(len(qids)):
            qid=qids[j]
            typ = qid2type[str(qid)]
            if typ == 'yes/no':
                score_yesno += batch_score[j]
                total_yesno += 1
            elif typ == 'other':
                score_other += batch_score[j]
                total_other += 1
            elif typ == 'number':
                score_number += batch_score[j]
                total_number += 1
            else:
                # Unknown question type — should not happen.
                print('Hahahahahahahahahahaha')
    score = score / len(dataloader.dataset)
    upper_bound = upper_bound / len(dataloader.dataset)
    score_yesno /= total_yesno
    score_other /= total_other
    score_number /= total_number
    print('\teval overall score: %.2f' % (100 * score))
    print('\teval up_bound score: %.2f' % (100 * upper_bound))
    print('\teval y/n score: %.2f' % (100 * score_yesno))
    print('\teval other score: %.2f' % (100 * score_other))
    print('\teval number score: %.2f' % (100 * score_number))
def evaluate_ai(model,dataloader,qid2type,label2ans):
    """Evaluate accuracy plus "answer influence" alignment.

    For each batch, the gradient of the ground-truth answer score w.r.t. the
    36 region features is used as a saliency map; for correctly answered
    questions, the human hint scores of the top-1/2/3 most salient regions
    are averaged into ai_top1..3. Requires CUDA. Prints results.

    NOTE(review): ``qid2type`` and ``label2ans`` are accepted but never used
    in this body; ``sv_ind`` is computed and unused.
    """
    score=0
    upper_bound=0
    ai_top1=0
    ai_top2=0
    ai_top3=0
    for v, q, a, b, qids, hintscore in tqdm(dataloader, ncols=100, total=len(dataloader), desc="eval"):
        # Features need gradients so saliency can be taken w.r.t. them.
        v = Variable(v, requires_grad=False).cuda().float().requires_grad_()
        q = Variable(q, requires_grad=False).cuda()
        a=a.cuda()
        hintscore=hintscore.cuda().float()
        pred, _, _ = model(v, q, None, None, None)
        # Gradient of the summed ground-truth answer scores w.r.t. features.
        vqa_grad = torch.autograd.grad((pred * (a > 0).float()).sum(), v, create_graph=True)[0] # [b , 36, 2048]
        # Collapse the feature dimension -> one saliency value per region.
        vqa_grad_cam=vqa_grad.sum(2)
        sv_ind=torch.argmax(vqa_grad_cam,1)
        x_ind_top1=torch.topk(vqa_grad_cam,k=1)[1]
        x_ind_top2=torch.topk(vqa_grad_cam,k=2)[1]
        x_ind_top3=torch.topk(vqa_grad_cam,k=3)[1]
        # Mean human hint score over the top-k salient regions.
        y_score_top1 = hintscore.gather(1,x_ind_top1).sum(1)/1
        y_score_top2 = hintscore.gather(1,x_ind_top2).sum(1)/2
        y_score_top3 = hintscore.gather(1,x_ind_top3).sum(1)/3
        batch_score=compute_score_with_logits(pred,a.cuda()).cpu().numpy().sum(1)
        score+=batch_score.sum()
        upper_bound+=(a.max(1)[0]).sum()
        qids=qids.detach().cpu().int().numpy()
        # Only accumulate alignment for correctly answered questions.
        for j in range(len(qids)):
            if batch_score[j]>0:
                ai_top1 += y_score_top1[j]
                ai_top2 += y_score_top2[j]
                ai_top3 += y_score_top3[j]
    score = score / len(dataloader.dataset)
    upper_bound = upper_bound / len(dataloader.dataset)
    ai_top1=(ai_top1.item() * 1.0) / len(dataloader.dataset)
    ai_top2=(ai_top2.item() * 1.0) / len(dataloader.dataset)
    ai_top3=(ai_top3.item() * 1.0) / len(dataloader.dataset)
    print('\teval overall score: %.2f' % (100 * score))
    print('\teval up_bound score: %.2f' % (100 * upper_bound))
    print('\ttop1_ai_score: %.2f' % (100 * ai_top1))
    print('\ttop2_ai_score: %.2f' % (100 * ai_top2))
    print('\ttop3_ai_score: %.2f' % (100 * ai_top3))
def main():
    """Build the test split of the requested dataset, restore a trained
    checkpoint (args.model_state), and print per-question-type accuracy.

    Performs file and GPU I/O only; all configuration comes from parse_args().
    """
    args = parse_args()
    dataset = args.dataset
    with open('util/qid2type_%s.json'%args.dataset,'r') as f:
        qid2type=json.load(f)
    # VQA-CP v1 has its own dictionary; cpv2 and v2 share one.
    if dataset=='cpv1':
        dictionary = Dictionary.load_from_file('data/dictionary_v1.pkl')
    elif dataset=='cpv2' or dataset=='v2':
        dictionary = Dictionary.load_from_file('data/dictionary.pkl')
    print("Building test dataset...")
    eval_dset = VQAFeatureDataset('val', dictionary, dataset=dataset,
                                  cache_image_features=args.cache_features)
    # Build the model using the original constructor
    constructor = 'build_%s' % args.model
    model = getattr(base_model, constructor)(eval_dset, args.num_hid).cuda()
    if args.debias == "bias_product":
        model.debias_loss_fn = BiasProduct()
    elif args.debias == "none":
        model.debias_loss_fn = Plain()
    elif args.debias == "reweight":
        model.debias_loss_fn = ReweightByInvBias()
    elif args.debias == "learned_mixin":
        model.debias_loss_fn = LearnedMixin(args.entropy_penalty)
    else:
        # Bug fix: args has no `mode` attribute (parse_args defines --debias),
        # so `raise RuntimeError(args.mode)` raised AttributeError instead of
        # reporting the unknown debias method.
        raise RuntimeError(args.debias)
    model_state = torch.load(args.model_state)
    model.load_state_dict(model_state)
    model = model.cuda()
    batch_size = args.batch_size
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = True
    # The original version uses multiple workers, but that just seems slower on my setup
    eval_loader = DataLoader(eval_dset, batch_size, shuffle=False, num_workers=0)
    print("Starting eval...")
    evaluate(model,eval_loader,qid2type)
# Script entry point: run evaluation when invoked directly.
if __name__ == '__main__':
    main()
| 7,543 | 34.088372 | 113 | py |
introd | introd-main/css/attention.py | import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
from fc import FCNet
class Attention(nn.Module):
    """Concatenation-based attention over image regions.

    Each of the k region features is concatenated with the question
    embedding, passed through a non-linear layer, and scored by a
    weight-normalised linear head; forward returns softmax weights.
    """
    def __init__(self, v_dim, q_dim, num_hid):
        super(Attention, self).__init__()
        # Non-linear fusion of the concatenated (visual, question) vector.
        self.nonlinear = FCNet([v_dim + q_dim, num_hid])
        # Scalar attention score per region.
        self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)

    def forward(self, v, q):
        """v: [batch, k, vdim]; q: [batch, qdim] -> softmax weights over k."""
        return nn.functional.softmax(self.logits(v, q), 1)

    def logits(self, v, q):
        """Unnormalised attention score for every image region."""
        k = v.size(1)
        tiled_q = q.unsqueeze(1).repeat(1, k, 1)
        fused = torch.cat((v, tiled_q), 2)
        return self.linear(self.nonlinear(fused))
class NewAttention(nn.Module):
    """Project-and-multiply attention over image regions.

    Visual and question features are projected to a shared space, fused by
    elementwise product, and scored per region. forward returns RAW logits
    (no softmax) — callers normalise themselves if needed.
    """
    def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
        super(NewAttention, self).__init__()
        self.v_proj = FCNet([v_dim, num_hid])
        self.q_proj = FCNet([q_dim, num_hid])
        self.dropout = nn.Dropout(dropout)
        # NOTE(review): scores a num_hid-dim fused vector with a Linear whose
        # in_features is q_dim — only valid when num_hid == q_dim; confirm.
        self.linear = weight_norm(nn.Linear(q_dim, 1), dim=None)

    def forward(self, v, q):
        """v: [batch, k, vdim]; q: [batch, qdim] -> logits [batch, k, 1]."""
        return self.logits(v, q)

    def logits(self, v, q):
        """Per-region score from the elementwise-product fusion."""
        k = v.size(1)
        v_repr = self.v_proj(v)                              # [batch, k, num_hid]
        q_repr = self.q_proj(q).unsqueeze(1).repeat(1, k, 1)  # [batch, k, num_hid]
        fused = self.dropout(v_repr * q_repr)
        return self.linear(fused)
| 1,686 | 28.086207 | 66 | py |
introd | introd-main/css/train.py | import json
import os
import pickle
import time
from os.path import join
import torch
import torch.nn as nn
import utils
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
import random
import copy
def compute_score_with_logits(logits, labels):
logits = torch.argmax(logits, 1)
one_hots = torch.zeros(*labels.size()).cuda()
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
def train(model, train_loader, eval_loader,args,qid2type):
    """Train ``model`` with one of the CSS schemes selected by args.mode.

    Modes:
      * 'updn'       -- plain UpDn training, one forward/backward per batch.
      * 'q_debias'   -- question counterfactuals: find the most influential
                        question words by gradient, mask them, and train a
                        second step against the complementary answers.
      * 'v_debias'   -- visual counterfactuals: same idea on the top hinted
                        image regions.
      * 'q_v_debias' -- per batch, pick the question branch with probability
                        qvp/10, otherwise the visual branch.

    Writes model.pth every epoch (and model_best.pth when eval improves, if
    args.eval_each_epoch). Requires CUDA.
    """
    dataset=args.dataset
    num_epochs=args.epochs
    mode=args.mode
    run_eval=args.eval_each_epoch
    output=args.output
    optim = torch.optim.Adamax(model.parameters())
    logger = utils.Logger(os.path.join(output, 'log.txt'))
    total_step = 0
    best_eval_score = 0
    # Mode-specific hyperparameters.
    if mode=='q_debias':
        topq=args.topq
        keep_qtype=args.keep_qtype
    elif mode=='v_debias':
        topv=args.topv
        top_hint=args.top_hint
    elif mode=='q_v_debias':
        topv=args.topv
        top_hint=args.top_hint
        topq=args.topq
        keep_qtype=args.keep_qtype
        qvp=args.qvp
    for epoch in range(num_epochs):
        total_loss = 0
        train_score = 0
        t = time.time()
        for i, (v, q, a, b, hintscore,type_mask,notype_mask,q_mask) in tqdm(enumerate(train_loader), ncols=100,
                                                                            desc="Epoch %d" % (epoch + 1), total=len(train_loader)):
            total_step += 1
            #########################################
            # Move the batch to the GPU; v needs gradients for saliency.
            v = Variable(v).cuda().requires_grad_()
            q = Variable(q).cuda()
            q_mask=Variable(q_mask).cuda()
            a = Variable(a).cuda()
            b = Variable(b).cuda()
            hintscore = Variable(hintscore).cuda()
            type_mask=Variable(type_mask).float().cuda()
            notype_mask=Variable(notype_mask).float().cuda()
            #########################################
            if mode=='updn':
                pred, loss,_ = model(v, q, a, b, None)
                # loss != loss is true only for NaN entries.
                if (loss != loss).any():
                    raise ValueError("NaN loss")
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                optim.step()
                optim.zero_grad()
                total_loss += loss.item() * q.size(0)
                batch_score = compute_score_with_logits(pred, a.data).sum()
                train_score += batch_score
            elif mode=='q_debias':
                # Choose which words are candidates for masking.
                if keep_qtype==True:
                    sen_mask=type_mask
                else:
                    sen_mask=notype_mask
                ## first train
                pred, loss,word_emb = model(v, q, a, b, None)
                # Influence of each word embedding on the GT answer score.
                word_grad = torch.autograd.grad((pred * (a > 0).float()).sum(), word_emb, create_graph=True)[0]
                if (loss != loss).any():
                    raise ValueError("NaN loss")
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                optim.step()
                optim.zero_grad()
                total_loss += loss.item() * q.size(0)
                batch_score = compute_score_with_logits(pred, a.data).sum()
                train_score += batch_score
                ## second train
                word_grad_cam = word_grad.sum(2)
                # word_grad_cam_sigmoid = torch.sigmoid(word_grad_cam * 1000)
                word_grad_cam_sigmoid = torch.exp(word_grad_cam * sen_mask)
                word_grad_cam_sigmoid = word_grad_cam_sigmoid * sen_mask
                # Indices of the topq most influential (maskable) words.
                w_ind = word_grad_cam_sigmoid.sort(1, descending=True)[1][:, :topq]
                q2 = copy.deepcopy(q_mask)
                m1 = copy.deepcopy(sen_mask)  ##[0,0,0...0,1,1,1,1]
                m1.scatter_(1, w_ind, 0)  ##[0,0,0...0,0,1,1,0]
                m2 = 1 - m1  ##[1,1,1...1,1,0,0,1]
                # 18330/18455 is the vocabulary padding index (cpv1 vs cpv2).
                if dataset=='cpv1':
                    m3=m1*18330
                else:
                    m3 = m1 * 18455  ##[0,0,0...0,0,18455,18455,0]
                q2 = q2 * m2.long() + m3.long()
                # Predict on the words-removed question to find its top answers.
                pred, _, _ = model(v, q2, None, b, None)
                pred_ind = torch.argsort(pred, 1, descending=True)[:, :5]
                false_ans = torch.ones(pred.shape[0], pred.shape[1]).cuda()
                false_ans.scatter_(1, pred_ind, 0)
                # Counterfactual targets: GT answers minus the top-5 above.
                a2 = a * false_ans
                q3 = copy.deepcopy(q)
                if dataset=='cpv1':
                    q3.scatter_(1, w_ind, 18330)
                else:
                    q3.scatter_(1, w_ind, 18455)
                ## third train
                pred, loss, _ = model(v, q3, a2, b, None)
                if (loss != loss).any():
                    raise ValueError("NaN loss")
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                optim.step()
                optim.zero_grad()
                total_loss += loss.item() * q.size(0)
            elif mode=='v_debias':
                ## first train
                pred, loss, _ = model(v, q, a, b, None)
                # Influence of each region feature on the GT answer score.
                visual_grad=torch.autograd.grad((pred * (a > 0).float()).sum(), v, create_graph=True)[0]
                if (loss != loss).any():
                    raise ValueError("NaN loss")
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                optim.step()
                optim.zero_grad()
                total_loss += loss.item() * q.size(0)
                batch_score = compute_score_with_logits(pred, a.data).sum()
                train_score += batch_score
                ##second train
                v_mask = torch.zeros(v.shape[0], 36).cuda()
                visual_grad_cam = visual_grad.sum(2)
                # Restrict masking to the top human-hinted regions.
                hint_sort, hint_ind = hintscore.sort(1, descending=True)
                v_ind = hint_ind[:, :top_hint]
                v_grad = visual_grad_cam.gather(1, v_ind)
                if topv==-1:
                    # Adaptive k: take regions until 65% of softmax mass.
                    v_grad_score,v_grad_ind=v_grad.sort(1,descending=True)
                    v_grad_score=nn.functional.softmax(v_grad_score*10,dim=1)
                    v_grad_sum=torch.cumsum(v_grad_score,dim=1)
                    v_grad_mask=(v_grad_sum<=0.65).long()
                    v_grad_mask[:,0] = 1
                    v_mask_ind=v_grad_mask*v_ind
                    for x in range(a.shape[0]):
                        num=len(torch.nonzero(v_grad_mask[x]))
                        v_mask[x].scatter_(0,v_mask_ind[x,:num],1)
                else:
                    # Fixed k: the topv most influential hinted regions.
                    v_grad_ind = v_grad.sort(1, descending=True)[1][:, :topv]
                    v_star = v_ind.gather(1, v_grad_ind)
                    v_mask.scatter_(1, v_star, 1)
                pred, _, _ = model(v, q, None, b, v_mask)
                pred_ind = torch.argsort(pred, 1, descending=True)[:, :5]
                false_ans = torch.ones(pred.shape[0], pred.shape[1]).cuda()
                false_ans.scatter_(1, pred_ind, 0)
                a2 = a * false_ans
                # Train on the complementary regions with counterfactual targets.
                v_mask = 1 - v_mask
                pred, loss, _ = model(v, q, a2, b, v_mask)
                if (loss != loss).any():
                    raise ValueError("NaN loss")
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                optim.step()
                optim.zero_grad()
                total_loss += loss.item() * q.size(0)
            elif mode=='q_v_debias':
                # Randomly alternate: question branch with prob qvp/10.
                random_num = random.randint(1, 10)
                if keep_qtype == True:
                    sen_mask = type_mask
                else:
                    sen_mask = notype_mask
                if random_num<=qvp:
                    ## first train
                    pred, loss, word_emb = model(v, q, a, b, None)
                    word_grad = torch.autograd.grad((pred * (a > 0).float()).sum(), word_emb, create_graph=True)[0]
                    if (loss != loss).any():
                        raise ValueError("NaN loss")
                    loss.backward()
                    nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                    optim.step()
                    optim.zero_grad()
                    total_loss += loss.item() * q.size(0)
                    batch_score = compute_score_with_logits(pred, a.data).sum()
                    train_score += batch_score
                    ## second train
                    word_grad_cam = word_grad.sum(2)
                    # word_grad_cam_sigmoid = torch.sigmoid(word_grad_cam * 1000)
                    word_grad_cam_sigmoid = torch.exp(word_grad_cam * sen_mask)
                    word_grad_cam_sigmoid = word_grad_cam_sigmoid * sen_mask
                    w_ind = word_grad_cam_sigmoid.sort(1, descending=True)[1][:, :topq]
                    q2 = copy.deepcopy(q_mask)
                    m1 = copy.deepcopy(sen_mask)  ##[0,0,0...0,1,1,1,1]
                    m1.scatter_(1, w_ind, 0)  ##[0,0,0...0,0,1,1,0]
                    m2 = 1 - m1  ##[1,1,1...1,1,0,0,1]
                    if dataset=='cpv1':
                        m3=m1*18330
                    else:
                        m3 = m1 * 18455  ##[0,0,0...0,0,18455,18455,0]
                    q2 = q2 * m2.long() + m3.long()
                    pred, _, _ = model(v, q2, None, b, None)
                    pred_ind = torch.argsort(pred, 1, descending=True)[:, :5]
                    false_ans = torch.ones(pred.shape[0], pred.shape[1]).cuda()
                    false_ans.scatter_(1, pred_ind, 0)
                    a2 = a * false_ans
                    q3 = copy.deepcopy(q)
                    if dataset=='cpv1':
                        q3.scatter_(1, w_ind, 18330)
                    else:
                        q3.scatter_(1, w_ind, 18455)
                    ## third train
                    pred, loss, _ = model(v, q3, a2, b, None)
                    if (loss != loss).any():
                        raise ValueError("NaN loss")
                    loss.backward()
                    nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                    optim.step()
                    optim.zero_grad()
                    total_loss += loss.item() * q.size(0)
                else:
                    ## first train
                    pred, loss, _ = model(v, q, a, b, None)
                    visual_grad = torch.autograd.grad((pred * (a > 0).float()).sum(), v, create_graph=True)[0]
                    if (loss != loss).any():
                        raise ValueError("NaN loss")
                    loss.backward()
                    nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                    optim.step()
                    optim.zero_grad()
                    total_loss += loss.item() * q.size(0)
                    batch_score = compute_score_with_logits(pred, a.data).sum()
                    train_score += batch_score
                    ##second train
                    v_mask = torch.zeros(v.shape[0], 36).cuda()
                    visual_grad_cam = visual_grad.sum(2)
                    hint_sort, hint_ind = hintscore.sort(1, descending=True)
                    v_ind = hint_ind[:, :top_hint]
                    v_grad = visual_grad_cam.gather(1, v_ind)
                    if topv == -1:
                        # Adaptive k: take regions until 65% of softmax mass.
                        v_grad_score, v_grad_ind = v_grad.sort(1, descending=True)
                        v_grad_score = nn.functional.softmax(v_grad_score * 10, dim=1)
                        v_grad_sum = torch.cumsum(v_grad_score, dim=1)
                        v_grad_mask = (v_grad_sum <= 0.65).long()
                        v_grad_mask[:,0] = 1
                        v_mask_ind = v_grad_mask * v_ind
                        for x in range(a.shape[0]):
                            num = len(torch.nonzero(v_grad_mask[x]))
                            v_mask[x].scatter_(0, v_mask_ind[x,:num], 1)
                    else:
                        v_grad_ind = v_grad.sort(1, descending=True)[1][:, :topv]
                        v_star = v_ind.gather(1, v_grad_ind)
                        v_mask.scatter_(1, v_star, 1)
                    pred, _, _ = model(v, q, None, b, v_mask)
                    pred_ind = torch.argsort(pred, 1, descending=True)[:, :5]
                    false_ans = torch.ones(pred.shape[0], pred.shape[1]).cuda()
                    false_ans.scatter_(1, pred_ind, 0)
                    a2 = a * false_ans
                    v_mask = 1 - v_mask
                    pred, loss, _ = model(v, q, a2, b, v_mask)
                    if (loss != loss).any():
                        raise ValueError("NaN loss")
                    loss.backward()
                    nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                    optim.step()
                    optim.zero_grad()
                    total_loss += loss.item() * q.size(0)
        # Debias modes take two optimisation steps per batch, hence the *2.
        if mode=='updn':
            total_loss /= len(train_loader.dataset)
        else:
            total_loss /= len(train_loader.dataset) * 2
        train_score = 100 * train_score / len(train_loader.dataset)
        if run_eval:
            model.train(False)
            results = evaluate(model, eval_loader, qid2type)
            results["epoch"] = epoch + 1
            results["step"] = total_step
            results["train_loss"] = total_loss
            results["train_score"] = train_score
            model.train(True)
            eval_score = results["score"]
            bound = results["upper_bound"]
            yn = results['score_yesno']
            other = results['score_other']
            num = results['score_number']
        logger.write('epoch %d, time: %.2f' % (epoch + 1, time.time() - t))
        logger.write('\ttrain_loss: %.2f, score: %.2f' % (total_loss, train_score))
        if run_eval:
            logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))
            logger.write('\tyn score: %.2f other score: %.2f num score: %.2f' % (100 * yn, 100 * other, 100 * num))
            if eval_score > best_eval_score:
                model_path = os.path.join(output, 'model_best.pth')
                torch.save(model.state_dict(), model_path)
                best_eval_score = eval_score
    # Always save the final model after the last epoch.
    model_path = os.path.join(output, 'model.pth')
    torch.save(model.state_dict(), model_path)
    best_eval_score = eval_score
def evaluate(model, dataloader, qid2type):
    """Run ``model`` over ``dataloader`` and collect soft-accuracy stats.

    qid2type maps str(question_id) -> 'yes/no' | 'other' | 'number'.
    Returns a dict with overall score, upper bound, and per-type scores.
    Requires CUDA.
    """
    total_score = 0
    upper_bound = 0
    # Per-question-type score sums and example counts.
    type_score = {'yes/no': 0, 'other': 0, 'number': 0}
    type_count = {'yes/no': 0, 'other': 0, 'number': 0}
    for v, q, a, b, qids, _ in tqdm(dataloader, ncols=100, total=len(dataloader), desc="eval"):
        v = Variable(v, requires_grad=False).cuda()
        q = Variable(q, requires_grad=False).cuda()
        pred, _, _ = model(v, q, None, None, None)
        batch_score = compute_score_with_logits(pred, a.cuda()).cpu().numpy().sum(1)
        total_score += batch_score.sum()
        upper_bound += (a.max(1)[0]).sum()
        for j, qid in enumerate(qids.detach().cpu().int().numpy()):
            typ = qid2type[str(qid)]
            if typ in type_score:
                type_score[typ] += batch_score[j]
                type_count[typ] += 1
            else:
                # Unknown question type — should not happen.
                print('Hahahahahahahahahahaha')
    n_examples = len(dataloader.dataset)
    results = dict(
        score=total_score / n_examples,
        upper_bound=upper_bound / n_examples,
        score_yesno=type_score['yes/no'] / type_count['yes/no'],
        score_other=type_score['other'] / type_count['other'],
        score_number=type_score['number'] / type_count['number'],
    )
    return results
| 15,958 | 36.817536 | 115 | py |
introd | introd-main/css/language_model.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class WordEmbedding(nn.Module):
    """Word Embedding.

    The ntoken-th dim is used for padding_idx, which agrees *implicitly*
    with the definition in Dictionary.
    """
    def __init__(self, ntoken, emb_dim, dropout):
        super(WordEmbedding, self).__init__()
        # One extra row (index == ntoken) acts as the padding embedding.
        self.emb = nn.Embedding(ntoken + 1, emb_dim, padding_idx=ntoken)
        self.dropout = nn.Dropout(dropout)
        self.ntoken = ntoken
        self.emb_dim = emb_dim

    def init_embedding(self, np_file):
        """Load pretrained vectors for the real vocabulary rows from np_file."""
        pretrained = torch.from_numpy(np.load(np_file))
        assert pretrained.shape == (self.ntoken, self.emb_dim)
        self.emb.weight.data[:self.ntoken] = pretrained

    def forward(self, x):
        """Look up token ids and apply dropout."""
        return self.dropout(self.emb(x))
class QuestionEmbedding(nn.Module):
    """GRU/LSTM encoder producing a fixed-size question embedding."""
    def __init__(self, in_dim, num_hid, nlayers, bidirect, dropout, rnn_type='GRU'):
        """Module for question embedding
        """
        super(QuestionEmbedding, self).__init__()
        assert rnn_type == 'LSTM' or rnn_type == 'GRU'
        rnn_cls = nn.LSTM if rnn_type == 'LSTM' else nn.GRU
        self.rnn = rnn_cls(in_dim, num_hid, nlayers,
                           bidirectional=bidirect, dropout=dropout,
                           batch_first=True)
        self.in_dim = in_dim
        self.num_hid = num_hid
        self.nlayers = nlayers
        self.rnn_type = rnn_type
        self.ndirections = 1 + int(bidirect)

    def init_hidden(self, batch):
        """Zero initial hidden state(s) matching the parameters' dtype/device."""
        weight = next(self.parameters()).data
        shape = (self.nlayers * self.ndirections, batch, self.num_hid)
        if self.rnn_type == 'LSTM':
            # LSTM needs both hidden and cell states.
            return (Variable(weight.new(*shape).zero_()),
                    Variable(weight.new(*shape).zero_()))
        return Variable(weight.new(*shape).zero_())

    def forward(self, x):
        """x: [batch, sequence, in_dim] -> [batch, num_hid * ndirections]."""
        hidden0 = self.init_hidden(x.size(0))
        self.rnn.flatten_parameters()
        output, _ = self.rnn(x, hidden0)
        if self.ndirections == 1:
            return output[:, -1]
        # Bidirectional: last forward state + first backward state.
        fwd = output[:, -1, :self.num_hid]
        bwd = output[:, 0, self.num_hid:]
        return torch.cat((fwd, bwd), dim=1)

    def forward_all(self, x):
        """x: [batch, sequence, in_dim] -> per-step outputs [batch, sequence, *]."""
        hidden0 = self.init_hidden(x.size(0))
        self.rnn.flatten_parameters()
        output, _ = self.rnn(x, hidden0)
        return output
| 2,639 | 31.195122 | 84 | py |
introd | introd-main/css/tools/compute_softscore.py | from __future__ import print_function
import argparse
import os
import sys
import json
import numpy as np
import re
import cPickle
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dataset import Dictionary
import utils
# Tables used by the official VQA answer-normalisation procedure.
# Maps apostrophe-less misspellings to the canonical contraction.
contractions = {
    "aint": "ain't", "arent": "aren't", "cant": "can't", "couldve":
    "could've", "couldnt": "couldn't", "couldn'tve": "couldn't've",
    "couldnt've": "couldn't've", "didnt": "didn't", "doesnt":
    "doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've":
    "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent":
    "haven't", "hed": "he'd", "hed've": "he'd've", "he'dve":
    "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll",
    "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", "Im":
    "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've":
    "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's",
    "maam": "ma'am", "mightnt": "mightn't", "mightnt've":
    "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've",
    "mustnt": "mustn't", "mustve": "must've", "neednt": "needn't",
    "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't",
    "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat":
    "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve":
    "she'd've", "she's": "she's", "shouldve": "should've", "shouldnt":
    "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve":
    "shouldn't've", "somebody'd": "somebodyd", "somebodyd've":
    "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll":
    "somebody'll", "somebodys": "somebody's", "someoned": "someone'd",
    "someoned've": "someone'd've", "someone'dve": "someone'd've",
    "someonell": "someone'll", "someones": "someone's", "somethingd":
    "something'd", "somethingd've": "something'd've", "something'dve":
    "something'd've", "somethingll": "something'll", "thats":
    "that's", "thered": "there'd", "thered've": "there'd've",
    "there'dve": "there'd've", "therere": "there're", "theres":
    "there's", "theyd": "they'd", "theyd've": "they'd've", "they'dve":
    "they'd've", "theyll": "they'll", "theyre": "they're", "theyve":
    "they've", "twas": "'twas", "wasnt": "wasn't", "wed've":
    "we'd've", "we'dve": "we'd've", "weve": "we've", "werent":
    "weren't", "whatll": "what'll", "whatre": "what're", "whats":
    "what's", "whatve": "what've", "whens": "when's", "whered":
    "where'd", "wheres": "where's", "whereve": "where've", "whod":
    "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl":
    "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll",
    "whyre": "why're", "whys": "why's", "wont": "won't", "wouldve":
    "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've",
    "wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll":
    "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've",
    "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd":
    "you'd", "youd've": "you'd've", "you'dve": "you'd've", "youll":
    "you'll", "youre": "you're", "youve": "you've"
}

# Spelled-out numbers to digits.
manual_map = { 'none': '0',
              'zero': '0',
              'one': '1',
              'two': '2',
              'three': '3',
              'four': '4',
              'five': '5',
              'six': '6',
              'seven': '7',
              'eight': '8',
              'nine': '9',
              'ten': '10'}
# Articles to drop during normalisation.
articles = ['a', 'an', 'the']
# NOTE(review): "(?!<=\d)" looks like a typo for a lookbehind "(?<!\d)" —
# kept verbatim to match the official VQA evaluation behaviour.
period_strip = re.compile("(?!<=\d)(\.)(?!\d)")
comma_strip = re.compile("(\d)(\,)(\d)")
# Punctuation handled specially by process_punctuation.
punct = [';', r"/", '[', ']', '"', '{', '}',
         '(', ')', '=', '+', '\\', '_', '-',
         '>', '<', '@', '`', ',', '?', '!']
def get_score(occurences):
    """Soft VQA accuracy for an answer given by `occurences` annotators:
    0 -> 0, 1 -> 0.3, 2 -> 0.6, 3 -> 0.9, and 1 for anything else."""
    score_table = {0: 0, 1: 0.3, 2: 0.6, 3: 0.9}
    return score_table.get(occurences, 1)
def process_punctuation(inText):
    """Strip or space-replace punctuation per the official VQA rules.

    A punctuation symbol is deleted when it touches a space in the input or
    when the input contains a digit,digit pattern; otherwise it becomes a
    space. Periods not followed by a digit are then removed.
    """
    has_digit_comma = re.search(comma_strip, inText) != None
    result = inText
    for symbol in punct:
        attached_to_space = (symbol + ' ' in inText) or (' ' + symbol in inText)
        if attached_to_space or has_digit_comma:
            result = result.replace(symbol, '')
        else:
            result = result.replace(symbol, ' ')
    # NOTE(review): re.UNICODE is passed as the positional `count` argument of
    # Pattern.sub (so at most 32 periods are removed) — kept verbatim to match
    # the official VQA evaluation behaviour.
    result = period_strip.sub("", result, re.UNICODE)
    return result
def process_digit_article(inText):
    """Lowercase, map number words to digits, drop articles, and restore
    contractions (official VQA answer normalisation)."""
    kept = []
    for token in inText.lower().split():
        # NOTE: setdefault also caches unseen words into manual_map
        # (word -> word), matching the original side effect.
        token = manual_map.setdefault(token, token)
        if token not in articles:
            kept.append(token)
    restored = [contractions.get(word, word) for word in kept]
    return ' '.join(restored)
def multiple_replace(text, wordDict):
    """Apply every key -> value substitution from wordDict to text, in
    the dict's iteration order."""
    for old, new in wordDict.items():
        text = text.replace(old, new)
    return text
def preprocess_answer(answer):
    """Full VQA answer normalisation: punctuation handling, digit/article
    processing, then comma removal."""
    cleaned = process_digit_article(process_punctuation(answer))
    return cleaned.replace(',', '')
def filter_answers(answers_dset, min_occurence):
    """This will change the answer to preprocessed version.

    Builds {normalised answer -> set(question_ids)} from answers_dset and
    keeps only answers that occur in at least `min_occurence` questions.
    """
    occurence = {}
    for ans_entry in answers_dset:
        gtruth = ans_entry['multiple_choice_answer']
        gtruth = preprocess_answer(gtruth)
        if gtruth not in occurence:
            occurence[gtruth] = set()
        occurence[gtruth].add(ans_entry['question_id'])
    # Bug fix: popping from a dict while iterating its live keys() view raises
    # RuntimeError on Python 3 — iterate over a snapshot instead.
    for answer in list(occurence.keys()):
        if len(occurence[answer]) < min_occurence:
            occurence.pop(answer)

    print('Num of answers that appear >= %d times: %d' % (
        min_occurence, len(occurence)))
    return occurence
def create_ans2label(occurence, name, cache_root):
    """Note that this will also create label2ans.pkl at the same time.

    occurence: dict {answer -> whatever}
    name: prefix of the output file
    cache_root: str
    Returns the {answer -> integer label} mapping.
    """
    ans2label = {}
    label2ans = []
    # Labels are assigned in the dict's iteration order.
    for label, answer in enumerate(occurence):
        label2ans.append(answer)
        ans2label[answer] = label

    utils.create_dir(cache_root)
    cache_file = os.path.join(cache_root, name + '_ans2label.pkl')
    cPickle.dump(ans2label, open(cache_file, 'wb'))
    cache_file = os.path.join(cache_root, name + '_label2ans.pkl')
    cPickle.dump(label2ans, open(cache_file, 'wb'))
    return ans2label
def compute_target(answers_dset, ans2label, name, cache_root):
    """Augment answers_dset with soft score as label.

    ***answers_dset should be preprocessed***
    Writes the result into <cache_root>/<name>_target.pkl and returns it.
    """
    target = []
    for ans_entry in answers_dset:
        # Count how many annotators gave each answer string.
        answer_count = {}
        for answer in ans_entry['answers']:
            text = answer['answer']
            answer_count[text] = answer_count.get(text, 0) + 1

        labels = []
        scores = []
        for answer, count in answer_count.items():
            if answer not in ans2label:
                continue
            labels.append(ans2label[answer])
            scores.append(get_score(count))

        # Raw annotator counts keyed by vocabulary label.
        label_counts = {ans2label[k]: v for k, v in answer_count.items()
                        if k in ans2label}

        target.append({
            'question_id': ans_entry['question_id'],
            'question_type': ans_entry['question_type'],
            'image_id': ans_entry['image_id'],
            'label_counts': label_counts,
            'labels': labels,
            'scores': scores
        })

    print(cache_root)
    utils.create_dir(cache_root)
    cache_file = os.path.join(cache_root, name + '_target.pkl')
    print(cache_file)
    with open(cache_file, 'wb') as f:
        cPickle.dump(target, f)
    return target
def get_answer(qid, answers):
    """Return the answer record with question_id == qid, or None."""
    return next((rec for rec in answers if rec['question_id'] == qid), None)
def get_question(qid, questions):
    """Return the question record with question_id == qid, or None."""
    return next((rec for rec in questions if rec['question_id'] == qid), None)
def load_cp():
    """Build the answer vocabulary and soft-score targets for VQA-CP v2,
    writing to data/cp-cache.

    NOTE(review): this function is redefined later in this module (the later
    definition writes to data/cpval-cache), so this version is shadowed and
    never called — confirm which cache directory is intended.
    """
    train_answer_file = "data/vqacp_v2_train_annotations.json"
    with open(train_answer_file) as f:
        train_answers = json.load(f)  # ['annotations']

    val_answer_file = "data/vqacp_v2_test_annotations.json"
    with open(val_answer_file) as f:
        val_answers = json.load(f)  # ['annotations']

    # Keep answers seen in at least 9 training questions.
    occurence = filter_answers(train_answers, 9)
    ans2label = create_ans2label(occurence, 'trainval', "data/cp-cache")
    compute_target(train_answers, ans2label, 'train', "data/cp-cache")
    compute_target(val_answers, ans2label, 'val', "data/cp-cache")
def load_cp_v1():
    """Build the answer vocabulary and soft-score targets for VQA-CP v1,
    writing to data/cp-v1-cache."""
    train_answer_file = "data/vqacp_v1_train_annotations.json"
    with open(train_answer_file) as f:
        train_answers = json.load(f)  # ['annotations']

    val_answer_file = "data/vqacp_v1_test_annotations.json"
    with open(val_answer_file) as f:
        val_answers = json.load(f)  # ['annotations']

    # Keep answers seen in at least 9 training questions.
    occurence = filter_answers(train_answers, 9)
    ans2label = create_ans2label(occurence, 'trainval', "data/cp-v1-cache")
    compute_target(train_answers, ans2label, 'train', "data/cp-v1-cache")
    compute_target(val_answers, ans2label, 'val', "data/cp-v1-cache")
def load_v2():
    """Build the answer vocabulary and soft-score targets for VQA v2,
    writing to data/cache."""
    train_answer_file = 'data/v2_mscoco_train2014_annotations.json'
    with open(train_answer_file) as f:
        train_answers = json.load(f)['annotations']

    val_answer_file = 'data/v2_mscoco_val2014_annotations.json'
    with open(val_answer_file) as f:
        val_answers = json.load(f)['annotations']

    # Keep answers seen in at least 9 training questions.
    occurence = filter_answers(train_answers, 9)
    ans2label = create_ans2label(occurence, 'trainval', "data/cache")
    compute_target(train_answers, ans2label, 'train', "data/cache")
    compute_target(val_answers, ans2label, 'val', "data/cache")
def load_cp():
    """Build the answer vocabulary and soft-score targets for VQA-CP v2,
    writing to data/cpval-cache.

    NOTE(review): this REDEFINES the load_cp declared earlier in this module;
    being later, it is the one main() actually calls, and it writes to
    data/cpval-cache instead of data/cp-cache — confirm this is intentional.
    """
    train_answer_file = "data/vqacp_v2_train_annotations.json"
    with open(train_answer_file) as f:
        train_answers = json.load(f)  # ['annotations']

    val_answer_file = "data/vqacp_v2_test_annotations.json"
    with open(val_answer_file) as f:
        val_answers = json.load(f)  # ['annotations']

    # Keep answers seen in at least 9 training questions.
    occurence = filter_answers(train_answers, 9)
    ans2label = create_ans2label(occurence, 'trainval', "data/cpval-cache")
    compute_target(train_answers, ans2label, 'train', "data/cpval-cache")
    compute_target(val_answers, ans2label, 'val', "data/cpval-cache")
def main():
    """Dispatch dataset preprocessing based on the positional argument."""
    parser = argparse.ArgumentParser("Dataset preprocessing")
    parser.add_argument("dataset", choices=["cp_v2", "v2", "cp_v1"])
    args = parser.parse_args()
    # argparse's `choices` guarantees the key exists.
    loaders = {"v2": load_v2, "cp_v1": load_cp_v1, "cp_v2": load_cp}
    loaders[args.dataset]()
# Script entry point: run dataset preprocessing when invoked directly.
if __name__ == '__main__':
    main()
| 10,741 | 33.210191 | 76 | py |
introd | introd-main/css/tools/compute_softscore_val.py | from __future__ import print_function
import argparse
import os
import sys
import json
import numpy as np
import re
import cPickle
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dataset import Dictionary
import utils
contractions = {
"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve":
"could've", "couldnt": "couldn't", "couldn'tve": "couldn't've",
"couldnt've": "couldn't've", "didnt": "didn't", "doesnt":
"doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've":
"hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent":
"haven't", "hed": "he'd", "hed've": "he'd've", "he'dve":
"he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll",
"hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", "Im":
"I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've":
"it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's",
"maam": "ma'am", "mightnt": "mightn't", "mightnt've":
"mightn't've", "mightn'tve": "mightn't've", "mightve": "might've",
"mustnt": "mustn't", "mustve": "must've", "neednt": "needn't",
"notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't",
"ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat":
"'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve":
"she'd've", "she's": "she's", "shouldve": "should've", "shouldnt":
"shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve":
"shouldn't've", "somebody'd": "somebodyd", "somebodyd've":
"somebody'd've", "somebody'dve": "somebody'd've", "somebodyll":
"somebody'll", "somebodys": "somebody's", "someoned": "someone'd",
"someoned've": "someone'd've", "someone'dve": "someone'd've",
"someonell": "someone'll", "someones": "someone's", "somethingd":
"something'd", "somethingd've": "something'd've", "something'dve":
"something'd've", "somethingll": "something'll", "thats":
"that's", "thered": "there'd", "thered've": "there'd've",
"there'dve": "there'd've", "therere": "there're", "theres":
"there's", "theyd": "they'd", "theyd've": "they'd've", "they'dve":
"they'd've", "theyll": "they'll", "theyre": "they're", "theyve":
"they've", "twas": "'twas", "wasnt": "wasn't", "wed've":
"we'd've", "we'dve": "we'd've", "weve": "we've", "werent":
"weren't", "whatll": "what'll", "whatre": "what're", "whats":
"what's", "whatve": "what've", "whens": "when's", "whered":
"where'd", "wheres": "where's", "whereve": "where've", "whod":
"who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl":
"who'll", "whos": "who's", "whove": "who've", "whyll": "why'll",
"whyre": "why're", "whys": "why's", "wont": "won't", "wouldve":
"would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll":
"y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've",
"y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd":
"you'd", "youd've": "you'd've", "you'dve": "you'd've", "youll":
"you'll", "youre": "you're", "youve": "you've"
}
manual_map = { 'none': '0',
'zero': '0',
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '5',
'six': '6',
'seven': '7',
'eight': '8',
'nine': '9',
'ten': '10'}
articles = ['a', 'an', 'the']
period_strip = re.compile("(?!<=\d)(\.)(?!\d)")
comma_strip = re.compile("(\d)(\,)(\d)")
punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
def get_score(occurences):
    """Soft VQA accuracy for an answer given `occurences` annotator votes.

    Standard VQA soft-score ladder: 0 -> 0, 1 -> 0.3, 2 -> 0.6, 3 -> 0.9,
    anything else (4+ votes) -> 1.
    """
    ladder = (0, 0.3, 0.6, 0.9)
    if 0 <= occurences < len(ladder):
        return ladder[occurences]
    return 1
def process_punctuation(inText):
    """Remove/normalise punctuation as in the official VQA answer processing.

    Each mark in `punct` is deleted when it touches a space in the original
    text (or when the text contains a digit,comma,digit run); otherwise it
    is replaced with a space. Periods are then stripped.
    """
    outText = inText
    for p in punct:
        # Note: membership is tested against the *original* inText while
        # replacements accumulate in outText — preserved from the reference
        # VQA evaluation script.
        if (p + ' ' in inText or ' ' + p in inText) \
                or (re.search(comma_strip, inText) != None):
            outText = outText.replace(p, '')
        else:
            outText = outText.replace(p, ' ')
    # NOTE(review): re.UNICODE (= 32) is received here as Pattern.sub's
    # positional `count` argument (compiled patterns accept no flags in sub),
    # so at most 32 periods are removed per string. Kept as-is for parity
    # with the canonical VQA eval code — confirm before changing.
    outText = period_strip.sub("", outText, re.UNICODE)
    return outText
def process_digit_article(inText):
    """Normalise a VQA answer: lowercase, map number words to digits,
    drop articles, and expand missing-apostrophe contractions.

    Returns the normalised string.
    """
    outText = []
    for word in inText.lower().split():
        # Fix: the original used manual_map.setdefault(word, word), which
        # inserted every word it ever saw into the shared module-level
        # manual_map dict. .get() performs the same lookup without mutating.
        word = manual_map.get(word, word)
        if word not in articles:
            outText.append(word)
    # Second pass: restore apostrophes in contractions ("dont" -> "don't").
    for wordId, word in enumerate(outText):
        if word in contractions:
            outText[wordId] = contractions[word]
    return ' '.join(outText)
def multiple_replace(text, wordDict):
    """Return `text` with every key of `wordDict` replaced by its value,
    applied in the dict's iteration order."""
    for old, new in wordDict.items():
        text = text.replace(old, new)
    return text
def preprocess_answer(answer):
    """Fully normalise a raw VQA answer string: punctuation, digits/articles,
    then strip any remaining commas."""
    cleaned = process_punctuation(answer)
    cleaned = process_digit_article(cleaned)
    return cleaned.replace(',', '')
def filter_answers(answers_dset, min_occurence):
    """Collect preprocessed ground-truth answers appearing in at least
    `min_occurence` distinct questions.

    Returns a dict mapping answer -> set of question_ids.
    """
    occurence = {}
    for ans_entry in answers_dset:
        gtruth = preprocess_answer(ans_entry['multiple_choice_answer'])
        occurence.setdefault(gtruth, set()).add(ans_entry['question_id'])
    # Fix: the original popped entries while iterating occurence.keys(),
    # which raises RuntimeError on Python 3 (dict changed size during
    # iteration). Building a filtered dict is safe on both 2 and 3.
    occurence = {ans: qids for ans, qids in occurence.items()
                 if len(qids) >= min_occurence}
    print('Num of answers that appear >= %d times: %d' % (
        min_occurence, len(occurence)))
    return occurence
def create_ans2label(occurence, name, cache_root):
    """Assign dense integer labels to the answers in `occurence`.

    Note that this will also create label2ans.pkl at the same time.
    occurence: dict {answer -> whatever}
    name: prefix of the output files written under cache_root
    Returns the answer -> label dict.
    """
    label2ans = list(occurence)
    ans2label = {answer: label for label, answer in enumerate(label2ans)}
    utils.create_dir(cache_root)
    cPickle.dump(ans2label,
                 open(os.path.join(cache_root, name + '_ans2label.pkl'), 'wb'))
    cPickle.dump(label2ans,
                 open(os.path.join(cache_root, name + '_label2ans.pkl'), 'wb'))
    return ans2label
def compute_target(answers_dset, ans2label, name, cache_root):
    """Augment answers_dset with soft score as label
    ***answers_dset should be preprocessed***
    Write result into a cache file

    For every annotation entry this builds:
      labels       - label ids of the in-vocabulary answers
      scores       - VQA soft scores (via get_score) parallel to labels
      label_counts - raw annotator count keyed by label id
    and pickles the full list to <cache_root>/<name>_target.pkl.
    Returns the list of target dicts.
    """
    target = []
    for ans_entry in answers_dset:
        answers = ans_entry['answers']
        # Tally how many annotators gave each distinct answer string.
        answer_count = {}
        for answer in answers:
            answer_ = answer['answer']
            answer_count[answer_] = answer_count.get(answer_, 0) + 1
        labels = []
        scores = []
        for answer in answer_count:
            # Answers outside the trained vocabulary are silently skipped.
            if answer not in ans2label:
                continue
            labels.append(ans2label[answer])
            score = get_score(answer_count[answer])
            scores.append(score)
        # Raw counts re-keyed by label id (same filtering as above).
        label_counts = {}
        for k, v in answer_count.items():
            if k in ans2label:
                label_counts[ans2label[k]] = v
        target.append({
            'question_id': ans_entry['question_id'],
            'question_type': ans_entry['question_type'],
            'image_id': ans_entry['image_id'],
            'label_counts': label_counts,
            'labels': labels,
            'scores': scores
        })
    print(cache_root)
    utils.create_dir(cache_root)
    cache_file = os.path.join(cache_root, name+'_target.pkl')
    print(cache_file)
    with open(cache_file, 'wb') as f:
        cPickle.dump(target, f)
    return target
def get_answer(qid, answers):
    """Return the answer entry with question_id == `qid`, or None."""
    return next((ans for ans in answers if ans['question_id'] == qid), None)
def get_question(qid, questions):
    """Return the question entry with question_id == `qid`, or None."""
    return next((q for q in questions if q['question_id'] == qid), None)
def load_cp():
    """Build labels and soft-score targets for the vqacp2val copies of the
    VQA-CP v2 annotation files, cached under data/cpval-cache."""
    def _read(path):
        with open(path) as f:
            return json.load(f)  # ['annotations'] not needed for CP files

    train_answers = _read("data/vqacp2val/vqacp_v2_train_annotations.json")
    val_answers = _read("data/vqacp2val/vqacp_v2_test_annotations.json")
    occurence = filter_answers(train_answers, 9)
    ans2label = create_ans2label(occurence, 'trainval', "data/cpval-cache")
    compute_target(train_answers, ans2label, 'train', "data/cpval-cache")
    compute_target(val_answers, ans2label, 'val', "data/cpval-cache")
def main():
    """Entry point: this script variant always runs the VQA-CP v2
    (vqacp2val) preprocessing.

    The commented-out argparse dataset switch inherited from
    compute_softscore.py was removed as dead code.
    """
    load_cp()
if __name__ == '__main__':
main()
| 9,026 | 32.309963 | 76 | py |
introd | introd-main/css/tools/create_dictionary_v1.py | from __future__ import print_function
import os
import sys
import json
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dataset import Dictionary
def create_dictionary(dataroot):
    """Tokenise every VQA v1 question file under `dataroot` into a Dictionary.

    Returns the populated Dictionary (vocabulary grows via tokenize(..., True)).
    """
    dictionary = Dictionary()
    files = [
        'OpenEnded_mscoco_train2014_questions.json',
        'OpenEnded_mscoco_val2014_questions.json',
        'OpenEnded_mscoco_test2015_questions.json',
        'OpenEnded_mscoco_test-dev2015_questions.json'
    ]
    for path in files:
        question_path = os.path.join(dataroot, path)
        # Context manager fixes the leaked handle from json.load(open(...)).
        with open(question_path) as f:
            qs = json.load(f)['questions']
        for q in qs:
            dictionary.tokenize(q['question'], True)
    return dictionary
def create_glove_embedding_init(idx2word, glove_file):
    """Build an embedding matrix for `idx2word` initialised from GloVe.

    Words missing from the GloVe file keep zero vectors.
    Returns (weights float32 array of shape (len(idx2word), emb_dim),
    word -> vector dict for every word in the GloVe file).
    """
    word2emb = {}
    with open(glove_file, 'r') as f:
        entries = f.readlines()
    emb_dim = len(entries[0].split(' ')) - 1
    print('embedding dim is %d' % emb_dim)
    weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32)
    for entry in entries:
        vals = entry.split(' ')
        word = vals[0]
        # Fix: np.array(map(float, ...)) produces a 0-d object array on
        # Python 3, which breaks the row assignment below. Parse the
        # floats explicitly so this works on both Python 2 and 3.
        word2emb[word] = np.array([float(v) for v in vals[1:]])
    for idx, word in enumerate(idx2word):
        if word not in word2emb:
            continue
        weights[idx] = word2emb[word]
    return weights, word2emb
if __name__ == '__main__':
d = create_dictionary('data')
d.dump_to_file('data/dictionary_v1.pkl')
d = Dictionary.load_from_file('data/dictionary_v1.pkl')
emb_dim = 300
glove_file = 'data/glove/glove.6B.%dd.txt' % emb_dim
weights, word2emb = create_glove_embedding_init(d.idx2word, glove_file)
np.save('data/glove6b_init_%dd_v1.npy' % emb_dim, weights)
| 1,745 | 30.178571 | 76 | py |
introd | introd-main/css/tools/create_dictionary.py | from __future__ import print_function
import os
import sys
import json
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dataset import Dictionary
def create_dictionary(dataroot):
    """Tokenise every VQA v2 question file under `dataroot` into a Dictionary.

    Also registers the special 'wordmask' token. Returns the populated
    Dictionary (vocabulary grows via tokenize(..., True)).
    """
    dictionary = Dictionary()
    files = [
        'v2_OpenEnded_mscoco_train2014_questions.json',
        'v2_OpenEnded_mscoco_val2014_questions.json',
        'v2_OpenEnded_mscoco_test2015_questions.json',
        'v2_OpenEnded_mscoco_test-dev2015_questions.json'
    ]
    for path in files:
        question_path = os.path.join(dataroot, path)
        # Context manager fixes the leaked handle from json.load(open(...)).
        with open(question_path) as f:
            qs = json.load(f)['questions']
        for q in qs:
            dictionary.tokenize(q['question'], True)
    dictionary.tokenize('wordmask', True)
    return dictionary
def create_glove_embedding_init(idx2word, glove_file):
    """Build an embedding matrix for `idx2word` initialised from GloVe.

    Words missing from the GloVe file keep zero vectors.
    Returns (weights float32 array of shape (len(idx2word), emb_dim),
    word -> vector dict for every word in the GloVe file).
    """
    word2emb = {}
    with open(glove_file, 'r') as f:
        entries = f.readlines()
    emb_dim = len(entries[0].split(' ')) - 1
    print('embedding dim is %d' % emb_dim)
    weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32)
    for entry in entries:
        vals = entry.split(' ')
        word = vals[0]
        # Fix: np.array(map(float, ...)) produces a 0-d object array on
        # Python 3, which breaks the row assignment below. Parse the
        # floats explicitly so this works on both Python 2 and 3.
        word2emb[word] = np.array([float(v) for v in vals[1:]])
    for idx, word in enumerate(idx2word):
        if word not in word2emb:
            continue
        weights[idx] = word2emb[word]
    return weights, word2emb
if __name__ == '__main__':
d = create_dictionary('data')
d.dump_to_file('data/dictionary.pkl')
d = Dictionary.load_from_file('data/dictionary.pkl')
emb_dim = 300
glove_file = 'data/glove/glove.6B.%dd.txt' % emb_dim
weights, word2emb = create_glove_embedding_init(d.idx2word, glove_file)
np.save('data/glove6b_init_%dd.npy' % emb_dim, weights)
| 1,799 | 29.508475 | 76 | py |
lda-c | lda-c-master/topics.py | #! /usr/bin/python
# usage: python topics.py <beta file> <vocab file> <num words>
#
# <beta file> is output from the lda-c code
# <vocab file> is a list of words, one per line
# <num words> is the number of words to print from each topic
import sys
def print_topics(beta_file, vocab_file, nwords = 25):
    """Print the top `nwords` vocabulary words of each topic in an lda-c
    beta file, one topic block at a time.

    NOTE: Python 2 only — uses print statements, the `file()` builtin, and
    `list.sort(cmp)`. Each line of the beta file holds one topic's
    per-word values; words are printed in descending value order.
    """
    # get the vocabulary
    vocab = file(vocab_file, 'r').readlines()
    # vocab = map(lambda x: x.split()[0], vocab)
    vocab = map(lambda x: x.strip(), vocab)
    # for each line in the beta file
    indices = range(len(vocab))
    topic_no = 0
    for topic in file(beta_file, 'r'):
        print 'topic %03d' % topic_no
        topic = map(float, topic.split())
        # Sort word indices so the highest topic values come first
        # (negated cmp gives descending order; Python 2 list sort).
        indices.sort(lambda x,y: -cmp(topic[x], topic[y]))
        for i in range(nwords):
            print '   %s' % vocab[indices[i]]
        topic_no = topic_no + 1
        print '\n'
if (__name__ == '__main__'):
if (len(sys.argv) != 4):
print 'usage: python topics.py <beta-file> <vocab-file> <num words>\n'
sys.exit(1)
beta_file = sys.argv[1]
vocab_file = sys.argv[2]
nwords = int(sys.argv[3])
print_topics(beta_file, vocab_file, nwords)
| 1,160 | 26.642857 | 77 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/conftest.py | import os
import pytest
from networkx import DiGraph
from api import create_app
CI_ENV = (os.getenv("CI") == "true")
#
# RT GRAPHS
#
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "test", "data")
TMP_DATA_DIR = os.path.join(TEST_DATA_DIR, "tmp")
@pytest.fixture(scope="module")
def mock_user_friends():
return [
{"screen_name":"A", "friend_names":["B", "C", "D"]},
{"screen_name":"B", "friend_names":["C", "D"]},
{"screen_name":"C", "friend_names":["D"]},
{"screen_name":"D", "friend_names":["C"]},
{"screen_name":"E", "friend_names":["F"]},
]
@pytest.fixture(scope="module")
def mock_graph(mock_user_friends):
graph = DiGraph()
for row in mock_user_friends:
user = row["screen_name"]
friends = row["friend_names"]
graph.add_node(user)
graph.add_nodes_from(friends)
graph.add_edges_from([(user, friend) for friend in friends])
return graph
@pytest.fixture(scope="module")
def expected_nodes():
return ["A", "B", "C", "D", "E", "F"] # all users, followed or following
@pytest.fixture(scope="module")
def expected_edges():
return [
("A", "B"), ("A", "C"), ("A", "D"), # "A" follows "B", "C", and "D"
("B", "C"), ("B", "D"), # "B" follows "C" and "D"
("C", "D"), # "C" follows "D"
("D", "C"), # "D" follows "C"
("E", "F") # "E" follows "F" and "F" follows no-one
]
mock_rt_graph_edge_list = [
# add some examples of users retweeting others:
{"user_screen_name": "user1", "retweet_user_screen_name": "leader1", "retweet_count": 40},
{"user_screen_name": "user2", "retweet_user_screen_name": "leader1", "retweet_count": 60},
{"user_screen_name": "user3", "retweet_user_screen_name": "leader2", "retweet_count": 40},
{"user_screen_name": "user4", "retweet_user_screen_name": "leader2", "retweet_count": 20},
{"user_screen_name": "user5", "retweet_user_screen_name": "leader3", "retweet_count": 40},
# add some examples of users retweeting eachother:
{"user_screen_name": "colead1", "retweet_user_screen_name": "colead2", "retweet_count": 30},
{"user_screen_name": "colead2", "retweet_user_screen_name": "colead1", "retweet_count": 20},
{"user_screen_name": "colead3", "retweet_user_screen_name": "colead4", "retweet_count": 10},
{"user_screen_name": "colead4", "retweet_user_screen_name": "colead3", "retweet_count": 40}
]
def compile_mock_rt_graph(edge_list=mock_rt_graph_edge_list, weight_attr="retweet_count"):
    """
    Builds a directed retweet graph from an edge list.

    Param
        edge_list (list of dict) rows with "user_screen_name",
            "retweet_user_screen_name", and a numeric weight column, like: [
            {"user_screen_name": "user1", "retweet_user_screen_name": "leader1", "weight": 4},
        ]
        weight_attr (str) the name of the weight attribute for each edge in the edge list

    Each edge points retweeter -> retweeted user and stores the weight as a
    float under the "rt_count" edge attribute.
    """
    graph = DiGraph()
    for edge in edge_list:
        retweeter = edge["user_screen_name"]
        retweeted = edge["retweet_user_screen_name"]
        graph.add_edge(retweeter, retweeted, rt_count=float(edge[weight_attr]))
    return graph
@pytest.fixture(scope="module")
def mock_rt_graph():
"""
Returns a retweet graph with sufficient energy to populate bot probabilities given default hyperparams
"""
return compile_mock_rt_graph()
#
# API
#
@pytest.fixture
def api_client():
app = create_app()
return app.test_client()
#
# TOXICITY CLASSIFICATION
#
from app.toxicity.model_manager import ModelManager
@pytest.fixture(scope="module")
def original_model_manager():
mgr = ModelManager(checkpoint_name="original")
mgr.load_model_state()
return mgr
toxicity_texts = [
"RT @realDonaldTrump: Crazy Nancy Pelosi should spend more time in her decaying city and less time on the Impeachment Hoax! https://t.co/eno…",
"RT @SpeakerPelosi: The House cannot choose our impeachment managers until we know what sort of trial the Senate will conduct. President Tr…",
]
| 4,029 | 32.865546 | 147 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/api/__init__.py |
import os
from dotenv import load_dotenv
from flask import Flask
from flask_cors import CORS
from api.routes.v0_routes import api_routes as api_v0_routes
from api.routes.v1_routes import api_routes as api_v1_routes
from app.bq_service import BigQueryService
load_dotenv()
SECRET_KEY = os.getenv("SECRET_KEY", default="super secret")
def create_app():
    """Flask application factory.

    Enables CORS for all routes, stores a shared BigQueryService (with
    cautious mode off) in app config under "BQ_SERVICE" for the route
    handlers, and registers the v0 and v1 API blueprints.
    """
    app = Flask(__name__)
    CORS(app) # CORS(app, resources={r"/api/*": {"origins": "*"}})
    app.config["SECRET_KEY"] = SECRET_KEY
    app.config["BQ_SERVICE"] = BigQueryService(cautious=False)
    #app.config.from_mapping(SECRET_KEY=SECRET_KEY, BQ_SERVICE=BigQueryService())
    app.register_blueprint(api_v0_routes)
    app.register_blueprint(api_v1_routes)
    return app
if __name__ == "__main__":
my_app = create_app()
my_app.run(debug=True)
| 820 | 24.65625 | 81 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/api/prep/daily_bot_scores.py |
import os
import json
from pandas import read_csv
import numpy as np
from app import DATA_DIR
#def binned_score(num):
class NpEncoder(json.JSONEncoder):
    """JSON encoder that serialises NumPy scalars and arrays.

    NumPy integers/floats become native ints/floats and ndarrays become
    (nested) lists; anything else falls through to the base encoder,
    which raises TypeError as usual.
    """

    def default(self, obj):
        conversions = (
            (np.integer, int),
            (np.floating, float),
            (np.ndarray, lambda arr: arr.tolist()),
        )
        for np_type, convert in conversions:
            if isinstance(obj, np_type):
                return convert(obj)
        return super(NpEncoder, self).default(obj)
if __name__ == "__main__":
date = "2020-02-01" # todo iterate through dates
daily_dirpath = os.path.join(DATA_DIR, "retweet_graphs_v2", "k_days", "1", date)
csv_filepath = os.path.join(daily_dirpath, "bot_probabilities.csv")
json_histogram_filepath = os.path.join(daily_dirpath, "bot_probabilities_histogram.json")
#json_filepath = os.path.join(daily_dirpath, "bot_probabilities.json")
json_bars_filepath = os.path.join(daily_dirpath, "bot_probability_bars.json")
print("READING CSV", csv_filepath)
df = read_csv(csv_filepath)
print(df.head())
# https://numpy.org/doc/stable/reference/generated/numpy.histogram.html
# hist array The values of the histogram. See density and weights for a description of the possible semantics.
# bin_edges array of dtype float. Return the bin edges (length(hist)+1).
hist, bin_edges = np.histogram(df["bot_probability"], bins=100, range=[0, 1])
print("HIST:", len(list(hist))) #> 100
print("BIN EDGES:", len(list(bin_edges))) #> 101
# weird, but https://formidable.com/open-source/victory/docs/victory-histogram/
# turns out VictoryHistogram likes this format, so lets just convert it to json...
response = {
"date": date,
"hist": hist.tolist(),
"bin_edges": [round(v.item(), 2) for v in bin_edges] # round to 2 decimal places because dealing with some vals like 0.35000000000000003 ewww
}
print(response)
print("WRITING HISTOGRAM", json_histogram_filepath)
with open(json_histogram_filepath, "w") as json_file:
json.dump(response, json_file)
#bot_probabilities = df["bot_probability"].tolist()
#print("WRITING JSON", json_filepath)
#with open(json_filepath, "w") as json_file:
# json.dump(bot_probabilities, json_file)
#hist, bin_edges = np.histogram(df["bot_probability"], bins=20, range=[0, 1]) # in bins of 0.05
#categories = [round(val, 2) for val in bin_edges.tolist()[0:20]] #> [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
#response = [{k: v} for k,v in zip(categories, hist)]
##> [{0.0: 634}, {0.05: 42}, {0.1: 30}, {0.15: 32}, {0.2: 32}, {0.25: 25}, {0.3: 48}, {0.35: 59}, {0.4: 62}, {0.45: 592}, {0.5: 322649}, {0.55: 2953}, {0.6: 1709}, {0.65: 1251}, {0.7: 1049}, {0.75: 792}, {0.8: 783}, {0.85: 740}, {0.9: 784}, {0.95: 1410}]
#print("WRITING JSON BARS", json_bars_filepath)
#with open(json_bars_filepath, "w") as json_file:
# json.dump(response, json_file, cls=NpEncoder)
hist, bin_edges = np.histogram(df["bot_probability"], bins=20, range=[0, 1]) # in bins of 0.05
categories = [round(val, 2) for val in bin_edges.tolist()[0:20]] #> [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
response = [{"category":k, "frequency": v} for k,v in zip(categories, hist)]
#> [{0.0: 634}, {0.05: 42}, {0.1: 30}, {0.15: 32}, {0.2: 32}, {0.25: 25}, {0.3: 48}, {0.35: 59}, {0.4: 62}, {0.45: 592}, {0.5: 322649}, {0.55: 2953}, {0.6: 1709}, {0.65: 1251}, {0.7: 1049}, {0.75: 792}, {0.8: 783}, {0.85: 740}, {0.9: 784}, {0.95: 1410}]
print("WRITING JSON BARS", json_bars_filepath)
with open(json_bars_filepath, "w") as json_file:
json.dump(response, json_file, cls=NpEncoder)
| 3,822 | 45.621951 | 258 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/api/routes/v0_routes.py |
from flask import Blueprint, current_app, jsonify, request
api_routes = Blueprint("v0_routes", __name__)
@api_routes.route("/api/v0/user_details/<screen_name>")
def user_details(screen_name=None):
    """Return aggregated profile details for a single screen name.

    400 for suspicious input, 404 when the user is unknown.
    """
    # Just be super safe about preventing SQL injection: there are no real
    # screen names containing "@" or ";".
    if "@" in screen_name or ";" in screen_name:
        return jsonify({"message": f"Oh, expecting a screen name like 'politico'. Please try again."}), 400

    rows = list(current_app.config["BQ_SERVICE"].fetch_user_details_api_v0(screen_name))
    # Explicit empty-result check instead of the previous pattern of
    # indexing rows[0], catching IndexError, and printing the exception.
    if not rows:
        return jsonify({"message": f"Oh, couldn't find user with screen name '{screen_name}'. Please try again."}), 404
    return jsonify(dict(rows[0]))
@api_routes.route("/api/v0/user_tweets/<screen_name>")
def user_tweets(screen_name=None):
    """Return the collected tweets for one screen name (400 for bad input)."""
    # Just be super safe about preventing SQL injection: there are no real
    # screen names containing "@" or ";".
    if "@" in screen_name or ";" in screen_name:
        return jsonify({"message": f"Oh, expecting a screen name like 'politico'. Please try again."}), 400

    rows = list(current_app.config["BQ_SERVICE"].fetch_user_tweets_api_v0(screen_name))
    # An unknown user simply yields [] with a 200. The old
    # `except IndexError` 404 branch could never fire here (a list
    # comprehension over a list doesn't raise IndexError), so that dead
    # handler was removed.
    return jsonify([dict(row) for row in rows])
@api_routes.route("/api/v0/users_most_retweeted")
def users_most_retweeted():
    """Top retweeted users, with optional ?metric= and ?limit= filters."""
    params = {"metric": request.args.get("metric"), "limit": request.args.get("limit")}
    print("QUERY PARAMS:", params)
    rows = current_app.config["BQ_SERVICE"].fetch_users_most_retweeted_api_v0(**params)
    return jsonify([dict(row) for row in rows])
@api_routes.route("/api/v0/statuses_most_retweeted")
def statuses_most_retweeted():
    """Top retweeted statuses, with optional ?metric= and ?limit= filters."""
    params = {"metric": request.args.get("metric"), "limit": request.args.get("limit")}
    print("QUERY PARAMS:", params)
    rows = current_app.config["BQ_SERVICE"].fetch_statuses_most_retweeted_api_v0(**params)
    return jsonify([dict(row) for row in rows])
@api_routes.route("/api/v0/top_profile_tokens")
def top_profile_tokens():
    """Most common profile tokens per community (optional ?limit=)."""
    params = {"limit": request.args.get("limit")}
    print("QUERY PARAMS:", params)
    rows = current_app.config["BQ_SERVICE"].fetch_top_profile_tokens_api_v0(**params)
    return jsonify([dict(row) for row in rows])
@api_routes.route("/api/v0/top_profile_tags")
def top_profile_tags():
    """Most common profile hashtags per community (optional ?limit=)."""
    params = {"limit": request.args.get("limit")}
    print("QUERY PARAMS:", params)
    rows = current_app.config["BQ_SERVICE"].fetch_top_profile_tags_api_v0(**params)
    return jsonify([dict(row) for row in rows])
@api_routes.route("/api/v0/top_status_tokens")
def top_status_tokens():
    """Most common status tokens per community (optional ?limit=)."""
    params = {"limit": request.args.get("limit")}
    print("QUERY PARAMS:", params)
    rows = current_app.config["BQ_SERVICE"].fetch_top_status_tokens_api_v0(**params)
    return jsonify([dict(row) for row in rows])
@api_routes.route("/api/v0/top_status_tags")
def top_status_tags():
    """Most common status hashtags per community (optional ?limit=)."""
    params = {"limit": request.args.get("limit")}
    print("QUERY PARAMS:", params)
    rows = current_app.config["BQ_SERVICE"].fetch_top_status_tags_api_v0(**params)
    return jsonify([dict(row) for row in rows])
| 3,560 | 47.780822 | 143 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/api/routes/v1_routes.py |
from flask import Blueprint, current_app, jsonify, request
api_routes = Blueprint("v1_routes", __name__)
@api_routes.route("/api/v1/user_tweets/<screen_name>")
def user_tweets(screen_name=None):
    """Return the collected tweets for one screen name (v1 schema)."""
    # Just be super safe about preventing SQL injection: there are no real
    # screen names containing "@" or ";".
    if "@" in screen_name or ";" in screen_name:
        return jsonify({"message": f"Oh, expecting a screen name like 'politico'. Please try again."}), 400

    rows = list(current_app.config["BQ_SERVICE"].fetch_user_tweets_api_v1(screen_name))
    # An unknown user simply yields [] with a 200. The old
    # `except IndexError` 404 branch could never fire here (a list
    # comprehension over a list doesn't raise IndexError), so that dead
    # handler was removed.
    return jsonify([dict(row) for row in rows])
@api_routes.route("/api/v1/users_most_followed")
def users_most_followed():
    """Most-followed users (optional ?limit=)."""
    params = {"limit": request.args.get("limit")}
    print("QUERY PARAMS:", params)
    rows = current_app.config["BQ_SERVICE"].fetch_users_most_followed_api_v1(**params)
    return jsonify([dict(row) for row in rows])
| 1,150 | 45.04 | 143 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_api_v0.py | import json
import pytest
from conftest import CI_ENV
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_user_details(api_client):
expected_keys = ['screen_name_count', 'screen_names', 'tweet_count', 'user_created_at', 'user_descriptions', 'user_id', 'user_names']
response = api_client.get('/api/v0/user_details/politico')
parsed_response = json.loads(response.data)
assert response.status_code == 200
assert isinstance(parsed_response, dict)
assert sorted(list(parsed_response.keys())) == expected_keys
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_user_tweets(api_client):
expected_keys = ['created_at', 'opinion_score', 'status_id', 'status_text']
response = api_client.get('/api/v0/user_tweets/berniesanders')
parsed_response = json.loads(response.data)
assert response.status_code == 200
assert isinstance(parsed_response, list)
assert any(parsed_response)
assert isinstance(parsed_response[0], dict)
assert sorted(list(parsed_response[0].keys())) == expected_keys
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_users_most_retweeted(api_client):
expected_keys = ['community_id', 'retweet_count', 'retweeted_user_screen_name', 'retweeter_count']
response = api_client.get('/api/v0/users_most_retweeted')
users = json.loads(response.data)
assert response.status_code == 200
assert isinstance(users, list)
assert len(users) == 50
assert isinstance(users[0], dict)
assert sorted(list(users[0].keys())) == expected_keys
assert len([u for u in users if u["community_id"] == 0]) == len([u for u in users if u["community_id"] == 1])
response = api_client.get('/api/v0/users_most_retweeted?limit=3')
users = json.loads(response.data)
assert len(users) == 6
assert sorted(list(users[0].keys())) == expected_keys
assert len([u for u in users if u["community_id"] == 0]) == len([u for u in users if u["community_id"] == 1])
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_statuses_most_retweeted(api_client):
expected_keys = ['community_id', 'retweet_count', 'retweeted_user_screen_name', 'retweeter_count', 'status_text']
response = api_client.get('/api/v0/statuses_most_retweeted')
statuses = json.loads(response.data)
assert response.status_code == 200
assert isinstance(statuses, list)
assert len(statuses) == 50
assert isinstance(statuses[0], dict)
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
response = api_client.get('/api/v0/statuses_most_retweeted?limit=3')
statuses = json.loads(response.data)
assert len(statuses) == 6
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_top_profile_tokens(api_client):
expected_keys = ['community_id', 'count', 'pct', 'rank', 'token']
response = api_client.get('/api/v0/top_profile_tokens')
statuses = json.loads(response.data)
assert response.status_code == 200
assert isinstance(statuses, list)
assert len(statuses) == 40
assert isinstance(statuses[0], dict)
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
response = api_client.get('/api/v0/top_profile_tokens?limit=3')
statuses = json.loads(response.data)
assert len(statuses) == 6
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_top_profile_tags(api_client):
expected_keys = ['community_id', 'count', 'pct', 'rank', 'token']
response = api_client.get('/api/v0/top_profile_tags')
statuses = json.loads(response.data)
assert response.status_code == 200
assert isinstance(statuses, list)
assert len(statuses) == 40
assert isinstance(statuses[0], dict)
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
response = api_client.get('/api/v0/top_profile_tags?limit=3')
statuses = json.loads(response.data)
assert len(statuses) == 6
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_top_status_tokens(api_client):
expected_keys = ['community_id', 'count', 'doc_count', 'doc_pct', 'pct', 'rank', 'token']
response = api_client.get('/api/v0/top_status_tokens')
statuses = json.loads(response.data)
assert response.status_code == 200
assert isinstance(statuses, list)
assert len(statuses) == 100
assert isinstance(statuses[0], dict)
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
response = api_client.get('/api/v0/top_status_tokens?limit=3')
statuses = json.loads(response.data)
assert len(statuses) == 6
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_top_status_tags(api_client):
expected_keys = ['community_id', 'count', 'doc_count', 'doc_pct', 'pct', 'rank', 'token']
response = api_client.get('/api/v0/top_status_tags')
statuses = json.loads(response.data)
assert response.status_code == 200
assert isinstance(statuses, list)
assert len(statuses) == 100
assert isinstance(statuses[0], dict)
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
response = api_client.get('/api/v0/top_status_tags?limit=3')
statuses = json.loads(response.data)
assert len(statuses) == 6
assert sorted(list(statuses[0].keys())) == expected_keys
assert len([s for s in statuses if s["community_id"] == 0]) == len([s for s in statuses if s["community_id"] == 1])
| 6,818 | 47.021127 | 137 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_toxicity_checkpoint_scorer.py |
from app.toxicity.checkpoint_scorer import ToxicityScorer
from app.toxicity.model_manager import ModelManager
from conftest import toxicity_texts
def test_toxicity_scorer(original_model_manager):
# the different models have different class names
# so we need different table structures to store the resulting scores
original = ToxicityScorer(model_manager=original_model_manager)
assert original.mgr.model_name == "BertForSequenceClassification"
assert original.mgr.model_type == "bert-base-uncased"
assert original.mgr.class_names == [
'toxicity',
'severe_toxicity',
'obscene',
'threat',
'insult',
'identity_hate'
]
scores = original.predict(toxicity_texts)
assert scores[0].tolist() == [0.12640126049518585, 0.00022532008006237447, 0.0018298450158908963, 0.0005070280167274177, 0.009287197142839432, 0.0018323149997740984]
assert scores[1].tolist() == [0.0008546802564524114, 0.00011462702241260558, 0.00016588227299507707, 0.00013761487207375467, 0.0001857876923168078, 0.00015746793360449374]
| 1,089 | 42.6 | 175 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_toxicity_scorer.py |
from app.toxicity.scorer import ToxicityScorer
def test_toxicity_scorer():
    """Each model variant exposes its own ordered set of score class names,
    hence each needs its own table structure to store the resulting scores.
    """
    expected_class_names = {
        "original": [
            'toxicity',
            'severe_toxicity',
            'obscene',
            'threat',
            'insult',
            'identity_hate'
        ],
        "unbiased": [
            'toxicity',
            'severe_toxicity',
            'obscene',
            'identity_attack',
            'insult',
            'threat',
            'sexual_explicit'
        ],
    }
    for model_name, class_names in expected_class_names.items():
        scorer = ToxicityScorer(model_name=model_name)  # todo: use fixture
        assert scorer.model.class_names == class_names
| 732 | 25.178571 | 73 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_model_training.py |
# stand-in for pandas.DataFrame; only the class *name* is exercised below
class DataFrame:
    pass
# stand-in for sklearn's LogisticRegression; only the class *name* is exercised below
class LogisticRegression:
    pass
# stand-in for sklearn's MultinomialNB; its acronym suffix is the interesting
# conversion case (see the commented-out assert in test_case_conversion)
class MultinomialNB:
    pass
def camel_to_snake(my_str):
    """Convert a CamelCase name to snake_case.

    Handles acronym runs: "MultinomialNB" -> "multinomial_nb", whereas the
    naive per-uppercase-letter split yields "multinomial_n_b" (which is why
    the MultinomialNB assert below was commented out).

    Param my_str (str or str-like): the name to convert, e.g. a class __name__.

    Returns (str): the snake_case equivalent.
    """
    import re  # local import keeps this drop-in for modules without a top-level `import re`
    # insert "_" before an Upper that follows a lower/digit, and before the
    # final Upper of an acronym run when it starts a new word (Upper+lower)
    return re.sub(r"(?<=[a-z0-9])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])", "_", str(my_str)).lower()
def test_case_conversion():
    """Class names convert to snake_case (table-style) identifiers."""
    cases = [
        (DataFrame, "data_frame"),
        (LogisticRegression, "logistic_regression"),
    ]
    for klass, expected in cases:
        assert camel_to_snake(klass.__name__) == expected
    #assert camel_to_snake(MultinomialNB.__name__) == "multinomial_nb"
| 470 | 25.166667 | 104 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_lda_topics.py |
#topics = [
# {'impeach': 0.058, 'trump': 0.052, 'gop': 0.042, 'clinton': 0.039, 'commit': 0.037, 'condu': 0.037, 'proper': 0.037, 'defense': 0.037, 'jury': 0.037, 'grand': 0.037},
# {'trump': 0.063, 'impeach': 0.058, 'gop': 0.048, 'defense': 0.033, 'clinton': 0.033, 'commit': 0.033, 'grand': 0.032, 'condu': 0.032, 'proper': 0.032, 'perjury': 0.032},
# {'impeach': 0.05, 'trump': 0.035, 'gop': 0.035, 'article': 0.015, 'donald': 0.014, 'draftedbut': 0.013, 'lawmaker': 0.013, 'joncoopertweet': 0.013, 'especially': 0.013, 'man': 0.013},
# {'impeach': 0.05, 'trump': 0.028, 'gop': 0.02, 'proper': 0.018, 'condu': 0.018, 'grand': 0.018, 'perjury': 0.018, 'clinton': 0.018, 'defense': 0.018, 'commit': 0.018},
# {'impeach': 0.038, 'trump': 0.024, 'investigation': 0.023, 'democrat': 0.015, 'thing': 0.013, 'jordan': 0.013, 'leak': 0.012, 'jim': 0.012, 'ahead': 0.012, 'american': 0.012},
# {'impeach': 0.058, 'trump': 0.043, 'sethabramson': 0.022, 'fact': 0.02, 'democrat': 0.017, 'little': 0.016, 'tell': 0.016, 'america': 0.016, 'happen': 0.015, 'phrase': 0.015},
# {'house': 0.023, 'impeach': 0.023, 'senate': 0.02, 'democrat': 0.02, 'moderate': 0.019, 'unlikel': 0.018, 'joycewhitevance': 0.018, 'remember': 0.018, 'kyledcheney': 0.018, 'sarahnferris': 0.018},
# {'gop': 0.088, 'impeach': 0.08, 'trump': 0.071, 'defense': 0.044, 'clinton': 0.044, 'commit': 0.044, 'grand': 0.044, 'joycewhitevance': 0.044, 'proper': 0.044, 'jury': 0.044},
# {'impeach': 0.034, 'investigation': 0.027, 'article': 0.025, 'joycewhitevance': 0.025, 'trump': 0.022, 'congress': 0.016, 'leak': 0.016, 'ahead': 0.014, '2016': 0.014, 'bribery': 0.014},
# {'impeach': 0.044, 'trump': 0.02, 'power': 0.011, 'abuse': 0.01, 'vote': 0.009, 'democratic': 0.009, 'table': 0.009, 'congress': 0.009, 'republican': 0.008, 'fact': 0.008},
# {'impeach': 0.063, 'trump': 0.05, 'joycewhitevance': 0.036, 'gop': 0.033, 'defense': 0.033, 'commit': 0.031, 'clinton': 0.031, 'proper': 0.031, 'jury': 0.03, 'perjury': 0.03},
# {'impeach': 0.037, 'hearing': 0.023, 'gop': 0.023, 'hunter': 0.022, 'biden': 0.022, 'effort': 0.02, 'matt': 0.02, 'gaetz': 0.02, 'problem': 0.019, 'backfire': 0.019},
# {'impeach': 0.07, 'trump': 0.05, 'gop': 0.046, 'defense': 0.045, 'proper': 0.045, 'commit': 0.045, 'perjury': 0.045, 'condu': 0.045, 'clinton': 0.045, 'jury': 0.045},
# {'impeach': 0.049, 'trump': 0.03, 'people': 0.019, 'democrat': 0.018, 'americans': 0.018, 'accept': 0.017, 'patriotic': 0.017, 'jordan': 0.017, 'american': 0.017, 'wordswithsteph': 0.017},
# {'gop': 0.076, 'impeach': 0.063, 'trump': 0.058, 'article': 0.052, 'joncoopertweet': 0.045, 'donald': 0.045, 'especially': 0.039, 'man': 0.038, 'lawmaker': 0.037, 'draftedbut': 0.037},
# {'impeach': 0.064, 'gop': 0.047, 'trump': 0.03, 'clinton': 0.027, 'joycewhitevance': 0.026, 'commit': 0.026, 'grand': 0.026, 'jury': 0.025, 'condu': 0.025, 'proper': 0.025},
# {'impeach': 0.048, 'trump': 0.046, 'gop': 0.027, 'donald': 0.019, 'office': 0.014, 'board': 0.014, 'article': 0.013, 'philadelphia': 0.013, 'inquirer': 0.013, 'editorial': 0.013},
# {'trump': 0.053, 'impeach': 0.047, 'joycewhitevance': 0.036, 'gop': 0.033, 'defense': 0.03, 'condu': 0.029, 'clinton': 0.029, 'jury': 0.029, 'grand': 0.029, 'proper': 0.029},
# {'impeach': 0.071, 'trump': 0.06, 'clinton': 0.059, 'commit': 0.058, 'jury': 0.058, 'gop': 0.058, 'condu': 0.057, 'perjury': 0.057, 'grand': 0.057, 'defense': 0.057},
# {'impeach': 0.043, 'trump': 0.024, 'congress': 0.024, 'article': 0.02, 'htt': 0.019, 'bribery': 0.018, 'specifically': 0.018, 'offense': 0.018, 'include': 0.018, 'glennkirschner2': 0.018}
#]
| 3,663 | 125.344828 | 201 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_psycopg_grapher.py |
def test_set_uniqueness():
    """Sets deduplicate: re-adding an existing member is a no-op."""
    collected = {1}
    for item in (1, 2, 3):
        collected.add(item)
    assert collected == {1, 2, 3}
| 120 | 14.125 | 29 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_friend_collection_in_batches.py |
from app.friend_collection.batch_per_thread import split_into_batches
def test_split_into_batches():
    """An 11-item list in batches of 3 yields three full batches and a 2-item remainder."""
    batches = list(split_into_batches(list(range(11)), 3))
    assert batches == [
        [0, 1, 2],
        [3, 4, 5],
        [6, 7, 8],
        [9, 10]
    ]
| 275 | 20.230769 | 69 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_k_days.py |
from datetime import datetime
from app.retweet_graphs_v2.k_days.generator import DateRangeGenerator
def test_date_ranges():
    """DateRangeGenerator yields n_periods consecutive k-day windows, each
    ending at 23:59:59 on its last day."""
    gen = DateRangeGenerator(start_date="2020-01-01", k_days=3, n_periods=5)
    expected = [
        {"start_at": datetime(2020, 1, day, 0, 0), "end_at": datetime(2020, 1, day + 2, 23, 59, 59)}
        for day in (1, 4, 7, 10, 13)  # five windows, each starting 3 days after the last
    ]
    actual = [{"start_at": dr.start_at, "end_at": dr.end_at} for dr in gen.date_ranges]
    assert actual == expected
| 778 | 47.6875 | 95 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_tweet_recollection.py |
from app.tweet_recollection.collector import Collector
def test_recollection():
    """Collector keeps its batch size within bounds and exposes the expected
    workflow methods."""
    collector = Collector()
    #assert collector.limit == 100000
    #assert collector.batch_size == 100
    assert collector.batch_size <= 100
    assert collector.batch_size <= collector.limit
    expected_methods = [
        "perform",
        "fetch_remaining_status_ids",
        "lookup_statuses",
        "save_statuses",
        "save_urls",
    ]
    for method_name in expected_methods:
        assert method_name in dir(collector)
| 511 | 25.947368 | 54 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_tokenizers.py |
import re
from app.bot_communities.tokenizers import Tokenizer, SpacyTokenizer, ALPHANUMERIC_PATTERN, TWITTER_ALPHANUMERIC_PATTERN
def test_string_cleaning_keeps_tags_and_handles():
    """The Twitter-aware pattern preserves '#' and '@' prefixes, while the
    plain alphanumeric pattern strips them."""
    raw_text = "#HELLO @you http://hello.you ya know?"
    plain = re.sub(ALPHANUMERIC_PATTERN, "", raw_text)
    twitter_aware = re.sub(TWITTER_ALPHANUMERIC_PATTERN, "", raw_text)
    assert plain == 'HELLO you httphelloyou ya know'
    assert twitter_aware == '#HELLO @you httphelloyou ya know'
def test_tokenizers():
    """Pins the output of each tokenization strategy on representative texts:
    basic tokens, Porter stems, custom stems, spaCy lemmas, named entities,
    hashtags, handles, and the empty-string edge case."""
    tokenizer = Tokenizer()
    spacy_tokenizer = SpacyTokenizer()
    status_text = "Welcome to New York. Welcoming isn't it? :-D"
    # stopwords ("to", "isn't", "it") and the emoticon are dropped by all strategies
    assert tokenizer.basic_tokens(status_text) == ['welcome', 'new', 'york', 'welcoming']
    assert tokenizer.porter_stems(status_text) == ['welcom', 'new', 'york', 'welcom']
    assert tokenizer.custom_stems(status_text) == ['welcome', 'new', 'york', 'welcoming']
    assert spacy_tokenizer.custom_stem_lemmas(status_text) == ['welcome', 'new', 'york', 'welcome']
    assert [t.text for t in spacy_tokenizer.entity_tokens(status_text)] == ["New York"]
    # hashtags are extracted case-preserved regardless of trailing punctuation;
    # URL fragments are not mistaken for tags
    status_text = "#HELLO #HELLO, #HELLO. #HELLO; #hello are you there? lol www.yo.com/#message"
    assert tokenizer.hashtags(status_text) == ["#HELLO", "#HELLO", "#HELLO", "#HELLO", "#HELLO"]
    status_text = "come @me bro"
    assert tokenizer.handles(status_text) == ["@ME"]
    # a bare "@" or "#" with surrounding spaces is not a handle/tag
    status_text = "STROKE SURVIVOR, MOTHER, GRANDMOTHER, ### LIBERAL DEM, AM THAT NICE LADY @ CHURCH"
    assert tokenizer.handles(status_text) == []
    assert tokenizer.hashtags(status_text) == []
    #user_descriptions = None
    user_descriptions = ""
    assert spacy_tokenizer.custom_stem_lemmas(user_descriptions) == []
| 1,663 | 43.972973 | 120 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_csv_grapher.py | import os
import pandas
from networkx import DiGraph, Graph
# columns: screen_name, friend_1, friend_2, friend_3, friend_4, etc...
#CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "..", "data", "example_network.csv")
# fixture CSV living alongside the tests (contents described in test_nodes_and_edges)
MOCK_CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "data", "mock_network.csv")
def compile_nodes_and_edges(screen_names, csv_filepath=None):
    """Parse a follower CSV (each row: screen_name, friend_1, friend_2, ...)
    into graph inputs.

    Each edge tuple is like... "0 follows 1".

    Params:
        screen_names (list of str): the users to include as nodes.
        csv_filepath (str): path to the CSV; defaults to MOCK_CSV_FILEPATH.

    Returns (nodes, edges): nodes is a dict mapping each screen name to 1;
        edges is a list of (user, friend) tuples restricted to friends that
        are themselves in the node set.
    """
    if csv_filepath is None:
        # resolved lazily so the module-level constant isn't baked in at def-time
        csv_filepath = MOCK_CSV_FILEPATH
    nodes = dict.fromkeys(screen_names, 1)
    edges = []
    with open(csv_filepath) as csv_file:
        for line in csv_file:
            user_name, *friend_names = line.strip("\n").split(",")
            for friend_name in friend_names:
                if friend_name in nodes:
                    #edges.append((friend_name, user_name)) # 0 is followed by 1
                    edges.append((user_name, friend_name)) # 0 follows 1
    return nodes, edges
def generate_graph(edges):
    """
    Converts edges into a networkx digraph object.

    Param edges (list of tuples)
    ... like [('A', 'B'), ('B', 'C'), ('A', 'C'), ('E', 'D'), ('D', 'E')]
    ... where 'B' follows 'A', 'C' follows 'B', etc.

    Returns graph (networkx.classes.digraph.DiGraph)
    """
    graph = DiGraph()
    for source, recipient in edges:
        # source: the friend / followed account; recipient: the user / follower
        graph.add_node(source)
        graph.add_node(recipient)
        graph.add_edge(source, recipient)
    return graph
def test_nodes_and_edges():
    """Parses the mock network CSV and pins the resulting nodes and edges."""
    #
    # Given:
    # A doesn't follow anyone
    # B follows A
    # C follows B and A
    # D and E follow eachother
    #
    # NOTE(review): the "Given" comment above appears stale — per the asserted
    # edges below, 'A' follows 'B', 'C' and 'D' (compile_nodes_and_edges emits
    # "user follows friend" tuples). Confirm against data/mock_network.csv.
    df = pandas.read_csv(MOCK_CSV_FILEPATH, header=None)
    screen_names = df[0].tolist()  # first CSV column holds the user doing the following
    nodes, edges = compile_nodes_and_edges(screen_names, csv_filepath=MOCK_CSV_FILEPATH)
    assert nodes == {'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'F': 1}
    assert edges == [
        ('A', 'B'), ('A', 'C'), ('A', 'D'), # "A" follows "B", "C", and "D"
        ('B', 'C'), ('B', 'D'), # "B" follows "C" and "D"
        ('C', 'D'), # "C" follows "D"
        ('D', 'C'), # "D" follows "C"
        ('E', 'F') # "E" follows "F" and "F" follows no-one
    ]
def test_graph_generation():
    """A 5-edge edge list yields a DiGraph with 5 edges over 5 distinct nodes."""
    sample_edges = [('A', 'B'), ('B', 'C'), ('A', 'C'), ('E', 'D'), ('D', 'E')]
    graph = generate_graph(sample_edges)
    assert isinstance(graph, DiGraph)
    assert len(graph.nodes) == 5
    assert len(graph.edges) == 5
def test_undirected():
    # https://networkx.github.io/documentation/latest/reference/classes/generated/networkx.DiGraph.to_undirected.html#networkx.DiGraph.to_undirected
    digraph = generate_graph([('A', 'B'), ('B', 'A')])
    undirected = digraph.to_undirected()
    assert isinstance(digraph, DiGraph)
    assert isinstance(undirected, Graph)
    assert len(digraph.nodes) == 2
    assert len(undirected.nodes) == 2
    # the two reciprocal directed edges collapse into one undirected edge
    assert len(digraph.edges) == 2
    assert len(undirected.edges) == 1
def test_duplication():
    """Adding the same node twice is a no-op — nodes are unique by identity."""
    graph = DiGraph()
    for _ in range(2):
        graph.add_node("A")
    assert len(graph.nodes) == 1
| 3,331 | 35.217391 | 148 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_botcode.py |
from networkx import DiGraph
from app.botcode.network_classifier_helper import (ALPHA, LAMBDA_1, LAMBDA_2, EPSILON,
compute_link_energy, compile_energy_graph, parse_bidirectional_links)
from app.botcode.investigation import classify_bot_probabilities
from conftest import compile_mock_rt_graph
def test_default_hyperparams():
    # should match the default values described in the botcode README file
    assert (ALPHA, LAMBDA_1, LAMBDA_2, EPSILON) == ([1.0, 100.0, 100.0], 0.8, 0.6, 0.001)
def test_link_energy_nonactivation():
    """With low retweet counts and the default alpha, the link energy function
    does not activate (all-zero energy); a softer alpha yields positive energy."""
    #
    # setup
    #
    graph = compile_mock_rt_graph([
        # add some examples of users retweeting others:
        {"user_screen_name": "user1", "retweet_user_screen_name": "leader1", "retweet_count": 4},
        {"user_screen_name": "user2", "retweet_user_screen_name": "leader1", "retweet_count": 6},
        {"user_screen_name": "user3", "retweet_user_screen_name": "leader2", "retweet_count": 4},
        {"user_screen_name": "user4", "retweet_user_screen_name": "leader2", "retweet_count": 2},
        {"user_screen_name": "user5", "retweet_user_screen_name": "leader3", "retweet_count": 4},
        # add some examples of users retweeting eachother:
        {"user_screen_name": "colead1", "retweet_user_screen_name": "colead2", "retweet_count": 3},
        {"user_screen_name": "colead2", "retweet_user_screen_name": "colead1", "retweet_count": 2},
        {"user_screen_name": "colead3", "retweet_user_screen_name": "colead4", "retweet_count": 1},
        {"user_screen_name": "colead4", "retweet_user_screen_name": "colead3", "retweet_count": 4}
    ])
    in_degrees = dict(graph.in_degree(weight="rt_count")) # users receiving retweets
    out_degrees = dict(graph.out_degree(weight="rt_count")) # users doing the retweeting
    assert in_degrees == {'user1': 0, 'leader1': 10.0, 'user2': 0, 'user3': 0, 'leader2': 6.0, 'user4': 0, 'user5': 0, 'leader3': 4.0, 'colead1': 2.0, 'colead2': 3.0, 'colead3': 4.0, 'colead4': 1.0}
    assert out_degrees == {'user1': 4.0, 'leader1': 0, 'user2': 6.0, 'user3': 4.0, 'leader2': 0, 'user4': 2.0, 'user5': 4.0, 'leader3': 0, 'colead1': 3.0, 'colead2': 2.0, 'colead3': 1.0, 'colead4': 4.0}
    #
    # w/o sufficient number of retweets, given default hyperparams, not enough to activate, energy is zero
    #
    energy = compute_link_energy('colead1', 'colead2', 3.0, in_degrees, out_degrees, alpha=[1,100,100])
    assert energy == [0.0, 0.0, 0.0, 0.0]
    assert sum(energy) == 0
    #
    # w/ sufficient number of retweets, given different hyperparams, energy is positive
    #
    energy = compute_link_energy('colead1', 'colead2', 3.0, in_degrees, out_degrees, alpha=[1,10,10])
    assert energy == [0.01676872682112003, 0.027947878035200054, 0.01120709909211522, 0.022358302428160046]
    assert sum(energy) > 0
#
# Use the Mock RT Graph (see conftest.py)
#
def test_mock_rt_graph(mock_rt_graph):
    """Sanity-check the conftest fixture's weighted degrees.

    NOTE(review): from the asserted counts, mock_rt_graph appears to be the
    nonactivation graph above with retweet counts scaled x10 — confirm in conftest.
    """
    in_degrees = dict(mock_rt_graph.in_degree(weight="rt_count")) # users receiving retweets
    out_degrees = dict(mock_rt_graph.out_degree(weight="rt_count")) # users doing the retweeting
    assert in_degrees == {'user1': 0, 'leader1': 100.0, 'user2': 0, 'user3': 0, 'leader2': 60.0, 'user4': 0, 'user5': 0, 'leader3': 40.0, 'colead1': 20.0, 'colead2': 30.0, 'colead3': 40.0, 'colead4': 10.0}
    assert out_degrees == {'user1': 40.0, 'leader1': 0, 'user2': 60.0, 'user3': 40.0, 'leader2': 0, 'user4': 20.0, 'user5': 40.0, 'leader3': 0, 'colead1': 30.0, 'colead2': 20.0, 'colead3': 10.0, 'colead4': 40.0}
def test_link_energy(mock_rt_graph):
    """With the fixture's larger retweet counts, the default hyperparams are
    enough to activate the link energy function."""
    #
    # w/ sufficient number of retweets, given default hyperparams, energy is positive
    #
    in_degrees = dict(mock_rt_graph.in_degree(weight="rt_count")) # users receiving retweets
    out_degrees = dict(mock_rt_graph.out_degree(weight="rt_count")) # users doing the retweeting
    energy = compute_link_energy('colead1', 'colead2', 30.0, in_degrees, out_degrees, alpha=[1,100,100])
    assert energy == [0.16768726821120034, 0.2794787803520006, 0.1120709909211522, 0.22358302428160048]
    assert sum(energy) > 0
def test_energy_grapher(mock_rt_graph):
    """End-to-end pin of the energy-graph pipeline: graph inspection,
    bidirectional link parsing, per-link energies, and compile_energy_graph's
    outputs (energy graph, bot list `pl`, and per-user data)."""
    #
    # setup and inspection
    #
    assert list(mock_rt_graph.edges(data=True)) == [
        ('user1', 'leader1', {'rt_count': 40.0}),
        ('user2', 'leader1', {'rt_count': 60.0}),
        ('user3', 'leader2', {'rt_count': 40.0}),
        ('user4', 'leader2', {'rt_count': 20.0}),
        ('user5', 'leader3', {'rt_count': 40.0}),
        ('colead1', 'colead2', {'rt_count': 30.0}),
        ('colead2', 'colead1', {'rt_count': 20.0}),
        ('colead3', 'colead4', {'rt_count': 10.0}),
        ('colead4', 'colead3', {'rt_count': 40.0})
    ]
    assert sorted(list(mock_rt_graph.nodes)) == [
        'colead1', 'colead2', 'colead3', 'colead4',
        'leader1', 'leader2', 'leader3',
        'user1', 'user2', 'user3', 'user4', 'user5'
    ]
    in_degrees = dict(mock_rt_graph.in_degree(weight="rt_count")) # users receiving retweets
    out_degrees = dict(mock_rt_graph.out_degree(weight="rt_count")) # users doing the retweeting
    assert in_degrees == {
        'user1': 0, 'leader1': 100.0, 'user2': 0, 'user3': 0,
        'leader2': 60.0, 'user4': 0, 'user5': 0, 'leader3': 40.0,
        'colead1': 20.0, 'colead2': 30.0, 'colead3': 40.0, 'colead4': 10.0
    }
    assert out_degrees == {
        'user1': 40.0, 'leader1': 0, 'user2': 60.0, 'user3': 40.0,
        'leader2': 0, 'user4': 20.0, 'user5': 40.0, 'leader3': 0,
        'colead1': 30.0, 'colead2': 20.0, 'colead3': 10.0, 'colead4': 40.0
    }
    # this was an original intention in the original function, for all graph nodes to be represented in both in and out degrees
    for node in mock_rt_graph.nodes():
        assert node in in_degrees.keys()
        assert node in out_degrees.keys()
    # each link row: [retweeter, retweeted, ->exists, <-exists, ->weight, <-weight]
    links = parse_bidirectional_links(mock_rt_graph)
    assert links == [
        ['user1', 'leader1', True, False, 40.0, 0],
        ['user2', 'leader1', True, False, 60.0, 0],
        ['user3', 'leader2', True, False, 40.0, 0],
        ['user4', 'leader2', True, False, 20.0, 0],
        ['user5', 'leader3', True, False, 40.0, 0],
        ['colead1', 'colead2', True, True, 30.0, 20.0],
        ['colead2', 'colead1', True, True, 20.0, 30.0],
        ['colead3', 'colead4', True, True, 10.0, 40.0],
        ['colead4', 'colead3', True, True, 40.0, 10.0]
    ] # represents the number of times each pair of users has retweeted eachother
    energies = [(link[0], link[1], compute_link_energy(link[0], link[1], link[4], in_degrees, out_degrees)) for link in links]
    assert energies == [
        ('user1', 'leader1', [4.378212571352552, 7.297020952254254, 2.926105401853955, 5.837616761803403]),
        ('user2', 'leader1', [12.212770724430582, 20.35461787405097, 8.162201767494437, 16.283694299240775]),
        ('user3', 'leader2', [2.466816598014041, 4.111360996690069, 1.6486557596727172, 3.289088797352055]),
        ('user4', 'leader2', [0.11179151214080021, 0.1863191869013337, 0.0747139939474348, 0.14905534952106697]),
        ('user5', 'leader3', [1.1382209562616026, 1.8970349271026712, 0.760711005768171, 1.517627941682137]),
        ('colead1', 'colead2', [0.16768726821120034, 0.2794787803520006, 0.1120709909211522, 0.22358302428160048]),
        ('colead2', 'colead1', [0.004024201565597737, 0.006707002609329562, 0.0026895080463411537, 0.00536560208746365]),
        ('colead3', 'colead4', [0.0, 0, 0.0, 0.0]), # number of retweets not sufficient to activate the energy function
        ('colead4', 'colead3', [1.1382209562616026, 1.8970349271026712, 0.760711005768171, 1.517627941682137])
    ] # people doing the retweeting
    positive_energies = [e for e in energies if sum(e[2]) > 0]
    assert positive_energies == [
        ('user1', 'leader1', [4.378212571352552, 7.297020952254254, 2.926105401853955, 5.837616761803403]),
        ('user2','leader1', [12.212770724430582, 20.35461787405097, 8.162201767494437, 16.283694299240775]),
        ('user3','leader2', [2.466816598014041, 4.111360996690069, 1.6486557596727172, 3.289088797352055]),
        ('user4', 'leader2', [0.11179151214080021, 0.1863191869013337, 0.0747139939474348, 0.14905534952106697]),
        ('user5', 'leader3', [1.1382209562616026, 1.8970349271026712, 0.760711005768171, 1.517627941682137]),
        ('colead1', 'colead2', [0.16768726821120034, 0.2794787803520006, 0.1120709909211522, 0.22358302428160048]),
        ('colead2', 'colead1',[0.004024201565597737, 0.006707002609329562, 0.0026895080463411537, 0.00536560208746365]),
        ('colead4', 'colead3', [1.1382209562616026, 1.8970349271026712, 0.760711005768171, 1.517627941682137])
    ] # people doing the most retweeting
    prior_probabilities = dict.fromkeys(list(mock_rt_graph.nodes), 0.5)
    assert prior_probabilities == {
        'user1': 0.5, 'leader1': 0.5, 'user2': 0.5, 'user3': 0.5,
        'leader2': 0.5, 'user4': 0.5, 'user5': 0.5, 'leader3': 0.5,
        'colead1': 0.5, 'colead2': 0.5, 'colead3': 0.5, 'colead4': 0.5
    }
    #
    # it produces an energy graph and other important results:
    #
    energy_graph, pl, user_data = compile_energy_graph(mock_rt_graph, prior_probabilities,
        positive_energies, out_degrees, in_degrees)
    assert isinstance(energy_graph, DiGraph)
    assert list(energy_graph.nodes) == [
        'user1', 'leader1', 'user2', 'user3', 'leader2', 'user4', 'user5', 'leader3',
        'colead1', 'colead2', 'colead4', 'colead3', 0, 1
    ] # includes all original graph nodes, as well as 0 and 1
    assert list(energy_graph.edges(data=True)) == [
        ('user1', 'leader1', {'capacity': 0.0036485104761272424}),
        ('user1', 0, {'capacity': 2.519226673861572}),
        ('leader1', 'user1', {'capacity': 0.0036485104761272424}),
        ('leader1', 'user2', {'capacity': 0.010177308937025842}),
        ('leader1', 0, {'capacity': 15.894635625321241}),
        ('user2', 'leader1', {'capacity': 0.010177308937025842}),
        ('user2', 0, {'capacity': 5.7868903035412}),
        ('user3', 'leader2', {'capacity': 0.002055680498344925}),
        ('user3', 0, {'capacity': 1.7220152699816351}),
        ('leader2', 'user3', {'capacity': 0.002055680498344925}),
        ('leader2', 'user4', {'capacity': 9.315959345064517e-05}),
        ('leader2', 0, {'capacity': 3.055796861489319}),
        ('user4', 'leader2', {'capacity': 9.315959345064517e-05}),
        ('user4', 0, {'capacity': 0.7397735570820041}),
        ('user5', 'leader3', {'capacity': 0.0009485174635512905}),
        ('user5', 0, {'capacity': 1.1678801710673887}),
        ('leader3', 'user5', {'capacity': 0.0009485174635512905}),
        ('leader3', 0, {'capacity': 1.736042131734639}),
        ('colead1', 'colead2', {'capacity': 0.00014309289148063935}),
        ('colead1', 0, {'capacity': 0.7667739200275123}),
        ('colead2', 'colead1', {'capacity': 0.00014309289148063935}),
        ('colead2', 0, {'capacity': 0.8484690674614424}),
        ('colead4', 'colead3', {'capacity': 0.0009485174635512905}),
        ('colead4', 0, {'capacity': 1.1678801710673887}),
        ('colead3', 'colead4', {'capacity': 0.0009485174635512905}),
        ('colead3', 0, {'capacity': 1.736042131734639}),
        (1, 'user1', {'capacity': 3.974982353836296}),
        (1, 'leader1', {'capacity': 4.847805914212305}),
        (1, 'user2', {'capacity': 9.84763656941437}),
        (1, 'user3', {'capacity': 2.5422317888213035}),
        (1, 'leader2', {'capacity': 1.3388736281445532}),
        (1, 'user4', {'capacity': 0.7769442348688201}),
        (1, 'user5', {'capacity': 1.5463386390243716}),
        (1, 'leader3', {'capacity': 0.9781766783571215}),
        (1, 'colead1', {'capacity': 0.8198504891653093}),
        (1, 'colead2', {'capacity': 0.7381553417313793}),
        (1, 'colead3', {'capacity': 0.9781766783571215}),
        (1, 'colead4', {'capacity': 1.5463386390243716})
    ] # the extra 1s and 0s seem to be for baseline comparisons when
    assert sorted(pl) == ['colead1', 'colead4', 'user1', 'user2', 'user3', 'user4', 'user5'] # seems to represent the users who retweet but don't get retweeted (a.k.a the bot list)
    assert user_data == {
        'user1': {'user_id': 'user1', 'out': 40.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'leader1': {'user_id': 'leader1', 'out': 0, 'in': 100.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'user2': {'user_id': 'user2', 'out': 60.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'user3': {'user_id': 'user3', 'out': 40.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'leader2': {'user_id': 'leader2', 'out': 0, 'in': 60.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'user4': {'user_id': 'user4', 'out': 20.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'user5': {'user_id': 'user5', 'out': 40.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'leader3': {'user_id': 'leader3', 'out': 0, 'in': 40.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'colead1': {'user_id': 'colead1', 'out': 30.0, 'in': 20.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'colead2': {'user_id': 'colead2', 'out': 20.0, 'in': 30.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'colead3': {'user_id': 'colead3', 'out': 10.0, 'in': 40.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'colead4': {'user_id': 'colead4', 'out': 40.0, 'in': 10.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0}
    }
def test_bot_probabilities(mock_rt_graph):
    """The classify_bot_probabilities bridge function produces the same bot
    probabilities and user data as running the pipeline steps manually."""
    expected_bot_probabilities = {
        'colead1': 1,
        'colead2': 0,
        'colead3': 0,
        'colead4': 1,
        'leader1': 0,
        'leader2': 0,
        'leader3': 0,
        'user1': 1,
        'user2': 1,
        'user3': 1,
        'user4': 1,
        'user5': 1
    }
    expected_user_data = {
        'user1': {'user_id': 'user1', 'out': 40.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 1},
        'leader1': {'user_id': 'leader1', 'out': 0, 'in': 100.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'user2': {'user_id': 'user2', 'out': 60.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 1},
        'user3': {'user_id': 'user3', 'out': 40.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 1},
        'leader2': {'user_id': 'leader2', 'out': 0, 'in': 60.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'user4': {'user_id': 'user4', 'out': 20.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 1},
        'user5': {'user_id': 'user5', 'out': 40.0, 'in': 0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 1},
        'leader3': {'user_id': 'leader3', 'out': 0, 'in': 40.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'colead1': {'user_id': 'colead1', 'out': 30.0, 'in': 20.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 1},
        'colead2': {'user_id': 'colead2', 'out': 20.0, 'in': 30.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'colead3': {'user_id': 'colead3', 'out': 10.0, 'in': 40.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 0},
        'colead4': {'user_id': 'colead4', 'out': 40.0, 'in': 10.0, 'old_prob': 0.5, 'phi_0': 0.6931471805599453, 'phi_1': 0.6931471805599453, 'prob': 0, 'clustering': 1}
    }
    #
    # setup
    #
    in_degrees = dict(mock_rt_graph.in_degree(weight="rt_count")) # users receiving retweets
    out_degrees = dict(mock_rt_graph.out_degree(weight="rt_count")) # users doing the retweeting
    links = parse_bidirectional_links(mock_rt_graph)
    energies = [(link[0], link[1], compute_link_energy(link[0], link[1], link[4], in_degrees, out_degrees)) for link in links]
    positive_energies = [e for e in energies if sum(e[2]) > 0]
    prior_probabilities = dict.fromkeys(list(mock_rt_graph.nodes), 0.5)
    energy_graph, pl, user_data = compile_energy_graph(mock_rt_graph, prior_probabilities, positive_energies, out_degrees, in_degrees)
    #
    # it assigns bot probabilities to 1 if user is in bot list returned from energy grapher function
    #
    bot_probabilities = dict.fromkeys(list(user_data.keys()), 0) # start with defaults of 0 for each user
    for user in pl:
        user_data[user]["clustering"] = 1
        bot_probabilities[user] = 1
    assert bot_probabilities == expected_bot_probabilities
    assert user_data == expected_user_data
    #
    # now test the bridge function does the same thing!
    #
    bot_probabilities, user_data = classify_bot_probabilities(mock_rt_graph)
    assert bot_probabilities == expected_bot_probabilities
    assert user_data == expected_user_data
| 17,960 | 60.091837 | 211 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_bq_grapher.py | import os
from networkx import read_gpickle
from app.friend_graphs.bq_grapher import BigQueryGrapher
from app.bq_service import BigQueryService
def test_network_grapher(mock_graph, expected_nodes, expected_edges):
    """A graph written to disk by the grapher can be read back intact.

    Params (conftest fixtures): mock_graph is the graph to persist;
    expected_nodes / expected_edges are its expected contents.
    """
    graph_filepath = os.path.join(os.path.dirname(__file__), "data", "mock_graph.gpickle")
    # start from a clean slate so the write below is actually exercised
    if os.path.isfile(graph_filepath):
        os.remove(graph_filepath)
    assert not os.path.isfile(graph_filepath)
    grapher = BigQueryGrapher(bq_service=BigQueryService()) # TODO: mock grapher.perform() method to return the graph, instead of initializing with it
    grapher.graph = mock_graph
    assert list(grapher.graph.nodes) == expected_nodes
    assert list(grapher.graph.edges) == expected_edges
    grapher.write_graph_to_file(graph_filepath)
    assert os.path.isfile(graph_filepath)
    # round-trip: re-reading the pickle yields the same nodes and edges
    reconstituted_graph = read_gpickle(graph_filepath)
    assert list(reconstituted_graph.nodes) == expected_nodes
    assert list(reconstituted_graph.edges) == expected_edges
| 994 | 40.458333 | 150 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_pg_service.py |
# test bot screen name mathing strategy, that it case-insensitively finds a given screen name in an array of screen names:
#sql = """
# SELECT
# 'ACLU' as screen_name
#
# ,'ACLU' ilike any('{user1, aclu}'::text[]) as t1 -- TRUE
# ,'ACLU' ilike any('{user1, ACLU}'::text[]) as t2 -- TRUE
# ,'ACLU' ilike any('{user1, acLu}'::text[]) as t3 -- TRUE
#
# ,'ACLU' ilike any('{user1, user2}'::text[]) as f1 -- FALSE
# ,'ACLU' ilike any('{user1, acluser1}'::text[]) as f2 -- FALSE
# ,'ACLU' ilike any('{user1, aclu_ser1}'::text[]) as f3 -- FALSE
# ,'ACLU' ilike any('{user1, aclu ser1}'::text[]) as f4 -- FALSE
# """
| 640 | 34.611111 | 122 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_toxicity_model_manager.py | from detoxify import Detoxify
import numpy as np
from transformers import BertForSequenceClassification, BertTokenizer
from pandas import DataFrame
from conftest import toxicity_texts
def test_packaged_model():
    """The packaged Detoxify "original" model reproduces pinned per-class
    scores for the two fixture texts (see conftest.toxicity_texts)."""
    model = Detoxify("original")
    results = model.predict(toxicity_texts)
    # keys are class names; each value lists one score per input text
    assert results == {
        'toxicity': [0.12640126049518585, 0.0008546802564524114],
        'severe_toxicity': [0.00022532008006237447, 0.00011462702241260558],
        'obscene': [0.0018298450158908963, 0.00016588227299507707],
        'threat': [0.0005070280167274177, 0.00013761487207375467],
        'insult': [0.009287197142839432, 0.0001857876923168078],
        'identity_hate': [0.0018323149997740984, 0.00015746793360449374]
    }
def test_reconstituted_model(original_model_manager):
    """A model reconstituted from checkpoint exposes the same metadata and
    produces the same scores (as ndarray, records, and DataFrame) as the
    packaged Detoxify model above."""
    mgr = original_model_manager
    assert mgr.tokenizer_name == "BertTokenizer"
    assert mgr.model_name == "BertForSequenceClassification"
    assert mgr.model_type == "bert-base-uncased"
    assert mgr.class_names == ['toxicity', 'severe_toxicity', 'obscene', 'threat', 'insult', 'identity_hate']
    assert mgr.num_classes == 6
    assert isinstance(mgr.model, BertForSequenceClassification)
    assert isinstance(mgr.tokenizer, BertTokenizer)
    # scores: one row per input text, one column per class (same pinned values
    # as test_packaged_model, transposed)
    scores = mgr.predict_scores(toxicity_texts)
    assert isinstance(scores, np.ndarray)
    assert scores.shape == (2, 6)
    assert scores[0].tolist() == [0.12640126049518585, 0.00022532008006237447, 0.0018298450158908963, 0.0005070280167274177, 0.009287197142839432, 0.0018323149997740984]
    assert scores[1].tolist() == [0.0008546802564524114, 0.00011462702241260558, 0.00016588227299507707, 0.00013761487207375467, 0.0001857876923168078, 0.00015746793360449374]
    # records: one dict per text, with the text plus per-class scores
    records = mgr.predict_records(toxicity_texts)
    assert records == [
        {
            'text': 'RT @realDonaldTrump: Crazy Nancy Pelosi should spend more time in her decaying city and less time on the Impeachment Hoax! https://t.co/eno…',
            'toxicity': 0.12640126049518585, 'severe_toxicity': 0.00022532008006237447, 'obscene': 0.0018298450158908963, 'threat': 0.0005070280167274177, 'insult': 0.009287197142839432, 'identity_hate': 0.0018323149997740984
        }, {
            'text': 'RT @SpeakerPelosi: The House cannot choose our impeachment managers until we know what sort of trial the Senate will conduct. President Tr…',
            'toxicity': 0.0008546802564524114, 'severe_toxicity': 0.00011462702241260558, 'obscene': 0.00016588227299507707, 'threat': 0.00013761487207375467, 'insult': 0.0001857876923168078, 'identity_hate': 0.00015746793360449374
        }
    ]
    df = mgr.predict_df(toxicity_texts)
    assert isinstance(df, DataFrame)
| 2,689 | 53.897959 | 231 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_api_v1.py | import json
import pytest
from conftest import CI_ENV
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_user_tweets(api_client):
    """The user_tweets endpoint returns a non-empty JSON list of tweet records
    with the expected fields.
    """
    response = api_client.get('/api/v1/user_tweets/berniesanders')
    tweets = json.loads(response.data)
    assert response.status_code == 200
    assert isinstance(tweets, list)
    assert any(tweets)
    first_tweet = tweets[0]
    assert isinstance(first_tweet, dict)
    assert sorted(first_tweet.keys()) == ['created_at', 'score_bert', 'score_lr', 'score_nb', 'status_id', 'status_text']
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_users_most_retweeted(api_client):
    """The users_most_followed endpoint returns 500 user records by default,
    and honors the 'limit' query param.
    NOTE(review): the function name says "retweeted" but the endpoint under
    test is users_most_followed -- consider renaming for clarity.
    """
    expected_keys = ['avg_score_bert', 'avg_score_lr', 'avg_score_nb', 'category', 'follower_count', 'screen_name', 'status_count']
    response = api_client.get('/api/v1/users_most_followed')
    users = json.loads(response.data)
    assert response.status_code == 200
    assert isinstance(users, list)
    assert len(users) == 500
    assert isinstance(users[0], dict)
    assert sorted(list(users[0].keys())) == expected_keys
    # a smaller limit shrinks the response accordingly
    response = api_client.get('/api/v1/users_most_followed?limit=3')
    users = json.loads(response.data)
    assert len(users) == 3
    assert sorted(list(users[0].keys())) == expected_keys
    # you also need to know these need to be sorted, so the limit really returns the "top" users.
    # they happen to be sorted that way in the table, but need to explicitly sort. thanks.
| 1,543 | 38.589744 | 131 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_number_decorators.py |
from app.decorators.number_decorators import fmt_n, fmt_pct
def test_large_number_decoration():
    """fmt_n rounds to a whole number and adds thousands separators."""
    formatted = fmt_n(1_234_567.89012345)
    assert formatted == '1,234,568'
def test_percent_decoration():
    """fmt_pct converts a fraction to a percentage string with two decimals."""
    formatted = fmt_pct(0.97777777)
    assert formatted == '97.78%'
| 226 | 21.7 | 59 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_datetime_decorators.py |
from datetime import datetime
from app.decorators.datetime_decorators import logstamp, dt_to_date, dt_to_s, s_to_dt
from app.decorators.datetime_decorators import to_ts as dt_to_ts
from app.decorators.datetime_decorators import fmt_date as ts_to_date
from app.decorators.datetime_decorators import to_dt as ts_to_dt
# the same moment in three representations: datetime object, unix timestamp, string
# NOTE: the string form omits microseconds, so string round-trips compare
# against dt.replace(microsecond=0) (see test_string_decorators below)
dt = datetime(2020, 7, 26, 10, 29, 49, 828663)
ts = 1595759389.828663
s = "2020-07-26 10:29:49"
def test_logstamp():
    """logstamp returns a string containing the current date components.

    NOTE: could flake if run exactly as the day rolls over.
    """
    stamp = logstamp()
    assert isinstance(stamp, str)
    for attr in ("year", "month", "day"):
        assert str(getattr(datetime.now(), attr)) in stamp
def test_datetime_decorators():
    """A datetime converts to a date string, a full string, and a timestamp."""
    conversions = [
        (dt_to_date(dt), '2020-07-26'),
        (dt_to_s(dt), s),
        (dt_to_ts(dt), ts),
    ]
    for actual, expected in conversions:
        assert actual == expected
def test_timestamp_decorators():
    """A unix timestamp converts to a date string and back to the datetime."""
    date_str = ts_to_date(ts)
    assert date_str == '2020-07-26'
    converted = ts_to_dt(ts)
    assert converted == dt
def test_string_decorators():
    """Parsing the string yields the datetime, minus the microseconds the string omits."""
    expected = dt.replace(microsecond=0)
    assert s_to_dt(s) == expected
def test_inverse_conversions():
    """Converting between representations and back yields the original value."""
    # timestamps and datetime objects are mutual inverses
    assert dt_to_ts(dt) == ts
    assert ts_to_dt(ts) == dt
    # string -> datetime -> string round-trip
    round_tripped = dt_to_s(s_to_dt(s))
    assert round_tripped == s
| 1,360 | 30.651163 | 113 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/test/test_bq_service.py |
import pytest
from datetime import datetime
from conftest import CI_ENV
from app.bq_service import BigQueryService, split_into_batches, generate_timestamp
def test_generate_timestamp():
    """generate_timestamp returns a string, with or without an explicit datetime."""
    assert isinstance(generate_timestamp(), str)
    now_stamp = generate_timestamp(datetime.now())
    assert isinstance(now_stamp, str)
    assert generate_timestamp(datetime(2021, 10, 31)) == '2021-10-31 00:00:00'
def test_split_into_batches():
    """Items split into consecutive batches; the final batch holds the remainder."""
    batches = list(split_into_batches(list(range(11)), 3))
    assert batches == [
        [0, 1, 2],
        [3, 4, 5],
        [6, 7, 8],
        [9, 10]
    ]
@pytest.mark.skipif(CI_ENV, reason="avoid issuing HTTP requests on CI")
def test_upload_in_batches():
    """Uploading more rows than the streaming-insert cap succeeds via batching."""
    bq_service = BigQueryService(dataset_name="impeachment_test")
    # when inserting more than 10,000 rows, is able to overcome error
    # "too many rows present in the request, limit: 10000":
    records = [
        {"start_date": "2020-01-01", "user_id": user_id, "bot_probability": .99}
        for user_id in range(1, 36000)
    ]
    errors = bq_service.upload_daily_bot_probabilities(records)
    assert not any(errors)
| 1,090 | 32.060606 | 110 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/gcs_file_renaming.py | import os
from dotenv import load_dotenv
from app import seek_confirmation
from app.gcs_service import GoogleCloudStorageService
load_dotenv()
EXISTING_DIRPATH = os.getenv("EXISTING_DIRPATH", default="storage/data/archived_graphs")
EXISTING_PATTERN = os.getenv("EXISTING_PATTERN") or EXISTING_DIRPATH # can customize pattern of files to move in the existing directory, like "storage/data/2020-", otherwise just move everything in the existing directory
NEW_DIRPATH = os.getenv("NEW_DIRPATH", default="storage/data/archived")
if __name__ == "__main__":
    #
    # RENAMING THINGS.
    # DO THIS AD-HOC, IF NECESSARY, FOR EXAMPLE IF YOU NEED TO ARCHIVE A BUNCH OF THINGS
    #
    # Lists all blobs in the bucket, previews the ones matching EXISTING_PATTERN
    # (excluding the directory placeholder itself), asks the operator to confirm,
    # then renames each blob into NEW_DIRPATH.
    gcs = GoogleCloudStorageService()
    blobs = list(gcs.bucket.list_blobs())
    blobs_to_rename = [blob for blob in blobs if EXISTING_PATTERN in blob.name and f"{EXISTING_PATTERN}/" != blob.name] # take all files in the dir, but not the dir itself!
    for blob in blobs_to_rename:
        print(blob)
    seek_confirmation()
    for blob in blobs_to_rename:
        #new_name = os.path.join("storage", "data", "archived_graphs", blob.name.split("storage/data")[1])
        # NOTE(review): blobs are matched on EXISTING_PATTERN but split on EXISTING_DIRPATH;
        # a custom EXISTING_PATTERN matching a name that lacks EXISTING_DIRPATH would make
        # the [1] below raise IndexError -- confirm intended usage.
        new_name = NEW_DIRPATH + blob.name.split(EXISTING_DIRPATH)[1] # take everything after the current dirpath
        #print(new_name, "...") #> 'storage/data/archived/2020-05-25-1905/metadata.json'
        new_blob = gcs.bucket.rename_blob(blob, new_name)
        print(new_blob)
| 1,442 | 38 | 220 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bq_service.py | from datetime import datetime, timedelta, timezone
import os
from functools import lru_cache
from pprint import pprint
from dotenv import load_dotenv
from google.cloud import bigquery
from google.cloud.bigquery import QueryJobConfig, ScalarQueryParameter
from pandas import DataFrame
from app import APP_ENV, seek_confirmation
from app.decorators.number_decorators import fmt_n
load_dotenv()
GOOGLE_APPLICATION_CREDENTIALS = os.getenv("GOOGLE_APPLICATION_CREDENTIALS") # implicit check by google.cloud (and keras)
PROJECT_NAME = os.getenv("BIGQUERY_PROJECT_NAME", default="tweet-collector-py")
DATASET_NAME = os.getenv("BIGQUERY_DATASET_NAME", default="impeachment_development") #> "_test" or "_production"
DESTRUCTIVE_MIGRATIONS = (os.getenv("DESTRUCTIVE_MIGRATIONS", default="false") == "true")
VERBOSE_QUERIES = (os.getenv("VERBOSE_QUERIES", default="false") == "true")
CLEANUP_MODE = (os.getenv("CLEANUP_MODE", default="true") == "true")
DEFAULT_START = "2019-12-02 01:00:00" # @deprectated, the "beginning of time" for the impeachment dataset. todo: allow customization via env var
DEFAULT_END = "2020-03-24 20:00:00" # @deprectated, the "end of time" for the impeachment dataset. todo: allow customization via env var
def generate_timestamp(dt=None):
    """Formats a datetime for storing in BigQuery; defaults to the current time.

    Param: dt (datetime) optional moment to format
    Returns: (str) like "2020-01-01 12:00:00"
    """
    if dt is None:
        dt = datetime.now()
    return dt.strftime("%Y-%m-%d %H:%M:%S")
def generate_temp_table_id():
    """Builds a table-id-safe suffix from the current time (second resolution)."""
    now = datetime.now()
    return now.strftime('%Y_%m_%d_%H_%M_%S')
def split_into_batches(my_list, batch_size=9000):
    """Yields successive slices of at most batch_size items from my_list.

    The final batch holds whatever remains, so it may be smaller.
    """ # h/t: https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    start = 0
    while start < len(my_list):
        yield my_list[start : start + batch_size]
        start += batch_size
class BigQueryService():
    def __init__(self, project_name=PROJECT_NAME, dataset_name=DATASET_NAME,
                    verbose=VERBOSE_QUERIES, destructive=DESTRUCTIVE_MIGRATIONS, cautious=True):
        """Wraps a BigQuery client scoped to a single dataset.

        Params:
            project_name (str) GCP project containing the dataset
            dataset_name (str) dataset addressed by all queries
            verbose (bool) when True, print SQL before executing it
            destructive (bool) when True, migrations DROP tables before CREATE
            cautious (bool) when True, prompt the operator to confirm before proceeding
        """
        self.project_name = project_name
        self.dataset_name = dataset_name
        self.dataset_address = f"{self.project_name}.{self.dataset_name}"
        # coerce to strict booleans (env-derived values may be truthy strings)
        self.verbose = (verbose == True)
        self.destructive = (destructive == True)
        self.cautious = (cautious == True)
        self.client = bigquery.Client()
        print("-------------------------")
        print("BIGQUERY SERVICE...")
        print("  DATASET ADDRESS:", self.dataset_address.upper())
        print("  DESTRUCTIVE MIGRATIONS:", self.destructive)
        print("  VERBOSE QUERIES:", self.verbose)
        if self.cautious:
            seek_confirmation()
@property
def metadata(self):
return {"dataset_address": self.dataset_address, "destructive": self.destructive, "verbose": self.verbose}
def execute_query(self, sql):
"""Param: sql (str)"""
if self.verbose:
print(sql)
job = self.client.query(sql)
return job.result()
def execute_query_in_batches(self, sql, temp_table_name=None):
"""Param: sql (str)"""
if self.verbose:
print(sql)
if not temp_table_name:
temp_table_id = generate_temp_table_id()
temp_table_name = f"{self.dataset_address}.temp_{temp_table_id}"
job_config = bigquery.QueryJobConfig(
priority=bigquery.QueryPriority.BATCH,
allow_large_results=True,
destination=temp_table_name
)
job = self.client.query(sql, job_config=job_config)
print("BATCH QUERY JOB:", type(job), job.job_id, job.state, job.location)
return job
def query_to_df(self, sql):
"""high-level wrapper to return a DataFrame"""
results = self.execute_query(sql)
records = [dict(row) for row in list(results)]
df = DataFrame(records)
return df
def insert_records_in_batches(self, table, records):
"""
Params:
table (table ID string, Table, or TableReference)
records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
#errors = self.client.insert_rows(table, rows_to_insert)
#> ... google.api_core.exceptions.BadRequest: 400 POST https://bigquery.googleapis.com/bigquery/v2/projects/.../tables/daily_bot_probabilities/insertAll:
#> ... too many rows present in the request, limit: 10000 row count: 36092.
#> ... see: https://cloud.google.com/bigquery/quotas#streaming_inserts
errors = []
batches = list(split_into_batches(rows_to_insert, batch_size=5000))
for batch in batches:
errors += self.client.insert_rows(table, batch)
return errors
    def delete_temp_tables_older_than(self, days=3):
        """Deletes all tables that:
            have "temp_" in their name (product of the batch jobs), and were
            created at least X days ago (safely avoid deleting tables being used by in-progress batch jobs)

        Prompts the operator for confirmation before deleting anything.
        Param: days (int) minimum table age, in days
        """
        cutoff_date = datetime.now(tz=timezone.utc) - timedelta(days=days)
        print("CUTOFF DATE:", cutoff_date)
        tables = list(self.client.list_tables(self.dataset_name)) # API call
        tables_to_delete = [t for t in tables if "temp_" in t.table_id and t.created < cutoff_date]
        print("TABLES TO DELETE:")
        pprint([t.table_id for t in tables_to_delete])
        seek_confirmation()
        print("DELETING...")
        for old_temp_table in tables_to_delete:
            print("  ", old_temp_table.table_id)
            self.client.delete_table(old_temp_table)
#def get_table(self, table_name):
# return self.client.get_table(f"{self.dataset_address}.{table_name}") # API call. cache it here once.
#
# COLLECTING TWEETS V2
#
    def migrate_topics_table(self):
        """Creates the topics table (dropping it first when destructive=True)."""
        print("MIGRATING TOPICS TABLE...")
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.topics`; "
        sql += f"""
            CREATE TABLE IF NOT EXISTS `{self.dataset_address}.topics` (
                topic STRING NOT NULL,
                created_at TIMESTAMP,
            );
        """
        return list(self.execute_query(sql))
    def migrate_tweets_table(self):
        """Creates the tweets table (dropping it first when destructive=True).
        One row per collected status, with denormalized user attributes.
        """
        print("MIGRATING TWEETS TABLE...")
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.tweets`; "
        sql += f"""
            CREATE TABLE IF NOT EXISTS `{self.dataset_address}.tweets` (
                status_id STRING,
                status_text STRING,
                truncated BOOLEAN,
                retweeted_status_id STRING,
                retweeted_user_id STRING,
                retweeted_user_screen_name STRING,
                reply_status_id STRING,
                reply_user_id STRING,
                is_quote BOOLEAN,
                geo STRING,
                created_at TIMESTAMP,
                user_id STRING,
                user_name STRING,
                user_screen_name STRING,
                user_description STRING,
                user_location STRING,
                user_verified BOOLEAN,
                user_created_at TIMESTAMP
            );
        """
        return list(self.execute_query(sql))
    @property
    @lru_cache(maxsize=None)
    # NOTE(review): lru_cache on a method holds a reference to self for the life
    # of the process; acceptable for this long-lived service object.
    def topics_table(self):
        return self.client.get_table(f"{self.dataset_address}.topics") # an API call (caches results for subsequent inserts)
    @property
    @lru_cache(maxsize=None)
    # cached so repeated inserts don't re-fetch the table reference
    def tweets_table(self):
        return self.client.get_table(f"{self.dataset_address}.tweets") # an API call (caches results for subsequent inserts)
    def fetch_topics(self):
        """Returns rows of the topics table (topic, created_at), oldest first."""
        sql = f"""
            SELECT topic, created_at
            FROM `{self.dataset_address}.topics`
            ORDER BY created_at;
        """
        return self.execute_query(sql)
def fetch_topic_names(self):
return [row.topic for row in self.fetch_topics()]
def append_topics(self, topics):
"""
Inserts topics unless they already exist.
Param: topics (list of dict)
"""
rows = self.fetch_topics()
existing_topics = [row.topic for row in rows]
new_topics = [topic for topic in topics if topic not in existing_topics]
if new_topics:
rows_to_insert = [[new_topic, generate_timestamp()] for new_topic in new_topics]
errors = self.client.insert_rows(self.topics_table, rows_to_insert)
return errors
else:
print("NO NEW TOPICS...")
return []
def append_tweets(self, tweets):
"""Param: tweets (list of dict)"""
rows_to_insert = [list(d.values()) for d in tweets]
errors = self.client.insert_rows(self.tweets_table, rows_to_insert)
return errors
#
# COLLECTING USER FRIENDS
#
    def migrate_populate_users(self):
        """
        Resulting table has a row for each user id / screen name combo
        (multiple rows per user id if they changed their screen name)

        Populated from the distinct user ids / screen names seen in tweets.
        """
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.users`; "
        sql += f"""
            CREATE TABLE IF NOT EXISTS `{self.dataset_address}.users` as (
                SELECT DISTINCT
                    user_id
                    ,user_screen_name as screen_name
                FROM `{self.dataset_address}.tweets`
                WHERE user_id IS NOT NULL AND user_screen_name IS NOT NULL
                ORDER BY 1
            );
        """
        results = self.execute_query(sql)
        return list(results)
    def migrate_user_friends(self):
        """Creates the user_friends table (dropping it first when destructive=True).
        Stores each user's collected friend list plus collection start/end times.
        """
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_friends`; "
        sql += f"""
            CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_friends` (
                user_id STRING,
                screen_name STRING,
                friend_count INT64,
                friend_names ARRAY<STRING>,
                start_at TIMESTAMP,
                end_at TIMESTAMP
            );
        """
        results = self.execute_query(sql)
        return list(results)
    def fetch_remaining_users(self, min_id=None, max_id=None, limit=None):
        """Returns rows (user_id, screen_name) for users whose friends have not
        yet been collected, optionally restricted to an id range and row limit.
        """
        sql = f"""
            SELECT
                u.user_id
                ,u.screen_name
            FROM `{self.dataset_address}.users` u
            LEFT JOIN `{self.dataset_address}.user_friends` f ON u.user_id = f.user_id
            WHERE f.user_id IS NULL
        """
        if min_id and max_id:
            sql += f"  AND CAST(u.user_id as int64) BETWEEN {int(min_id)} AND {int(max_id)} "
        sql += f"ORDER BY u.user_id "
        if limit:
            sql += f"LIMIT {int(limit)};"
        results = self.execute_query(sql)
        return list(results)
    @property
    @lru_cache(maxsize=None)
    # cached so repeated inserts don't re-fetch the table reference
    def user_friends_table(self):
        return self.client.get_table(f"{self.dataset_address}.user_friends") # an API call (caches results for subsequent inserts)
    def insert_user_friends(self, records):
        """
        Streams friend-collection results into the user_friends table.
        Param: records (list of dictionaries)
        Returns: a list of insert errors reported by the API
        """
        rows_to_insert = [list(d.values()) for d in records]
        #rows_to_insert = [list(d.values()) for d in records if any(d["friend_names"])] # doesn't store failed attempts. can try those again later
        #if any(rows_to_insert):
        errors = self.client.insert_rows(self.user_friends_table, rows_to_insert)
        return errors
def user_friend_collection_progress(self):
sql = f"""
SELECT
count(distinct user_id) as user_count
,round(avg(runtime_seconds), 2) as avg_duration
,round(sum(has_friends) / count(distinct user_id), 2) as pct_friendly
,round(avg(CASE WHEN has_friends = 1 THEN runtime_seconds END), 2) as avg_duration_friendly
,round(avg(CASE WHEN has_friends = 1 THEN friend_count END), 2) as avg_friends_friendly
FROM (
SELECT
user_id
,friend_count
,if(friend_count > 0, 1, 0) as has_friends
,start_at
,end_at
,DATETIME_DIFF(CAST(end_at as DATETIME), cast(start_at as DATETIME), SECOND) as runtime_seconds
FROM `{service.dataset_address}.user_friends`
) subq
"""
return self.execute_query(sql)
#
# FRIEND GRAPHS
#
    def fetch_user_friends(self, min_id=None, max_id=None, limit=None):
        """Returns user_friends rows, optionally restricted to an id range and row limit.
        Returns the result iterator (not a list) to avoid holding all rows in memory.
        """
        sql = f"""
            SELECT user_id, screen_name, friend_count, friend_names, start_at, end_at
            FROM `{self.dataset_address}.user_friends`
        """
        if min_id and max_id:
            sql += f"  WHERE CAST(user_id as int64) BETWEEN {int(min_id)} AND {int(max_id)} "
        sql += f"ORDER BY user_id "
        if limit:
            sql += f"LIMIT {int(limit)};"
        #return list(self.execute_query(sql))
        return self.execute_query(sql) # return the generator so we can avoid storing the results in memory
    def fetch_user_friends_in_batches(self, limit=None, min_friends=None):
        """Batch-priority variant of fetch_user_friends; results land in a temp table.
        Params:
            limit (int) optional max rows
            min_friends (int) optional minimum collected friend count per user
        """
        sql = f"""
            SELECT user_id, screen_name, friend_count, friend_names
            FROM `{self.dataset_address}.user_friends`
        """
        if min_friends:
            sql += f"  WHERE ARRAY_LENGTH(friend_names) >= {int(min_friends)} "
        if limit:
            sql += f" LIMIT {int(limit)}; "
        return self.execute_query_in_batches(sql)
    def partition_user_friends(self, n=10):
        """Params n (int) the number of partitions, each will be of equal size

        Returns one row per partition with its user count and min/max user ids,
        useful for splitting downstream work across workers.
        """
        sql = f"""
            SELECT
                partition_id
                ,count(DISTINCT user_id) as user_count
                ,min(user_id) as min_id
                ,max(user_id) as max_id
            FROM (
                SELECT
                    NTILE({int(n)}) OVER (ORDER BY CAST(user_id as int64)) as partition_id
                    ,CAST(user_id as int64) as user_id
                FROM (SELECT DISTINCT user_id FROM `{self.dataset_address}.user_friends`)
            ) user_partitions
            GROUP BY partition_id
        """
        results = self.execute_query(sql)
        return list(results)
    def fetch_random_users(self, limit=1000, topic="impeach", start_at=DEFAULT_START, end_at=DEFAULT_END):
        """
        Fetches a random slice of users talking about a given topic during a given timeframe.
        Params:
            topic (str) the topic they were tweeting about:
                to be balanced, choose 'impeach', '#IGHearing', '#SenateHearing', etc.
                to be left-leaning, choose '#ImpeachAndConvict', '#ImpeachAndRemove', etc.
                to be right-leaning, choose '#ShamTrial', '#AquittedForever', '#MAGA', etc.
            limit (int) the max number of users to fetch
            start_at (str) a date string for the earliest tweet
            end_at (str) a date string for the latest tweet
        """
        # ORDER BY rand() shuffles rows before the LIMIT, yielding the random slice
        sql = f"""
            SELECT DISTINCT user_id, user_screen_name, user_created_at
            FROM `{self.dataset_address}.tweets`
            WHERE upper(status_text) LIKE '%{topic.upper()}%' AND (created_at BETWEEN '{start_at}' AND '{end_at}')
            ORDER BY rand()
            LIMIT {int(limit)};
        """
        return self.execute_query(sql)
#
# RETWEET GRAPHS
#
    def fetch_retweet_counts_in_batches(self, topic=None, start_at=None, end_at=None):
        """
        For each retweeter, includes the number of times each they retweeted each other user.
        Optionally about a given topic.
        Optionally with within a given timeframe.
        Params:
            topic (str) the topic they were tweeting about, like 'impeach', '#MAGA', "@politico", etc.
            start_at (str) a date string for the earliest tweet
            end_at (str) a date string for the latest tweet
        Returns: a batch query job; the aggregated rows land in a temp destination table.
        """
        sql = f"""
            SELECT
                user_id
                ,user_screen_name
                ,retweet_user_screen_name
                ,count(distinct status_id) as retweet_count
            FROM `{self.dataset_address}.retweets`
            WHERE user_screen_name <> retweet_user_screen_name -- excludes people retweeting themselves
        """
        if topic:
            sql+=f"""
                AND upper(status_text) LIKE '%{topic.upper()}%'
            """
        if start_at and end_at:
            sql+=f"""
                AND (created_at BETWEEN '{start_at}' AND '{end_at}')
            """
        sql += """
            GROUP BY 1,2,3
        """
        return self.execute_query_in_batches(sql)
def fetch_specific_user_friends(self, screen_names):
sql = f"""
SELECT user_id, screen_name, friend_count, friend_names, start_at, end_at
FROM `{self.dataset_address}.user_friends`
WHERE screen_name in {tuple(screen_names)} -- tuple conversion surrounds comma-separated screen_names in parens
"""
return self.execute_query(sql)
def fetch_specific_retweet_counts(self, screen_names):
"""FYI this fetches multiple rows per screen_name, for each screen_name that user retweeted"""
sql = f"""
SELECT user_id, user_screen_name, retweet_user_screen_name, retweet_count
FROM `{self.dataset_address}.retweet_counts`
WHERE user_screen_name in {tuple(screen_names)} -- tuple conversion surrounds comma-separated screen_names in parens
-- AND user_screen_name <> retweet_user_screen_name -- exclude users who have retweeted themselves
ORDER BY 2,3
"""
return self.execute_query(sql)
    def fetch_retweet_weeks(self, start_at=None, end_at=None):
        """
        Weekly retweet volume and user counts; ISO week 0 is folded into the
        previous year's week 52 so year boundaries don't split a week.
        Params:
            start_at (str) like "2019-12-15 00:00:00"
            end_at (str) like "2020-03-21 23:59:59"
        """
        sql = f"""
            SELECT
                CASE
                    WHEN EXTRACT(week from created_at) = 0 THEN EXTRACT(year from created_at) - 1 -- treat first week of new year as the previous year
                    ELSE EXTRACT(year from created_at)
                    END  year
                ,CASE
                    WHEN EXTRACT(week from created_at) = 0 THEN 52 -- treat first week of new year as the previous week
                    ELSE EXTRACT(week from created_at)
                    END week
                ,count(DISTINCT EXTRACT(day from created_at)) as day_count
                ,min(created_at) as min_created
                ,max(created_at) as max_created
                ,count(DISTINCT status_id) as retweet_count
                ,count(DISTINCT user_id) as user_count
            FROM `{self.dataset_address}.retweets`
        """
        if start_at and end_at:
            sql += f"""
            WHERE created_at BETWEEN '{start_at}' AND '{end_at}'
            """
        sql += """
            GROUP BY 1,2
            ORDER BY 1,2
        """
        return self.execute_query(sql)
#
# LOCAL ANALYSIS (PG PIPELINE)
#
    def fetch_tweets_in_batches(self, limit=None, start_at=None, end_at=None):
        """Batch-priority export of tweets (optionally within a timeframe / row limit),
        used to feed the local PG pipeline. Retweet columns are NULLed out for now.
        """
        sql = f"""
            SELECT
                status_id
                ,status_text
                ,truncated
                ,NULL as retweeted_status_id -- restore for version 2
                ,NULL as retweeted_user_id -- restore for version 2
                ,NULL as retweeted_user_screen_name -- restore for version 2
                ,reply_status_id
                ,reply_user_id
                ,is_quote
                ,geo
                ,created_at
                ,user_id
                ,user_name
                ,user_screen_name
                ,user_description
                ,user_location
                ,user_verified
                ,user_created_at
            FROM `{self.dataset_address}.tweets`
        """
        if start_at and end_at:
            sql+=f"""
            WHERE (created_at BETWEEN '{str(start_at)}' AND '{str(end_at)}')
            """
        if limit:
            sql += f" LIMIT {int(limit)}; "
        return self.execute_query_in_batches(sql)
    def fetch_user_details_in_batches(self, limit=None):
        """Batch-priority export of the user_details table (optionally limited)."""
        sql = f"""
            SELECT
                user_id
                ,screen_name
                ,name
                ,description
                ,location
                ,verified
                ,created_at
                ,screen_name_count
                ,name_count
                ,description_count
                ,location_count
                ,verified_count
                ,created_at_count
                ,screen_names
                ,names
                ,descriptions
                ,locations
                ,verifieds
                ,created_ats
                ,friend_count
                ,status_count
                ,retweet_count
                -- these topics are specific to the impeachment dataset, so will need to generalize if/when working with another topic (leave for future concern)
                ,impeach_and_convict
                ,senate_hearing
                ,ig_hearing
                ,facts_matter
                ,sham_trial
                ,maga
                ,acquitted_forever
            FROM `{self.dataset_address}.user_details`
        """
        if limit:
            sql += f"LIMIT {int(limit)};"
        return self.execute_query_in_batches(sql)
    def fetch_retweeter_details_in_batches(self, limit=None):
        """Batch-priority export of the retweeter_details table (optionally limited)."""
        sql = f"""
            SELECT
                user_id
                ,verified
                ,created_at
                ,screen_name_count
                ,name_count
                ,retweet_count
                ,ig_report
                ,ig_hearing
                ,senate_hearing
                ,not_above_the_law
                ,impeach_and_convict
                ,impeach_and_remove
                ,facts_matter
                ,sham_trial
                ,maga
                ,acquitted_forever
                ,country_over_party
            FROM `{self.dataset_address}.retweeter_details`
        """
        if limit:
            sql += f"LIMIT {int(limit)};"
        return self.execute_query_in_batches(sql)
    def fetch_retweeters_by_topic_exclusive(self, topic):
        """
        Get the retweeters talking about topic x and those not, so we can perform a two-sample KS-test on them.

        Returns one row per retweeter with the count of their statuses matching
        the topic (zero for users who never mentioned it).
        """
        topic = topic.upper() # do uppercase conversion once here instead of many times inside sql below
        sql = f"""
            -- TOPIC: '{topic}'
            SELECT
                rt.user_id
                ,rt.user_created_at
                ,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{topic}') then rt.status_id end) as count
            FROM {self.dataset_address}.retweets rt
            GROUP BY 1,2
        """
        return self.execute_query(sql)
    def fetch_retweeters_by_topics_exclusive(self, x_topic, y_topic):
        """
        Get the retweeters talking about topic x and not y (and vice versa).
        For each user, determines how many times they were talking about topic x and y.
        Only returns users who were talking about one or the other, so we can perform a two-sample KS-test on them.
        """
        x_topic = x_topic.upper() # do uppercase conversion once here instead of many times inside sql below
        y_topic = y_topic.upper() # do uppercase conversion once here instead of many times inside sql below
        sql = f"""
            -- TOPICS: '{x_topic}' | '{y_topic}'
            SELECT
                rt.user_id
                ,rt.user_created_at
                ,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{x_topic}') then rt.status_id end) as x_count
                ,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{y_topic}') then rt.status_id end) as y_count
            FROM {self.dataset_address}.retweets rt
            WHERE REGEXP_CONTAINS(upper(rt.status_text), '{x_topic}')
                OR REGEXP_CONTAINS(upper(rt.status_text), '{y_topic}')
            GROUP BY 1,2
            HAVING (x_count > 0 and y_count = 0) OR (x_count = 0 and y_count > 0) -- mutually exclusive populations
        """
        return self.execute_query(sql)
#
# RETWEET GRAPHS V2 - USER ID LOOKUPS
#
    def fetch_idless_screen_names(self):
        """Returns retweeted screen names for which no user id appears in the
        tweets table (candidates for an API id lookup).
        """
        sql = f"""
            SELECT DISTINCT rt.retweet_user_screen_name as screen_name
            FROM {self.dataset_address}.retweets rt
            LEFT JOIN {self.dataset_address}.tweets t on t.user_screen_name = rt.retweet_user_screen_name
            WHERE t.user_id IS NULL
        """
        return self.execute_query(sql)
    def migrate_user_id_lookups_table(self):
        """Creates the user_id_lookups table (dropping it first when destructive=True).
        Records each id-lookup attempt; user_id stays NULL when the lookup failed.
        """
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_id_lookups`; "
        sql += f"""
            CREATE TABLE `{self.dataset_address}.user_id_lookups` (
                lookup_at TIMESTAMP,
                counter INT64,
                screen_name STRING,
                user_id STRING,
                message STRING
            );
        """
        return self.execute_query(sql)
    @property
    @lru_cache(maxsize=None)
    # cached so repeated inserts don't re-fetch the table reference
    def user_id_lookups_table(self):
        return self.client.get_table(f"{self.dataset_address}.user_id_lookups") # an API call (caches results for subsequent inserts)
def upload_user_id_lookups(self, records):
"""
Param: records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
errors = self.client.insert_rows(self.user_id_lookups_table, rows_to_insert)
return errors
def fetch_max_user_id_postlookup(self):
sql = f"""
SELECT max(user_id) as max_user_id -- 999999827600650240
FROM (
SELECT DISTINCT user_id FROM {self.dataset_address}.tweets -- 3,600,545
UNION ALL
SELECT DISTINCT user_id FROM {self.dataset_address}.user_id_lookups WHERE user_id IS NOT NULL -- 14,969
) all_user_ids -- 3,615,409
"""
results = list(self.execute_query(sql))
return int(results[0]["max_user_id"])
    def fetch_idless_screen_names_postlookup(self):
        """Returns (uppercased) screen names whose API id lookup failed, so they
        can be assigned artificial ids instead.
        """
        sql = f"""
            SELECT distinct upper(screen_name) as screen_name
            FROM {self.dataset_address}.user_id_lookups
            WHERE user_id is NULL
            ORDER BY screen_name
        """
        return self.execute_query(sql)
    def migrate_user_id_assignments_table(self):
        """Creates the user_id_assignments table (dropping it first when destructive=True)."""
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_id_assignments`; "
        sql += f"""
            CREATE TABLE `{self.dataset_address}.user_id_assignments` (
                screen_name STRING,
                user_id STRING,
            );
        """
        return self.execute_query(sql)
    @property
    @lru_cache(maxsize=None)
    # cached so repeated inserts don't re-fetch the table reference
    def user_id_assignments_table(self):
        return self.client.get_table(f"{self.dataset_address}.user_id_assignments") # an API call (caches results for subsequent inserts)
def upload_user_id_assignments(self, records):
"""
Param: records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
errors = self.client.insert_rows(self.user_id_assignments_table, rows_to_insert)
return errors
    def migrate_populate_user_screen_names_table(self):
        """Builds the user_screen_names table by unioning ids/screen names from
        tweets, successful lookups, and artificial assignments.
        """
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_screen_names`; "
        sql += f"""
            CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_screen_names` as (
                SELECT DISTINCT user_id, upper(screen_name) as screen_name
                FROM (
                    SELECT DISTINCT user_id, user_screen_name as screen_name FROM `{self.dataset_address}.tweets` -- 3,636,492
                    UNION ALL
                    SELECT DISTINCT user_id, screen_name FROM `{self.dataset_address}.user_id_lookups` WHERE user_id IS NOT NULL -- 14,969
                    UNION ALL
                    SELECT DISTINCT user_id, screen_name FROM `{self.dataset_address}.user_id_assignments` -- 2,224
                ) all_user_screen_names -- 3,615,409
                ORDER BY user_id, screen_name
            );
        """
        return self.execute_query(sql)
    def migrate_populate_user_details_table_v2(self):
        """Builds user_details_v2: one row per user id, aggregating all screen
        names that id has used (ordered by how many they had).
        """
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_details_v2`; "
        sql += f"""
            CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_details_v2` as (
                SELECT
                    user_id
                    ,count(DISTINCT UPPER(screen_name)) as screen_name_count
                    ,ARRAY_AGG(DISTINCT UPPER(screen_name) IGNORE NULLS) as screen_names
                    -- ,ANY_VALUE(screen_name) as screen_name
                FROM `{self.dataset_address}.user_screen_names`
                GROUP BY 1
                ORDER BY 2 desc
                -- LIMIT 100
            );
        """
        return self.execute_query(sql)
    def migrate_populate_retweets_table_v2(self):
        """Builds retweets_v2: retweets joined to user_screen_names so each
        retweeted screen name resolves to an int user id; self-retweets excluded.
        """
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.retweets_v2`; "
        sql += f"""
            CREATE TABLE IF NOT EXISTS `{self.dataset_address}.retweets_v2` as (
                SELECT
                    cast(rt.user_id as int64) as user_id
                    ,UPPER(rt.user_screen_name) as user_screen_name
                    ,rt.user_created_at
                    ,cast(sn.user_id as int64) as retweeted_user_id
                    ,UPPER(rt.retweet_user_screen_name) as retweeted_user_screen_name
                    ,rt.status_id
                    ,rt.status_text
                    ,rt.created_at
                FROM `{self.dataset_address}.retweets` rt
                JOIN `{self.dataset_address}.user_screen_names` sn
                    ON UPPER(rt.retweet_user_screen_name) = UPPER(sn.screen_name)
                WHERE rt.user_screen_name <> rt.retweet_user_screen_name -- excludes people retweeting themselves
            );
        """
        return self.execute_query(sql)
    def fetch_retweet_edges_in_batches_v2(self, topic=None, start_at=None, end_at=None):
        """
        For each retweeter, includes the number of times each they retweeted each other user.
        Optionally about a given topic.
        Optionally with within a given timeframe.
        Params:
            topic (str) : the topic they were tweeting about, like 'impeach', '#MAGA', "@politico", etc.
            start_at (str) : a date string for the earliest tweet
            end_at (str) : a date string for the latest tweet
        Returns: a batch query job; edge rows land in a temp destination table.
        """
        sql = f"""
            SELECT
                rt.user_id
                ,rt.retweeted_user_id
                ,count(distinct rt.status_id) as retweet_count
            FROM `{self.dataset_address}.retweets_v2` rt
            WHERE rt.user_screen_name <> rt.retweeted_user_screen_name -- excludes people retweeting themselves
        """
        if topic:
            sql+=f"""
                AND upper(rt.status_text) LIKE '%{topic.upper()}%'
            """
        if start_at and end_at:
            sql+=f"""
                AND (rt.created_at BETWEEN '{str(start_at)}' AND '{str(end_at)}')
            """
        sql += """
            GROUP BY 1,2
        """
        return self.execute_query_in_batches(sql)
    def migrate_daily_bot_probabilities_table(self):
        """Creates the daily_bot_probabilities table (dropping it first when destructive=True)."""
        sql = ""
        if self.destructive:
            sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.daily_bot_probabilities`; "
        sql += f"""
            CREATE TABLE `{self.dataset_address}.daily_bot_probabilities` (
                start_date STRING,
                user_id INT64,
                bot_probability FLOAT64,
            );
        """
        return self.execute_query(sql)
#
# RETWEET GRAPHS V2 - BOT CLASSIFICATIONS
#
    @property
    @lru_cache(maxsize=None)
    # cached so repeated inserts don't re-fetch the table reference
    def daily_bot_probabilities_table(self):
        return self.client.get_table(f"{self.dataset_address}.daily_bot_probabilities") # an API call (caches results for subsequent inserts)
def upload_daily_bot_probabilities(self, records):
return self.insert_records_in_batches(self.daily_bot_probabilities_table, records)
    def sql_fetch_bot_ids(self, bot_min=0.8):
        """Returns the SQL (str) selecting distinct user ids whose bot probability
        ever met/exceeded bot_min; reusable as a subquery.
        """
        sql = f"""
            SELECT DISTINCT bp.user_id
            FROM `{self.dataset_address}.daily_bot_probabilities` bp
            WHERE bp.bot_probability >= {float(bot_min)}
        """
        return sql
def fetch_bot_ids(self, bot_min=0.8):
"""Returns any user who has ever had a bot score above the given threshold."""
return self.execute_query(self.sql_fetch_bot_ids(bot_min))
    def fetch_bot_retweet_edges_in_batches(self, bot_min=0.8):
        """
        For each bot (user with any bot score greater than the specified threshold),
        and each user they retweeted, includes the number of times the bot retweeted them.
        Params:
            bot_min (float) consider users with any score above this threshold as bots
        Returns: a batch query job; edge rows land in a temp destination table.
        """
        sql = f"""
            SELECT
                rt.user_id
                ,rt.retweeted_user_id
                ,count(distinct rt.status_id) as retweet_count
            FROM `{self.dataset_address}.retweets_v2` rt
            JOIN (
                {self.sql_fetch_bot_ids(bot_min)}
            ) bp ON bp.user_id = rt.user_id
            WHERE rt.user_screen_name <> rt.retweeted_user_screen_name -- excludes people retweeting themselves
            GROUP BY 1,2
            -- ORDER BY 1,2
        """
        return self.execute_query_in_batches(sql)
#
# RETWEET GRAPHS V2 - BOT COMMUNITIES
#
#@property
#@lru_cache(maxsize=None) # don't cache, or cache one for each value of n_communities
def n_bot_communities_table(self, n_communities):
return self.client.get_table(f"{self.dataset_address}.{n_communities}_bot_communities") # an API call (caches results for subsequent inserts)
def destructively_migrate_n_bot_communities_table(self, n_communities):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.{n_communities}_bot_communities`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.{n_communities}_bot_communities` (
user_id INT64,
community_id INT64,
);
"""
return self.execute_query(sql)
def overwrite_n_bot_communities_table(self, n_communities, records):
self.destructively_migrate_n_bot_communities_table(n_communities)
table = self.n_bot_communities_table(n_communities)
return self.insert_records_in_batches(table, records)
def download_n_bot_community_tweets_in_batches(self, n_communities):
sql = f"""
SELECT
bc.community_id
,t.user_id
,t.user_name
,t.user_screen_name
,t.user_description
,t.user_location
,t.user_verified
,t.user_created_at
,t.status_id
,t.status_text
,t.retweet_status_id
,t.reply_user_id
,t.is_quote as status_is_quote
,t.geo as status_geo
,t.created_at as status_created_at
FROM `{self.dataset_address}.{n_communities}_bot_communities` bc -- 681
JOIN `{self.dataset_address}.tweets` t on CAST(t.user_id as int64) = bc.user_id
-- WHERE t.retweet_status_id IS NULL
-- ORDER BY 1,2
"""
return self.execute_query_in_batches(sql)
def download_n_bot_community_retweets_in_batches(self, n_communities):
sql = f"""
SELECT
bc.community_id
,ud.user_id
,ud.screen_name_count as user_screen_name_count
,ARRAY_TO_STRING(ud.screen_names, ' | ') as user_screen_names
,rt.user_created_at
,rt.retweeted_user_id
,rt.retweeted_user_screen_name
,rt.status_id
,rt.status_text
,rt.created_at as status_created_at
FROM `{self.dataset_address}.{n_communities}_bot_communities` bc -- 681
JOIN `{self.dataset_address}.user_details_v2` ud on CAST(ud.user_id as int64) = bc.user_id
JOIN `{self.dataset_address}.retweets_v2` rt on rt.user_id = bc.user_id
-- ORDER BY 1,2
"""
return self.execute_query_in_batches(sql)
def destructively_migrate_token_frequencies_table(self, table_address, records):
print("DESTRUCTIVELY MIGRATING TABLE:", table_address)
sql = f"""
DROP TABLE IF EXISTS `{table_address}`;
CREATE TABLE IF NOT EXISTS `{table_address}` (
token STRING,
rank INT64,
count INT64,
pct FLOAT64,
doc_count INT64,
doc_pct FLOAT64
);
"""
self.execute_query(sql)
table = self.client.get_table(table_address) # API call
print("INSERTING", len(records), "RECORDS...")
return self.insert_records_in_batches(table, records)
def fetch_bot_community_profiles(self, n_communities=2):
sql = f"""
SELECT community_id, bot_id as user_id, user_descriptions
FROM `{self.dataset_address}.{int(n_communities)}_community_profiles`
"""
return self.execute_query(sql)
def upload_bot_community_profile_tokens(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_profile_tokens"
self.destructively_migrate_token_frequencies_table(table_address=table_address, records=records)
def upload_bot_community_profile_tags(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_profile_tags"
self.destructively_migrate_token_frequencies_table(table_address=table_address, records=records)
def fetch_bot_community_statuses(self, n_communities, community_id=None, limit=None):
sql = f"""
SELECT community_id, user_id, status_id, status_text
FROM `{self.dataset_address}.{int(n_communities)}_community_labeled_tweets`
"""
if community_id:
sql += f" WHERE community_id = {int(community_id)}"
if limit:
sql += f" LIMIT {int(limit)}"
return self.execute_query(sql)
def upload_bot_community_status_tokens(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_status_tokens"
self.destructively_migrate_token_frequencies_table(table_address, records=records)
def upload_bot_community_status_tags(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_status_tags"
self.destructively_migrate_token_frequencies_table(table_address, records=records)
#
# BOT FOLLOWER GRAPHS
#
def destructively_migrate_user_friends_flat(self):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.user_friends_flat`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_friends_flat` as (
SELECT user_id, upper(screen_name) as screen_name, upper(friend_name) as friend_name
FROM `{self.dataset_address}.user_friends`
CROSS JOIN UNNEST(friend_names) AS friend_name
);
""" # 1,976,670,168 rows WAT
return self.execute_query(sql)
def destructively_migrate_bots_table(self, bot_min=0.8):
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.bots_above_{bot_min_str}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.bots_above_{bot_min_str}` as (
SELECT
bp.user_id as bot_id
,sn.screen_name as bot_screen_name
,count(distinct start_date) as day_count
,avg(bot_probability) as avg_daily_score
FROM `{self.dataset_address}.daily_bot_probabilities` bp
JOIN `{self.dataset_address}.user_screen_names` sn ON CAST(sn.user_id as int64) = bp.user_id
WHERE bp.bot_probability >= {float(bot_min)}
GROUP BY 1,2
ORDER BY 3 desc
);
"""
return self.execute_query(sql)
def destructively_migrate_bot_followers_table(self, bot_min=0.8):
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.bot_followers_above_{bot_min_str}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.bot_followers_above_{bot_min_str}` as (
SELECT
b.bot_id
,b.bot_screen_name
,uff.user_id as follower_id
,uff.screen_name as follower_screen_name
FROM `{self.dataset_address}.user_friends_flat` uff
JOIN `{self.dataset_address}.bots_above_{bot_min_str}` b ON upper(b.bot_screen_name) = upper(uff.friend_name)
);
""" # 29,861,268 rows WAT
return self.execute_query(sql)
def fetch_bot_followers_in_batches(self, bot_min=0.8):
"""
Returns a row for each bot for each user who follows them.
Params: bot_min (float) consider users with any score above this threshold as bots (uses pre-computed classification scores)
"""
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
SELECT DISTINCT bot_id, follower_id
FROM `{self.dataset_address}.bot_followers_above_{bot_min_str}`
"""
return self.execute_query_in_batches(sql)
def fetch_bot_follower_lists(self, bot_min=0.8):
"""
Returns a row for each bot, with a list of aggregated follower ids.
Params: bot_min (float) consider users with any score above this threshold as bots (uses pre-computed classification scores)
"""
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
SELECT bot_id, ARRAY_AGG(distinct follower_id) as follower_ids
FROM `{self.dataset_address}.bot_followers_above_{bot_min_str}`
GROUP BY 1
""" # takes 90 seconds for ~25K rows
return self.execute_query(sql)
#
# NLP (BASILICA)
#
@property
@lru_cache(maxsize=None)
def basilica_embeddings_table(self):
return self.client.get_table(f"{self.dataset_address}.basilica_embeddings") # an API call (caches results for subsequent inserts)
def upload_basilica_embeddings(self, records):
return self.insert_records_in_batches(self.basilica_embeddings_table, records)
def fetch_basilica_embedless_partitioned_statuses(self, min_val=0.0, max_val=1.0, limit=None, in_batches=False):
"""Params min_val and max_val reference partition decimal values from 0.0 to 1.0"""
sql = f"""
SELECT ps.status_id, ps.status_text
FROM `{self.dataset_address}.partitioned_statuses` ps
LEFT JOIN `{self.dataset_address}.basilica_embeddings` emb ON ps.status_id = emb.status_id
WHERE emb.status_id IS NULL
AND ps.partition_val BETWEEN {float(min_val)} AND {float(max_val)}
"""
if limit:
sql += f" LIMIT {int(limit)};"
if in_batches:
print("FETCHING STATUSES IN BATCHES...")
return self.execute_query_in_batches(sql)
else:
print("FETCHING STATUSES...")
return self.execute_query(sql)
#
# NLP (CUSTOM)
#
def fetch_labeled_tweets_in_batches(self, limit=None):
    """Fetch community-labeled tweets (status_id, status_text, community_id).

    NOTE: despite the method name, passing a limit runs a single non-batched
    query; only the unlimited path streams results in batches.
    """
    sql = f"""
        SELECT
            status_id
            ,status_text
            ,community_id
            --,community_score
        FROM `{self.dataset_address}.2_community_labeled_tweets`
    """
    if limit:
        sql += f" LIMIT {int(limit)}"
        return self.execute_query(sql)
    else:
        return self.execute_query_in_batches(sql)
def fetch_unlabeled_statuses_in_batches(self, limit=None):
sql = f"""
SELECT s.status_id, s.status_text
FROM `{self.dataset_address}.statuses` s
LEFT JOIN `{self.dataset_address}.2_community_labeled_tweets` l ON l.status_id = s.status_id
WHERE l.status_id IS NULL
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
else:
return self.execute_query_in_batches(sql)
def destructively_migrate_2_community_predictions_table(self):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.2_community_predictions`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.2_community_predictions` (
status_id INT64,
predicted_community_id INT64
);
"""
return self.execute_query(sql)
@property
@lru_cache(maxsize=None)
def community_predictions_table(self):
return self.client.get_table(f"{self.dataset_address}.2_community_predictions") # an API call (caches results for subsequent inserts)
def upload_predictions_in_batches(self, records):
return self.insert_records_in_batches(self.community_predictions_table, records)
def fetch_predictions(self, limit=None):
sql = f"""
SELECT status_id, predicted_community_id
FROM `{self.dataset_address}.2_community_predictions`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
else:
return self.execute_query_in_batches(sql)
#
# NLP V2
#
def nlp_v2_fetch_statuses(self, limit=None):
sql = f"""
SELECT s.status_id, s.status_text
FROM `{self.dataset_address}.statuses` s
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def nlp_v2_destructively_migrate_predictions_table(self, model_name):
if model_name.lower() == "bert":
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.nlp_v2_predictions_bert`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.nlp_v2_predictions_bert` (
status_id INT64,
logit_0 FLOAT64,
logit_1 FLOAT64,
prediction FLOAT64
);
"""
else:
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.nlp_v2_predictions_{model_name}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.nlp_v2_predictions_{model_name}` (
status_id INT64,
prediction STRING -- todo: convert this D/R label back to 0/1 "score"
);
"""
return self.execute_query(sql)
def nlp_v2_get_predictions_table(self, model_name):
    """Return a handle to the nlp_v2 predictions table for the given model."""
    return self.client.get_table(f"{self.dataset_address}.nlp_v2_predictions_{model_name}") # API call.
#
# DAILY ACTIVE FRIEND GRAPHS V4
#
def fetch_daily_statuses(self, date, limit=None):
sql = f"""
SELECT DISTINCT
t.status_id
, t.status_text
, t.created_at
, t.user_id
, UPPER(t.user_screen_name) as screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
--,bu.community_id
-- ,r.tweet_count as rate
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = cast(t.user_id as int64)
WHERE EXTRACT(DATE from created_at) = '{date}'
--LIMIT 10
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_tweeter_statuses(self, date, tweet_min=None, limit=None):
sql = f"""
SELECT DISTINCT
t.status_id
,t.status_text
,t.created_at
,t.user_id
,UPPER(t.user_screen_name) as screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.tweet_count as rate
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = cast(t.user_id as int64)
JOIN (
SELECT
cast(user_id as INT64) as user_id, count(distinct status_id) as tweet_count
FROM `{self.dataset_address}.tweets` t
WHERE EXTRACT(DATE from created_at) = '{date}'
GROUP BY 1
-- LIMIT 10
) r ON r.user_id = cast(t.user_id as int64)
WHERE EXTRACT(DATE from created_at) = '{date}'
"""
if tweet_min:
sql += f" AND tweet_count >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_tweeter_statuses_for_model_training(self, date, tweet_min=None, limit=None):
sql = f"""
WITH daily_tweets AS (
SELECT
cast(t.user_id as int64) as user_id
,UPPER(t.user_screen_name) as screen_name
,cast(t.status_id as int64) as status_id
,t.status_text
,t.created_at
FROM `{self.dataset_address}.tweets` t
WHERE extract(date from t.created_at) = '{date}'
)
SELECT DISTINCT
t.status_id ,t.status_text ,t.created_at
,t.user_id ,t.screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.tweet_count as rate
,st.status_count as status_text_occurrence
FROM daily_tweets t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = t.user_id
JOIN (
SELECT
CAST(user_id as INT64) as user_id
,count(distinct status_id) as tweet_count
FROM daily_tweets t
GROUP BY 1
) r ON r.user_id = cast(t.user_id as int64)
LEFT JOIN (
SELECT
t.status_text
,count(distinct t.status_id) as status_count
FROM daily_tweets t
GROUP BY 1
) st ON st.status_text = t.status_text
"""
if tweet_min:
sql += f" AND tweet_count >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_user_friends(self, date, tweet_min=None, limit=None):
sql = f"""
SELECT dau.user_id, dau.rate, uf.screen_name ,uf.friend_count, uf.friend_names
FROM (
SELECT cast(user_id as INT64) as user_id, count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets` t
WHERE EXTRACT(DATE from t.created_at) = '{date}'
GROUP BY 1
) dau
JOIN `{self.dataset_address}.active_user_friends` uf ON uf.user_id = dau.user_id
"""
if tweet_min:
sql += f" WHERE dau.rate >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_edge_friends(self, date, tweet_min=2, limit=None):
sql = f"""
WITH dau AS (
SELECT
cast(user_id as INT64) as user_id
,upper(user_screen_name) as screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets`
WHERE EXTRACT(DATE FROM created_at) = '{date}'
GROUP BY 1,2
HAVING count(distinct status_id) >= {int(tweet_min)}
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,ARRAY_AGG(DISTINCT uff.friend_name) as friend_names
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_edge_friends_for_csv(self, date, tweet_min=2, limit=None):
sql = f"""
WITH dau AS (
SELECT
cast(user_id as INT64) as user_id
,upper(user_screen_name) as screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets`
WHERE EXTRACT(DATE FROM created_at) = '{date}'
GROUP BY 1,2
HAVING count(distinct status_id) >= {int(tweet_min)}
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_statuses_with_opinion_scores(self, date, limit=None):
sql = f"""
WITH daily_tweets as (
SELECT user_id ,screen_name ,status_id ,status_text ,created_at ,score_lr ,score_nb
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE extract(date from created_at) = '{date}'
AND score_lr is not null and score_nb is not null -- there are 30,000 total null lr scores. drop for now
)
SELECT
t.user_id
,t.screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.status_count as rate
,t.status_id
,t.status_text
,st.status_count as status_text_occurrences
,t.created_at
,t.score_lr
,t.score_nb
FROM daily_tweets t
JOIN (
SELECT user_id, count(distinct status_id) as status_count
FROM daily_tweets
GROUP BY 1
) r ON r.user_id = t.user_id
LEFT JOIN (
SELECT status_text ,count(distinct status_id) as status_count
FROM daily_tweets
GROUP BY 1
) st ON st.status_text = t.status_text
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = t.user_id
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# DAILY ACTIVE FRIEND GRAPHS V5
#
def fetch_daily_nodes_with_active_edges(self, date, limit=None):
sql = f"""
WITH dau AS (
SELECT
user_id
,screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE extract(date from created_at) = '{date}'
AND score_lr is not null and score_nb is not null -- there are 30,000 total null lr scores. drop for now
GROUP BY 1,2
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3,4,5
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# ACTIVE FRIEND GRAPHS V6
#
#def migrate_populate_nodes_with_active_edges_v6(self, limit=None):
# sql = f"""
# WITH au AS (
# SELECT
# cast(user_id as int64) as user_id
# ,upper(user_screen_name) as screen_name
# ,count(distinct status_id) as rate
# FROM `{self.dataset_address}.tweets` t
# WHERE created_at BETWEEN '2019-12-20 00:00:00' AND '2020-02-15 23:59:59' -- inclusive (primary collection period)
# GROUP BY 1,2
# )
#
# SELECT
# au.user_id
# ,au.screen_name
# ,au.rate
# ,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
# ,cast(bu.community_id as int64) as community_id
# ,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
# ,count(DISTINCT uff.friend_name) as friend_count
# FROM au
# JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = au.user_id
# LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = au.user_id
# WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM au)
# GROUP BY 1,2,3,4,5
# """
# if limit:
# sql += f" LIMIT {int(limit)};"
# return self.execute_query(sql)
def fetch_nodes_with_active_edges_v6(self, limit=None):
sql = f"""
SELECT user_id, screen_name, rate, bot, community_id, friend_names, friend_count
FROM`{self.dataset_address}.nodes_with_active_edges_v6`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_sn_nodes_with_active_edges_v7(self, limit=None):
sql = f"""
SELECT user_id, screen_name, status_count as rate, is_bot as bot, community_id, friend_names, friend_count
FROM`{self.dataset_address}.nodes_with_active_edges_v7_sn`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# BOT ANALYSIS
#
def fetch_statuses_with_tags(self, limit=None):
sql = f"""
SELECT user_id, is_bot, status_id, status_text
FROM`{self.dataset_address}.statuses_with_tags`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_user_details_vq(self, limit=None):
sql = f"""
SELECT
user_id ,creation_date ,screen_name_count, screen_names
,status_count, rt_count
,is_bot ,bot_community
,mean_opinion ,opinion_community
,q_status_count ,q_status_pct
FROM`{self.dataset_address}.user_details_vq`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_tweet_details_v6(self, limit=None):
sql = f"""
SELECT
status_id
,status_created_at
,is_rt ,rt_user_screen_name
,user_id
,screen_names ,screen_name_count
,created_on ,created_jan17 ,created_inaug
,is_bot ,is_q
,opinion_community ,mean_opinion
FROM `{self.dataset_address}.tweet_details_v6_slim`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# API - V0
# ... ALL ENDPOINTS MUST PREVENT SQL INJECTION
def fetch_user_details_api_v0(self, screen_name="politico"):
# TODO: super-charge this with cool stuff, like mention counts, average opinion score, etc.
# TODO: create some temporary tables, to make the query faster
sql = f"""
SELECT
user_id
,user_created_at
,tweet_count
,screen_name_count
,screen_names
,user_names
,user_descriptions
FROM `{self.dataset_address}.user_details_v3`
WHERE UPPER(@screen_name) in UNNEST(SPLIT(screen_names, '|'))
LIMIT 1
"""
job_config = bigquery.QueryJobConfig(query_parameters=[bigquery.ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
def fetch_user_tweets_api_v0(self, screen_name="politico"):
# TODO: create some temporary tables maybe, to make the query faster
sql = f"""
SELECT
t.status_id
,t.status_text
,t.created_at
,p.predicted_community_id as opinion_score
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_community_predictions` p ON p.status_id = cast(t.status_id as int64)
WHERE upper(t.user_screen_name) = upper(@screen_name)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
def fetch_users_most_retweeted_api_v0(self, metric=None, limit=None):
    """
    Top most-retweeted users for each community.

    Params:
        metric : whether to calculate top users based on "retweet_count" or "retweeter_count"
        limit : the number of top users to return for each community (max 1,000)

    Raises ValueError if metric is not one of the supported column names.
    """
    metric = metric or "retweet_count"
    limit = limit or 25

    # BUGFIX: BigQuery query parameters cannot stand in for column identifiers,
    # so the previous "ORDER BY @metric" ordered by a constant string (i.e. no
    # ordering at all). Whitelist the metric and interpolate it directly; the
    # whitelist also keeps this endpoint safe from SQL injection.
    if metric not in ("retweet_count", "retweeter_count"):
        raise ValueError(f"unsupported metric: {metric!r}")

    sql = f"""
        (
            SELECT community_id ,retweeted_user_screen_name ,retweeter_count , retweet_count
            FROM `{self.dataset_address}.community_0_users_most_retweeted`
            ORDER BY {metric} DESC
            LIMIT @limit
        )
        UNION ALL
        (
            SELECT community_id ,retweeted_user_screen_name ,retweeter_count , retweet_count
            FROM `{self.dataset_address}.community_1_users_most_retweeted`
            ORDER BY {metric} DESC
            LIMIT @limit
        )
    """
    job_config = QueryJobConfig(query_parameters=[
        ScalarQueryParameter("limit", "INT64", int(limit)),
    ])
    return self.client.query(sql, job_config=job_config)
def fetch_statuses_most_retweeted_api_v0(self, metric=None, limit=None):
    """
    Top most-retweeted statuses for each community.

    Params:
        metric : whether to calculate top statuses based on "retweet_count" or "retweeter_count"
        limit : the number of top statuses to return for each community (max 1,000)

    Raises ValueError if metric is not one of the supported column names.
    """
    metric = metric or "retweet_count"
    limit = limit or 25

    # BUGFIX: BigQuery query parameters cannot stand in for column identifiers,
    # so the previous "ORDER BY @metric" ordered by a constant string (i.e. no
    # ordering at all). Whitelist the metric and interpolate it directly; the
    # whitelist also keeps this endpoint safe from SQL injection.
    if metric not in ("retweet_count", "retweeter_count"):
        raise ValueError(f"unsupported metric: {metric!r}")

    sql = f"""
        (
            SELECT community_id ,retweeted_user_screen_name ,status_text ,retweeter_count , retweet_count
            FROM `{self.dataset_address}.community_0_statuses_most_retweeted`
            ORDER BY {metric} DESC
            LIMIT @limit
        )
        UNION ALL
        (
            SELECT community_id ,retweeted_user_screen_name ,status_text ,retweeter_count , retweet_count
            FROM `{self.dataset_address}.community_1_statuses_most_retweeted`
            ORDER BY {metric} DESC
            LIMIT @limit
        )
    """
    job_config = QueryJobConfig(query_parameters=[
        ScalarQueryParameter("limit", "INT64", int(limit)),
    ])
    return self.client.query(sql, job_config=job_config)
def fetch_top_profile_tokens_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 20
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_0_profile_tokens`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_1_profile_tokens`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_profile_tags_api_v0(self, limit=None):
"""
Params: limit : the number of top tags to return for each community
"""
limit = limit or 20
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_0_profile_tags`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_1_profile_tags`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_status_tokens_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 50
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_0_status_tokens`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_1_status_tokens`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_status_tags_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 50
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_0_status_tags`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_1_status_tags`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
#
# API - V1
# ... ALL ENDPOINTS MUST PREVENT SQL INJECTION
def fetch_user_tweets_api_v1(self, screen_name="politico"):
sql = f"""
SELECT
status_id
,status_text
,created_at
,score_lr
,score_nb
,score_bert
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE upper(screen_name) = upper(@screen_name)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
def fetch_users_most_followed_api_v1(self, limit=None):
limit = limit or 500 # max 1000 based on the size of the precomputed table
sql = f"""
SELECT
screen_name --, user_id, user_created_at
,status_count
,follower_count
,avg_score_lr
,avg_score_nb
,avg_score_bert
,user_category as category
FROM `{self.dataset_address}.nlp_v2_predictions_by_user_most_followed`
ORDER BY follower_count DESC
LIMIT @limit
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
if __name__ == "__main__":
    # Smoke-test script: connect, optionally clean up temp tables, then print
    # summary counts for the collected tweets / users / friend graphs.

    service = BigQueryService()
    print(f" CLEANUP MODE: {CLEANUP_MODE}")
    if CLEANUP_MODE:
        service.delete_temp_tables_older_than(days=3)
    seek_confirmation()  # pause for operator confirmation before querying

    print("--------------------")
    print("FETCHED TOPICS:")
    print([row.topic for row in service.fetch_topics()])

    # total number of distinct tweets collected
    sql = f"SELECT count(distinct status_id) as tweet_count FROM `{service.dataset_address}.tweets`"
    results = service.execute_query(sql)
    print("--------------------")
    tweet_count = list(results)[0].tweet_count
    print(f"FETCHED {fmt_n(tweet_count)} TWEETS")

    # total number of distinct tweeting users
    print("--------------------")
    sql = f"SELECT count(distinct user_id) as user_count FROM `{service.dataset_address}.tweets`"
    results = service.execute_query(sql)
    user_count = list(results)[0].user_count
    print(f"FETCHED {fmt_n(user_count)} USERS")

    # friend-graph collection progress summary
    results = service.user_friend_collection_progress()
    row = list(results)[0]
    collected_count = row.user_count
    pct = collected_count / user_count
    #print("--------------------")
    #print("USERS COLLECTED:", collected_count)
    #print(" PCT COLLECTED:", f"{(pct * 100):.1f}%")
    #print(" AVG DURATION:", row.avg_duration)

    if collected_count > 0:
        print("--------------------")
        print(f"USERS WITH FRIENDS: {row.pct_friendly * 100}%")
        print(" AVG FRIENDS:", round(row.avg_friends_friendly))
        #print(" AVG DURATION:", row.avg_duration_friendly)
| 72,536 | 38.040366 | 161 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/gcs_service.py |
import os
from pprint import pprint
from google.cloud import storage
from dotenv import load_dotenv
from conftest import TEST_DATA_DIR, TMP_DATA_DIR
load_dotenv()
GOOGLE_APPLICATION_CREDENTIALS = os.getenv("GOOGLE_APPLICATION_CREDENTIALS", default="google-credentials.json")
GCS_BUCKET_NAME=os.getenv("GCS_BUCKET_NAME", default="my-bucket") # "gs://my-bucket"
class GoogleCloudStorageService:
    """Thin wrapper around the google-cloud-storage client, bound to one bucket."""

    def __init__(self, bucket_name=GCS_BUCKET_NAME):
        self.client = storage.Client() # implicit check for GOOGLE_APPLICATION_CREDENTIALS
        self.bucket_name = bucket_name
        self.bucket = self.get_bucket()

    @property
    def metadata(self):
        # lightweight description of this service, e.g. for logging
        return {"bucket_name": self.bucket_name}

    def get_bucket(self):
        """Return a handle for the configured bucket."""
        return self.client.bucket(self.bucket_name)

    def upload(self, local_filepath, remote_filepath):
        """Upload a local file to the bucket at remote_filepath; returns the blob."""
        blob = self.bucket.blob(remote_filepath)

        # avoid timeout errors when uploading a large file
        # h/t: https://github.com/googleapis/python-storage/issues/74
        #
        # https://googleapis.dev/python/storage/latest/blobs.html
        # chunk_size (int) – (Optional) The size of a chunk of data whenever iterating (in bytes).
        # This must be a multiple of 256 KB per the API specification.
        #
        max_chunk_size = 5 * 1024 * 1024 # 5 MB
        blob.chunk_size = max_chunk_size
        blob._MAX_MULTIPART_SIZE = max_chunk_size  # private attr; chunked-upload threshold

        blob.upload_from_filename(local_filepath)
        return blob

    def download(self, remote_filepath, local_filepath):
        """Download a remote blob to local_filepath; returns the blob."""
        blob = self.bucket.blob(remote_filepath)

        ## avoid timeout errors when uploading a large file
        ## h/t: https://github.com/googleapis/python-storage/issues/74
        ##
        ## https://googleapis.dev/python/storage/latest/blobs.html
        ## chunk_size (int) – (Optional) The size of a chunk of data whenever iterating (in bytes).
        ## This must be a multiple of 256 KB per the API specification.
        ##
        #max_chunk_size = 5 * 1024 * 1024 # 5 MB
        #blob.chunk_size = max_chunk_size
        #blob._MAX_MULTIPART_SIZE = max_chunk_size

        blob.download_to_filename(local_filepath)
        return blob

    def file_exists(self, remote_filepath):
        """Return True if the remote file exists (makes an API call)."""
        print("FILE EXISTS?", remote_filepath)
        blob = self.bucket.blob(remote_filepath)
        return blob.exists()
if __name__ == "__main__":
    # Smoke-test script: round-trip two fixture files through the bucket.

    service = GoogleCloudStorageService()

    #print("------------")
    #print("BUCKETS:")
    #for bucket in service.client.list_buckets():
    #    print(bucket)

    print("------------")
    print("BUCKET:")
    bucket = service.get_bucket()
    print(bucket)

    if bucket:
        for filename in ["mock_graph.gpickle", "mock_network.csv"]:
            remote_filepath = os.path.join("storage", "data", filename)

            print("------------")
            print("UPLOADING LOCAL FILE...")
            local_filepath = os.path.join(TEST_DATA_DIR, filename)
            #blob = bucket.blob(remote_filepath)
            #print(blob) #> <Blob: impeachment-analysis-2020, storage/data/mock_graph.gpickle, None>
            #blob.upload_from_filename(local_filepath)
            #print(blob.exists()) #> True
            blob = service.upload(local_filepath, remote_filepath)
            print(blob) #> <Blob: impeachment-analysis-2020, storage/data/mock_graph.gpickle, 1590433751346995>

            print("------------")
            print("DOWNLOADING REMOTE FILE (TEMPORARY)...")
            # download to a temp path, confirm it exists, then clean up
            tmp_local_filepath = os.path.join(TMP_DATA_DIR, filename)
            blob = service.download(remote_filepath, tmp_local_filepath)
            print(os.path.isfile(tmp_local_filepath))
            os.remove(tmp_local_filepath)
| 3,735 | 35.271845 | 111 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/email_service.py | # app/email_service.py
import os
from dotenv import load_dotenv
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from app import SERVER_NAME, SERVER_DASHBOARD_URL
load_dotenv()
SENDGRID_API_KEY = os.getenv("SENDGRID_API_KEY")
MY_EMAIL = os.getenv("MY_EMAIL_ADDRESS")
def send_email(subject="[Email Service] This is a test", html="<p>Hello World</p>"):
    """Send an HTML email from/to MY_EMAIL via SendGrid.

    Returns the SendGrid API response on success (status 202 indicates
    acceptance), or None if sending raised an exception.
    """
    client = SendGridAPIClient(SENDGRID_API_KEY) #> <class 'sendgrid.sendgrid.SendGridAPIClient>
    print("CLIENT:", type(client))
    print("SUBJECT:", subject)
    #print("HTML:", html)
    message = Mail(from_email=MY_EMAIL, to_emails=MY_EMAIL, subject=subject, html_content=html)
    try:
        response = client.send(message)
        print("RESPONSE:", type(response)) #> <class 'python_http_client.client.Response'>
        print(response.status_code) #> 202 indicates SUCCESS
        return response
    except Exception as e:
        # BUGFIX: Python 3 exceptions have no `.message` attribute, so the old
        # `e.message` raised AttributeError inside the handler and masked the
        # real error. Print the exception itself instead.
        print("OOPS", e)
        return None
if __name__ == "__main__":
    # Manual test: send a "work complete" notification for this server.

    subject = "[Impeachment Tweet Analysis] Friend Collection Complete!"

    html = f"""
    <h3>This is a test.</h3>

    <p>Server '{SERVER_NAME}' has completed its work.</p>
    <p>So please shut it off so it can get some rest.</p>

    <p>
        <a href='{SERVER_DASHBOARD_URL}'>{SERVER_DASHBOARD_URL}</a>
    </p>

    <p>Thanks!</p>
    """

    send_email(subject, html)
| 1,382 | 30.431818 | 96 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.