#!/usr/bin/env python
"""
Requirements:
* Python >= 3.6.2
* Pandas
* NumPy
Copyright (c) 2020 Georgios Fotakis <georgios.fotakis@i-med.ac.at>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = ('0', '3', )
__version__ = '.'.join(__version_info__)
__version__ += '-dev' if not RELEASE else ''
import argparse
# import math
import os,sys
import pandas as pd
# import numpy as np
def append_score(pvacseq_tsv, immun_tsv, output):
    pvacseq_df = pd.read_csv(pvacseq_tsv, sep='\t')  # read_csv already returns a DataFrame
    print(pvacseq_df)
    imm_df = pd.read_csv(immun_tsv, sep='\t')
# Drop 1st column in order for the merge to function properly
imm_df.drop(columns=['Sample_ID'], inplace = True)
# Rename columns in order for the merge to function properly
imm_df.rename(columns={"mut_peptide":"MT Epitope Seq",
"Reference":"WT Epitope Seq",
"peptide_variant_position":"Mutation Position",
"TCGA_predict":"Immunogenicity_score"}, inplace=True)
# Inner join dataFrames
merged_df = pd.merge(pvacseq_df, imm_df)
merged_df.to_csv(output, sep="\t", index=False)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Append immunogenicity scores to a pVACseq report')
    parser.add_argument('--pvacseq_tsv', required=True, help='Input filtered MHC I pVACseq TSV file')
    parser.add_argument('--score_tsv', required=True, help='Input immunogenicity scores TSV file')
    parser.add_argument('--output', required=True, help='Path to output file')
    args = parser.parse_args()
# Parse arguments
pvacseq_tsv = args.pvacseq_tsv
immun_tsv = args.score_tsv
output = args.output
append_score(pvacseq_tsv, immun_tsv, output)
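# Example invocation (file names are illustrative only):
#   python append_score.py --pvacseq_tsv pvacseq_filtered.tsv \
#       --score_tsv immunogenicity_scores.tsv --output merged_scores.tsv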
|
# Python
import re
# Django
from django.core.validators import RegexValidator
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
# Django OAuth Toolkit
from oauth2_provider.models import AbstractApplication, AbstractAccessToken
DATA_URI_RE = re.compile(r'.*') # FIXME
__all__ = ['OAuth2AccessToken', 'OAuth2Application']
class OAuth2Application(AbstractApplication):
class Meta:
app_label = 'main'
verbose_name = _('application')
description = models.TextField(
default='',
blank=True,
)
logo_data = models.TextField(
default='',
editable=False,
validators=[RegexValidator(DATA_URI_RE)],
)
class OAuth2AccessToken(AbstractAccessToken):
class Meta:
app_label = 'main'
verbose_name = _('access token')
description = models.CharField(
max_length=200,
default='',
blank=True,
)
last_used = models.DateTimeField(
null=True,
default=None,
editable=False,
)
def is_valid(self, scopes=None):
valid = super(OAuth2AccessToken, self).is_valid(scopes)
if valid:
self.last_used = now()
self.save(update_fields=['last_used'])
return valid
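# Hedged usage sketch (inside a request handler; `token` is assumed to be an
# OAuth2AccessToken instance resolved elsewhere):
#   if token.is_valid(scopes=['read']):
#       ...  # proceed; token.last_used has just been stamped with now()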
|
import configparser
from os import path
class Config:
def __init__(self):
self.config = None
self.section = None
def load(self, cfg_file, section):
if not path.exists(cfg_file):
raise FileNotFoundError()
self.config = configparser.ConfigParser()
self.config.read(cfg_file)
self.section = section
def get(self, name):
return self.config[self.section][name]
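if __name__ == '__main__':
    # Minimal usage sketch: assumes an INI file "settings.ini" with a
    # [database] section containing a "host" key (illustrative names only).
    cfg = Config()
    cfg.load("settings.ini", "database")
    print(cfg.get("host"))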
|
import threading
import __init__
import paho.mqtt.client as client_lib
import time
from sistem_climatizare.senzori_centralizare.basic_sensor import BasicSensor
class Display(threading.Thread):
@staticmethod
def fnc_activa(client, user_data, message):
print("Temperatura primita: ", str(message.payload.decode("utf-8")))
def __init__(self, *args, **kwargs):
super(Display, self).__init__(*args, **kwargs)
broker = "localhost"
self.subscriber = client_lib.Client("Display")
self.subscriber.connect(broker)
self._stop = threading.Event()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.is_set()
    def display_loop(self):
        # Register the callback and the subscription once, then let paho's
        # background network loop deliver messages while we poll the stop flag.
        self.subscriber.on_message = self.fnc_activa
        self.subscriber.subscribe(BasicSensor.__name__ + "/Temperature")
        self.subscriber.loop_start()
        while not self.stopped():
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                break
            except Exception as e:
                print(e)
        self.subscriber.loop_stop()
        print("Good Bye!")
if __name__ == "__main__":
display = Display()
display.display_loop()
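# Hedged sketch of the matching publisher side (assumes the same local broker;
# topic name matches the subscription above):
#   pub = client_lib.Client("Sensor")
#   pub.connect("localhost")
#   pub.publish(BasicSensor.__name__ + "/Temperature", "21.5")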
|
from tkinter import *
root = Tk()
class Cell (Button):
Dead = 0
Live = 1
def __init__ (self,parent):
Button.__init__(self,parent, relief = "raised" , width = 2 , borderwidth = 1 , command = self.onpress)
self.displayState(Cell.Dead)
def onpress (self):
if self.state == Cell.Live:
self.displayState(Cell.Dead)
elif self.state == Cell.Dead:
self.displayState(Cell.Live)
    def setNextState (self , Neighbours):
        if self.state == Cell.Live and (Neighbours < 2 or Neighbours > 3):
            self.nextState = Cell.Dead
        elif self.state == Cell.Dead and Neighbours == 3:
            self.nextState = Cell.Live
        else:
            # a live cell with 2 or 3 neighbours survives; a dead cell stays dead
            self.nextState = self.state
def stepToNextState(self):
self.displayState(self.nextState)
def displayState (self , newstate):
self.state = newstate
if self.state == Cell.Live:
self["bg"] = "black"
if self.state == Cell.Dead:
self["bg"] = "white"
class Grid:
def __init__(self,parent,sizex,sizey):
self.sizex = sizex
self.sizey = sizey
self.cells = []
for a in range (0,self.sizex):
rowcells = []
for b in range (0, self.sizey):
c = Cell(parent)
c.grid(row=b , column=a)
rowcells.append(c)
self.cells.append(rowcells)
def step (self):
cells = self.cells
for x in range (0,self.sizex):
if x==0: x_down = self.sizex-1
else: x_down = x-1
if x==self.sizex-1: x_up = 0
else: x_up = x+1
for y in range(0,self.sizey):
if y==0: y_down = self.sizey-1
                else: y_down = y-1
if y==self.sizey-1: y_up = 0
else: y_up = y+1
sum = cells[x_down][y].state + cells[x_up][y].state + cells[x][y_down].state + cells[x][y_up].state + cells[x_down][y_down].state +cells[x_up][y_up].state + cells[x_down][y_up].state + cells[x_up][y_down].state
cells[x][y].setNextState(sum)
for row in cells:
for cell in row:
cell.stepToNextState()
def clear(self):
for row in self.cells:
for cell in row:
cell.displayState(Cell.Dead)
if __name__ == "__main__":
frame = Frame(root)
frame.pack()
grid = Grid(frame,25,25)
bottomFrame = Frame(root)
bottomFrame.pack (side = BOTTOM)
buttonStep = Button(bottomFrame , text="Step" , command=grid.step)
buttonStep.pack(side = LEFT)
buttonClear = Button(bottomFrame, text = "Clear", command=grid.clear)
buttonClear.pack(side=LEFT , after=buttonStep)
root.mainloop()
|
import json
import unittest
from localstack.utils.aws import aws_stack
from localstack.utils.common import short_uid
TEST_QUEUE_NAME = 'TestQueue'
TEST_POLICY = """
{
"Version":"2012-10-17",
"Statement":[
{
"Effect": "Allow",
"Principal": { "AWS": "*" },
"Action": "sqs:SendMessage",
"Resource": "'$sqs_queue_arn'",
"Condition":{
"ArnEquals":{
"aws:SourceArn":"'$sns_topic_arn'"
}
}
}
]
}
"""
class SQSTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = aws_stack.connect_to_service('sqs')
def test_list_queue_tags(self):
# Since this API call is not implemented in ElasticMQ, we're mocking it
# and letting it return an empty response
queue_info = self.client.create_queue(QueueName=TEST_QUEUE_NAME)
queue_url = queue_info['QueueUrl']
res = self.client.list_queue_tags(QueueUrl=queue_url)
# Apparently, if there are no tags, then `Tags` should NOT appear in the response.
assert 'Tags' not in res
# clean up
self.client.delete_queue(QueueUrl=queue_url)
def test_publish_get_delete_message(self):
queue_name = 'queue-%s' % short_uid()
queue_info = self.client.create_queue(QueueName=queue_name)
queue_url = queue_info['QueueUrl']
self.assertIn(queue_name, queue_url)
# publish/receive message
self.client.send_message(QueueUrl=queue_url, MessageBody='msg123')
messages = self.client.receive_message(QueueUrl=queue_url)['Messages']
        self.assertEqual(len(messages), 1)
# delete/receive message
self.client.delete_message(QueueUrl=queue_url, ReceiptHandle=messages[0]['ReceiptHandle'])
response = self.client.receive_message(QueueUrl=queue_url)
self.assertFalse(response.get('Messages'))
# clean up
self.client.delete_queue(QueueUrl=queue_url)
def test_create_fifo_queue(self):
fifo_queue = 'my-queue.fifo'
queue_info = self.client.create_queue(QueueName=fifo_queue, Attributes={'FifoQueue': 'true'})
queue_url = queue_info['QueueUrl']
# it should preserve .fifo in the queue name
self.assertIn(fifo_queue, queue_url)
# clean up
self.client.delete_queue(QueueUrl=queue_url)
def test_set_queue_policy(self):
queue_name = 'queue-%s' % short_uid()
queue_info = self.client.create_queue(QueueName=queue_name)
queue_url = queue_info['QueueUrl']
attributes = {
'Policy': TEST_POLICY
}
self.client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes)
attrs = self.client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'])['Attributes']
self.assertIn('sqs:SendMessage', attrs['Policy'])
attrs = self.client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['Policy'])['Attributes']
self.assertIn('sqs:SendMessage', attrs['Policy'])
# clean up
self.client.delete_queue(QueueUrl=queue_url)
def test_dead_letter_queue(self):
queue_name = 'queue-%s' % short_uid()
dlq_name = 'queue-%s' % short_uid()
dlq_info = self.client.create_queue(QueueName=dlq_name)
dlq_arn = aws_stack.sqs_queue_arn(dlq_name)
attributes = {'RedrivePolicy': json.dumps({'deadLetterTargetArn': dlq_arn, 'maxReceiveCount': 100})}
queue_info = self.client.create_queue(QueueName=queue_name, Attributes=attributes)
queue_url = queue_info['QueueUrl']
# clean up
self.client.delete_queue(QueueUrl=queue_url)
self.client.delete_queue(QueueUrl=dlq_info['QueueUrl'])
|
def relevant_paths(root_dir, only_title, only_part):
"""We may want to filter the paths we search in to those relevant to a
particular cfr title/part. Most index entries encode this as their first
two path components"""
prefix_path = root_dir.path
for sub_entry in root_dir.sub_entries():
suffix_path = sub_entry.path[len(prefix_path):]
if only_title and suffix_path[0] != str(only_title):
continue
if only_part and suffix_path[1] != str(only_part):
continue
yield sub_entry
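# Illustrative call (hypothetical: root_dir stands in for any index entry object
# exposing .path and .sub_entries(); the title/part numbers are made up):
#   for entry in relevant_paths(root_dir, only_title=12, only_part=1005):
#       process(entry)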
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.flowplusplus.act_norm import ActNorm
from models.flowplusplus.act_norm import CondNorm
from models.flowplusplus.inv_conv import InvConv
from models.flowplusplus.nn import GatedConv
from models.flowplusplus.coupling import Coupling
from util import channelwise, checkerboard, Flip, safe_log, squeeze, unsqueeze
class FlowPlusPlus(nn.Module):
"""Flow++ Model
Based on the paper:
"Flow++: Improving Flow-Based Generative Models
with Variational Dequantization and Architecture Design"
by Jonathan Ho, Xi Chen, Aravind Srinivas, Yan Duan, Pieter Abbeel
(https://openreview.net/forum?id=Hyg74h05tX).
Args:
scales (tuple or list): Number of each type of coupling layer in each
scale. Each scale is a 2-tuple of the form
(num_channelwise, num_checkerboard).
        in_shape (tuple): Shape of the input as (channels, height, width).
mid_channels (int): Number of channels in the intermediate layers.
num_blocks (int): Number of residual blocks in the s and t network of
`Coupling` layers.
num_dequant_blocks (int): Number of blocks in the dequantization flows.
"""
def __init__(self,
scales=((0, 4), (2, 3)),
in_shape=(3, 32, 32),
mid_channels=96,
num_blocks=10,
num_dequant_blocks=2,
num_components=32,
use_attn=True,
drop_prob=0.2,
condition_embd_size=0):
super(FlowPlusPlus, self).__init__()
# Register bounds to pre-process images, not learnable
self.register_buffer('bounds', torch.tensor([0.9], dtype=torch.float32))
if num_dequant_blocks > 0:
self.dequant_flows = _Dequantization(in_shape=in_shape,
mid_channels=mid_channels,
num_blocks=num_dequant_blocks,
use_attn=use_attn,
drop_prob=drop_prob)
else:
self.dequant_flows = None
self.flows = _FlowStep(scales=scales,
in_shape=in_shape,
mid_channels=mid_channels,
num_blocks=num_blocks,
num_components=num_components,
use_attn=use_attn,
drop_prob=drop_prob,
condition_embd_size=condition_embd_size)
def forward(self, x, condition_embd, reverse=False):
sldj = torch.zeros(x.size(0), device=x.device)
if not reverse:
x, sldj = self.dequantize(x, sldj)
x, sldj = self.to_logits(x, sldj)
x, sldj = self.flows(x, sldj, reverse, condition_embd=condition_embd)
return x, sldj
def sample(self, condition_embd):
batch_size = condition_embd.size()[0]
with torch.no_grad():
z = torch.randn((batch_size, 3, 32, 32), dtype=torch.float32, device=condition_embd.device)
x, _ = self.forward(z, reverse=True, condition_embd=condition_embd)
x = torch.sigmoid(x)
return x
def dequantize(self, x, sldj):
if self.dequant_flows is not None:
x, sldj = self.dequant_flows(x, sldj)
else:
x = (x * 255. + torch.rand_like(x)) / 256.
return x, sldj
def to_logits(self, x, sldj):
"""Convert the input image `x` to logits.
Args:
x (torch.Tensor): Input image.
sldj (torch.Tensor): Sum log-determinant of Jacobian.
Returns:
y (torch.Tensor): Dequantized logits of `x`.
See Also:
- Dequantization: https://arxiv.org/abs/1511.01844, Section 3.1
- Modeling logits: https://arxiv.org/abs/1605.08803, Section 4.1
"""
y = (2 * x - 1) * self.bounds
y = (y + 1) / 2
y = y.log() - (1. - y).log()
# Save log-determinant of Jacobian of initial transform
ldj = F.softplus(y) + F.softplus(-y) \
- F.softplus((1. - self.bounds).log() - self.bounds.log())
sldj = sldj + ldj.flatten(1).sum(-1)
return y, sldj
class _FlowStep(nn.Module):
"""Recursive builder for a Flow++ model.
Each `_FlowStep` corresponds to a single scale in Flow++.
The constructor is recursively called to build a full model.
Args:
scales (tuple): Number of each type of coupling layer in each scale.
Each scale is a 2-tuple of the form (num_channelwise, num_checkerboard).
        in_shape (tuple): Shape of the input as (channels, height, width).
mid_channels (int): Number of channels in the intermediate layers.
num_blocks (int): Number of residual blocks in the s and t network of
`Coupling` layers.
num_components (int): Number of components in the mixture.
use_attn (bool): Use attention in the coupling layers.
drop_prob (float): Dropout probability.
"""
def __init__(self, scales, in_shape, mid_channels, num_blocks, num_components, use_attn, drop_prob, condition_embd_size=0):
super(_FlowStep, self).__init__()
in_channels, in_height, in_width = in_shape
num_channelwise, num_checkerboard = scales[0]
channels = []
for i in range(num_channelwise):
channels += [CondNorm(in_channels // 2, condition_embd_size=condition_embd_size),
InvConv(in_channels // 2),
Coupling(in_channels=in_channels // 2,
mid_channels=mid_channels,
num_blocks=num_blocks,
num_components=num_components,
use_attn=use_attn,
drop_prob=drop_prob,
condition_embd_size=condition_embd_size),
Flip()]
checkers = []
for i in range(num_checkerboard):
checkers += [CondNorm(in_channels, condition_embd_size=condition_embd_size),
InvConv(in_channels),
Coupling(in_channels=in_channels,
mid_channels=mid_channels,
num_blocks=num_blocks,
num_components=num_components,
use_attn=use_attn,
drop_prob=drop_prob,
condition_embd_size=condition_embd_size),
Flip()]
self.channels = nn.ModuleList(channels) if channels else None
self.checkers = nn.ModuleList(checkers) if checkers else None
if len(scales) <= 1:
self.next = None
else:
next_shape = (2 * in_channels, in_height // 2, in_width // 2)
self.next = _FlowStep(scales=scales[1:],
in_shape=next_shape,
mid_channels=mid_channels,
num_blocks=num_blocks,
num_components=num_components,
use_attn=use_attn,
drop_prob=drop_prob,
condition_embd_size=condition_embd_size)
def forward(self, x, sldj, reverse=False, condition_embd=None):
if reverse:
if self.next is not None:
x = squeeze(x)
x, x_split = x.chunk(2, dim=1)
x, sldj = self.next(x, sldj, reverse, condition_embd=condition_embd)
x = torch.cat((x, x_split), dim=1)
x = unsqueeze(x)
if self.checkers:
x = checkerboard(x)
for flow in reversed(self.checkers):
x, sldj = flow(x, sldj, reverse, condition_embd=condition_embd)
x = checkerboard(x, reverse=True)
if self.channels:
x = channelwise(x)
for flow in reversed(self.channels):
x, sldj = flow(x, sldj, reverse, condition_embd=condition_embd)
x = channelwise(x, reverse=True)
else:
if self.channels:
x = channelwise(x)
for flow in self.channels:
x, sldj = flow(x, sldj, reverse, condition_embd=condition_embd)
x = channelwise(x, reverse=True)
if self.checkers:
x = checkerboard(x)
for flow in self.checkers:
x, sldj = flow(x, sldj, reverse, condition_embd=condition_embd)
x = checkerboard(x, reverse=True)
if self.next is not None:
x = squeeze(x)
x, x_split = x.chunk(2, dim=1)
x, sldj = self.next(x, sldj, reverse, condition_embd=condition_embd)
x = torch.cat((x, x_split), dim=1)
x = unsqueeze(x)
return x, sldj
class _Dequantization(nn.Module):
"""Dequantization Network for Flow++
Args:
in_shape (int): Shape of the input.
mid_channels (int): Number of channels in the intermediate layers.
num_blocks (int): Number of residual blocks in the s and t network of
`Coupling` layers.
use_attn (bool): Use attention in the coupling layers.
drop_prob (float): Dropout probability.
num_flows (int): Number of InvConv+MLCoupling flows to use.
aux_channels (int): Number of channels in auxiliary input to couplings.
num_components (int): Number of components in the mixture.
"""
def __init__(self, in_shape, mid_channels, num_blocks, use_attn, drop_prob,
num_flows=4, aux_channels=32, num_components=32):
super(_Dequantization, self).__init__()
in_channels, in_height, in_width = in_shape
self.aux_conv = nn.Sequential(
nn.Conv2d(2 * in_channels, aux_channels, kernel_size=3, padding=1),
GatedConv(aux_channels, drop_prob),
GatedConv(aux_channels, drop_prob),
GatedConv(aux_channels, drop_prob))
flows = []
for _ in range(num_flows):
flows += [ActNorm(in_channels),
InvConv(in_channels),
Coupling(in_channels, mid_channels, num_blocks,
num_components, drop_prob,
use_attn=use_attn,
aux_channels=aux_channels),
Flip()]
self.flows = nn.ModuleList(flows)
def forward(self, x, sldj):
u = torch.randn_like(x)
eps_nll = 0.5 * (u ** 2 + math.log(2 * math.pi))
aux = self.aux_conv(torch.cat(checkerboard(x - 0.5), dim=1))
u = checkerboard(u)
for i, flow in enumerate(self.flows):
u, sldj = flow(u, sldj, aux=aux) if i % 4 == 2 else flow(u, sldj)
u = checkerboard(u, reverse=True)
u = torch.sigmoid(u)
x = (x * 255. + u) / 256.
sigmoid_ldj = safe_log(u) + safe_log(1. - u)
sldj = sldj + (eps_nll + sigmoid_ldj).flatten(1).sum(-1)
return x, sldj
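# Minimal usage sketch (shapes follow the constructor defaults above; the
# embedding size 128 is an arbitrary illustration):
#   model = FlowPlusPlus(condition_embd_size=128)
#   x = torch.rand(8, 3, 32, 32)       # batch of images scaled to [0, 1]
#   cond = torch.randn(8, 128)         # conditioning embeddings
#   z, sldj = model(x, cond)           # forward pass: latents and log-det terms
#   samples = model.sample(cond)       # reverse pass: draw 8 samples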
|
#%%
import datetime
import ephem
import time
import urllib.request
from dateutil import tz
import libs.rigctllib as rigctllib
from sys import platform
from RPLCD import i2c
from config.satlist import SAT_LIST
import json
from libs.satlib import *
from libs.lcdlib import *
import RPi.GPIO as GPIO
gpio_pins = ["CLK", "DT", "SW"]
selected_sat_idx = 0
from gpiozero import RotaryEncoder, Button
def gpio_remove_event_detect():
for pin in gpio_pins:
GPIO.remove_event_detect(config["gpio_pins"][pin])
def gpio_init(config):
GPIO.setmode(GPIO.BCM)
for pin in ["CLK", "DT", "SW"]:
# set up the GPIO events on those pins
GPIO.setup(config["gpio_pins"][pin], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def rotated_satsel(channel):
global selected_sat_idx
CLKState = GPIO.input(config["gpio_pins"]["CLK"])
DTState = GPIO.input(config["gpio_pins"]["DT"])
if CLKState == 0 and DTState == 1:
if selected_sat_idx + 1 in range(0, len(SAT_LIST)):
selected_sat_idx += 1
elif CLKState == 1 and DTState == 0:
if selected_sat_idx - 1 in range(0, len(SAT_LIST)):
selected_sat_idx -= 1
print(f"selected sat idx {selected_sat_idx}")
def rotated_freqmenu(channel):
global current_up
global current_down
CLKState = GPIO.input(config["gpio_pins"]["CLK"])
DTState = GPIO.input(config["gpio_pins"]["DT"])
if CLKState == 0 and DTState == 1:
if current_down + config["rotary_step"] in sat_range:
current_down += config["rotary_step"]
current_up -= config["rotary_step"]
elif CLKState == 1 and DTState == 0:
if current_down - config["rotary_step"] in sat_range:
current_down -= config["rotary_step"]
current_up += config["rotary_step"]
print(f"current down {current_down} - current up {current_up}")
def clicked_satsel(channel):
global sat_selection_menu
sat_selection_menu = False
def clicked_freqmenu(channel):
pass
def main():
    # The GPIO callbacks above read these names at module scope, so declare
    # them global here; otherwise the assignments below would create locals
    # that the callbacks never see.
    global config, selected_sat_idx, sat_selection_menu, sat_range, current_down, current_up
    with open("config/config.json", "r") as f:
        config = json.load(f)
rotary = RotaryEncoder(config["gpio_pins"]["CLK"], config["gpio_pins"]["DT"])
if config["enable_radios"]:
rig_up = rigctllib.RigCtl(config["rig_up_config"])
rig_down = rigctllib.RigCtl(config["rig_down_config"])
selected_sat_idx = 0
lcd = init_lcd()
lcd.clear()
lcd.write_string("starting up")
from_zone = tz.gettz("UTC")
to_zone = tz.gettz(config["timezone"])
sat_selection_menu = True
gpio_init(config)
    GPIO.add_event_detect(config["gpio_pins"]["CLK"], GPIO.FALLING, callback=rotated_satsel, bouncetime=180)
    GPIO.add_event_detect(config["gpio_pins"]["DT"], GPIO.FALLING, callback=rotated_satsel, bouncetime=180)
    GPIO.add_event_detect(config["gpio_pins"]["SW"], GPIO.FALLING, callback=clicked_satsel, bouncetime=150)
while sat_selection_menu:
lcd.home()
lcd.write_string("SELECT SATELLITE")
lcd.crlf()
lcd.crlf()
lcd.write_string(
f"{SAT_LIST[selected_sat_idx]['satname']} - {SAT_LIST[selected_sat_idx]['down_mode']}".ljust(
20, " "
)
)
lcd.crlf()
if SAT_LIST[selected_sat_idx]["down_mode"] == "FM":
lcd.write_string(SAT_LIST[selected_sat_idx]["tone"].ljust(20, " "))
else:
lcd.write_string("No Tone".ljust(20, " "))
print(f"selected sat {SAT_LIST[selected_sat_idx]['satname']}")
SELECTED_SAT = SAT_LIST[selected_sat_idx]
gpio_remove_event_detect()
    try:
        update_tles(config["sat_url"])
    except Exception as e:
        print(f"error downloading TLEs: {e}")
sat = get_tles(SELECTED_SAT["satname"])
#%%
satellite = ephem.readtle(
sat[0], sat[1], sat[2]
) # create ephem object from tle information
obs = ephem.Observer() # recreate Oberserver with current time
obs.lon = config["observer_conf"]["lon"]
obs.lat = config["observer_conf"]["lat"]
obs.elevation = config["observer_conf"]["elev"]
if config["enable_radios"]:
rig_down.set_mode(mode=SELECTED_SAT["down_mode"])
rig_up.set_mode(mode=SELECTED_SAT["up_mode"])
sat_range = range(SELECTED_SAT["down_start"], SELECTED_SAT["down_end"])
current_down = SELECTED_SAT["down_center"]
current_up = SELECTED_SAT["up_center"]
GPIO.add_event_detect(
config["gpio_pins"]["CLK"],
GPIO.FALLING,
callback=rotated_freqmenu,
bouncetime=180,
)
GPIO.add_event_detect(
config["gpio_pins"]["DT"],
GPIO.FALLING,
callback=rotated_freqmenu,
bouncetime=180,
)
GPIO.add_event_detect(
config["gpio_pins"]["SW"],
GPIO.FALLING,
callback=clicked_freqmenu,
bouncetime=150,
)
while True:
obs.date = datetime.datetime.utcnow()
satellite.compute(obs)
shift_down = get_doppler_shift(current_down, satellite.range_velocity)
shift_up = get_doppler_shift(current_up, satellite.range_velocity)
shifted_down = get_shifted(current_down, shift_down, "down")
shifted_up = get_shifted(current_up, shift_up, "up")
        if config["enable_radios"]:
rig_up.set_frequency(shifted_up)
rig_down.set_frequency(shifted_down)
write_lcd_loop(
lcd,
current_up,
current_down,
shifted_up,
shifted_down,
shift_up,
shift_down,
SELECTED_SAT,
)
if __name__ == "__main__":
main()
|
import os
import svg_tools
try:
columns, rows = os.get_terminal_size(0)
except OSError:
columns, rows = os.get_terminal_size(1)
def print_centered(text):
print(text.center(columns)[:-1])
print_centered("~~~ SVG to GD ~~~")
print_centered("Made by jaan and camila314")
print()
print_centered("It's recommended to backup your save files")
print_centered("if something goes wrong")
print()
running = True
while running:
try:
file_name = input("Specify file name(\"exit\" to close): ")
if file_name == "exit":
break
scale = 1 / float(input("Specify scale(>1 => bigger): "))
density = float(input("Specify density, distance between blocks(>1 => less): "))
block_size = float(input("Specify block size: "))
error_c = input("Error correction(y/n): ").lower() == "y"
print()
print("Converting...")
svg_tools.generate(file_name, scale, density, block_size, error_c)
except ValueError:
print("Please enter a valid value.")
print()
print()
|
#!/usr/bin/env python3.6
"""
Purpose:
Script responsible for helping organize music files in a
specified directory and of a specific music filetype
Steps:
- Parse Location of Music from CLI
- Determine Music Extension (Default .mp3)
- Find all music files in the directory
- Get the Artist for each File (From metadata, using eyeD3)
- Create Dir for Each Artist
- Move each song into the directory
usage:
python3.6 organize_music_by_artist.py [-h] --music-dir MUSIC_DIR\
[--music-format MUSIC_FORMAT]
example call:
python3.6 organize_music_by_artist.py --music-dir="~/Music" --music-format=".mp3"
"""
# Python Library Imports
import eyed3
import logging
import os
import re
import shutil
import sys
from argparse import ArgumentParser
from data_structure_helpers import string_helpers
from execution_helpers import function_executors
from logging_helpers import loggers
###
# Main Execution
###
@function_executors.main_executor
def main():
"""
Purpose:
Get File Metadata
"""
logging.info("Starting Process Organize Music By Artist")
cli_args = get_cli_arguments()
music_files = find_music_files(cli_args.music_dir, cli_args.music_format)
if not music_files:
logging.info("No music files found in the directory, exiting")
return
music_by_artist = get_music_by_artist(music_files)
if len(music_by_artist) == 1:
logging.info(
f"Only one artist, not going to organize: {list(music_by_artist.keys())[0]}"
)
return
for artist, artist_details in music_by_artist.items():
artist_dir = os.path.join(
cli_args.music_dir, string_helpers.convert_to_title_case(artist)
)
try:
os.mkdir(artist_dir)
        except FileExistsError:
            # logging.debug(f"{artist_dir} already exists, no need to create")
            pass
except Exception as err:
logging.exception(f"Exception Creating Dir: {err}")
raise err
for song in artist_details["songs"]:
shutil.move(song, artist_dir)
logging.info("Process Organize Music By Artist Complete")
###
# Music Parsing
###
def find_music_files(music_dir, music_format):
"""
Purpose:
Find music files in specified dir
Args:
music_dir (String): Dir to search for music files
music_format (String): Format/Extension of music files
Return:
music_files (List of Strings): List of music files in the dir (filenames)
"""
return [
os.path.join(music_dir, filename)
for filename in os.listdir(music_dir)
if os.path.isfile(os.path.join(music_dir, filename))
and filename.endswith(music_format)
]
def get_music_by_artist(music_files):
"""
Purpose:
Take list of filenames of music files and organize into a dict
with the key of the artist name and a list of songs as the value.
Args:
music_files (List of Strings): List of music filenames
Return:
music_by_artist (Dict): dict with the key of the artist name and
a list of songs as the value.
"""
music_by_artist = {}
for music_file in music_files:
music_file_metadata = eyed3.load(music_file)
artist = get_artist_for_music_file(music_file, music_file_metadata)
music_by_artist.setdefault(artist, {"songs": [], "total_songs": 0})
music_by_artist[artist]["songs"].append(music_file)
music_by_artist[artist]["total_songs"] += 1
return music_by_artist
def get_artist_for_music_file(music_file, music_file_metadata):
"""
Purpose:
Get the artist from metadata of a music file
Args:
music_file (String): Full name of the music file
music_file_metadata (EyeD3 Object): Object of EyeD3, which has the metadata of
the file that can be used to get the artist
Return:
artist (String): Artist name, "Unknown" if it is not found
"""
try:
artist = music_file_metadata.tag.artist.lower()
except Exception as err:
logging.debug(f"No Arist Tag For Music: {music_file}")
artist = "unknown"
return artist
###
# Script Configuration Functions
###
def get_cli_arguments():
"""
Purpose:
Parse CLI arguments for script
Args:
N/A
Return:
        args (argparse.Namespace): parsed CLI arguments
"""
logging.info("Getting and Parsing CLI Arguments")
parser = ArgumentParser(description="Organize Music Files")
required = parser.add_argument_group("Required Arguments")
optional = parser.add_argument_group("Optional Arguments")
    # Required Arguments
required.add_argument(
"--music-dir",
dest="music_dir",
help="Dir of Music Files",
required=True,
type=str,
)
# Optional Arguments
optional.add_argument(
"--music-format",
dest="music_format",
default=".mp3",
help="Format of Music Files",
required=False,
type=str,
)
return parser.parse_args()
if __name__ == "__main__":
try:
loggers.get_stdout_logging(
log_level=logging.INFO,
log_prefix="[parse_music_files] "
)
main()
except Exception as err:
logging.exception(f"{os.path.basename(__file__)} failed due to error: {err}")
raise err
|
import numpy as np
def findPaperLen(pairs):
    max_x = 0
    max_y = 0
    for (x, y) in pairs:
        if x > max_x:
            max_x = x
        if y > max_y:
            max_y = y
    return (max_x, max_y)
def day13_part1(paper, foldcmd):
(axis, val) = foldcmd
if axis == 'y':
top = paper[:val]
bottom = paper[val + 1:]
revBottom = np.flip(bottom, axis = 0)
firstFold = [['#' if top[i][j] == '#' or revBottom[i][j] == '#' else '.' for j in range(len(top[0]))] for i in range(len(top))]
else:
left = []
right = []
for i in range(len(paper)):
subLeft = []
subRight = []
for j in range(len(paper[0])):
if j < val:
subLeft.append(paper[i][j])
elif j > val:
subRight.append(paper[i][j])
left.append(subLeft)
right.append(subRight[::-1])
firstFold = [['#' if right[i][j] == '#' or left[i][j] == '#' else '.' for j in range(len(left[0]))] for i in range(len(left))]
return firstFold
def day13_part2(paper, folds):
# same code as part1 but complete all the folds
for (axis, val) in folds:
if axis == 'y':
top = paper[:val]
bottom = paper[val + 1:]
revBottom = np.flip(bottom, axis = 0)
paper = [['#' if top[i][j] == '#' or revBottom[i][j] == '#' else '.' for j in range(len(top[0]))] for i in range(len(top))]
else:
left = []
right = []
for i in range(len(paper)):
subLeft = []
subRight = []
for j in range(len(paper[0])):
if j < val:
subLeft.append(paper[i][j])
elif j > val:
subRight.append(paper[i][j])
left.append(subLeft)
right.append(subRight[::-1])
paper = [['#' if right[i][j] == '#' or left[i][j] == '#' else '.' for j in range(len(left[0]))] for i in range(len(left))]
# print answer
for i in range(len(paper)):
for j in range(len(paper[0])):
if paper[i][j] == '#':
print('#', end='')
else:
print(' ', end='')
print('\n', end='')
def main():
with open('input.txt', 'r') as f:
input = f.read().strip().split('\n\n')
pairsInput = input[0]
foldsInput = input[1]
# construct the list of pairs
pairs = []
for p in pairsInput.split('\n'):
x, y = map(int, p.split(','))
pairs.append((x, y))
# after list of pairs is constructed, find the dimensions of the paper
(x, y) = findPaperLen(pairs)
# create the paper and add the # in place
paper = [['.' for i in range(x + 1)] for j in range(y + 1)]
    # pairs hold (x, y); rows of the paper are indexed by y, columns by x
    for (x, y) in pairs:
        paper[y][x] = '#'
# create the folds list (has pairs of (axis, value))
folds = []
for fold in foldsInput.split('\n'):
axis, val = fold.split('=')
folds.append((axis[-1], int(val)))
firstFold = day13_part1(paper, folds.pop(0))
print("Part 1:", sum([line.count('#') for line in firstFold]))
day13_part2(firstFold, folds)
if __name__ == "__main__":
main()
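# Expected input.txt layout (Advent of Code 2021, day 13): "x,y" dot
# coordinates, a blank line, then the fold instructions, e.g.:
#   6,10
#   0,14
#
#   fold along y=7
#   fold along x=5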
|
from bs4 import BeautifulSoup
from requests import get
import json
def contact_scrape(link,output):
key = ['name','email','twitter','facebook','instagram','youtube']
val = []
soup = BeautifulSoup(get('https://su.sheffield.ac.uk' + link).text, 'html.parser')
#======================#
# Extract society name #
#======================#
name = soup.find('h2', class_="t__none f__b--eb")
if name is not None:
val.append(name.get_text())
else:
val.append('')
#=======================#
# Extract society email #
#=======================#
email = soup.find(class_ = 'c__b')
if email and email.a is not None:
val.append(email.a.get_text())
else:
val.append('')
#====================================#
# Extract society social media links #
#====================================#
social_media_list = soup.find('div', class_ ='highlight__social mgn__t--2')
if social_media_list is not None:
social_media_list = social_media_list.find_all('a')
for social_media in social_media_list:
link1 = social_media.get('href')
if link1 not in default_pages:
val.append(link1)
else:
val.append('')
return dict(zip(key,val))
#------------------------------------------------------------------------------
# Main Program
#------------------------------------------------------------------------------
# since pages keeps default sites for empty social media links
# -> we keep track of these sites and clear them out
default_pages = ['https://www.instagram.com/','https://www.facebook.com/','https://twitter.com/','https://youtube.com/user/']
# header row kept for reference; each society is written out as one JSON line
output = [['name','email','twitter','facebook','instagram','youtube']]
# Initialise BS object of main page
soup = BeautifulSoup(get("https://su.sheffield.ac.uk/activities/find-a-society").text, 'html.parser')
# retrieve all society links
societies = soup.find_all('div', class_ = 'g__4--m g__4--t g__3--d mgn__b--1')
with open("soup-contact.json", "w", newline="") as f:
for society in societies:
link = society.a['href'] # extracting href links
line_output = json.dumps(contact_scrape(link,output))
f.write(line_output + '\n')
|
#!/usr/bin/python3
import timeit
import time
import os
import matplotlib.pyplot as plt
from tqdm import trange
from injectSeed import *
from subprocess import call
def getRuntime(bytecode):
""" executes program and returns running time """
start = time.time()
exec(bytecode)
end = time.time()
return end-start
def getRunTime_R(bytecode):
    """ same as getRuntime, but executes in the module's globals so that
    recursive functions defined inside the snippet can resolve their own names """
    start = time.time()
    exec(bytecode, globals())
    end = time.time()
    return end-start
def trialRun(bytecode, n, runtimeFunc):
""" executes program using parameter n """
bytecode = injectRandList(bytecode, n)
runtime = runtimeFunc(bytecode)
return runtime
def conductTrials(filename, n, runtimeFunc):
trials = list()
bytecode = open(filename).read()
for i in trange(n, desc=filename.split('/')[-1]):
trials.append(trialRun(bytecode, i, runtimeFunc))
return trials
def plotRuntime(runtimes, n, name):
fig = plt.figure()
ax = fig.add_subplot(111)
axi = [i for i in range(n)]
print()
print("Plotting {}...".format(name))
plt.plot(axi, runtimes, 'bo', markersize=0.5)
plt.savefig("plots/Sorting/{}".format(name))
plt.close()
def plotSortingAlg(n):
bubblesorts = conductTrials("samples/Sorting/bubblesort.py", n, getRuntime)
insertionsorts = conductTrials("samples/Sorting/insertionsort.py", n, getRuntime)
mergesorts = conductTrials("samples/Sorting/mergesort.py", n, getRunTime_R)
selectionsorts = conductTrials("samples/Sorting/selectionsort.py", n, getRuntime)
quicksorts = conductTrials("samples/Sorting/quicksort.py", n, getRunTime_R)
shellsorts = conductTrials("samples/Sorting/shellsort.py", n, getRuntime)
heapsorts = conductTrials("samples/Sorting/heapsort.py", n, getRunTime_R)
plotRuntime(bubblesorts, n, "bubblesorts.png")
plotRuntime(insertionsorts, n, "insertionsorts.png")
plotRuntime(mergesorts, n, "mergesorts.png")
plotRuntime(selectionsorts, n, "selectionsorts.png")
plotRuntime(quicksorts, n, "quicksorts.png")
plotRuntime(shellsorts, n, "shellsorts.png")
plotRuntime(heapsorts, n, "heapsorts.png")
def main():
plotSortingAlg(10000)
main()
|
from p5 import *
class Slider:
def __init__(self,low,high,default):
'''slider has range from low to high
and is set to default'''
self.low = low
self.high = high
self.val = default
self.clicked = False
def position(self,x,y):
'''slider's position on screen'''
self.x = x
self.y = y
#the position of the rect you slide:
self.rectx = self.x + mapping(self.val, self.low, self.high, 0, 120)
self.recty = self.y - 10
def value(self):
'''updates the slider and returns value'''
#gray line behind slider
stroke_weight(4)
stroke(200)
line((self.x,self.y), (self.x + 120,self.y))
#press mouse to move slider
if mouse_is_pressed and dist((mouse_x,mouse_y), (self.rectx,self.recty)) < 20:
self.rectx = mouse_x
#constrain rectangle
self.rectx = constrain(self.rectx, self.x, self.x + 120)
#draw rectangle
stroke_weight(1)
stroke(0)
fill(255)
rect((self.rectx, self.recty), 10, 20)
self.val = mapping(self.rectx,self.x,self.x + 120,self.low,self.high)
#draw label
# fill(0)
# text_size(12)
# text(int(self.val),self.rectx,self.recty + 35)
return self.val
def mapping(n, start1, stop1, start2, stop2):
    """Linearly re-map n from the range [start1, stop1] to [start2, stop2],
    like Processing's map()."""
    return ((n - start1) / (stop1 - start1)) * (stop2 - start2) + start2
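# Rough usage sketch inside a p5 sketch (follows the p5 library's setup/draw/run
# convention; the range values are arbitrary):
#   slider = Slider(0, 255, 127)
#   def setup():
#       size(320, 240)
#       slider.position(50, 100)
#   def draw():
#       background(220)
#       level = slider.value()
#   run()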
|
#!/usr/bin/env python
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import os
import ttfw_idf
import esp_prov
import wifi_tools
# Have esp_prov throw exception
esp_prov.config_throw_except = True
@ttfw_idf.idf_example_test(env_tag="Example_WIFI_BT")
def test_examples_provisioning_softap(env, extra_data):
# Acquire DUT
dut1 = env.get_dut("softap_prov", "examples/provisioning/legacy/softap_prov", dut_class=ttfw_idf.ESP32DUT)
# Get binary file
binary_file = os.path.join(dut1.app.binary_path, "softap_prov.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("softap_prov_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("softap_prov_bin_size", bin_size // 1024, dut1.TARGET)
# Upload binary and start testing
dut1.start_app()
# Parse IP address of STA
dut1.expect("Starting WiFi SoftAP provisioning", timeout=60)
[ssid, password] = dut1.expect(re.compile(r"SoftAP Provisioning started with SSID '(\S+)', Password '(\S+)'"), timeout=30)
iface = wifi_tools.get_wiface_name()
if iface is None:
raise RuntimeError("Failed to get Wi-Fi interface on host")
print("Interface name : " + iface)
print("SoftAP SSID : " + ssid)
print("SoftAP Password : " + password)
try:
ctrl = wifi_tools.wpa_cli(iface, reset_on_exit=True)
print("Connecting to DUT SoftAP...")
ip = ctrl.connect(ssid, password)
        got_ip = dut1.expect(re.compile(r"DHCP server assigned IP to a station, IP is: (\d+\.\d+\.\d+\.\d+)"), timeout=60)[0]
if ip != got_ip:
raise RuntimeError("SoftAP connected to another host! " + ip + "!=" + got_ip)
print("Connected to DUT SoftAP")
print("Starting Provisioning")
verbose = False
protover = "V0.1"
secver = 1
pop = "abcd1234"
provmode = "softap"
ap_ssid = "myssid"
ap_password = "mypassword"
softap_endpoint = ip.split('.')[0] + "." + ip.split('.')[1] + "." + ip.split('.')[2] + ".1:80"
print("Getting security")
security = esp_prov.get_security(secver, pop, verbose)
if security is None:
raise RuntimeError("Failed to get security")
print("Getting transport")
transport = esp_prov.get_transport(provmode, softap_endpoint)
if transport is None:
raise RuntimeError("Failed to get transport")
print("Verifying protocol version")
if not esp_prov.version_match(transport, protover):
raise RuntimeError("Mismatch in protocol version")
print("Starting Session")
if not esp_prov.establish_session(transport, security):
raise RuntimeError("Failed to start session")
print("Sending Wifi credential to DUT")
if not esp_prov.send_wifi_config(transport, security, ap_ssid, ap_password):
raise RuntimeError("Failed to send Wi-Fi config")
print("Applying config")
if not esp_prov.apply_wifi_config(transport, security):
raise RuntimeError("Failed to send apply config")
if not esp_prov.wait_wifi_connected(transport, security):
raise RuntimeError("Provisioning failed")
finally:
ctrl.reset()
if __name__ == '__main__':
test_examples_provisioning_softap()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# stride-ios-relay.py - Stride TCP connection relay for iOS devices to Windows developer host (using usbmuxd)
#
# Copyright (c) .NET Foundation and Contributors (https://dotnetfoundation.org/ & https://stride3d.net) and Silicon Studio Corp. (https://www.siliconstudio.co.jp)
# Copyright (C) 2009 Hector Martin "marcan" <hector@marcansoft.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 or version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import usbmux
import SocketServer
import select
from optparse import OptionParser
import sys
import threading
import time
import traceback
import socket
class SocketRelay(object):
def __init__(self, a, b, maxbuf=65535):
self.a = a
self.b = b
self.atob = ""
self.btoa = ""
self.maxbuf = maxbuf
def handle(self):
while True:
rlist = []
wlist = []
xlist = [self.a, self.b]
if self.atob:
wlist.append(self.b)
if self.btoa:
wlist.append(self.a)
if len(self.atob) < self.maxbuf:
rlist.append(self.a)
if len(self.btoa) < self.maxbuf:
rlist.append(self.b)
rlo, wlo, xlo = select.select(rlist, wlist, xlist)
if xlo:
return
if self.a in wlo:
n = self.a.send(self.btoa)
self.btoa = self.btoa[n:]
if self.b in wlo:
n = self.b.send(self.atob)
self.atob = self.atob[n:]
if self.a in rlo:
s = self.a.recv(self.maxbuf - len(self.atob))
if not s:
return
self.atob += s
if self.b in rlo:
s = self.b.recv(self.maxbuf - len(self.btoa))
if not s:
return
self.btoa += s
#print "Relay iter: %8d atob, %8d btoa, lists: %r %r %r"%(len(self.atob), len(self.btoa), rlo, wlo, xlo)
parser = OptionParser(usage="usage: %prog [OPTIONS] RemoteHost")
parser.add_option("-b", "--bufsize", dest='bufsize', action='store', metavar='KILOBYTES', type='int', default=16, help="specify buffer size for socket forwarding")
parser.add_option("-s", "--socket", dest='sockpath', action='store', metavar='PATH', type='str', default=None, help="specify the path of the usbmuxd socket")
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
sys.exit(1)
alive = True
remotehost = args[0]
mux = usbmux.USBMux(options.sockpath)
class DeviceConnectionHelper():
def __init__(self, device):
self.device = device
def start_connection(self, device_sock):
try:
print "Connection opened with device, establishing connection to router (%s)"%(remotehost)
# Connect to router
router_sock = socket.socket()
router_sock.connect((remotehost, 31254))
print "Starting relay between iOS device and router"
# Forward connection between router and iOS device
fwd = SocketRelay(device_sock, router_sock, options.bufsize * 1024)
fwd.handle()
except:
traceback.print_exc(file=sys.stdout)
pass
finally:
print "Connection between iOS device and router has been interrupted"
device_sock.close()
router_sock.close()
def start_device(self):
self.device.alive = True
while self.device.alive and alive:
try:
device_sock = mux.connect(self.device, 31255)
# Start a thread for this connection
thread = threading.Thread(target = lambda: self.start_connection(device_sock))
thread.start()
except:
# Silently ignore exceptions (since we try to continuously connect to device)
pass
time.sleep(0.2)
def start_device_threaded(self):
thread = threading.Thread(target = self.start_device)
thread.start()
deviceNames = {
0x1290: 'iPhone',
0x1292: 'iPhone 3G',
0x1294: 'iPhone 3GS',
0x1297: 'iPhone 4 GSM',
0x129c: 'iPhone 4 CDMA',
0x12a0: 'iPhone 4S',
0x12a8: 'iPhone 5/6',
0x1291: 'iPod touch',
0x1293: 'iPod touch 2G',
0x1299: 'iPod touch 3G',
0x129e: 'iPod touch 4G',
0x129a: 'iPad',
0x129f: 'iPad 2 Wi-Fi',
0x12a2: 'iPad 2 GSM',
0x12a3: 'iPad 2 CDMA',
0x12a9: 'iPad 2 R2',
0x12a4: 'iPad 3 Wi-Fi',
0x12a5: 'iPad 3 CDMA',
0x12a6: 'iPad 3 Global',
0x129d: 'Apple TV 2G',
0x12a7: 'Apple TV 3G'
}
def device_name(device):
return deviceNames.get(device.usbprod, "Unknown(0x%04x)"%(device.usbprod))
def device_added(device):
# Try to connect to establish connection to device
print "Device connected: ID %d, Type %s (Serial %s)"%(device.devid, device_name(device), device.serial)
deviceConnectionHelper = DeviceConnectionHelper(device)
deviceConnectionHelper.start_device_threaded()
def device_removed(device):
print "Device removed: ID %d, Type %s (Serial %s)"%(device.devid, device_name(device), device.serial)
device.alive = False
print "Listening for iOS devices..."
mux.listener.callback_device_added = device_added
mux.listener.callback_device_removed = device_removed
alive = True
while alive:
try:
mux.process()
except:
alive = False
|
# -*- coding: utf-8 -*-
'''
.. created on 21.08.2016
.. by Christoph Schmitt
'''
from __future__ import print_function, absolute_import, division, unicode_literals
from reflexif.compat import *
from reflexif.compat import PY2
import io
import os
import unittest
from reflexif.io import Frame, SourceWrapper, FileSource
class TestFrame(unittest.TestCase):
data = bytes(range(256))
def testConstructorFail(self):
with self.assertRaises(IndexError):
Frame(self.data, -1)
with self.assertRaises(IndexError):
Frame(self.data, 0, len(self.data)+1)
with self.assertRaises(IndexError):
Frame(self.data, len(self.data), 1)
with self.assertRaises(IndexError):
Frame(self.data, len(self.data), -1)
def testConstructor(self):
Frame(self.data, 0)
Frame(self.data, 1)
f = Frame(self.data, len(self.data))
self.assertEqual(0, len(f))
f = Frame(self.data, len(self.data), 0)
self.assertEqual(0, len(f))
def testSliceLength(self):
f = Frame(self.data, 20, 10)
self.assertEqual(len(f[5:6]), 1)
self.assertEqual(len(f[0:10]), 10)
def testSliceContent(self):
f = Frame(self.data, 20, 10)
self.assertEqual(f[0:10].data, f.data)
self.assertEqual(f[5:6].data, self.data[25:26])
f2 = f[2:8]
f3 = f2[2:4]
self.assertEqual(f3.data, self.data[24:26])
def testSliceMethodFail(self):
f = Frame(self.data, 20, 10)
with self.assertRaises(ValueError):
f.slice(None, None)
with self.assertRaises(ValueError):
f.slice(None, 1)
with self.assertRaises(ValueError):
f.slice(1, None)
def testSliceSpecialCases(self):
f = Frame(self.data, 20, 10)
s1 = f[:]
s2 = f[1:]
s3 = f[:1]
self.assertEqual(s1.length, f.length)
self.assertEqual(s2.length, f.length-1)
self.assertEqual(s3.length, 1)
self.assertEqual(s1.offset, f.offset)
self.assertEqual(s2.offset, f.offset+1)
self.assertEqual(s3.offset, f.offset)
def testSliceFail(self):
f = Frame(self.data, 20, 10)
with self.assertRaises(IndexError):
f[:-1]
with self.assertRaises(IndexError):
f[-1:]
with self.assertRaises(IndexError):
f[::1]
with self.assertRaises(IndexError):
f[1:2:3]
with self.assertRaises(IndexError):
f[0:11]
with self.assertRaises(IndexError):
f[5:0]
with self.assertRaises(IndexError):
f[10:12]
def testDataFail(self):
f = Frame(self.data, 10, 20)
# make frame too long
f.length = len(self.data)-9
with self.assertRaises(EOFError):
data_slice = f.data
# debugging output
# used to identify https://github.com/IronLanguages/main/issues/1387
b = bytes(data_slice)
print('lengths: data_slice: %d, f: %d, f.source: %d, bytes(data_slice): %d' % (len(data_slice), len(f), len(f.source), len(b)))
print('f.source: ', type(f.source), '%r' % f.source, '%r' % bytes(f.source))
print('wrongly returned slice:', type(data_slice), '%r' % data_slice, '%r' % b)
def testBytes(self):
f = Frame(self.data, 10, 20)
actual = bytes(self.data[10:30])
expected = bytes(f)
if PY2:
            # If the Python 2 implementation offers a bytes type (e.g. IronPython),
            # we use it, so the elements of actual may either be ints or strings
            # of length 1.
self.assertIn(actual[0], (10, b'\x0a'))
else:
self.assertEqual(actual[0], 10)
self.assertEqual(expected, actual)
def testData(self):
l = 100
def testoffset(o):
f = Frame(self.data, o, l)
self.assertEqual(self.data[o:o+l], f.data)
for i in range(len(f)):
self.assertEqual(f[i], self.data[o+i])
testoffset(0)
testoffset(10)
testoffset(30)
testoffset(156)
class TestFrameWithWrapperSource(TestFrame):
data = SourceWrapper(bytes(range(256)))
class TestFrameWithMemoryView(TestFrame):
data = memoryview(bytes(range(256)))
class TestFrameWithFileSource(TestFrame):
data = FileSource(io.BytesIO(bytes(range(256))), 0, 256)
class TestFrameWithFileSourceWithOffset(TestFrame):
raw = bytearray(os.urandom(1024))
raw[200:456] = bytes(range(256))
data = FileSource(io.BytesIO(raw), 200, 256)
class TestFrameWithWrappedMemoryView(TestFrame):
data = SourceWrapper(memoryview(bytes(range(256))))
if __name__ == "__main__":
unittest.main()
|
"""Unit tests for the Hyperbolic space using Poincaré Ball Model.
We verify poincare ball model by compare results
of squared distance computed with inner_product
(using RiemannianMetric methods) and distance defined
in PoincareBall.
We also verify the distance is the same using differents
coordinates systems.
"""
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.hyperboloid import Hyperboloid
from geomstats.geometry.poincare_ball import PoincareBall
class TestPoincareBall(geomstats.tests.TestCase):
def setUp(self):
self.manifold = PoincareBall(2)
self.metric = self.manifold.metric
self.hyperboloid_manifold = Hyperboloid(2)
self.hyperboloid_metric = self.hyperboloid_manifold.metric
def test_squared_dist(self):
point_a = gs.array([-0.3, 0.7])
point_b = gs.array([0.2, 0.5])
distance_a_b = self.metric.dist(point_a, point_b)
squared_distance = self.metric.squared_dist(point_a, point_b)
self.assertAllClose(distance_a_b**2, squared_distance, atol=1e-8)
@geomstats.tests.np_and_pytorch_only
def test_coordinates(self):
point_a = gs.array([-0.3, 0.7])
point_b = gs.array([0.2, 0.5])
point_a_h =\
self.manifold.to_coordinates(point_a, 'extrinsic')
point_b_h =\
self.manifold.to_coordinates(point_b, 'extrinsic')
dist_in_ball =\
self.metric.dist(point_a, point_b)
dist_in_hype =\
self.hyperboloid_metric.dist(point_a_h, point_b_h)
self.assertAllClose(dist_in_ball, dist_in_hype, atol=1e-8)
def test_dist_poincare(self):
point_a = gs.array([0.5, 0.5])
point_b = gs.array([0.5, -0.5])
dist_a_b =\
self.manifold.metric.dist(point_a, point_b)
result = dist_a_b
expected = 2.887270927429199
self.assertAllClose(result, expected)
def test_dist_vectorization(self):
point_a = gs.array([0.2, 0.5])
point_b = gs.array([[0.3, -0.5], [0.2, 0.2]])
dist_a_b =\
self.manifold.metric.dist(point_a, point_b)
result_vect = dist_a_b
result =\
[self.manifold.metric.dist(point_a, point_b[i])
for i in range(len(point_b))]
result = gs.stack(result, axis=0)
self.assertAllClose(result_vect, result)
def test_dist_broadcast(self):
point_a = gs.array([[0.2, 0.5], [0.3, 0.1]])
point_b = gs.array([[0.3, -0.5], [0.2, 0.2]])
point_c = gs.array([[0.2, 0.3], [0.5, 0.5], [-0.4, 0.1]])
point_d = gs.array([0.1, 0.2, 0.3])
dist_a_b =\
self.manifold.metric.dist_broadcast(point_a, point_b)
dist_b_c = gs.flatten(
self.manifold.metric.dist_broadcast(point_b, point_c))
result_vect = gs.concatenate(
(dist_a_b, dist_b_c), axis=0)
result_a_b =\
[self.manifold.metric.dist_broadcast(point_a[i], point_b[i])
for i in range(len(point_b))]
result_b_c = \
[self.manifold.metric.dist_broadcast(point_b[i], point_c[j])
for i in range(len(point_b))
for j in range(len(point_c))
]
result = result_a_b + result_b_c
result = gs.stack(result, axis=0)
self.assertAllClose(result_vect, result)
with self.assertRaises(ValueError):
self.manifold.metric.dist_broadcast(point_a, point_d)
def test_dist_pairwise(self):
point = gs.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.5]])
result = self.manifold.metric.dist_pairwise(point)
expected = gs.array([[0., 0.65821943, 1.34682524],
[0.65821943, 0., 0.71497076],
[1.34682524, 0.71497076, 0.]])
self.assertAllClose(result, expected, rtol=1e-3)
def test_mobius_vectorization(self):
point_a = gs.array([0.5, 0.5])
point_b = gs.array([[0.5, -0.3], [0.3, 0.4]])
dist_a_b =\
self.manifold.metric.mobius_add(point_a, point_b)
result_vect = dist_a_b
result =\
[self.manifold.metric.mobius_add(point_a, point_b[i])
for i in range(len(point_b))]
result = gs.stack(result, axis=0)
self.assertAllClose(result_vect, result)
dist_a_b =\
self.manifold.metric.mobius_add(point_b, point_a)
result_vect = dist_a_b
result =\
[self.manifold.metric.mobius_add(point_b[i], point_a)
for i in range(len(point_b))]
result = gs.stack(result, axis=0)
self.assertAllClose(result_vect, result)
def test_log_vectorization(self):
point_a = gs.array([0.5, 0.5])
point_b = gs.array([[0.5, -0.5], [0.4, 0.4]])
dist_a_b =\
self.manifold.metric.log(point_a, point_b)
result_vect = dist_a_b
result =\
[self.manifold.metric.log(point_a, point_b[i])
for i in range(len(point_b))]
result = gs.stack(result, axis=0)
self.assertAllClose(result_vect, result)
dist_a_b =\
self.manifold.metric.log(point_b, point_a)
result_vect = dist_a_b
result =\
[self.manifold.metric.log(point_b[i], point_a)
for i in range(len(point_b))]
result = gs.stack(result, axis=0)
self.assertAllClose(result_vect, result)
def test_exp_vectorization(self):
point_a = gs.array([0.5, 0.5])
point_b = gs.array([[0.0, 0.0], [0.5, -0.5], [0.4, 0.4]])
dist_a_b =\
self.manifold.metric.exp(point_a, point_b)
result_vect = dist_a_b
result =\
[self.manifold.metric.exp(point_a, point_b[i])
for i in range(len(point_b))]
result = gs.stack(result, axis=0)
self.assertAllClose(result_vect, result)
dist_a_b =\
self.manifold.metric.exp(point_b, point_a)
result_vect = dist_a_b
result =\
[self.manifold.metric.exp(point_b[i], point_a)
for i in range(len(point_b))]
result = gs.stack(result, axis=0)
self.assertAllClose(result_vect, result)
def test_log_poincare(self):
point = gs.array([0.3, 0.5])
base_point = gs.array([0.3, 0.3])
result = self.manifold.metric.log(point, base_point)
expected = gs.array([-0.01733576, 0.21958634])
self.manifold.metric.coords_type = 'extrinsic'
self.assertAllClose(result, expected)
def test_belong_true_poincare(self):
point = gs.array([0.3, 0.5])
belong = self.manifold.belongs(point)
self.assertTrue(belong)
def test_belong_false_poincare(self):
point = gs.array([1.2, 0.5])
belong = self.manifold.belongs(point)
self.assertFalse(belong)
def test_projection(self):
point = gs.array([1.2, 0.5])
projected_point = self.manifold.projection(point)
self.assertTrue(gs.sum(projected_point * projected_point) < 1.)
def test_exp_poincare(self):
point = gs.array([0.3, 0.5])
base_point = gs.array([0.3, 0.3])
tangent_vec = self.manifold.metric.log(point, base_point)
result = self.manifold.metric.exp(tangent_vec, base_point)
self.manifold.metric.coords_type = 'extrinsic'
self.assertAllClose(result, point)
def test_ball_retraction(self):
x = gs.array([[0.5, 0.6], [0.2, -0.1], [0.2, -0.4]])
y = gs.array([[0.3, 0.5], [0.3, -0.6], [0.3, -0.3]])
ball_metric = self.manifold.metric
tangent_vec = ball_metric.log(y, x)
ball_metric.retraction(tangent_vec, x)
def test_ball_geodesic(self):
path_function =\
self.manifold.metric.geodesic(gs.array([0.1, 0.1]),
gs.array([0.2, 0.2]))
steps = gs.array(gs.linspace(-1000., 1000., 10000))
path_function(steps)
def test_mobius_out_of_the_ball(self):
x, y = gs.array([0.7, 0.9]), gs.array([0.2, 0.2])
with self.assertRaises(ValueError):
self.manifold.metric.mobius_add(x, y, auto_project=False)
|
#!/usr/bin/env python
import os
import yaml
import pandas as pd
import argparse
__author__ = 'Teruaki Enoto'
__version__ = '0.01'
# v0.01 : 2020-08-12 : original version
def get_parser():
"""
Creates a new argument parser.
"""
parser = argparse.ArgumentParser('niauto.py',
usage='%(prog)s -o obsid',
description="""
(example) %(prog)s.py -o 1012010136
(example) %(prog)s.py -t "comet"
"""
)
version = '%(prog)s ' + __version__
parser.add_argument('obsid',metavar='obsid',type=str,help='input obsid')
parser.add_argument('--version', '-v', action='version', version=version,
help='show version of this command')
# parser.add_argument('--target_name', '-t', type=str, default=None,
# help='target source name to be shown (default=None)')
parser.add_argument('--csvfile', '-c', type=str, default="nicer_target_segment_table.csv",
help='csvfile')
parser.add_argument('--setupfile', '-s', type=str, default="{}/hoppy/nicer/cli/setup_template.yaml".format(os.getenv('HOPPY_PATH')),
help='setup yaml file')
parser.add_argument('--heasarc_repository', '-r', type=str, default=os.getenv('HEADAS_REPOSITORY'),
help='Heasarc repository directory. If this option is specified, the files are moved here.')
parser.add_argument('--nicerteam_repository', '-n', type=str, default=os.getenv('NICERTEAM_REPOSITORY'),
help='NICER team repository directory. If this option is specified, the files are moved here.')
    parser.add_argument('--copyto', '-t', type=str, default='/Users/enoto/Dropbox/01_enoto/research/nicer/auto/out',
        help='directory to copy the fit results to')
return parser
def niauto(args):
if not os.path.exists(args.csvfile):
cmd = 'niget_target_segment_sheet.py'
print(cmd);os.system(cmd)
df = pd.read_csv(args.csvfile,comment='#')
df['Observation ID'] = df['Observation ID'].astype(str).str.zfill(10)
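# Extract the observation year and month from the 'Start TimeUTC' column (format 'YYYY-MM-DD ...').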
str_start_timeutc = df['Start TimeUTC'][df['Observation ID'] == args.obsid]
yyyy = str(str_start_timeutc).split('-')[0].split()[-1]
mm = str(str_start_timeutc).split('-')[1]
yyyy_mm = "%s_%s" % (yyyy,mm)
datadir = '%s/nicer/data/obs/%s/%s' % (os.getenv('HEADAS_REPOSITORY'),yyyy_mm,args.obsid)
datadir_team = '%s/nicer/data/obs/%s/%s' % (os.getenv('NICERTEAM_REPOSITORY'),yyyy_mm,args.obsid)
if not os.path.exists(datadir) and not os.path.exists(datadir_team):
cmd = 'niwget.py -o %s -y %s -r %s' % (args.obsid,yyyy_mm,args.heasarc_repository)
print(cmd);os.system(cmd)
outdir = 'out/%s' % args.obsid
if not os.path.exists(outdir):
cmd = 'mkdir -p %s;' % (outdir)
print(cmd);os.system(cmd)
specdir = 'out/%s/%s/spec' % (args.obsid,args.obsid)
if not os.path.exists(specdir):
param = yaml.safe_load(open(args.setupfile))
param['output_directory'] = outdir
if os.path.exists(datadir):
param['input_data_directory'] = '%s/nicer/data/obs/*' % os.getenv('HEADAS_REPOSITORY')
elif os.path.exists(datadir_team):
param['input_data_directory'] = '%s/nicer/data/obs/*' % os.getenv('NICERTEAM_REPOSITORY')
else:
print("Error: no input files")
return -1
input_setup_yaml = '%s/input_setup.yaml' % outdir
with open(input_setup_yaml, 'w') as file:
yaml.dump(param, file)
input_obsid_lst = '%s/input_obsid.lst' % outdir
f = open(input_obsid_lst,'w')
f.write('%s\n' % args.obsid)
f.close()
cmd = 'nipipeline.py '
cmd += '--setup_yamlfile %s ' % input_setup_yaml
cmd += '--obsid_lstfile %s ' % input_obsid_lst
print(cmd);os.system(cmd)
fitdir = '%s/fit' % outdir
if not os.path.exists(fitdir):
param = yaml.safe_load(open('%s/input_setup.yaml' % outdir))
model_xcm = '%s/hoppy/xspec/model/tbabs_bbodyrad.xcm' % (os.getenv('HOPPY_PATH'))
cmd = 'xspec_fit.py '
cmd += '%s/ni%s_3c50_tot.pi ' % (specdir,args.obsid)
cmd += '-o %s/fit ' % (outdir)
cmd += '-b %s/ni%s_3c50_bkg.pi ' % (specdir,args.obsid)
cmd += '-r %s ' % param['xspec_rmf']
cmd += '-a %s ' % param['xspec_arf']
cmd += '-m %s ' % model_xcm
cmd += '-s 5 -n 80 --fitemin 0.3 --fitemax 10.0 '
cmd += '--rateband 0.8-6.0,2.0-10.0 '
cmd += '--fluxband 0.8-6.0,2.0-10.0 '
cmd += '--parerrnum "1,2,3" '
print(cmd);os.system(cmd)
if args.copyto is not None:
cmd = 'mkdir -p %s/%s;' % (args.copyto,args.obsid)
cmd += 'cp -r %s %s/%s/' % (fitdir, args.copyto,args.obsid)
print(cmd);os.system(cmd)
print("finished...\n")
def main(args=None):
parser = get_parser()
args = parser.parse_args(args)
niauto(args)
if __name__=="__main__":
main()
|
print "\nSTRINGS"
s = 'hello'
print ('h' in s) == True
print ('H' in s) == False
print ('e' not in s) == False
print ('L' not in s) == True
print ('hello' + ' world') == 'hello world'
print 'a'*3 == 'aaa'
print 2*'hello' == 'hellohello'
s = '01234'
print s[4] == '4'
print s[-1] == '4'
print s[0:3] == s[:3] == s[None:3] == '012'
print s[0:] == s[0:None] == s[:] == '01234'
print s[1:3] == '12'
print s[-1:3] == ''
print s[-3:3] == '2'
print s[-4:-1] == '123'
print s[0:5:1] == s[:5:1] == s[0::1] == s[0:5:] == s[0::] == s[:5:] == s[::1] == s[::] =='01234'
print s[::-1] == '43210'
print s[4:2:-1] == '43'
print s[-1:2:-2] == '4'
print len(s) == 5
print min(s) == '0'
print max(s) == '4'
print "\nLISTS"
l = [0,1,2,3,4]
print (0 in l) == True
print (5 in l) == False
print (4 not in l) == False
print ('hello' not in l) == True
print ([0,1,2] + [3,4]) == l
print [0]*3 == [0,0,0]
print 2*[1,2] == [1,2,1,2]
l2 = [[]]*3
l2[0].append(3)
print l2 == [[3],[3],[3]]
print l[4] == 4
print l[-1] == 4
print l[0:3] == l[:3] == l[None:3] == [0,1,2]
print l[0:] == l[0:None] == l[:] == [0,1,2,3,4]
print l[1:3] == [1,2]
print l[-1:3] == []
print l[-3:3] == [2]
print l[-4:-1] == [1,2,3]
print l[0:5:1] == l[:5:1] == l[0::1] == l[0:5:] == l[0::] == l[:5:] == l[::1] == l[::] == [0,1,2,3,4]
print l[::-1] == [4,3,2,1,0]
print l[4:2:-1] == [4,3]
print l[-1:2:-2] == [4]
print len(l) == 5
print min(l) == 0
print max(l) == 4
print "\nTUPLES"
t = (0,1,2,3,4)
print (0 in t) == True
print (5 in t) == False
print (4 not in t) == False
print ('hello' not in t) == True
print ((0,1,2) + (3,4)) == t
print (0,)*3 == (0,0,0)
print 2*(1,2) == (1,2,1,2)
print t[4] == 4
print t[-1] == 4
print t[0:3] == t[:3] == t[None:3] == (0,1,2)
print t[0:] == t[0:None] == t[:] == (0,1,2,3,4)
print t[1:3] == (1,2)
print t[-1:3] == ()
print t[-3:3] == (2,)
print t[-4:-1] == (1,2,3)
print t[0:5:1] == t[:5:1] == t[0::1] == t[0:5:] == t[0::] == t[:5:] == t[::1] == t[::] == (0,1,2,3,4)
print t[::-1] == (4,3,2,1,0)
print t[4:2:-1] == (4,3)
print t[-1:2:-2] == (4,)
print len(t) == 5
print min(t) == 0
print max(t) == 4
|
#Done by Carlos Amaral (20/07/2020)
# Try It Yourself 15.5 - Refactoring
"""
The fill_walk() method is lengthy. Create a new method
called get_step() to determine the direction and distance for each step, and
then calculate the step. You should end up with two calls to get_step() in
fill_walk() :
x_step = self.get_step()
y_step = self.get_step()
This refactoring should reduce the size of fill_walk() and make the
method easier to read and understand.
"""
import matplotlib.pyplot as plt
from random_walk3 import RandomWalk
#Keep making new walks as long as the program is active.
while True:
#Make a random walk.
rw = RandomWalk(5000)
rw.fill_walk()
#Plot the points in the walk.
plt.style.use('classic')
fig, ax = plt.subplots()
point_numbers = range(rw.num_points)
ax.plot(rw.x_values, rw.y_values, linewidth=2)
#Emphasize the first and last points
ax.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues,
edgecolors='none', s=15)
ax.scatter(rw.x_values[-1], rw.y_values[-1], c='red', edgecolors='none',
s=100)
plt.show()
keep_running = input("Make another walk? (y/n): ")
if keep_running == 'n':
break
|
from torchtext import data
import os
class SST1Dataset(data.TabularDataset):
dirname = 'data'
@classmethod
def splits(cls, text_field, label_field,
train='phrases.train.tsv', validation='dev.tsv', test='test.tsv'):
prefix_name = 'stsa.fine.'
path = './data'
return super(SST1Dataset, cls).splits(
os.path.join(path, prefix_name), train, validation, test,
format='TSV', fields=[('label', label_field), ('text', text_field)]
)
|
from dataloader import dataloader
from dataloader.db.session import mapping_session
objectList = ['obj A', 'obj B']
# export from the source instance
sf = dataloader(instance = 'test',
userName = '<source-username>',
password = '<source-password>',
securityToken = '<source-token>'
)
sf.exportRelationship(objectList)
for item in objectList:
sf.exportData(item)
# import into the destination instance
db = mapping_session('<unique-ID>','<source>','<destination>')
sf = dataloader(
instance = 'test',
userName = '<destination-username>',
password = '<destination-password>',
securityToken = '<destination-token>'
)
for item in objectList:
sf.insertData(item, db)
for item in objectList:
sf.updateData(item, db)
|
import sys
from weird_mesh import getWeirdMesh
argument = sys.argv[1]
n = int(sys.argv[2])
maillages = {"cartesian": "o", "triangle": "/\\//", "checkerboard": "o+", "raf_loc": "o123456789"}
xmin, xmax, ymin, ymax = 0., 1., 0., 1.
if argument == "raf_loc": f = lambda x, y: 2 * (int(x > 0.5 and y < 0.5) + int(x > 0.75 and y < 0.25))
else: f = None
if argument == "checkerboard": n += 1
getWeirdMesh(xmin, xmax, ymin, ymax, list(range(n + 1)), list(range(n + 1)), maillages[argument], func=f).write("mesh.med", 2)
|
import pandas as pd
import json
def create_hourly_elective_prob_json(
path_to_patient_df_csv: str, output_directory: str = "."
) -> pd.DataFrame:
"""
This function uses data from "patient_df.csv" to create "hourly_elective_prob.json". This is needed
to create the forecast. It is important that "patient_df.csv" follows this data dictionary including
all required fields and field formats: "../../config/patient_data_dictionary.json".
Parameters
----------
path_to_patient_df_csv : str
The input is the path to the ""patient_df.csv"" file. With format: "../../config/patient_data_dictionary.json"
output_directory: str
The directory where "create_hourly_elective_prob.json" will be saved. The default is to be saved in the same directory as this file.
Returns
----------
df : pd.DataFrame
The dataframe aggregates the data from path_to_patient_df_csv and summarises the proportion of elective
patients per date and hour.
json file:
Called - "create_hourly_elective_prob.json". This file is required to run the forecast and contains the df as summarised above.
The file is saved in the directory define as the input.
"""
# read in path to pandas dataframe.
df_csv = pd.read_csv(path_to_patient_df_csv)
# Group by "ADMIT_HOUR" and "ELECTIVE" and count number of patient in those categories.
df = (
df_csv.groupby(["ADMIT_HOUR", "ELECTIVE"])["DIM_PATIENT_ID"]
.count()
.reset_index()
)
# Group by "ADMIT_HOUR" and "ELECTIVE" and count number of patient in those categories.
df = df.pivot(
index="ADMIT_HOUR", columns="ELECTIVE", values="DIM_PATIENT_ID"
)
# Check there are only two categories in field "ELECTIVE" if not throw an error
assert (
len(df.columns) == 2
), f"ELECTIVE field must only contain 1 and 0. Current df columns: {df.columns}"
# Add together the non elective and elective patient groupped by admit_hour
try:
df["sum"] = df[0] + df[1]
# Throw value error if the columns 0 and 1 are not found
except KeyError as e:
raise ValueError("Elective field must only contain 1 and 0.") from e
# For each hour state the proportion of elective patients per hour
df["elective_prob"] = df[1] / df["sum"]
# Round to two decimal places and fill in missing values
df["elective_prob"] = df["elective_prob"].round(2).fillna(0)
# Write dataframe to json format
df["elective_prob"].to_json(
f"{output_directory}/hourly_elective_prob.json"
)
# Message to show script has run
print(
f"Fake Data Generated! File saved: {output_directory}/hourly_elective_prob.json."
)
return df
def create_specialty_info_json(
path_to_patient_df_csv: str,
path_to_is_medical_json: str,
output_directory: str = ".",
) -> pd.DataFrame:
"""
This function uses data from "patient_df.csv" to create "specialty_info.json". This is needed
to create the forecast. It is important that "patient_df.csv" follows this data dictionary including
all required fields and field formats: "../../config/patient_data_dictionary.json".
Parameters
----------
path_to_patient_df_csv : str
The input is the path to the "patient_df.csv" file. With format: "../../config/patient_data_dictionary.json"
path_to_is_medical_json: str
This json file maps the categories "ADMIT_SPEC" to whether it is "is_medical" or not with "true" and "false".
An example file to create can be found here: "../../config/fake_data_categories/fake_speciality_is_medical_mapping.json"
output_directory: str
The directory where "specialty_info.json" will be saved. The default is to be saved in the same directory as this file.
Returns
----------
df : pd.DataFrame
The dataframe aggregates the data from path_to_patient_df_csv and summarises the proportion of elective
patients per date and hour.
json file:
Called - "specialty_info.json". This file is required to run the forecast and contains the df as summarised above.
The file is saved in the directory defined as the input.
"""
# read in path to pandas dataframe.
df_csv = pd.read_csv(path_to_patient_df_csv)
# Group by admission speciality and count number of patients
# reset index
df = df_csv.groupby(["ADMIT_SPEC"])["DIM_PATIENT_ID"].count().reset_index()
# Define the proportion of patients in each specialty by dividing
# that specialty's count by the overall patient total
df["probability"] = df["DIM_PATIENT_ID"] / sum(df["DIM_PATIENT_ID"])
# Round to two decimal places and fill in missing values
# AS commented out as prevents sum to 1 used later in patient_sampler.py#L279
# df["probability"] = df["probability"].round(2).fillna(0)
# define if the department is medical or not
# Load data_description.json to get columns required for training data
mapping = load_is_medical_mapping(path_to_is_medical_json)
# Add mappings to dataframe
df = df.merge(mapping, on="ADMIT_SPEC")
# Format for json"
df.set_index("ADMIT_SPEC", drop=True, inplace=True)
df_dict = df[["probability", "is_medical"]].transpose().to_dict()
# Save to json file
with open(f"{output_directory}/specialty_info.json", "w") as outfile:
json.dump(df_dict, outfile)
# Message to show script has run
print(
f"Fake Data Generated! File saved: {output_directory}/specialty_info.json."
)
return df
def load_is_medical_mapping(path_to_is_medical_json: str) -> pd.DataFrame:
"""
This function takes the defined json value
Parameters
----------
path_to_is_medical_json: str
This will contain a list of specialties listed in `ADMIT_SPEC` in the `patient_df.csv` file and state whether
it is medical or not by `true` or `false` in a python dictionary format, e.g. {"Urology": false, "Cardiology": true}.
An example of the file which needs to be created can be found here: "../../config/fake_data_categories/fake_speciality_is_medical_mapping.json".
Returns
----------
mapping_df : pd.DataFrame
The dataframe contains the contents from the json file.
"""
# Load json file
with open(path_to_is_medical_json, "r") as file:
mapping = json.load(file)
# Read contents and format
mapping_df = pd.DataFrame.from_dict(mapping, orient="index")
mapping_df.reset_index(level=0, inplace=True)
# Rename columns in dataframe
mapping_df.rename(
{"index": "ADMIT_SPEC", 0: "is_medical"}, axis=1, inplace=True
)
return mapping_df
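# Hedged usage sketch: the file paths below are illustrative, not part of this
# module, and the inputs must follow the data dictionary referenced in the
# docstrings above.
# hourly_df = create_hourly_elective_prob_json("patient_df.csv", output_directory=".")
# spec_df = create_specialty_info_json("patient_df.csv", "fake_speciality_is_medical_mapping.json", output_directory=".")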
|
import datasetRead
import dataInputFormat
import pickle
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from datasetRead import Dataset
from dataInputFormat import DataInput_Categorical, DataInput_Labelled
d = Dataset()
dIFLabelled = DataInput_Labelled()
dIFCategorical = DataInput_Categorical()
class ourSVMClassifier:
def trainModel(self):
d.ReadLabelledDataSet()
self.TrainedSVMclf = SVC(kernel="linear")
self.TrainedSVMclf.fit(d.X_train, d.Y_train)
def dumpPickle(self):
SVMPickleFile = "Pickle_SVMClf.pkl"
SVMPickledModel = open(SVMPickleFile,'wb')
pickle.dump(self.TrainedSVMclf,SVMPickledModel)
SVMPickledModel.close()
def loadPickle(self):
SVMPickleFile = "Pickle_SVMClf.pkl"
SVMPickledModel = open(SVMPickleFile,'rb')
self.SVMclf = pickle.load(SVMPickledModel)
def accuracyCheck(self):
d.ReadLabelledDataSet()
print("\nSVM Classifier:")
test_predicted = self.TrainedSVMclf.predict(d.X_test)
print("Accuracy for Testing Dataset:",accuracy_score(d.Y_test, test_predicted))
train_predicted = self.TrainedSVMclf.predict(d.X_train)
print("Accuracy for Training Dataset:",accuracy_score(d.Y_train, train_predicted))
def runModel(self,inputPrediction,t1,t2):
winnerTeam = self.SVMclf.predict([inputPrediction])
return winnerTeam[0]
class ourMLPClassifier:
def trainModel(self):
d.ReadCategoricalDataSet()
self.TrainedMLPclf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(100, 32), random_state=1)
self.TrainedMLPclf.fit(d.X_train, d.Y_train)
def dumpPickle(self):
MLPPickleFile = "Pickle_MLPClf.pkl"
MLPPickledModel = open(MLPPickleFile,'wb')
pickle.dump(self.TrainedMLPclf,MLPPickledModel)
MLPPickledModel.close()
def loadPickle(self):
MLPPickleFile = "Pickle_MLPClf.pkl"
MLPPickledModel = open(MLPPickleFile,'rb')
self.MLPclf = pickle.load(MLPPickledModel)
def accuracyCheck(self):
d.ReadCategoricalDataSet()
print("\nMulti Layer Perceptron Classifer:")
test_predicted = self.TrainedMLPclf.predict(d.X_test)
print("Accuracy for Testing Dataset:",accuracy_score(d.Y_test, test_predicted))
train_predicted = self.TrainedMLPclf.predict(d.X_train)
print("Accuracy for Training Dataset:",accuracy_score(d.Y_train, train_predicted))
def runModel(self,inputPrediction,t1,t2):
ourPrediction = self.MLPclf.predict_proba([inputPrediction])
dIFCategorical.hashingTargetWinners()
totalPrediction = ourPrediction[0][dIFCategorical.winnerIndex[t1]] + ourPrediction[0][dIFCategorical.winnerIndex[t2]]
predictionT1 = (ourPrediction[0][dIFCategorical.winnerIndex[t1]]/totalPrediction) * 100
predictionT2 = (ourPrediction[0][dIFCategorical.winnerIndex[t2]]/totalPrediction) * 100
predictionT1 = format(float(predictionT1), '.4f')
predictionT2 = format(float(predictionT2), '.4f')
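# Heuristic correction: for a fixed set of hashed winner indices, boost very low win probabilities and rebalance the pair accordingly.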
if dIFCategorical.winnerIndex[t1] in [1,6,8,13,15,17,18,21] and float(predictionT1) < 10.0:
predictionT1 = float(predictionT1) + 20
predictionT2 = float(predictionT2) - 20
elif dIFCategorical.winnerIndex[t1] in [1,6,8,13,15,17,18,21] and float(predictionT1) < 20.0:
predictionT1 = float(predictionT1) + 10
predictionT2 = float(predictionT2) - 10
if dIFCategorical.winnerIndex[t2] in [1,6,8,13,15,17,18,21] and float(predictionT2) < 10.0:
predictionT2 = float(predictionT2) + 20
predictionT1 = float(predictionT1) - 20
elif dIFCategorical.winnerIndex[t2] in [1,6,8,13,15,17,18,21] and float(predictionT2) < 20.0:
predictionT2 = float(predictionT2) + 10
predictionT1 = float(predictionT1) - 10
winnerTeam=""
if predictionT1>predictionT2:
winnerTeam=t1
else:
winnerTeam=t2
return winnerTeam
class ourDTClassifier:
def trainModel(self):
d.ReadCategoricalDataSet()
self.TrainedDTclf = DecisionTreeClassifier()
self.TrainedDTclf.fit(d.X_train, d.Y_train)
def dumpPickle(self):
DTPickleFile = "Pickle_DTClf.pkl"
DTPickledModel = open(DTPickleFile,'wb')
pickle.dump(self.TrainedDTclf,DTPickledModel)
DTPickledModel.close()
def loadPickle(self):
DTPickleFile = "Pickle_DTClf.pkl"
DTPickledModel = open(DTPickleFile,'rb')
self.DTclf = pickle.load(DTPickledModel)
def accuracyCheck(self):
d.ReadCategoricalDataSet()
print("\nDecision Tree Classifier:")
test_predicted = self.TrainedDTclf.predict(d.X_test)
print("Accuracy for Testing Dataset:",accuracy_score(d.Y_test, test_predicted))
train_predicted = self.TrainedDTclf.predict(d.X_train)
print("Accuracy for Training Dataset:",accuracy_score(d.Y_train, train_predicted))
def runModel(self,inputPrediction,t1,t2):
ourPrediction = self.DTclf.predict([inputPrediction])
dIFCategorical.hashingTargetWinners()
indexTeam1 = dIFCategorical.winnerIndex[t1]
indexTeam2 = dIFCategorical.winnerIndex[t2]
winnerTeam = ""
if ourPrediction[0][indexTeam1] == 1:
winnerTeam = t1
elif ourPrediction[0][indexTeam2] == 1:
winnerTeam = t2
else:
winnerTeam = "DT Classifier Can't Predict!"
return winnerTeam
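# Hedged usage sketch (comment form; the dataset files read by datasetRead and
# the feature vector input_prediction are external to this module):
# clf = ourSVMClassifier()
# clf.trainModel()
# clf.accuracyCheck()
# clf.dumpPickle()
# clf.loadPickle()
# winner = clf.runModel(input_prediction, 'TeamA', 'TeamB')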
|
print(__file__)
print("Loading isstools, preparing GUI...")
import functools
import sys
import isstools.xlive
import collections
import atexit
import PyQt5
from bluesky.examples import motor
motor.move = motor.set
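# Beamline device objects referenced below (colmirror_diag, screen_diag, mono_diag, dcr_diag, pba1, pb1, beamstop, ip_y_stage, sample_stage1, mono1 and the shutters) are expected to be defined by earlier startup files in this IPython profile.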
detector_dictionary = {colmirror_diag.name: {'obj': colmirror_diag, 'elements': [colmirror_diag.stats1.total.name, colmirror_diag.stats2.total.name]},
screen_diag.name: {'obj': screen_diag, 'elements': [screen_diag.stats1.total.name, screen_diag.stats2.total.name]},
mono_diag.name: {'obj': mono_diag, 'elements': [mono_diag.stats1.total.name, mono_diag.stats2.total.name]},
dcr_diag.name: {'obj': dcr_diag, 'elements': [dcr_diag.stats1.total.name, dcr_diag.stats2.total.name]},
#pba1.adc1.name: {'obj': pba1.adc1, 'elements': ['pba1_adc1_volt']},
pba1.adc3.name: {'obj': pba1.adc3, 'elements': ['pba1_adc3_volt']},
pba1.adc4.name: {'obj': pba1.adc4, 'elements': ['pba1_adc4_volt']},
pba1.adc5.name: {'obj': pba1.adc5, 'elements': ['pba1_adc5_volt']},
pba1.adc6.name: {'obj': pba1.adc6, 'elements': ['pba1_adc6_volt']},
pba1.adc7.name: {'obj': pba1.adc7, 'elements': ['pba1_adc7_volt']},
pba1.adc8.name: {'obj': pba1.adc8, 'elements': ['pba1_adc8_volt']},
pb1.enc1.name: {'obj': pb1.enc1, 'elements': ['pb1_enc1_pos_I']},
}
motors_dictionary = {#jj_slits.top.name: {'name': jj_slits.top.name, 'description':jj_slits.top.name, 'object': jj_slits.top},
# jj_slits.bottom.name: {'name': jj_slits.bottom.name, 'description':jj_slits.bottom.name, 'object': jj_slits.bottom},
# jj_slits.outboard.name: {'name': jj_slits.outboard.name, 'description':jj_slits.outboard.name, 'object': jj_slits.outboard},
# jj_slits.inboard.name: {'name': jj_slits.inboard.name, 'description':jj_slits.inboard.name, 'object': jj_slits.inboard},
beamstop.horizontal.name: {'name': beamstop.horizontal.name, 'description': beamstop.horizontal.name, 'object':beamstop.horizontal},
beamstop.vertical.name: {'name': beamstop.vertical.name, 'description': beamstop.vertical.name, 'object': beamstop.vertical},
ip_y_stage.name: {'name': ip_y_stage.name, 'description': ip_y_stage.name, 'object': ip_y_stage},
sample_stage1.rotary.name: {'name': sample_stage1.rotary.name, 'description':sample_stage1.rotary.name, 'object':sample_stage1.rotary},
sample_stage1.x.name: {'name': sample_stage1.x.name, 'description':sample_stage1.x.name, 'object':sample_stage1.x},
sample_stage1.y.name: {'name': sample_stage1.y.name, 'description':sample_stage1.y.name, 'object':sample_stage1.y},
sample_stage1.z.name: {'name': sample_stage1.z.name, 'description':sample_stage1.z.name, 'object':sample_stage1.z},
sample_stage1.theta.name: {'name': sample_stage1.theta.name, 'description':sample_stage1.theta.name, 'object':sample_stage1.theta},
sample_stage1.chi.name: {'name': sample_stage1.chi.name, 'description':sample_stage1.chi.name, 'object':sample_stage1.chi},
}
shutters_dictionary = {
shutter_fe.name: shutter_fe,
shutter_ph.name: shutter_ph,
shutter_fs.name: shutter_fs,
}
sample_stages = [{'x': sample_stage1.x.name, 'y': sample_stage1.y.name}]
print(mono1)
newApp = PyQt5.QtWidgets.QApplication(sys.argv)
xlive_gui = isstools.xlive.XliveGui(plan_funcs=[tscan, tscan_xs3, get_offsets, xs_count],
prep_traj_plan=prep_traj_plan,
diff_plans=[count_qas, dark_frame_preprocessor],
RE=RE,
db=db,
accelerator=nsls_ii,
mono=mono1,
sdd = xs,
shutters_dict=shutters_dictionary,
det_dict=detector_dictionary,
aux_plan_funcs ={
'set_reference_foil': set_reference_foil,
'get_reference_foil': get_reference_foil,
},
motors_dict=motors_dictionary,
general_scan_func=general_scan,
sample_stages = sample_stages,
window_title="XLive @QAS/7-BM NSLS-II",
)
sys.stdout = xlive_gui.emitstream_out
def xlive():
xlive_gui.show()
#sys.stdout = xlive_gui.emitstream_out
#sys.stderr = xlive_gui.emitstream_err
#from isstools.xview import XviewGui
#xview_gui = XviewGui(PB_PULSES_PER_DEGREE)
## jlynch 8/30
#import pyinstrument
#profiler = pyinstrument.Profiler()
#profiler.start()
#print('starting pyinstrument profiler')
## jlynch 8/30
xlive()
|
def suggest_params(trial):
dropout = trial.suggest_float('dropout', 0, 0.3, step=0.05) # used twice
activation = trial.suggest_categorical('activation', ['linear', 'leakyrelu']) # used for conditional sampling
rna_hidden = trial.suggest_int('rna_hidden', 500, 2000, step=250) # hdim should be less than rna_hidden
hdim = trial.suggest_int('hdim', 100, min(rna_hidden, 800), step=100) # shared_hidden should be less than hdim
shared_hidden = trial.suggest_int('shared_hidden', 100, min(hdim * 2, 500),
step=100) # zdim should be less than shared_hidden
num_layers = trial.suggest_int('num_layers', 1, 3, step=1) if activation == 'leakyrelu' else 1
rna_num_layers = trial.suggest_int('rna_num_layers', 1, 3, step=1)
loss_weights_kl = trial.suggest_float('loss_weights_kl', 1e-10, 1e-4, log=True)
params = {
'batch_size': 512,
'learning_rate': trial.suggest_float('lr', 1e-5, 1e-3, log=True),
'loss_weights': [1.0, 0.0, loss_weights_kl],
'joint': {
'activation': activation,
'batch_norm': True,
'dropout': dropout,
'hdim': hdim,
'losses': ['MSE', 'CE'],
'num_layers': num_layers,
'shared_hidden': [shared_hidden] * num_layers,
'zdim': trial.suggest_int('zdim', 5, min(shared_hidden, 50), step=5),
'c_embedding_dim': 20,
},
'rna': {
'activation': 'leakyrelu',
'batch_norm': True,
'dropout': dropout,
'gene_hidden': [rna_hidden] * rna_num_layers,
'num_layers': rna_num_layers,
'output_activation': 'linear'
},
'seq_model_hyperparams': None,
}
return params
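# Hedged usage sketch: wire suggest_params into an Optuna study. The lambda
# below is a placeholder objective that returns only the sampled learning rate,
# so the snippet runs without the real training pipeline (not defined here).
# import optuna
# study = optuna.create_study(direction='minimize')
# study.optimize(lambda trial: suggest_params(trial)['learning_rate'], n_trials=5)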
|
import copy
import torch
from federatedscope.core.auxiliaries.optimizer_builder import get_optimizer
from federatedscope.core.trainers.trainer import GeneralTorchTrainer
from federatedscope.core.optimizer import wrap_regularized_optimizer
from typing import Type
def wrap_DittoTrainer(
base_trainer: Type[GeneralTorchTrainer]) -> Type[GeneralTorchTrainer]:
"""
Build a `DittoTrainer` with a plug-in manner, by registering new functions into specific `BaseTrainer`
The Ditto implementation, "Ditto: Fair and Robust Federated Learning Through Personalization. (ICML2021)"
based on the Algorithm 2 in their paper and official codes: https://github.com/litian96/ditto
"""
# ---------------- attribute-level plug-in -----------------------
init_Ditto_ctx(base_trainer)
# ---------------- action-level plug-in -----------------------
base_trainer.register_hook_in_train(
new_hook=hook_on_fit_start_set_regularized_para,
trigger="on_fit_start",
insert_pos=0)
base_trainer.register_hook_in_train(
new_hook=hook_on_batch_start_switch_model,
trigger="on_batch_start",
insert_pos=0)
base_trainer.replace_hook_in_train(
new_hook=_hook_on_batch_forward_flop_count,
target_trigger="on_batch_forward",
target_hook_name="_hook_on_batch_forward_flop_count")
# evaluation is based on the local personalized model
base_trainer.register_hook_in_eval(
new_hook=hook_on_fit_start_switch_local_model,
trigger="on_fit_start",
insert_pos=0)
base_trainer.register_hook_in_eval(
new_hook=hook_on_fit_end_switch_global_model,
trigger="on_fit_end",
insert_pos=-1)
base_trainer.register_hook_in_train(new_hook=hook_on_fit_end_free_cuda,
trigger="on_fit_end",
insert_pos=-1)
base_trainer.register_hook_in_eval(new_hook=hook_on_fit_end_free_cuda,
trigger="on_fit_end",
insert_pos=-1)
return base_trainer
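# Hedged usage sketch (comment form, since constructing a GeneralTorchTrainer
# requires a full FederatedScope model/data/config setup not shown here):
# trainer = GeneralTorchTrainer(...)  # illustrative construction
# trainer = wrap_DittoTrainer(trainer)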
def init_Ditto_ctx(base_trainer):
"""
init necessary attributes used in Ditto:
`global_model` acts as the shared global model in FedAvg;
`local_model` acts as the personalized model that will be optimized with regularization based on the weights of `global_model`
"""
ctx = base_trainer.ctx
cfg = base_trainer.cfg
ctx.global_model = copy.deepcopy(ctx.model)
ctx.local_model = copy.deepcopy(ctx.model) # the personalized model
ctx.models = [ctx.local_model, ctx.global_model]
ctx.optimizer_for_global_model = get_optimizer(
cfg.optimizer.type,
ctx.global_model,
cfg.optimizer.lr,
weight_decay=cfg.optimizer.weight_decay)
ctx.optimizer_for_local_model = get_optimizer(
cfg.optimizer.type,
ctx.local_model,
cfg.personalization.lr,
weight_decay=cfg.optimizer.weight_decay)
ctx.optimizer_for_local_model = wrap_regularized_optimizer(
ctx.optimizer_for_local_model, cfg.personalization.regular_weight)
ctx.model = ctx.global_model
del ctx.optimizer
# track the batch_num, epoch_num, for local & global model respectively
ctx.num_train_batch_for_local_model, ctx.num_train_batch_last_epoch_for_local_model, \
ctx.num_train_epoch_for_local_model, ctx.num_total_train_batch = \
ctx.pre_calculate_batch_epoch_num(cfg.personalization.local_update_steps)
# In the first `num_train_batch`, `num_train_batch_last_epoch`, and `num_train_epoch`,
# we will manipulate local models, and manipulate global model in the remaining steps
ctx.num_train_batch += ctx.num_train_batch_for_local_model
ctx.num_train_batch_last_epoch += ctx.num_train_batch_last_epoch_for_local_model
ctx.num_train_epoch += ctx.num_train_epoch_for_local_model
def hook_on_fit_start_set_regularized_para(ctx):
# set the compared model data for local personalized model
ctx.global_model.to(ctx.device)
ctx.local_model.to(ctx.device)
ctx.global_model.train()
ctx.local_model.train()
compared_global_model_para = [{
"params": list(ctx.global_model.parameters())
}]
ctx.optimizer_for_local_model.set_compared_para_group(
compared_global_model_para)
def _hook_on_batch_forward_flop_count(ctx):
if ctx.monitor.flops_per_sample == 0:
# calculate the flops_per_sample
x, _ = [_.to(ctx.device) for _ in ctx.data_batch]
from fvcore.nn import FlopCountAnalysis
flops_one_batch = FlopCountAnalysis(ctx.model, x).total()
# besides the normal forward flops, the regularization adds the cost of number of model parameters
flops_one_batch += ctx.monitor.total_model_size / 2
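# (ctx.monitor.total_model_size appears to count both models in ctx.models, hence the division by 2)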
ctx.monitor.track_avg_flops(flops_one_batch, ctx.batch_size)
ctx.monitor.total_flops += ctx.monitor.flops_per_sample * ctx.batch_size
def hook_on_batch_start_switch_model(ctx):
last_epoch_use_local_model = ctx.cur_epoch_i == (ctx.num_train_epoch - 1) and \
ctx.cur_batch_i <= ctx.num_train_batch_last_epoch_for_local_model
use_local_model = last_epoch_use_local_model or ctx.cur_epoch_i <= ctx.num_train_epoch_for_local_model or \
ctx.cur_batch_i <= ctx.num_train_batch_for_local_model
if use_local_model:
ctx.model = ctx.local_model
ctx.optimizer = ctx.optimizer_for_local_model
else:
ctx.model = ctx.global_model
ctx.optimizer = ctx.optimizer_for_global_model
# Note that Ditto only updates the parameters of global_model received from other FL participants,
# and in the remaining steps ctx.model already points to ctx.global_model, so we do not need to register the following hook
# def hook_on_fit_end_link_global_model(ctx):
# ctx.model = ctx.global_model
def hook_on_fit_start_switch_local_model(ctx):
ctx.model = ctx.local_model
ctx.model.eval()
def hook_on_fit_end_switch_global_model(ctx):
ctx.model = ctx.global_model
def hook_on_fit_end_free_cuda(ctx):
ctx.global_model.to(torch.device("cpu"))
ctx.local_model.to(torch.device("cpu"))
|
from __future__ import print_function, unicode_literals, absolute_import
__all__=["emacs", "notemacs", "vi"]
from . import emacs, notemacs, vi
editingmodes = [emacs.EmacsMode, notemacs.NotEmacsMode, vi.ViMode]
#add check to ensure all modes have unique mode names
|
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import locale as Locale
import re
import time as Time
from template.plugin import Plugin
from template.util import TemplateException
"""
template.plugin.date - Plugin to generate formatted date strings
SYNOPSIS
[% USE date %]
# use current time and default format
[% date.format %]
# specify time as seconds since epoch or 'h:m:s d-m-y' string
[% date.format(960973980) %]
[% date.format('4:20:36 21/12/2000') %]
# specify format
[% date.format(mytime, '%H:%M:%S') %]
# specify locale
[% date.format(date.now, '%a %d %b %y', 'en_GB') %]
# named parameters
[% date.format(mytime, format = '%H:%M:%S') %]
[% date.format(locale = 'en_GB') %]
[% date.format(time = date.now,
format = '%H:%M:%S',
locale = 'en_GB') %]
# specify default format to plugin
[% USE date(format = '%H:%M:%S', locale = 'de_DE') %]
[% date.format %]
...
DESCRIPTION
The Date plugin provides an easy way to generate formatted time and
date strings by delegating to the POSIX strftime() routine.
The plugin can be loaded via the familiar USE directive.
[% USE date %]
This creates a plugin object with the default name of 'date'. An alternate
name can be specified as such:
[% USE myname = date %]
The plugin provides the format() method which accepts a time value, a
format string and a locale name. All of these parameters are optional
with the current system time, default format ('%H:%M:%S %d-%b-%Y') and
current locale being used respectively, if undefined. Default values
for the time, format and/or locale may be specified as named
parameters in the USE directive.
[% USE date(format = '%a %d-%b-%Y', locale = 'fr_FR') %]
When called without any parameters, the format() method returns a
string representing the current system time, formatted by strftime()
according to the default format and for the default locale (which may
not be the current one, if locale is set in the USE directive).
[% date.format %]
The plugin allows a time/date to be specified as seconds since the epoch,
as is returned by time().
File last modified: [% date.format(filemod_time) %]
The time/date can also be specified as a string of the form 'h:m:s d/m/y'.
Any of the characters : / - or space may be used to delimit fields.
[% USE day = date(format => '%A', locale => 'en_GB') %]
[% day.format('4:20:00 12-9-2000') %]
Output:
Tuesday
A format string can also be passed to the format() method, and a locale
specification may follow that.
[% date.format(filemod, '%d-%b-%Y') %]
[% date.format(filemod, '%d-%b-%Y', 'en_GB') %]
A fourth parameter allows you to force output in GMT, in the case of
seconds-since-the-epoch input:
[% date.format(filemod, '%d-%b-%Y', 'en_GB', 1) %]
Note that in this case, if the local time is not GMT, then also
specifying '%Z' (time zone) in the format parameter will lead to an
extremely misleading result.
Any or all of these parameters may be named. Positional parameters
should always be in the order ($time, $format, $locale).
[% date.format(format => '%H:%M:%S') %]
[% date.format(time => filemod, format => '%H:%M:%S') %]
[% date.format(mytime, format => '%H:%M:%S') %]
[% date.format(mytime, format => '%H:%M:%S', locale => 'fr_FR') %]
[% date.format(mytime, format => '%H:%M:%S', gmt => 1) %]
...etc...
The now() method returns the current system time in seconds since the
epoch.
[% date.format(date.now, '%A') %]
"""
# Default strftime() format:
FORMAT = "%H:%M:%S %d-%b-%Y"
LOCALE_SUFFIX = (".ISO8859-1", ".ISO_8859-15", ".US-ASCII", ".UTF-8")
GMTIME = { True: Time.gmtime,
False: Time.localtime }
class Date(Plugin):
"""Plugin to generate formatted date strings."""
def __init__(self, context, params=None):
self.params = params or {}
def now(self):
return int(Time.time())
def format(self, *args):
"""Returns a formatted time/date string for the specified time (or
the current system time if unspecified) using the format, locale,
and gmt values specified as arguments or internal values set
defined at construction time.
Specifying a true value for gmt will override the local time zone
and force the output to be for GMT. Any or all of the arguments
may be specified as named parameters which get passed as a
dictionary as the final argument.
"""
args, params = self._split_arguments(args)
args = list(args)
def get(name):
if args:
return args.pop(0)
else:
return params.get(name) or self.params.get(name)
time = get("time") or self.now()
format = get("format") or FORMAT
locale = get("locale")
gmt = get("gmt")
try:
# If time is numeric, we assume it's seconds since the epoch:
time = int(time)
except (TypeError, ValueError):
# Otherwise, we try to parse it as a 'H:M:S D:M:Y' string:
date = re.split(r"[-/ :]", str(time))
if len(date) < 6:
raise TemplateException(
"date", "bad time/date string: expects 'h:m:s d:m:y' got: '%s'"
% time)
date = [str(int(x)) for x in date[:6]]
date = Time.strptime(" ".join(date), "%H %M %S %d %m %Y")
else:
date = GMTIME[bool(gmt)](time)
if locale is not None:
old_locale = Locale.setlocale(Locale.LC_ALL)
try:
for suffix in ("",) + LOCALE_SUFFIX:
try_locale = "%s%s" % (locale, suffix)
try:
setlocale = Locale.setlocale(Locale.LC_ALL, try_locale)
except Locale.Error:
continue
else:
if try_locale == setlocale:
locale = try_locale
break
datestr = Time.strftime(format, date)
finally:
Locale.setlocale(Locale.LC_ALL, old_locale)
else:
datestr = Time.strftime(format, date)
return datestr
def calc(self):
self.throw("Failed to load date calculation module")
def manip(self):
self.throw("Failed to load date manipulation module")
def throw(self, *args):
raise TemplateException("date", ", ".join(str(x) for x in args))
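# Hedged usage sketch (assumes the template package is importable; the
# arguments mirror the DESCRIPTION above):
# date = Date(None)
# date.format(date.now(), '%d-%b-%Y')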
|
import numpy as np
import torch
from torch.autograd import Variable
from tqdm import tqdm
import utils
from Classifiers.Fashion_Classifier import Fashion_Classifier
from Classifiers.Mnist_Classifier import Mnist_Classifier
from Classifiers.Cifar_Classifier import Cifar_Classifier
from Data.load_dataset import load_dataset_full, load_dataset_test, get_iter_dataset
from log_utils import *
from Data.data_loader import DataLoader
from Evaluation.tools import calculate_frechet_distance
mpl.use('Agg')
class Reviewer(object):
def __init__(self, args):
# parameters
self.args = args
self.epoch_Review = args.epoch_Review
self.sample_num = 64
self.batch_size = args.batch_size
self.save_dir = args.save_dir
self.result_dir = args.result_dir
self.sample_dir = args.sample_dir
self.dataset = args.dataset
self.log_dir = args.log_dir
self.gpu_mode = args.gpu_mode
self.model_name = args.gan_type
self.data_dir = args.data_dir
self.gen_dir = args.gen_dir
self.verbose = args.verbose
self.lr = args.lrC
self.momentum = args.momentum
self.log_interval = 100
self.sample_num = 100
self.size_epoch = args.size_epoch
self.gan_type = args.gan_type
self.conditional = args.conditional
self.device = args.device
self.trainEval = args.trainEval
self.num_task = args.num_task
self.task_type = args.task_type
self.context = args.context
self.seed = args.seed
if self.conditional:
self.model_name = 'C' + self.model_name
# Load the generator parameters
# The reviewer evaluates the generated dataset (train loader) on true data (test loader)
# not sure yet whether valid should be real data or not (it was before)
dataset_train, dataset_valid, list_class_train, list_class_valid = load_dataset_full(self.data_dir,
args.dataset)
dataset_test, list_class_test = load_dataset_test(self.data_dir, args.dataset, args.batch_size)
# create data loader for validation and testing
self.valid_loader = get_iter_dataset(dataset_valid)
self.test_loader = get_iter_dataset(dataset_test)
if self.dataset == 'mnist':
self.input_size = 1
self.size = 28
elif self.dataset == 'fashion':
self.input_size = 1
self.size = 28
elif self.dataset == 'cifar10':
self.input_size = 3
self.size = 32
if self.dataset == 'mnist':
self.Classifier = Mnist_Classifier(args)
elif self.dataset == 'fashion':
self.Classifier = Fashion_Classifier(args)
elif self.dataset == 'cifar10':
self.Classifier = Cifar_Classifier(args)
else:
print('Not implemented')
# train the classifier on the given task
def train_classifier(self, epoch, data_loader_train, ind_task):
self.Classifier.net.train()
train_loss_classif, train_accuracy = self.Classifier.train_on_task(data_loader_train, ind_task=ind_task,
epoch=epoch,
additional_loss=None)
val_loss_classif, valid_accuracy, classe_prediction, classe_total, classe_wrong = self.Classifier.eval_on_task(
self.valid_loader, self.verbose)
if self.verbose:
print(
'Epoch: {} Train set: Average loss: {:.4f}, Accuracy: ({:.2f}%)\n Valid set: Average loss: {:.4f}, Accuracy: ({:.2f}%)'.format(
epoch, train_loss_classif, train_accuracy, val_loss_classif, valid_accuracy))
return train_loss_classif, train_accuracy, val_loss_classif, valid_accuracy, (
100. * classe_prediction) / classe_total
def compute_all_tasks_FID(self, args, Best=False):
if Best:
id = "Best_"
else:
id = ''
list_FID = []
for ind_task in range(self.num_task):
list_FID.append(self.compute_FID(args, ind_task, Best))
assert len(list_FID) == self.num_task
list_FID = np.array(list_FID)
np.savetxt(os.path.join(self.log_dir, id + 'Frechet_Inception_Distance_All_Tasks.txt'), list_FID)
def compute_FID(self, args, ind_task, Best=False):
if Best:
id = "Best_"
else:
id = ''
# load true data : upperbound_disjoint
if 'upperbound' in self.task_type:
test_file = self.task_type + '_' + str(self.num_task) + '_test.pt'
else:
test_file = 'upperbound_' + self.task_type + '_' + str(self.num_task) + '_test.pt'
true_DataLoader = DataLoader(torch.load(os.path.join(self.data_dir, 'Tasks', self.dataset, test_file)), args)[
self.num_task-1]
# load generated data
path = os.path.join(self.gen_dir, id + 'train_Task_' + str(ind_task) + '.pt')
gen_DataLoader = DataLoader(torch.load(path), args)
# compute FID
return self.Frechet_Inception_Distance(gen_DataLoader, true_DataLoader, ind_task)
def review(self, data_loader_train, task, Best=False):
if Best:
id = "Best_"
else:
id = ''
if self.dataset == 'mnist':
self.Classifier = Mnist_Classifier(self.args)
elif self.dataset == 'fashion':
self.Classifier = Fashion_Classifier(self.args)
else:
print('Not implemented')
best_accuracy = -1
train_loss = []
train_acc = []
val_loss = []
val_acc = []
valid_acc = []
valid_acc_classes = []
if self.verbose:
print("some sample from the generator")
path = os.path.join(self.sample_dir, id + 'samples4review_task_' + str(task) + '.png')
data_loader_train.visualize_sample(path, self.sample_num, [self.size, self.size, self.input_size])
print("Task : " + str(task))
early_stop = 0.
# Training classifier
for epoch in range(self.epoch_Review):
tr_loss, tr_acc, v_loss, v_acc, v_acc_classes = self.train_classifier(epoch, data_loader_train, task)
train_loss.append(tr_loss)
train_acc.append(tr_acc)
val_loss.append(v_loss)
val_acc.append(v_acc)
# Save best model
if v_acc > best_accuracy:
if self.verbose:
print("New Best Classifier")
print(v_acc)
best_accuracy = v_acc
self.save(best=True)
early_stop = 0.
if early_stop == 60:
break
else:
early_stop += 1
valid_acc.append(np.array(v_acc))
valid_acc_classes.append(np.array(v_acc_classes))
# Then load best model
self.load()
loss, test_acc, classe_prediction, classe_total, classe_wrong = self.Classifier.eval_on_task(
self.test_loader, self.verbose)
test_acc_classes = 100. * classe_prediction / classe_total
if self.verbose:
print('\nTest set: Average loss: {:.4f}, Accuracy : ({:.2f}%)'.format(
loss, test_acc))
for i in range(10):
print('Classe {} Accuracy: {}/{} ({:.3f}%, Wrong : {})'.format(
i, classe_prediction[i], classe_total[i],
100. * classe_prediction[i] / classe_total[i], classe_wrong[i]))
print('\n')
# loss, test_acc, test_acc_classes = self.test() # self.test_classifier(epoch)
np.savetxt(os.path.join(self.log_dir, id + 'data_classif_' + self.dataset + '-task' + str(task) + '.txt'),
np.transpose([train_loss, train_acc, val_loss, val_acc]))
np.savetxt(os.path.join(self.log_dir, id + 'best_score_classif_' + self.dataset + '-task' + str(task) + '.txt'),
np.transpose([test_acc]))
np.savetxt(
os.path.join(self.log_dir, id + 'data_classif_classes' + self.dataset + '-task' + str(task) + '.txt'),
np.transpose([test_acc_classes]))
return valid_acc, valid_acc_classes
def eval_on_train(self, data_loader_train, task):
if self.dataset == 'mnist':
self.Classifier = Mnist_Classifier(self.args)
elif self.dataset == 'fashion':
self.Classifier = Fashion_Classifier(self.args)
else:
print('Not implemented')
self.Classifier.load_expert()
self.Classifier.net.eval()
print("trainEval Task : " + str(task))
loss, train_acc, classe_prediction, classe_total, classe_wrong = self.Classifier.eval_on_task(data_loader_train, self.verbose)
train_acc_classes = 100. * classe_prediction / classe_total
if self.verbose:
print('\nTest set: Average loss: {:.4f}, Accuracy : ({:.2f}%)'.format(
loss, train_acc))
for i in range(10):
print('Classe {} Accuracy: {}/{} ({:.3f}%, Wrong : {})'.format(
i, classe_prediction[i], classe_total[i],
100. * classe_prediction[i] / classe_total[i], classe_wrong[i]))
print('\n')
return train_acc, train_acc_classes
def eval_balanced_on_train(self, data_loader_train):
cpt_classes = np.zeros(10)
for i, (data, target) in enumerate(data_loader_train):
for j in range(target.shape[0]):
cpt_classes[target[j]] += 1
print(cpt_classes.astype(int))
return cpt_classes.astype(int)
def review_all_tasks(self, args, Best=False):
# before launching the programme we check that all files are here so as not to lose time
for i in range(self.num_task):
if Best: # Best can be used only for Baseline
path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(i) + '.pt')
else:
path = os.path.join(self.gen_dir, 'train_Task_' + str(i) + '.pt')
assert os.path.isfile(path)
for i in range(self.num_task):
if Best: # Best can be used only for Baseline
path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(i) + '.pt')
else:
path = os.path.join(self.gen_dir, 'train_Task_' + str(i) + '.pt')
data_loader_train = DataLoader(torch.load(path), args)
self.review(data_loader_train, i, Best)
def review_all_trainEval(self, args, Best=False):
if Best:
id = "Best_"
else:
id = ''
list_trainEval = []
list_trainEval_classes = []
list_balance_classes = []
# before launching the programme we check that all files are here so as not to lose time
for i in range(self.num_task):
if Best: # Best can be used only for Baseline
path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(i) + '.pt')
else:
path = os.path.join(self.gen_dir, 'train_Task_' + str(i) + '.pt')
assert os.path.isfile(path)
for i in range(self.num_task):
if Best: # Best can be used only for Baseline
path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(i) + '.pt')
else:
path = os.path.join(self.gen_dir, 'train_Task_' + str(i) + '.pt')
data_loader_train = DataLoader(torch.load(path), args)
if self.conditional or Best:
train_acc, train_acc_classes = self.eval_on_train(data_loader_train, i)
list_trainEval.append(train_acc)
list_trainEval_classes.append(train_acc_classes)
else:
classe_balance = self.eval_balanced_on_train(data_loader_train)
list_balance_classes.append(classe_balance)
if self.conditional or Best:
assert len(list_trainEval) == self.num_task
list_trainEval = np.array(list_trainEval)
list_trainEval_classes = np.array(list_trainEval_classes)
np.savetxt(os.path.join(self.log_dir, id + 'TrainEval_All_Tasks.txt'), list_trainEval)
np.savetxt(os.path.join(self.log_dir, id + 'TrainEval_classes_All_Tasks.txt'), list_trainEval_classes)
else:
assert len(list_balance_classes) == self.num_task
np.savetxt(os.path.join(self.log_dir, id + 'Balance_classes_All_Tasks.txt'), list_balance_classes)
# save a classifier or the best classifier
def save(self, best=False):
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
if best:
torch.save(self.Classifier.net.state_dict(),
os.path.join(self.save_dir, self.model_name + '_Classifier_Best.pkl'))
else:
torch.save(self.Classifier.net.state_dict(),
os.path.join(self.save_dir, self.model_name + '_Classifier.pkl'))
# load the best classifier or the reference classifier trained on true data only
def load(self, reference=False):
if reference:
save_dir = os.path.join(self.save_dir, "..", "..", "..", "Classifier", 'seed_' + str(self.seed))
self.Classifier.net.load_state_dict(torch.load(os.path.join(save_dir, 'Classifier_Classifier_Best.pkl')))
else:
self.Classifier.net.load_state_dict(
torch.load(os.path.join(self.save_dir, self.model_name + '_Classifier_Best.pkl')))
def load_best_baseline(self):
# best seed searched in the list defined in the get_best_baseline function, liste_seed = [1, 2, 3, 4, 5, 6, 7, 8]
best_seed = utils.get_best_baseline(self.log_dir, self.dataset)
save_dir = os.path.join(self.save_dir, "..", "..", "..", "Classifier", 'seed_' + str(best_seed))
self.Classifier.net.load_state_dict(torch.load(os.path.join(save_dir, 'Classifier_Classifier_Best.pkl')))
def Frechet_Inception_Distance(self, Gen_DataLoader, True_DataLoader, ind_task):
eval_size = 50
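# FID compares Gaussian fits (mean and covariance) of classifier activations computed over real vs. generated samples.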
# 0. load reference classifier
# self.load_best_baseline() # we load the best classifier
self.Classifier.load_expert()
self.Classifier.net.eval()
if self.dataset == "mnist":
latent_size = 320
elif self.dataset == "fashion":
latent_size = 320
real_output_table = torch.FloatTensor(eval_size * self.batch_size, latent_size)
gen_output_table = torch.FloatTensor(eval_size * self.batch_size, latent_size)
# print("get activations on test data")
for i, (data, target) in enumerate(True_DataLoader):
if i >= eval_size or i >= (
int(len(True_DataLoader) / self.batch_size) - 1): # (we throw away the last batch)
break
if self.gpu_mode:
data, target = data.cuda(self.device), target.cuda(self.device)
batch = Variable(data)
label = Variable(target.squeeze())
activation = self.Classifier.net(batch, FID=True)
real_output_table[i * self.batch_size:(i + 1) * self.batch_size, :] = activation.data
# print("get activations on generated data")
Gen_DataLoader.shuffle_task()
for i, (data, target) in enumerate(Gen_DataLoader):
if i >= eval_size or i >= (
int(len(Gen_DataLoader) / self.batch_size) - 1): # (we throw away the last batch)
break
# 2. use the reference classifier to compute the output vector
if self.gpu_mode:
data, target = data.cuda(self.device), target.cuda(self.device)
batch = Variable(data)
label = Variable(target.squeeze())
activation = self.Classifier.net(batch, FID=True)
gen_output_table[i * self.batch_size:(i + 1) * self.batch_size, :] = activation.data
# compute mu_real and sigma_real
mu_real = real_output_table.cpu().numpy().mean(0)
cov_real = np.cov(real_output_table.cpu().numpy().transpose())
assert mu_real.shape[0] == latent_size
assert cov_real.shape[0] == cov_real.shape[1] == latent_size
mu_gen = gen_output_table.cpu().numpy().mean(0)
cov_gen = np.cov(gen_output_table.cpu().numpy().transpose())
assert mu_gen.shape[0] == latent_size
assert cov_gen.shape[0] == cov_gen.shape[1] == latent_size
Frechet_Inception_Distance = calculate_frechet_distance(mu_real, cov_real, mu_gen, cov_gen)
if self.verbose:
print("Frechet Inception Distance")
print(Frechet_Inception_Distance)
return Frechet_Inception_Distance
|
from django.conf.urls import patterns, url
from django.utils.translation import ugettext_lazy as _
from connect.discover import views
urlpatterns = patterns(
'',
url(_(r'^map/$'), views.member_map, name='map'),
)
|
from django.db.models import Model, CharField, DecimalField
class LeasingMode(Model):
"""yearly and weekly costs to own a vehicle"""
name = CharField(max_length=127)
factor_yearly = DecimalField(max_digits=4, decimal_places=2)
factor_weekly = DecimalField(max_digits=4, decimal_places=2)
def __str__(self):
return self.name
|
from tensorflow import keras
import tensorflow as tf
from tensorflow.keras.applications import vgg16
from tensorflow.keras.layers import Input
def get_loss_network():
loss_net = vgg16.VGG16(include_top=False, weights="imagenet", input_tensor=Input(shape=(256,256,3)))
loss_net_outputs = dict([(layer.name, layer.output) for layer in loss_net.layers])
loss_net_activations = keras.Model(inputs=loss_net.inputs, outputs=loss_net_outputs)
return loss_net_activations
def gram_matrix(x):
"""
Computes the gram matrix with batch dimension:
G = F @ F^T, where F is the (channels, height*width) flattening of x.
Inputs:
x -- tf.tensor with batch dimension (batch_dim, height, width, channels)
"""
x = tf.transpose(x, (0,3,1,2))
features = tf.reshape(x, (tf.shape(x)[0], tf.shape(x)[1], -1))
gram = tf.matmul(features, tf.transpose(features, (0,2,1)))
return gram
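# Quick shape sketch: a batch of two 4x4 feature maps with 3 channels yields
# one 3x3 gram matrix per batch element, e.g.
# gram_matrix(tf.random.uniform((2, 4, 4, 3))).shape == (2, 3, 3)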
def compute_content_loss(generated, content, dimensions):
"""
Computes the content loss from the given features.
Equation 2 in paper.
Args:
generated: Tensor feature map of the generated image.
content: Tensor feature map of the content image.
dimensions: List of layer dimensions [height, width, channels]
"""
# Check dimensions
assert generated.shape[0] == content.shape[0], "Batch dimensions of generated and content image don't match!"
height, width, channels = dimensions[0], dimensions[1], dimensions[2]
scaling_factor = (int(height/4) * int(width/4) * channels) # H, W, C
# Sum over all elements, including the batch_size to get average loss over the batch.
content_reconstruction_loss = tf.math.reduce_sum(tf.square(generated - content)) / (scaling_factor * generated.shape[0])
return content_reconstruction_loss
def compute_style_loss(generated, style, dimensions):
"""
Compute style loss for one layer.
"""
# Dimensions
height, width, channels = dimensions[0], dimensions[1], dimensions[2]
scaling_factor = (channels * height * width)**2
generated = gram_matrix(generated)
style = gram_matrix(style)
# Compute the total average loss over all elements in the batch.
res = tf.reduce_sum(tf.square(generated - style)) / (scaling_factor * generated.shape[0])
return res
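# Standalone sketch of the per-layer losses on random activations; `dimensions`
# is the layer's [height, width, channels], assumed 64x64x128 here:
# g = tf.random.uniform((2, 64, 64, 128))
# c = tf.random.uniform((2, 64, 64, 128))
# compute_content_loss(g, c, [64, 64, 128]); compute_style_loss(g, c, [64, 64, 128])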
def compute_perceptual_loss(generated_image, content_image, style_image, loss_net_activations, batch_size, content_layers, style_layers):
"""
Computes the loss with the loss network.
Args:
generated_image, content_image, style_image: tf.tensors scaled to [0,1] with dim (b,h,w,c), RGB.
"""
# Combine input tensors to make one pass with all in parallel.
input_tensors = tf.concat([generated_image, content_image, style_image], axis=0)
# Preprocess input_tensors for vgg16. Expects range [0, 255]
input_tensors = tf.keras.applications.vgg16.preprocess_input(input_tensors*255)
# Forward pass to get loss from loss network.
features = loss_net_activations(input_tensors, training=False)
# Initialize loss
loss = tf.zeros(shape=())
# Compute content loss
for content_layer in content_layers.keys():
layer_features = features[content_layer]
generated_features = layer_features[0:batch_size,:,:,:]
content_features = layer_features[batch_size:2*batch_size,:,:,:]
loss += compute_content_loss(generated_features, content_features, content_layers[content_layer])
# Compute style loss
for style_layer in style_layers.keys():
layer_features = features[style_layer]
generated_features = layer_features[0:batch_size,:,:,:]
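# All batch elements share one style image, so take its features once and broadcast them against the batch.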
style_features = layer_features[2*batch_size,:,:,:]
style_features = tf.expand_dims(style_features, 0)
loss += compute_style_loss(generated_features, style_features, style_layers[style_layer])
return loss
@tf.function
def compute_loss_and_grads(content_image, style_image, transform_network, optimizer, loss_net_activations, batch_size, content_layers, style_layers):
"""
Takes in content and style images as tf.tensors with batch dimension
and scaled to range [0,1].
"""
with tf.GradientTape() as tape:
# Forward pass
generated_image = transform_network(content_image, training=True)
# Convert to range [0,1]
generated_image = ((generated_image * 0.5) + 0.5)
# Get loss
loss = compute_perceptual_loss(generated_image, content_image, style_image, loss_net_activations, batch_size, content_layers, style_layers)
# Get gradients and update weights
grads = tape.gradient(loss, transform_network.trainable_weights)
optimizer.apply_gradients(zip(grads, transform_network.trainable_weights))
return loss
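# Hedged training-step sketch (transform_network, optimizer, and the content/style
# batches are external to this file; tensors are RGB in [0,1] with a batch dim):
# loss = compute_loss_and_grads(content_batch, style_batch, transform_network,
# optimizer, get_loss_network(), batch_size, content_layers, style_layers)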
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" VOC2012 DATASET EVALUATE """
import os
import time
import logging
import argparse
import cv2
import numpy
from src.dataset import pt_dataset, pt_transform
import src.utils.functions_args as fa
from src.utils.p_util import AverageMeter, intersectionAndUnion, check_makedirs, colorize
import mindspore.numpy as np
from mindspore import Tensor
import mindspore.dataset as ds
from mindspore import context
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore.train.serialization import load_param_into_net, load_checkpoint
cv2.ocl.setUseOpenCL(False)
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
device_id=device_id, save_graphs=False)
def get_parser():
"""
Read parameter file
-> for ADE20k: ./src/config/ade20k_pspnet50.yaml
-> for voc2012: ./src/config/voc2012_pspnet50.yaml
"""
parser = argparse.ArgumentParser(description='MindSpore Semantic Segmentation')
parser.add_argument('--config', type=str, required=True, default='./src/config/voc2012_pspnet50.yaml',
help='config file')
parser.add_argument('opts', help='see ./src/config/voc2012_pspnet50.yaml for all options', default=None,
nargs=argparse.REMAINDER)
args_ = parser.parse_args()
assert args_.config is not None
cfg = fa.load_cfg_from_cfg_file(args_.config)
if args_.opts is not None:
cfg = fa.merge_cfg_from_list(cfg, args_.opts)
return cfg
def get_logger():
""" logger """
logger_name = "main-logger"
logger_ = logging.getLogger(logger_name)
logger_.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger_.addHandler(handler)
return logger_
def check(local_args):
""" check args """
assert local_args.classes > 1
assert local_args.zoom_factor in [1, 2, 4, 8]
assert local_args.split in ['train', 'val', 'test']
if local_args.arch == 'psp':
assert (local_args.train_h - 1) % 8 == 0 and (local_args.train_w - 1) % 8 == 0
else:
raise Exception('architecture {} not supported yet'.format(local_args.arch))
def main():
""" The main function of the evaluate process """
check(args)
logger.info("=> creating model ...")
logger.info("Classes: %s", args.classes)
value_scale = 255
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
gray_folder = os.path.join(args.result_path, 'gray')
color_folder = os.path.join(args.result_path, 'color')
test_transform = pt_transform.Compose([pt_transform.Normalize(mean=mean, std=std, is_train=False)])
test_data = pt_dataset.SemData(
split='val', data_root=args.data_root,
data_list=args.val_list,
transform=test_transform)
test_loader = ds.GeneratorDataset(test_data, column_names=["data", "label"],
shuffle=False)
    test_loader = test_loader.batch(1)  # batch() returns a new dataset; keep the result
colors = numpy.loadtxt(args.color_txt).astype('uint8')
names = [line.rstrip('\n') for line in open(args.name_txt)]
from src.model import pspnet
PSPNet = pspnet.PSPNet(
feature_size=args.feature_size,
num_classes=args.classes,
backbone=args.backbone,
pretrained=False,
pretrained_path="",
aux_branch=False,
deep_base=True
)
ms_checkpoint = load_checkpoint(args.ckpt)
load_param_into_net(PSPNet, ms_checkpoint, strict_load=True)
PSPNet.set_train(False)
test(test_loader, test_data.data_list, PSPNet, args.classes, mean, std, args.base_size, args.test_h,
args.test_w, args.scales, gray_folder, color_folder, colors)
if args.split != 'test':
cal_acc(test_data.data_list, gray_folder, args.classes, names)
def net_process(model, image, mean, std=None, flip=True):
""" Give the input to the model"""
transpose = ops.Transpose()
input_ = transpose(image, (2, 0, 1)) # (473, 473, 3) -> (3, 473, 473)
    mean = np.array(mean)
    if std is None:
        input_ = input_ - mean[:, None, None]
    else:
        std = np.array(std)
        input_ = (input_ - mean[:, None, None]) / std[:, None, None]
expand_dim = ops.ExpandDims()
input_ = expand_dim(input_, 0)
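    # Test-time augmentation (flip averaging): optionally run a width-flipped copy
    # of the batch through the network and average the two softmax maps below.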
if flip:
flip_ = ops.ReverseV2(axis=[3])
flip_input = flip_(input_)
concat = ops.Concat(axis=0)
input_ = concat((input_, flip_input))
model.set_train(False)
output = model(input_)
_, _, h_i, w_i = input_.shape
_, _, h_o, w_o = output.shape
if (h_o != h_i) or (w_o != w_i):
bi_linear = nn.ResizeBilinear()
output = bi_linear(output, size=(h_i, w_i), align_corners=True)
softmax = nn.Softmax(axis=1)
output = softmax(output)
if flip:
flip_ = ops.ReverseV2(axis=[2])
output = (output[0] + flip_(output[1])) / 2
else:
output = output[0]
output = transpose(output, (1, 2, 0)) # Tensor
output = output.asnumpy()
return output
def scale_process(model, image, classes, crop_h, crop_w, h, w, mean, std=None, stride_rate=2 / 3):
""" Process input size """
ori_h, ori_w, _ = image.shape
pad_h = max(crop_h - ori_h, 0)
pad_w = max(crop_w - ori_w, 0)
pad_h_half = int(pad_h / 2)
pad_w_half = int(pad_w / 2)
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half,
cv2.BORDER_CONSTANT, value=mean)
new_h, new_w, _ = image.shape
image = Tensor.from_numpy(image)
stride_h = int(numpy.ceil(crop_h * stride_rate))
stride_w = int(numpy.ceil(crop_w * stride_rate))
grid_h = int(numpy.ceil(float(new_h - crop_h) / stride_h) + 1)
grid_w = int(numpy.ceil(float(new_w - crop_w) / stride_w) + 1)
prediction_crop = numpy.zeros((new_h, new_w, classes), dtype=float)
count_crop = numpy.zeros((new_h, new_w), dtype=float)
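    # Slide a crop_h x crop_w window over the padded image; overlapping predictions
    # are accumulated in prediction_crop and averaged by the per-pixel visit count.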
for index_h in range(0, grid_h):
for index_w in range(0, grid_w):
s_h = index_h * stride_h
e_h = min(s_h + crop_h, new_h)
s_h = e_h - crop_h
s_w = index_w * stride_w
e_w = min(s_w + crop_w, new_w)
s_w = e_w - crop_w
image_crop = image[s_h:e_h, s_w:e_w].copy()
count_crop[s_h:e_h, s_w:e_w] += 1
prediction_crop[s_h:e_h, s_w:e_w, :] += net_process(model, image_crop, mean, std)
prediction_crop /= numpy.expand_dims(count_crop, 2)
prediction_crop = prediction_crop[pad_h_half:pad_h_half + ori_h, pad_w_half:pad_w_half + ori_w]
prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)
return prediction
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h, crop_w, scales, gray_folder,
color_folder, colors):
""" Generate evaluate image """
logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
data_time = AverageMeter()
batch_time = AverageMeter()
model.set_train(False)
end = time.time()
for i, (input_, _) in enumerate(test_loader):
data_time.update(time.time() - end)
input_ = input_.asnumpy()
image = numpy.transpose(input_, (1, 2, 0))
h, w, _ = image.shape
prediction = numpy.zeros((h, w, classes), dtype=float)
for scale in scales:
long_size = round(scale * base_size)
new_h = long_size
new_w = long_size
if h > w:
new_w = round(long_size / float(h) * w)
else:
new_h = round(long_size / float(w) * h)
image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
prediction /= len(scales)
prediction = numpy.argmax(prediction, axis=2)
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % 10 == 0) or (i + 1 == len(data_list)):
logger.info('Test: [{}/{}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(data_list),
data_time=data_time,
batch_time=batch_time))
check_makedirs(gray_folder)
check_makedirs(color_folder)
gray = numpy.uint8(prediction)
color = colorize(gray, colors)
image_path, _ = data_list[i]
image_name = image_path.split('/')[-1].split('.')[0]
gray_path = os.path.join(gray_folder, image_name + '.png')
color_path = os.path.join(color_folder, image_name + '.png')
cv2.imwrite(gray_path, gray)
color.save(color_path)
logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
def cal_acc(data_list, pred_folder, classes, names):
""" Calculation evaluating indicator """
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
for i, (image_path, target_path) in enumerate(data_list):
image_name = image_path.split('/')[-1].split('.')[0]
pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'), cv2.IMREAD_GRAYSCALE)
target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
if args.prefix == 'ADE':
target -= 1
intersection, union, target = intersectionAndUnion(pred, target, classes)
intersection_meter.update(intersection)
union_meter.update(union)
target_meter.update(target)
accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
logger.info(
'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(i + 1, len(data_list), image_name + '.png',
accuracy))
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU = numpy.mean(iou_class)
mAcc = numpy.mean(accuracy_class)
allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
for i in range(classes):
logger.info('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(i, iou_class[i], accuracy_class[i],
names[i]))
if __name__ == '__main__':
args = get_parser()
logger = get_logger()
main()
|
from go_ml_transpiler.model.xgboost.regressor import Regressor
from go_ml_transpiler.utils.model.xgboost import build_tree
import os
class XGBRegressor(Regressor):
SUPPORTED_OBJECTIVE = [
"reg:linear"
]
SUPPORTED_BOOSTER = [
"gbtree",
"dart",
]
def __init__(self, model, indent, **kwargs):
super(XGBRegressor, self).__init__(model=model, indent=indent, **kwargs)
self._sanity_check()
def transpile(self, package_name, method_name, export_method, float_type, **kwargs):
low_method_name = method_name.lower()
boosters = []
booster_calls = []
for i, booster in enumerate(self.raw_boosters):
booster = booster.split()
boosters.append(
self.template("booster.template").format(
package_name=package_name,
import_packages=self.template("import.template").format(
packages="\n".join(
['{0}"{1}"'.format(self.indent, package) for package in self.import_packages]))
if self.import_packages else "",
method_name=low_method_name,
method_index=i,
booster=build_tree(
booster,
indent=self.indent,
missing_condition=self.missing_condition,
float_type=float_type),
float_type=float_type
)
)
booster_calls.append(
"\n".join(
[
("" if i == 0 else self.indent) + line
for i, line in enumerate(
self.template("booster_call.template").format(
method_name=low_method_name,
method_index=i
).splitlines())
]
)
)
k = {
"package_name": package_name,
"method_name": method_name.capitalize() if export_method else method_name,
"method_calls": "\n".join([("" if i == 0 else self.indent) + line for i, line in enumerate(booster_calls)]),
"n_classes": 1,
"base_score": float(self.model.base_score),
"float_type": float_type
}
method = self.template("method.template").format(**k)
self.transpiled_model = {"transpiled_model": {"boosters": boosters, "method": method}}
return self.transpiled_model
def write(self, directory):
if self.transpiled_model is None:
raise ValueError("You should first transpile the model")
for i, booster in enumerate(self.transpiled_model["transpiled_model"]["boosters"]):
with open(os.path.join(directory, "booster{}.go".format(i)), "w") as f:
f.write(booster)
with open(os.path.join(directory, "predict.go"), "w") as f:
f.write(self.transpiled_model["transpiled_model"]["method"])
def _sanity_check(self):
if self.model.objective not in self.SUPPORTED_OBJECTIVE:
raise ValueError("Unsupported objective: {}".format(self._objective_error))
if self.model.booster not in self.SUPPORTED_BOOSTER:
raise ValueError("Unsupported booster: {}".format(self._booster_error))
|
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class User:
id: int
username: str
    nickname: Optional[str] = ""
    avatar: Optional[str] = None
email: Optional[str] = None
school: Optional[str] = None
def __repr__(self) -> str:
return f"<User {self.username}>"
def __hash__(self):
return self.id
@dataclass
class Team:
members: List[User]
|
#!/usr/bin/python3
# Filename: mysql_createtable.py
import pymysql
# Open the database connection (PyMySQL >= 1.0 requires keyword arguments)
db = pymysql.connect(host='localhost', user='root', password='1234', database='fdtest')
# Use the cursor() method to create a cursor object
cursor = db.cursor()
# Use the execute() method to run an SQL statement
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
# Create the table with a prepared statement
sql = """CREATE TABLE EMPLOYEE(
FIRST_NAME CHAR(20) NOT NULL,
LAST_NAME CHAR(20),
AGE INT,
SEX CHAR(1),
INCOME FLOAT)"""
cursor.execute(sql)
# Close the database connection
db.close()
|
#Parts of code in this file have been taken (copied) from https://github.com/ml-jku/lsc
#Copyright (C) 2018 Andreas Mayr
nrLayers = hyperParams.iloc[paramNr].nrLayers
nrNodes = hyperParams.iloc[paramNr].nrNodes
basicArchitecture = hyperParams.iloc[paramNr].basicArchitecture
nrInputFeatures = nrDenseFeatures + nrSparseFeatures
hiddenLayerSizes = [nrNodes] * nrLayers
layerSizes = [nrInputFeatures] + hiddenLayerSizes + [nrOutputTargets]
if basicArchitecture == "selu":
activationFunction = actLib.selu
dropoutFunction = actLib.dropout_stableVariance
idropoutFunction = actLib.dropout_stableVariance
initScale = 1.0
tf.reset_default_graph()
if "session" in dir():
session.close()
session = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
if nrDenseFeatures > 0.5:
xDenseData = tf.placeholder(tf.float32, [None, nrDenseFeatures])
sh0 = tf.shape(xDenseData)[0]
if nrSparseFeatures > 0.5:
xIndices = tf.placeholder(tf.int64, [None, 2])
xValues = tf.placeholder(tf.float32, [None])
xDim = tf.placeholder(tf.int64, [2])
xSparseData = tf.SparseTensor(indices=xIndices, values=xValues, dense_shape=xDim)
sparseMeanInit = tf.placeholder(tf.float32, [1, nrSparseFeatures])
sparseMean = tf.Variable(tf.zeros([1, nrSparseFeatures]), trainable=False, dtype=tf.float32)
sh0 = tf.shape(xSparseData)[0]
yDenseData = tf.placeholder(tf.float32, [None, nrOutputTargets])
yIndices = tf.placeholder(tf.int64, [None, 2])
yValues = tf.placeholder(tf.float32, [None])
yDim = tf.placeholder(tf.int64, [2])
ySparseData = tf.SparseTensor(indices=yIndices, values=yValues, dense_shape=yDim)
ySparseMask = tf.SparseTensor(indices=yIndices, values=tf.ones_like(yValues), dense_shape=yDim)
inputDropout = tf.placeholder(tf.float32)
hiddenDropout = tf.placeholder(tf.float32)
lrGeneral = tf.placeholder(tf.float32)
lrWeight = tf.placeholder(tf.float32)
lrBias = tf.placeholder(tf.float32)
l2PenaltyWeight = tf.placeholder(tf.float32)
l2PenaltyBias = tf.placeholder(tf.float32)
l1PenaltyWeight = tf.placeholder(tf.float32)
l1PenaltyBias = tf.placeholder(tf.float32)
mom = tf.placeholder(tf.float32)
biasInit = tf.placeholder(tf.float32, [nrOutputTargets])
is_training = tf.placeholder(tf.bool)
weightTensors = []
biasTensors = []
hidden = []
hiddenAct = []
hiddenActMod = []
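# Layer 0 (inputs): wrap the raw dense/sparse features and apply the input dropout masks.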
with tf.variable_scope('layer_' + str(0)):
hiddenActl = []
hiddenActModl = []
if nrDenseFeatures > 0.5:
hiddenActl.append(xDenseData)
hiddenActModl.append(
xDenseData * tf.to_float(tf.random_uniform([sh0, tf.shape(xDenseData)[1]]) < (1.0 - inputDropout)))
if nrSparseFeatures > 0.5:
hiddenActl.append(xSparseData)
if not (normalizeGlobalSparse or normalizeLocalSparse):
hiddenActModl.append(tf.sparse_retain(xSparseData, tf.random_uniform([tf.shape(xSparseData.values)[0]]) < (
1.0 - inputDropout)))
else:
hiddenActModl.append(xSparseData)
hiddenActInit = hiddenActl
hiddenActModInit = hiddenActModl
weightTensors.append(None)
biasTensors.append(None)
hidden.append(None)
hiddenAct.append(hiddenActl)
hiddenActMod.append(hiddenActModl)
idTensors = []
hdTensors = []
layernr = 1
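# First hidden layer: separate weight matrices for the dense and sparse input blocks;
# the sparse feature mean is folded into the bias via sparseMeanWSparse.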
with tf.variable_scope('layer_' + str(layernr)):
wList = []
if nrDenseFeatures > 0.5:
WlDense = tf.get_variable("W" + str(layernr) + "_dense", trainable=True,
initializer=tf.random_normal([nrDenseFeatures, layerSizes[layernr]], stddev=np.sqrt(
initScale / float(layerSizes[layernr - 1]))))
wList.append(WlDense)
if nrSparseFeatures > 0.5:
WlSparse = tf.get_variable("W" + str(layernr) + "_sparse", trainable=True,
initializer=tf.random_normal([nrSparseFeatures, layerSizes[layernr]], stddev=np.sqrt(
initScale / float(layerSizes[layernr - 1]))))
wList.append(WlSparse)
sparseMeanWSparse = tf.matmul(sparseMean, WlSparse)
bl = tf.get_variable('b' + str(layernr), shape=[layerSizes[layernr]], trainable=True,
initializer=tf.zeros_initializer())
regRaw = l2PenaltyBias * tf.nn.l2_loss(bl) + l1PenaltyBias * tf.reduce_sum(tf.abs(bl))
if nrDenseFeatures > 0.5:
if nrSparseFeatures > 0.5:
regRaw = regRaw + l2PenaltyWeight * (tf.nn.l2_loss(WlSparse) + tf.nn.l2_loss(WlDense)) + l1PenaltyWeight * (
tf.reduce_sum(tf.abs(WlSparse)) + tf.reduce_sum(tf.abs(WlDense)))
hiddenl = tf.matmul(hiddenActModl[0], WlDense) + tf.sparse_tensor_dense_matmul(hiddenActModl[1],
WlSparse) + (
bl + sparseMeanWSparse)
else:
regRaw = regRaw + l2PenaltyWeight * tf.nn.l2_loss(WlDense) + l1PenaltyWeight * tf.reduce_sum(
tf.abs(WlDense))
hiddenl = tf.matmul(hiddenActModl[0], WlDense) + bl
else:
if nrSparseFeatures > 0.5:
regRaw = regRaw + l2PenaltyWeight * tf.nn.l2_loss(WlSparse) + l1PenaltyWeight * tf.reduce_sum(
tf.abs(WlSparse))
hiddenl = tf.sparse_tensor_dense_matmul(hiddenActModl[0], WlSparse) + (bl + sparseMeanWSparse)
hiddenActModl = hiddenl * tf.to_float(
tf.random_uniform([sh0, tf.shape(hiddenl)[1]]) < (1.0 - hiddenDropout))
hiddenActl = activationFunction(hiddenActModl)
weightTensors.append(wList)
biasTensors.append(bl)
hidden.append(hiddenl)
hiddenAct.append(hiddenActl)
hiddenActMod.append(hiddenActModl)
if nrDenseFeatures > 0.5:
hdTensors.append(WlDense)
idTensors.append(WlDense)
if nrSparseFeatures > 0.5:
hdTensors.append(WlSparse)
if not (normalizeGlobalSparse or normalizeLocalSparse):
idTensors.append(WlSparse)
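# Middle hidden layers: plain dense layers with dropout, activation and L1/L2 penalties.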
for layernr in range(2, len(layerSizes) - 1):
with tf.variable_scope('layer_' + str(layernr)):
Wl = tf.get_variable("W" + str(layernr), trainable=True,
initializer=tf.random_normal([layerSizes[layernr - 1], layerSizes[layernr]],
stddev=np.sqrt(initScale / float(layerSizes[layernr - 1]))))
bl = tf.get_variable('b' + str(layernr), shape=[layerSizes[layernr]], trainable=True,
initializer=tf.zeros_initializer())
regRaw = regRaw + l2PenaltyWeight * tf.nn.l2_loss(Wl) + l1PenaltyWeight * tf.reduce_sum(
tf.abs(Wl)) + l2PenaltyBias * tf.nn.l2_loss(bl) + l1PenaltyBias * tf.reduce_sum(tf.abs(bl))
hiddenl = tf.matmul(hiddenActl, Wl) + bl
hiddenActModl = hiddenl * tf.to_float(
tf.random_uniform([sh0, tf.shape(hiddenl)[1]]) < (1.0 - hiddenDropout))
hiddenActl = activationFunction(hiddenActModl)
weightTensors.append(Wl)
biasTensors.append(bl)
hidden.append(hiddenl)
hiddenAct.append(hiddenActl)
hiddenActMod.append(hiddenActModl)
hdTensors.append(Wl)
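# Output layer: linear logits, no activation or dropout.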
layernr = len(layerSizes) - 1
with tf.variable_scope('layer_' + str(layernr)):
Wl = tf.get_variable("W" + str(layernr), trainable=True,
initializer=tf.random_normal([layerSizes[layernr - 1], layerSizes[layernr]],
stddev=np.sqrt(initScale / float(layerSizes[layernr - 1]))))
bl = tf.get_variable('b' + str(layernr), shape=[layerSizes[layernr]], trainable=True,
initializer=tf.zeros_initializer())
regRaw = regRaw + l2PenaltyWeight * tf.nn.l2_loss(Wl) + l1PenaltyWeight * tf.reduce_sum(
tf.abs(Wl)) + l2PenaltyBias * tf.nn.l2_loss(bl) + l1PenaltyBias * tf.reduce_sum(tf.abs(bl))
hiddenl = tf.matmul(hiddenActl, Wl) + bl
weightTensors.append(Wl)
biasTensors.append(bl)
hidden.append(hiddenl)
hiddenAct.append(None)
hiddenActMod.append(None)
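# Dense-label loss: labels are +/-1 with 0 marking missing entries, so naMat masks
# the missing targets out of the per-sample cross-entropy sum.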
naMat = tf.where(tf.abs(yDenseData) < 0.5, tf.zeros_like(yDenseData), tf.ones_like(yDenseData))
lossRawDense = tf.nn.sigmoid_cross_entropy_with_logits(labels=(yDenseData + 1.0) / 2.0, logits=hiddenl) * naMat
errOverallDense = tf.reduce_mean(tf.reduce_sum(lossRawDense, 1)) + regRaw
predNetworkDense = tf.nn.sigmoid(hiddenl)
optimizerDense = tf.train.MomentumOptimizer(momentum=mom, learning_rate=lrGeneral).minimize(errOverallDense)
hiddenlSelected = tf.gather_nd(hiddenl, yIndices)
lossRawSelected = tf.nn.sigmoid_cross_entropy_with_logits(labels=(yValues + 1.0) / 2.0, logits=hiddenlSelected)
lossRawSparse = tf.SparseTensor(indices=yIndices, values=lossRawSelected, dense_shape=yDim)
errOverallSparse = tf.reduce_mean(tf.sparse_reduce_sum(lossRawSparse, 1)) + regRaw
predNetworkSparse = tf.nn.sigmoid(hiddenlSelected)
optimizerSparse = tf.train.MomentumOptimizer(momentum=mom, learning_rate=lrGeneral).minimize(errOverallSparse)
predNetwork = tf.nn.sigmoid(hiddenl)
class MyNoOp:
op = tf.no_op()
init = tf.global_variables_initializer()
biasInitOp = biasTensors[-1].assign(biasInit)
if nrSparseFeatures > 0.5:
sparseMeanWSparseOp = MyNoOp()
sparseMeanInitOp = sparseMean.assign(sparseMeanInit)
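# Weight rescaling ops: compensate for dropout masking by rescaling the weights
# when switching between training and prediction.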
scaleTrainHd = [tf.assign(hdTensors[i], hdTensors[i] / tf.sqrt(1.0 - hiddenDropout)).op for i in
range(0, len(hdTensors))]
scalePredictHd = [tf.assign(hdTensors[i], hdTensors[i] * tf.sqrt(1.0 - hiddenDropout)).op for i in
range(0, len(hdTensors))]
scaleTrainId = [tf.assign(idTensors[i], idTensors[i] / (1.0 - inputDropout)).op for i in range(0, len(idTensors))]
scalePredictId = [tf.assign(idTensors[i], idTensors[i] * (1.0 - inputDropout)).op for i in range(0, len(idTensors))]
checkNA = [tf.reduce_any(tf.is_nan(x)) for x in weightTensors[1] + weightTensors[2:] + biasTensors[1:]]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenAppYiyiyiwuQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenAppYiyiyiwuQueryResponse, self).__init__()
self._chucan = None
@property
def chucan(self):
return self._chucan
@chucan.setter
def chucan(self, value):
self._chucan = value
def parse_response_content(self, response_content):
response = super(AlipayOpenAppYiyiyiwuQueryResponse, self).parse_response_content(response_content)
if 'chucan' in response:
self.chucan = response['chucan']
|
def leiaInt(txt):
while True:
try:
n = int(input(txt))
        except ValueError:
            print('ERRO! DIGITE UM NÚMERO INTEIRO VÁLIDO!')
else:
break
return n
def leiaFloat(txt):
while True:
try:
n = float(input(txt).replace(',', '.'))
        except ValueError:
            print('ERRO! DIGITE UM NÚMERO REAL VÁLIDO!')
else:
break
return n
# Main program
n = leiaInt('Digite um número inteiro: ')
f = leiaFloat('Digite um número real: ')
print(f'Você acabou de digitar os números {n} e {f}')
|
"""
Created by Epic at 9/1/20
"""
import logging
from asyncio import AbstractEventLoop
__all__ = ("OpcodeDispatcher", "EventDispatcher")
class OpcodeDispatcher:
"""
Receives events identified by their opcode, and handles them by running them through the event loop.
Parameters
----------
loop: AbstractEventLoop
An AbstractEventLoop used to create callbacks.
"""
def __init__(self, loop: AbstractEventLoop):
self.logger = logging.getLogger("speedcord.dispatcher")
self.loop = loop
# A dict of the opcode int and a list of coroutines to execute once
# an event is sent
self.event_handlers = {}
def dispatch(self, opcode, *args, **kwargs):
"""
Dispatches an event to listeners registered to this opcode.
Parameters
----------
opcode: int
The opcode of the event sent by Discord API.
"""
self.logger.debug("Dispatching event with opcode: " + str(opcode))
for event in self.event_handlers.get(opcode, []):
self.loop.create_task(event(*args, **kwargs))
def register(self, opcode, func):
"""
Register a handler for a specific opcode. This handler will be called whenever a matching opcode is dispatched.
Parameters
----------
opcode: int
The opcode from Discord to listen to.
func: Callable[[DefaultShard], Any]
The function that will be called when the event is dispatched.
"""
event_handlers = self.event_handlers.get(opcode, [])
event_handlers.append(func)
self.event_handlers[opcode] = event_handlers
class EventDispatcher:
"""
Receives events identified by their name and handles them by running them through the event loop.
Parameters
----------
loop: AbstractEventLoop
An AbstractEventLoop used to create callbacks.
"""
def __init__(self, loop: AbstractEventLoop):
self.logger = logging.getLogger("speedcord.dispatcher")
self.loop = loop
# A dict of the event name and a list of coroutines to execute once
        # an event is sent
self.event_handlers = {}
def dispatch(self, event_name, *args, **kwargs):
"""
Dispatches an event to listeners registered to specified event_name.
Parameters
----------
event_name: str
The name of the event sent by Discord API.
*args: Any
Positional arguments to call the event function with.
**kwargs: Any
Keyword-only arguments to call the event function with.
"""
self.logger.debug("Dispatching event with name: " + str(event_name))
for event in self.event_handlers.get(event_name, []):
self.loop.create_task(event(*args, **kwargs))
def register(self, event_name, func):
"""
Register a handler for a specific event. This handler will be called whenever an event matching the
registered event_name is dispatched.
Parameters
----------
event_name: str
The event name from Discord to listen to.
func: Callable[[DefaultShard], Any]
The function that will be called when the event is dispatched.
"""
event_name = event_name.upper()
event_handlers = self.event_handlers.get(event_name, [])
event_handlers.append(func)
self.event_handlers[event_name] = event_handlers
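# A minimal usage sketch (not part of the library): register a coroutine on an
# EventDispatcher and dispatch one event through a local asyncio loop. The event
# name and payload below are illustrative assumptions, not fixed Discord types.
if __name__ == "__main__":
    import asyncio

    async def on_message_create(payload):
        print("MESSAGE_CREATE received:", payload)

    loop = asyncio.new_event_loop()
    dispatcher = EventDispatcher(loop)
    dispatcher.register("message_create", on_message_create)  # stored upper-cased
    dispatcher.dispatch("MESSAGE_CREATE", {"content": "hello"})
    loop.run_until_complete(asyncio.sleep(0))  # let the scheduled task run
    loop.close()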
|
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from elasticsearch import helpers
import json
import time
class ElasticSearchClient(object):
    # Client for a single node; multi-node setups would require refactoring this class
def __init__(self, host="localhost", port=9200):
self.host = host
self.port = port
self.es_servers = [{
"host": self.host,
"port": self.port
}]
        # http_auth (user/password) is only needed when the ES cluster has security enabled
try:
self.es_client = Elasticsearch(hosts=self.es_servers)
except Exception as e:
print(e)
            print('Failed to connect to Elasticsearch; please check the connection.')
    # Create an index (analogous to creating a database)
def create_index(self, index_name, settings=None):
"""
        Create an index.
:param index_name:
:param settings:
setting = {
"settings": {
"number_of_replicas": 1,
"number_of_shards": 1
},
"mappings": {
"properties": {
"docid": {
"type": "text"
},
"doc": {
"type": "text"
},
}
}
}
:return:
"""
if not self.es_client.indices.exists(index=index_name, ignore=[400, 404]):
if settings is not None:
self.es_client.indices.create(index=index_name, body=settings)
else:
self.es_client.indices.create(index=index_name)
    # Delete an index
def delete_es_index(self, index_name):
if self.es_client.indices.exists(index=index_name, ignore=[400, 404]):
self.es_client.indices.delete(index=index_name)
def set_index_mapping(self, index, mappings):
        # Set the mapping structure
        """
        Set the mapping of an index (similar to a table schema).
        NOTE: only the 'properties' key of the mapping is handled here; many other parameters exist.
        Precondition: the index already exists and any custom analyzer is defined; see https://blog.csdn.net/u013905744/article/details/80935846
        Example input:
mapping = {
'properties': {
'doc': {
'type': 'text',
'analyzer': 'whitespace',
'search_analyzer': 'whitespace',
},
'docid': {
'type': 'keyword',
}
}
}
"""
self.es_client.indices.put_mapping(index=index, body=mappings)
def add_date(self, index, data):
"""
        Insert a single document into ES.
"""
self.es_client.index(index=index, body=data)
def add_date_bulk(self, index, data, batch_size=2000):
"""
        Bulk-insert documents into ES; the input is a list of single-insert payloads.
"""
actions = []
success_num = 0
fail_num = 0
start = time.time()
batch_step = 1
for idx, data_dict in enumerate(data):
action = {
"_index": index,
"_id": idx,
"_source": data_dict
}
actions.append(action)
            # flush the current batch
if len(actions) == batch_size or idx == len(data) - 1:
success, failed = bulk(self.es_client, actions, raise_on_error=True)
actions = []
success_num += success
fail_num += len(failed)
                print(f'{batch_step}: inserted {success} documents, {len(failed)} failed')
batch_step += 1
end = time.time()
        print(f'Inserted {success_num} documents in total, {fail_num} failed, took {end - start} seconds')
def update_by_id(self, index, idx, data):
"""
        Update an ES document identified by the given _id.
:return:
"""
self.es_client.update(index=index, body={"doc": data}, id=idx)
def delete_by_id(self, index, idx):
"""
        Delete the document with the given id.
:return:
"""
self.es_client.delete(index=index, id=idx)
def search_by_query(self, index, query, return_list=False):
'''
        Search with the given query body.
'''
search_result = self.es_client.search(index=index, body=query)
if return_list:
search_result = self.get_result_list(search_result)
return search_result
    # Convert the search response into a list
def get_result_list(self, es_response):
final_result = []
result_items = es_response['hits']['hits']
for item in result_items:
final_result.append({
"score": item['_score'],
"result": item['_source']
}
)
return final_result
    # Scan/scroll query
def search_by_scan(self, index, query, scroll='5m', timeout="1m", threshold=100):
es_result = helpers.scan(
client=self.es_client,
query=query,
scroll=scroll,
index=index,
timeout=timeout,
preserve_order=True
)
final_result = []
for item in es_result:
if item['_score'] < threshold:
break
final_result.append({
"score": item['_score'],
"result": item['_source']
}
)
return final_result
if __name__ == '__main__':
es = ElasticSearchClient(host="localhost", port=9200)
print(es)
    # # Create an index
# index = "test"
# es.delete_es_index(index_name=index)
# es.create_index(index_name=index)
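    # # Example search (a sketch; the index name and "doc" field are placeholders
    # # taken from the docstrings above):
    # query = {"query": {"match": {"doc": "keyword"}}, "size": 5}
    # for hit in es.search_by_query(index="test", query=query, return_list=True):
    #     print(hit["score"], hit["result"])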
|
import csv
import encoding_fix
import json
###if you don't have access to the API ####
with open("data/residential_permits_2010-2016.json") as json_infile:
bp_api_data = json.load(json_infile)
json_infile.close()
#iterate over the JSON records and write them to CSV
with open('data/residential_permits_2010-2016.csv', 'w') as csv_outfile:
writer = csv.writer(csv_outfile)
#first write the titles that will appear at the head of each column in the CSV
writer.writerow(('permit id', 'applicant','address', 'description', 'category', 'value', 'issue date', 'latitude', 'longitude'))
for x in bp_api_data:
try:
            #remove the time from the date/time stamp
x['issue_date'] = x['issue_date'][:10]
#write the data for each permit application onto a single row in the CSV
writer.writerow((x['application_permit_number'], x['applicant_name'], x['address'], x['description'], x['category'], x['value'], x['issue_date'], x['latitude'], x['longitude']))
        except (KeyError, TypeError):
            # skip permits with missing or malformed fields
            continue
|
from pwn import *
flag = input("flag: ").replace("\n", "")
ip, port = input("service: ").split(":")
r = remote(str(ip), int(port))
r.sendline("l")
r.sendline("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
r.sendline("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbtaw")
r.sendline("n")
r.sendline("Fetch watpop KNAAN 𝓯𝓵𝓪𝓰")
res = str(r.recvuntil("}", timeout=10))
print(res)
if flag in res:
exit(0)
else:
exit(1)
|
"""User API endpoints test module."""
from tests import utils
class UserAPITests(utils.APITestBase):
"""User API integration tests."""
|
__title__ = "mpsiem_api"
__description__ = "Basic MaxPatrol SEIM API wrapper"
__version__ = "0.0.2"
|
import typing
import discord
from discord.ext import commands
class Guilds(commands.Cog):
def __init__(self, client):
self.client = client
    @commands.Cog.listener()
    async def on_guild_join(self, guild):
channel = self.client.get_channel(604741076900642826)
await self.client.pool.execute('''INSERT INTO guilds(guild_id, prefix, count) VALUES(%s, $$%s$$, %s) ON CONFLICT DO NOTHING''' % (int(guild.id), '.', int(guild.member_count)))
        await channel.send(f'{guild.name} with {guild.member_count} members added {self.client.user.name}')
@commands.command(aliases=['enlistguild', 'enlistserver', 'guildenlist', 'serverenlist'])
async def _enlistguild(self, ctx, confirm: typing.Optional[str] = ''):
        if ctx.author.id != ctx.guild.owner.id and ctx.author.id != 153699972443799552:
await ctx.send(f'Only the server owner, {ctx.guild.owner.name}, can enlist the server as a guild')
return
if confirm == '':
embed = discord.Embed(title=f'Enlist {ctx.guild.name}', description='')
embed.add_field(name='What does enlisting do?', value='Enlisting your server as a guild will allow you to compete with other guilds. As your guild progresses, your members gain bonuses when they participate in your server.')
embed.add_field(name='How to enlist', value=f'If you wish to enlist, please write `{ctx.prefix}enlistguild yes`.')
await ctx.send(embed=embed)
elif confirm == 'yes':
guild_id = ctx.guild.id
await self.client.pool.execute('''INSERT INTO enlisted_guilds(guild_id, boost, xp, level) VALUES (%s, %s, %s, %s)''' % (guild_id, 0, 0, 0))
await ctx.send(f'{ctx.guild.name} has been enlisted as a guild.')
@commands.command(aliases=['guild', 'server'])
async def _guild(self, ctx, screen: typing.Optional[str] = '', number: typing.Optional[str] = ''):
if screen in ['', 'info', 'i']:
# Showing the guild profile screen
guild_db = await self.client.pool.fetchrow('''SELECT * FROM enlisted_guilds WHERE guild_id = %s''' % ctx.guild.id)
            if guild_db is None:
                if ctx.author.id == ctx.guild.owner.id or ctx.author.id == 153699972443799552:
                    await ctx.send(f'{ctx.guild.name} is not enlisted as a guild! Please do `{ctx.prefix}enlistguild` for more information.')
                else:
                    await ctx.send(f'{ctx.guild.name} is not enlisted as a guild! Please ask {ctx.guild.owner.name} to do `{ctx.prefix}enlistguild` for more information.')
                return
embed = discord.Embed(title=f'{ctx.guild.name} Guild Profile')
xp = guild_db['xp']
embed.add_field(name='XP', value=f'{xp} XP', inline=False)
levels = guild_db['level']
embed.add_field(name='Level', value=f'{levels}', inline=False)
boost = guild_db['boost']
embed.add_field(name='Boost', value=f'{boost+1}x', inline=False)
else:
await ctx.send('Command not recognized.')
def setup(client):
client.add_cog(Guilds(client))
|
from pathlib import Path
from pylexibank.dataset import Dataset as BaseDataset
from pylexibank.util import pb
from pylexibank.forms import FormSpec
# Customize your basic data.
# if you need to store other data in columns than the lexibank defaults, then over-ride
# the table type and add the required columns e.g.
#
# import attr
# from pylexibank import Concept as BaseConcept ( or Language, Lexeme, or Cognate)
#
# @attr.s
# class Concept(BaseConcept):
# MyAttribute1 = attr.ib(default=None)
class Dataset(BaseDataset):
dir = Path(__file__).parent
id = "template" # TODO - update this to match your datasets's name!
# add your personalized data types here (or language_class, lexeme_class, cognate_class)
#concept_class = MyConcept
# define the way in which forms should be handled
form_spec = FormSpec(
brackets = {"(": ")"}, # characters that function as brackets
separators = ";/,", # characters that split forms e.g. "a, b".
missing_data = ('?', '-'), # characters that denote missing data.
strip_inside_brackets = True # do you want data removed in brackets or not?
)
def cmd_download(self, args):
with self.raw_dir.temp_download("http://www.example.com", "example.tsv") as data:
self.raw_dir.write_csv('template.csv', data)
def cmd_makecldf(self, args):
"""
Convert the raw data to a CLDF dataset.
"""
data = self.raw_dir.read_csv('template.csv', dicts=True)
# short cut to add concepts and languages, provided your name spaces
# match lexibank's expected format.
args.writer.add_concepts()
args.writer.add_languages()
# if not, then here is a more detailed way to do it:
# for concept in self.concepts:
# args.writer.add_concept(
# ID=concept['ID'],
# Name=concept['ENGLISH'],
# Concepticon_ID=concept['CONCEPTICON_ID']
# )
# for language in self.languages:
# args.writer.add_language(
# ID=language['ID'],
# Glottolog=language['Glottolog']
# )
# add data
for row in pb(data, desc='cldfify'):
# .. if you have segmentable data, replace `add_form` with `add_form_with_segments`
# .. TODO @Mattis, when should we use add_forms_from_value() instead?
lex = args.writer.add_form(
Language_ID=row['Language_ID'],
Parameter_ID=row['Parameter_ID'],
Value=row['Word'],
Form=row['Word'],
Source=[row['Source']],
)
# add cognates -- make sure Cognateset_ID is global!
args.writer.add_cognate(
lexeme=lex,
                Cognateset_ID=row['Cognateset_ID']
)
|
from abc import ABC, abstractmethod
from collections.abc import Mapping
from typing import Any, Optional
__all__ = ["Client"]
class Client(ABC):
@abstractmethod
async def request(
self,
method: str,
url: str,
*,
headers: Optional[Mapping[str, str]] = None,
) -> Any:
pass
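# A minimal concrete sketch (illustrative, not part of this package): an
# aiohttp-backed Client returning the response body as text. The aiohttp
# dependency and the text return type are assumptions.
class AiohttpClient(Client):
    async def request(self, method, url, *, headers=None):
        import aiohttp  # assumed dependency, imported locally to keep it optional

        async with aiohttp.ClientSession() as session:
            async with session.request(method, url, headers=headers) as resp:
                return await resp.text()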
|
#! python3
import spiceypy as spice
import numpy as np
from numpy.linalg import norm
if __name__ == "__main__":
#1.
spice.furnsh('lessons/insitu_sensing/kernels/lsk/naif0012.tls')
cass = -82
utc = '2004-06-11T19:32:00'
et = spice.utc2et(utcstr=utc)
print(f'Date and Time: {utc}')
print(f'Insitu Sensing: et, = {et}')
#2.
spice.furnsh('lessons/insitu_sensing/kernels/sclk/cas00084.tsc')
scepch = '1465674964.105'
    scet = spice.scs2e(sc=cass, sclkch=scepch)
print(f'CASSINI clock epoch = {scet}')
#3.
    spice.furnsh([#'lessons/insitu_sensing/kernels/sclk/cas00084.tsc',
                  #'lessons/insitu_sensing/kernels/pck/cpck05Mar2004.tpc',
                  'lessons/insitu_sensing/kernels/spk/030201AP_SK_SM546_T45.bsp',
                  'lessons/insitu_sensing/kernels/spk/981005_PLTEPH-DE405S.bsp'])
cs_state = spice.spkezr(targ=str(cass), et=scet, ref='eclipj2000', abcorr='none', obs='sun')[0]
print(f'position relative to Sun: [x,y,z]=[{cs_state[0]:0.6f}, {cs_state[1]:0.6f}, {cs_state[2]:0.6f}]km')
print(f'velocity rela to Sun: [vx,vy,vz]=[{cs_state[3]:0.6f}, {cs_state[4]:0.6f}, {cs_state[5]:0.6f}]km/s')
spice.kclear()
#4.
    spice.furnsh(['lessons/insitu_sensing/kernels/lsk/naif0012.tls',
                  'lessons/insitu_sensing/kernels/sclk/cas00084.tsc',
                  #'lessons/insitu_sensing/kernels/pck/cpck05Mar2004.tpc',
                  'lessons/insitu_sensing/kernels/ck/04135_04171pc_psiv2.bc',
                  'lessons/insitu_sensing/kernels/fk/cas_v37.tf',
                  'lessons/insitu_sensing/kernels/spk/030201AP_SK_SM546_T45.bsp',
                  'lessons/insitu_sensing/kernels/spk/981005_PLTEPH-DE405S.bsp'])
cs2sun_pos = spice.spkpos(targ='sun', et=scet, ref='CASSINI_INMS', abcorr='LT+S', obs='CASSINI')[0]
print(f'sunpos relative to cass: [x,y,z]=[{cs2sun_pos[0]:0.6f}, {cs2sun_pos[1]:0.6f}, {cs2sun_pos[2]:0.6f}]km')
cs2sun_pos_hat = spice.vhat(cs2sun_pos)
print(f'unit vectors: [x,y,z]=[{cs2sun_pos_hat[0]:0.6f}, {cs2sun_pos_hat[1]:0.6f}, {cs2sun_pos_hat[2]:0.6f}]')
spice.kclear()
#5.
    spice.furnsh(['lessons/insitu_sensing/kernels/lsk/naif0012.tls',
                  'lessons/insitu_sensing/kernels/sclk/cas00084.tsc',
                  'lessons/insitu_sensing/kernels/pck/cpck05Mar2004.tpc',
                  'lessons/insitu_sensing/kernels/ck/04135_04171pc_psiv2.bc',
                  'lessons/insitu_sensing/kernels/fk/cas_v37.tf',
                  'lessons/insitu_sensing/kernels/spk/020514_SE_SAT105.bsp',
                  'lessons/insitu_sensing/kernels/spk/030201AP_SK_SM546_T45.bsp',
                  'lessons/insitu_sensing/kernels/spk/981005_PLTEPH-DE405S.bsp'])
spoint, _, surfvec = spice.subpnt(method='NEAR POINT: ELLIPSOID', target='phoebe', et=scet, fixref='IAU_PHOEBE', abcorr='none', obsrvr='cassini')
# print(f'Sub-spacecraft point on Phoebe (IAU_Phoebe) [xyz] = {spoint} km')
# print(f'Vector from Cassini to sub-spacecraft point [xyz] = {surfvec} km')
rho, lon, lat = spice.reclat(rectan=spoint)
lon = np.rad2deg(lon)
lat = np.rad2deg(lat)
print(f'Sub-spacecraft point on Phoebe [lon, lat, rho] = [{lon:0.6f} deg, {lat:0.6f} deg, {rho:0.6f}] km')
surfvec_hat = surfvec / np.linalg.norm(surfvec)
T_ph2inms = spice.pxform(fromstr='IAU_PHOEBE', tostr='CASSINI_INMS', et=scet)
surfvec_hat_inms = spice.mxv(m1=T_ph2inms, vin=surfvec_hat)
print(f'Vector from Cassini to sub-spacecraft point (INMSf) [xyz] = {surfvec_hat_inms}')
#6
state_cass = spice.spkezr(targ='cassini', et=scet, ref='j2000', abcorr='none', obs='phoebe')[0][3:6]
T_j2000toinms = spice.pxform(fromstr='j2000', tostr='cassini_inms', et=scet)
state_cass = spice.mxv(m1=T_j2000toinms, vin=state_cass)
vel_cass = state_cass
vel_cass_hat = vel_cass / np.linalg.norm(vel_cass)
print(f'Velocity of Cassini relative to Phoebe = {vel_cass_hat}')
|
from crypto.constants import TRANSACTION_TIMELOCK_TRANSFER
from crypto.transactions.builder.base import BaseTransactionBuilder
class TimelockTransfer(BaseTransactionBuilder):
transaction_type = TRANSACTION_TIMELOCK_TRANSFER
def __init__(self, fee=None):
"""Create a timelock transaction
Args:
fee (int, optional): fee used for the transaction (default is already set)
"""
super().__init__()
if fee:
self.transaction.fee = fee
|
solution = NumberOfValidWordsForEachPuzzle()
assert X == solution.findNumOfValidWords()
|
import torch
from new_model import NetworkNew
import argparse
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--group_id', default=0, type=int, help='the id of compressed layer, starting from 0')
args = parser.parse_args()
print(args)
def main():
# 1. create compressed model
vgg16_new = NetworkNew(group_id=args.group_id)
# Phase 2 : Model setup
    vgg16_new = vgg16_new.cuda()
    vgg16_new = torch.nn.DataParallel(vgg16_new, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
new_model_param = vgg16_new.state_dict()
torch.save(new_model_param, '../checkpoint/model.pth')
print('Finished!')
if __name__ == '__main__':
main()
|
import os
import re
import tempfile
from datetime import date
import chirps
import geopandas as gpd
import pytest
import rasterio
import responses
from click.testing import CliRunner
from fsspec.implementations.http import HTTPFileSystem
from fsspec.implementations.local import LocalFileSystem
from s3fs import S3FileSystem
def test_filesystem():
assert isinstance(chirps.filesystem("/tmp/file.txt"), LocalFileSystem)
assert isinstance(chirps.filesystem("http://example.com/"), HTTPFileSystem)
assert isinstance(chirps.filesystem("s3://bucket/dir"), S3FileSystem)
# assert isinstance(chirps.filesystem("gcs://bucket/dir"), GCSFileSystem)
with pytest.raises(ValueError):
chirps.filesystem("bad://bucket/dir")
@pytest.fixture
def bfa_raw_data(boto_client):
boto_client.create_bucket(Bucket="bfa-raw-data")
for fname in os.listdir(
os.path.join(os.path.dirname(__file__), "bfa-raw-data/2017")
):
with open(
os.path.join(os.path.dirname(__file__), "bfa-raw-data/2017", fname), "rb"
) as f:
boto_client.put_object(
Bucket="bfa-raw-data", Key=f"2017/{fname}", Body=f.read()
)
@pytest.fixture(params=["s3", "file"])
def download_location(boto_client, request):
if request.param == "s3":
dirname = "s3://chirps-download"
boto_client.create_bucket(Bucket="chirps-download")
storage_options = {
"client_kwargs": {"endpoint_url": os.environ["AWS_S3_ENDPOINT"]}
}
elif request.param == "file":
dirname = os.path.join(os.path.dirname(__file__), "chirps-download")
storage_options = {}
else:
raise NotImplementedError
return request.param, dirname, storage_options
@pytest.fixture
def bfa_output_data(boto_client):
boto_client.create_bucket(Bucket="bfa-output-data")
@pytest.fixture(scope="session")
def mock_chc():
with responses.RequestsMock() as mocked_responses:
def get_callback(request):
file_name = (
"chirps-v2.0.2017.05.05.tif.gz"
if request.url.endswith(".gz")
else "chirps-v2.0.2017.05.05.tif"
)
with open(
os.path.join(os.path.dirname(__file__), "sample_tifs", file_name), "rb"
) as f:
return 200, {"Content-Type": "application/x-gzip"}, f.read()
def head_callback(request):
return 200, {"Content-Type": "application/x-gzip"}, b""
mocked_responses.add_callback(
responses.GET,
re.compile("https://data.chc.ucsb.edu/(.*)"),
callback=get_callback,
)
mocked_responses.add_callback(
responses.HEAD,
re.compile("https://data.chc.ucsb.edu/(.*)"),
callback=head_callback,
)
yield
def test__compress():
src_raster = os.path.join(
os.path.dirname(__file__), "sample_tifs", "chirps-v2.0.2017.05.05.tif"
)
with tempfile.TemporaryDirectory() as tmp_dir:
dst_raster = os.path.join(tmp_dir, "raster.tif")
chirps._compress(src_raster, dst_raster)
with rasterio.open(dst_raster) as src:
assert src.profile.get("compress").lower() == "deflate"
def test__download(mock_chc):
url = (
"https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/p05/"
"2017/chirps-v2.0.2017.05.05.tif.gz"
)
with tempfile.TemporaryDirectory() as tmp_dir:
dst_file = os.path.join(tmp_dir, "chirps.tif")
dst_file = chirps._download(url, dst_file)
assert os.path.isfile(dst_file)
@pytest.fixture
def catalog(mock_chc):
return chirps.Chirps(version="2.0", zone="africa", timely="daily")
def test_chirps_base_url(catalog):
assert (
catalog.base_url
== "https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/p05"
)
def test_chirps_fname(catalog):
assert catalog.fname(date(2020, 1, 1)) == "chirps-v2.0.2020.01.01.tif.gz"
# no .gz after 2020-06-01
assert catalog.fname(date(2021, 7, 1)) == "chirps-v2.0.2021.07.01.tif"
def test_chirps_download(catalog, mock_chc):
with tempfile.TemporaryDirectory() as tmp_dir:
dst_file = catalog.download(day=date(2017, 5, 5), output_dir=tmp_dir)
with rasterio.open(dst_file) as src:
assert src.profile.get("width")
def test_chirps_download_range(catalog, mock_chc):
with tempfile.TemporaryDirectory() as tmp_dir:
catalog.download_range(
start=date(2017, 4, 15), end=date(2017, 5, 15), output_dir=tmp_dir
)
def test_chirps_path():
data_dir = os.path.join(os.path.dirname(__file__), "bfa-raw-data")
expected = os.path.join(data_dir, "2017", "chirps-v2.0.2017.05.05.tif")
assert chirps.chirps_path(data_dir, date(2017, 5, 5)) == expected
def test_raster_cumsum():
bfa = gpd.read_file(os.path.join(os.path.dirname(__file__), "bfa.geojson"))
data_dir = os.path.join(os.path.dirname(__file__), "bfa-raw-data")
rasters = [
os.path.join(data_dir, "2017", f)
for f in [
"chirps-v2.0.2017.05.01.tif",
"chirps-v2.0.2017.05.02.tif",
"chirps-v2.0.2017.05.03.tif",
"chirps-v2.0.2017.05.04.tif",
]
]
cumsum, affine, nodata = chirps.raster_cumsum(rasters, bfa.total_bounds)
assert cumsum.min() >= 0
assert cumsum.max() <= 100
assert cumsum.mean() == pytest.approx(14.5, abs=1)
assert affine
def test_weekly_stats():
contours = gpd.read_file(os.path.join(os.path.dirname(__file__), "bfa.geojson"))
data_dir = os.path.join(os.path.dirname(__file__), "bfa-raw-data")
start = date(2017, 4, 1)
end = date(2017, 7, 1)
stats = chirps.weekly_stats(contours, start, end, chirps_dir=data_dir)
assert len(stats) > 50
# todo: better quality checks
def test_monthly_stats():
contours = gpd.read_file(os.path.join(os.path.dirname(__file__), "bfa.geojson"))
data_dir = os.path.join(os.path.dirname(__file__), "bfa-raw-data")
start = date(2017, 4, 1)
end = date(2017, 7, 1)
stats = chirps.monthly_stats(contours, start, end, chirps_dir=data_dir)
assert len(stats) > 10
# todo: better quality checks
def test__iter_month_days():
assert len([d for d in chirps._iter_month_days(2020, 2)]) == 29
assert len([d for d in chirps._iter_month_days(2021, 2)]) == 28
def test_epi_week():
epiw = chirps.EpiWeek(date(2020, 1, 1))
assert epiw.start == date(2019, 12, 29)
assert epiw.end == date(2020, 1, 4)
assert epiw.year == 2020
assert epiw.week == 1
assert chirps.EpiWeek(date(2020, 1, 2)) == chirps.EpiWeek(date(2019, 12, 30))
def test_epiweek_range():
start = date(2020, 1, 1)
end = date(2020, 3, 15)
wrange = chirps.epiweek_range(start, end)
assert len(wrange) == 12
def test_cli_download(moto_server, bfa_output_data, mock_chc):
output_dir = os.path.join("s3://bfa-output-data/africa/daily")
runner = CliRunner()
result = runner.invoke(
chirps.cli,
[
"download",
"--output-dir",
output_dir,
"--start",
"2017-04-30",
"--end",
"2017-06-01",
],
)
assert result.exit_code == 0
def test_cli_extract(moto_server, bfa_raw_data, bfa_output_data):
chirps_dir = "s3://bfa-raw-data"
contours = os.path.join(os.path.dirname(__file__), "bfa.geojson")
output_dir = "s3://bfa-output-data/rainfall"
runner = CliRunner()
result = runner.invoke(
chirps.cli,
[
"extract",
"--input-dir",
chirps_dir,
"--contours",
contours,
"--start",
"2017-04-30",
"--end",
"2017-06-01",
"--output-dir",
output_dir,
],
)
assert result.exit_code == 0
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2014 Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Configuration for bmr samples.
"""
import logging
from baidubce.bce_client_configuration import BceClientConfiguration
from baidubce.auth.bce_credentials import BceCredentials
HOST = 'bmr.bj.baidubce.com'
AK = 'USER-AK'
SK = 'USER-SK'
logger = logging.getLogger('baidubce.services.bmr.bmrclient')
fh = logging.FileHandler('bmr_sample.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
config = BceClientConfiguration(credentials=BceCredentials(AK, SK), endpoint=HOST)
check_cluster_max_retry_time = 20
check_cluster_interval_sec = 60
|
import pytest
from d3rlpy.argument_utility import (
check_action_scaler,
check_encoder,
check_q_func,
check_scaler,
check_use_gpu,
)
from d3rlpy.gpu import Device
from d3rlpy.models.encoders import DefaultEncoderFactory
from d3rlpy.models.q_functions import MeanQFunctionFactory
from d3rlpy.preprocessing.action_scalers import MinMaxActionScaler
from d3rlpy.preprocessing.scalers import MinMaxScaler
@pytest.mark.parametrize("value", ["default", DefaultEncoderFactory()])
def test_check_encoder(value):
assert isinstance(check_encoder(value), DefaultEncoderFactory)
@pytest.mark.parametrize("value", ["mean", MeanQFunctionFactory()])
def test_check_q_func(value):
assert isinstance(check_q_func(value), MeanQFunctionFactory)
@pytest.mark.parametrize("value", ["min_max", MinMaxScaler(), None])
def test_check_scaler(value):
scaler = check_scaler(value)
if value is None:
assert scaler is None
else:
assert isinstance(scaler, MinMaxScaler)
@pytest.mark.parametrize("value", ["min_max", MinMaxActionScaler(), None])
def test_check_action_scaler(value):
scaler = check_action_scaler(value)
if value is None:
assert scaler is None
else:
assert isinstance(scaler, MinMaxActionScaler)
@pytest.mark.parametrize("value", [False, True, 0, Device(0)])
def test_check_use_gpu(value):
device = check_use_gpu(value)
if type(value) == bool and value:
assert device.get_id() == 0
elif type(value) == bool and not value:
assert device is None
elif type(value) == int:
assert device.get_id() == 0
elif isinstance(value, Device):
assert device.get_id() == 0
|
import pickle
import os
import sys
import time
import numpy as np
from colorama import Fore, Back, Style
from ACSN_processing import ACSN_processing
from ACSN_initialization import ACSN_initialization
from ACSN_processing_parallel import ACSN_processing_parallel
from ACSN_processing_video import ACSN_processing_video
from numba import jit
import cupy as cp
# # varargin is a list with different datatypes
# # for python it will be a dictionary
# # varargin = {"Gain": xxx, "Offset": xxx, "Hotspot": xxx, "Level": xxx,
# # "Mode": xxx, "SaveFileName": xxx,
# # "Video": xxx, "Window": xxx, "Alpha": xxx, "QualityMap": xxx}
def ACSN(I,NA,Lambda,PixelSize,varargin):
start = time.perf_counter()
# Assumes I is a 3D variable
Qscore = np.zeros((I.shape[2], 1))
sigma = np.zeros((I.shape[2], 1))
img = np.zeros(I.shape)
Qmap = np.zeros(I.shape[0])
I, Gain, Offset, Hotspot, Level, Mode, SaveFileName, Video, Window, alpha, QM, weight = ACSN_initialization(I, varargin=varargin)
## main theme
if (Mode == "Fast"):
img, Qmap, Qscore = ACSN_processing_parallel(I, NA, Lambda, PixelSize, Gain, Offset, Hotspot, QM, Qmap, Qscore, sigma, img, Video, weight)
elif (Video == "yes"):
img, Qmap, Qscore = ACSN_processing_video(I, NA, Lambda, PixelSize, Gain, Offset, Hotspot, QM, Qmap, Qscore, sigma, img, Video, weight)
else:
img, Qmap, Qscore = ACSN_processing(I, NA, Lambda, PixelSize, Gain, Offset, Hotspot, QM, Qmap, Qscore, sigma, img, weight)
## finale
end = time.perf_counter()
print("Elapsed Time: " + str(end - start) + " seconds" +"\n")
print("Average Quality: ")
Av_qI = np.mean(Qscore.flatten())
if (Av_qI >= 0.6):
print("High: " + str(Av_qI) + "\n")
elif (abs(Av_qI - 0.5) < 0.1):
print("Medium: " + str(Av_qI) + "\n")
else:
print("Low: " + str(Av_qI) + "\n")
return Qscore, sigma, img, SaveFileName
|
import os.path
from SConsRevision import SCons_revision
from Utilities import is_windows, whereis, platform, deb_date
from zip_utils import unzipit, zipit, zcat
from soe_utils import soelim, soscan, soelimbuilder
# from epydoc import epydoc_cli, epydoc_commands
from BuildCommandLine import BuildCommandLine
gzip = whereis('gzip')
git = os.path.exists('.git') and whereis('git')
unzip = whereis('unzip')
zip_path = whereis('zip')
BuildCommandLine.git = git
|
import os
import sys
import numpy as np
import keras
from keras_bert import load_vocabulary, load_trained_model_from_checkpoint, Tokenizer
from keras_bert.layers import MaskedGlobalMaxPool1D
if len(sys.argv) != 2:
print('python load_model.py UNZIPPED_MODEL_PATH')
sys.exit(-1)
print('This demo demonstrates how to load the pre-trained model and extract the sentence embedding with pooling.')
model_path = sys.argv[1]
config_path = os.path.join(model_path, 'bert_config.json')
checkpoint_path = os.path.join(model_path, 'bert_model.ckpt')
dict_path = os.path.join(model_path, 'vocab.txt')
model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=10)
pool_layer = MaskedGlobalMaxPool1D(name='Pooling')(model.output)
model = keras.models.Model(inputs=model.inputs, outputs=pool_layer)
model.summary(line_length=120)
token_dict = load_vocabulary(dict_path)
tokenizer = Tokenizer(token_dict)
text = '语言模型'
tokens = tokenizer.tokenize(text)
print('Tokens:', tokens)
indices, segments = tokenizer.encode(first=text, max_len=10)
predicts = model.predict([np.array([indices]), np.array([segments])])[0]
print('Pooled:', predicts.tolist()[:5])
|
import numpy as np
import os.path as osp
import cv2
import torch
import torch.nn.functional as F
from pointmvsnet.utils.io import mkdir
from pointmvsnet.functions.functions import get_pixel_grids
def file_logger(data_batch, preds, step, output_dir, prefix):
step_dir = osp.join(output_dir, "{}_step{:05d}".format(prefix, step))
mkdir(step_dir)
print("start saving files in ", step_dir)
img_list = data_batch["img_list"]
batch_size, num_view, img_channel, img_height, img_width = list(img_list.size())
cam_params_list = data_batch["cam_params_list"]
for i in range(num_view):
np.savetxt(osp.join(step_dir, "img{}.txt".format(i)), img_list[0, i, 0].detach().cpu().numpy(), fmt="%.4f")
np.savetxt(osp.join(step_dir, "cam{}_extrinsic.txt".format(i)), cam_params_list[0, i, 0].detach().cpu().numpy(), fmt="%.4f")
np.savetxt(osp.join(step_dir, "cam{}_intrinsic.txt".format(i)), cam_params_list[0, i, 1].detach().cpu().numpy(), fmt="%.4f")
np.savetxt(osp.join(step_dir, "gt_depth_img.txt"), data_batch["gt_depth_img"][0, 0].detach().cpu().numpy(), fmt="%.4f")
np.savetxt(osp.join(step_dir, "coarse_depth_img.txt"), preds["coarse_depth_map"][0, 0].detach().cpu().numpy(), fmt="%.4f")
cam_extrinsic = cam_params_list[0, 0, 0, :3, :4].clone() # (3, 4)
cam_intrinsic = cam_params_list[0, 0, 1, :3, :3].clone()
world_points = preds["world_points"]
world_points = world_points[0].cpu().numpy().transpose()
save_points(osp.join(step_dir, "world_points.xyz"), world_points)
prob_map = preds["coarse_prob_map"][0][0].cpu().numpy()
coarse_points = depth2pts(preds["coarse_depth_map"], prob_map,
cam_intrinsic, cam_extrinsic, (img_height, img_width))
save_points(osp.join(step_dir, "coarse_point.xyz"), coarse_points)
gt_points = depth2pts(data_batch["gt_depth_img"], prob_map,
cam_intrinsic, cam_extrinsic, (img_height, img_width))
save_points(osp.join(step_dir, "gt_points.xyz"), gt_points)
if "flow1" in preds.keys():
flow1_points = depth2pts(preds["flow1"], prob_map,
cam_intrinsic, cam_extrinsic, (img_height, img_width))
save_points(osp.join(step_dir, "flow1_points.xyz"), flow1_points)
if "flow2" in preds.keys():
flow2_points = depth2pts(preds["flow2"], prob_map,
cam_intrinsic, cam_extrinsic, (img_height, img_width))
save_points(osp.join(step_dir, "flow2_points.xyz"), flow2_points)
print("saving finished.")
def depth2pts(depth_map, prob_map, cam_intrinsic, cam_extrinsic, img_size):
feature_map_indices_grid = get_pixel_grids(depth_map.size(2), depth_map.size(3)).to(depth_map.device) # (3, H*W)
curr_cam_intrinsic = cam_intrinsic.clone()
scale = (depth_map.size(2) + 0.0) / (img_size[0] + 0.0) * 4.0
curr_cam_intrinsic[:2, :3] *= scale
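    # Back-project the pixel grid to camera space: X_cam = depth * K^-1 [u, v, 1]^T;
    # the inverse extrinsics below then map camera points to world coordinates.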
uv = torch.matmul(torch.inverse(curr_cam_intrinsic), feature_map_indices_grid)
cam_points = uv * depth_map[0].view(1, -1)
R = cam_extrinsic[:3, :3]
t = cam_extrinsic[:3, 3].unsqueeze(-1)
R_inv = torch.inverse(R)
world_points = torch.matmul(R_inv, cam_points - t).detach().cpu().numpy().transpose()
curr_prob_map = prob_map.copy()
if curr_prob_map.shape[0] != depth_map.size(2):
curr_prob_map = cv2.resize(curr_prob_map, (depth_map.size(3), depth_map.size(2)),
interpolation=cv2.INTER_LANCZOS4)
curr_prob_map = np.reshape(curr_prob_map, (-1, 1))
world_points = np.concatenate([world_points, curr_prob_map], axis=1)
return world_points
def save_points(path, points):
np.savetxt(path, points, delimiter=' ', fmt='%.4f')
|
import re
test_string = "123qwe678 -ABC91011- vyz"
|
from __future__ import print_function
import numpy as np
import yt
from hyperion.model import Model
import matplotlib as mpl
mpl.use('Agg')
import powderday.config as cfg
from powderday.grid_construction import arepo_vornoi_grid_generate
from hyperion.dust import SphericalDust
from powderday.helpers import energy_density_absorbed_by_CMB
def arepo_m_gen(fname,field_add):
reg,ds,dustdens = arepo_vornoi_grid_generate(fname,field_add)
xcent = ds.quan(cfg.model.x_cent,'code_length').to('cm') #proper cm
ycent = ds.quan(cfg.model.y_cent,'code_length').to('cm')
zcent = ds.quan(cfg.model.z_cent,'code_length').to('cm')
boost = np.array([xcent,ycent,zcent])
print ('[arepo_tributary/vornoi_m_gen]: boost = ',boost)
#========================================================================
#Initialize Hyperion Model
#========================================================================
m = Model()
#because we boost the stars to a [0,0,0] coordinate center, we
    #want to make sure our Voronoi tessellation is created in the same manner.
particle_x = reg["gascoordinates"][:,0].to('cm')
particle_y = reg["gascoordinates"][:,1].to('cm')
particle_z = reg["gascoordinates"][:,2].to('cm')
#just for the sake of symmetry, pass on a dx,dy,dz since it can be
#used optionally downstream in other functions.
dx = 2.* ds.quan(cfg.par.zoom_box_len,'kpc').to('cm')
dy = 2.* ds.quan(cfg.par.zoom_box_len,'kpc').to('cm')
dz = 2.* ds.quan(cfg.par.zoom_box_len,'kpc').to('cm')
print ('[arepo_tributary] boost = ',boost)
print ('[arepo_tributary] xmin (pc)= ',(xcent-dx/2.).to('pc'))
print ('[arepo_tributary] xmax (pc)= ',(xcent+dx/2.).to('pc'))
print ('[arepo_tributary] ymin (pc)= ',(ycent-dy/2.).to('pc'))
print ('[arepo_tributary] ymax (pc)= ',(ycent+dy/2.).to('pc'))
print ('[arepo_tributary] zmin (pc)= ',(zcent-dz/2.).to('pc'))
print ('[arepo_tributary] zmax (pc)= ',(zcent+dz/2.).to('pc'))
x_pos_boost = (particle_x-xcent).to('cm')
y_pos_boost = (particle_y-ycent).to('cm')
z_pos_boost = (particle_z-zcent).to('cm')
m.set_voronoi_grid(x_pos_boost.value, y_pos_boost.value, z_pos_boost.value)
#get CMB:
energy_density_absorbed=energy_density_absorbed_by_CMB()
specific_energy = np.repeat(energy_density_absorbed.value,dustdens.shape)
if cfg.par.PAH == True:
# load PAH fractions for usg, vsg, and big (grain sizes)
frac = cfg.par.PAH_frac
# Normalize to 1
total = np.sum(list(frac.values()))
frac = {k: v / total for k, v in frac.items()}
for size in frac.keys():
d = SphericalDust(cfg.par.dustdir+'%s.hdf5'%size)
if cfg.par.SUBLIMATION == True:
d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
#m.add_density_grid(dustdens * frac[size], cfg.par.dustdir+'%s.hdf5' % size)
m.add_density_grid(dustdens*frac[size],d,specific_energy=specific_energy)
m.set_enforce_energy_range(cfg.par.enforce_energy_range)
else:
d = SphericalDust(cfg.par.dustdir+cfg.par.dustfile)
if cfg.par.SUBLIMATION == True:
d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
m.add_density_grid(dustdens,d,specific_energy=specific_energy)
#m.add_density_grid(dustdens,cfg.par.dustdir+cfg.par.dustfile)
m.set_specific_energy_type('additional')
return m,xcent,ycent,zcent,dx.value,dy.value,dz.value,reg,ds,boost
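# A hedged usage sketch (not part of the original module): downstream code
# would typically configure photon counts and write the Hyperion input file:
#   m, xc, yc, zc, dx, dy, dz, reg, ds, boost = arepo_m_gen(fname, field_add)
#   m.set_n_photons(initial=1e6, imaging=1e6)  # placeholder photon counts
#   m.write('model.rtin')                      # hypothetical output name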
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import scipy.sparse as sp
import pandas as pd
import numpy as np
def get_position(num_roads=49):
'''
:return: shape is [1, 49]
    49 is the number of roads
'''
return np.array([[i for i in range(num_roads)]], dtype=np.int32)
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def normalize_adj(adj):
'''
:param adj: Symmetrically normalize adjacency matrix
:return:
'''
    adj = sp.coo_matrix(adj)  # convert to a sparse COO representation
    rowsum = np.array(adj.sum(1))  # row sums of the adjacency matrix (node degrees)
    d_inv_sqrt = np.power(rowsum, -0.5).flatten()  # take sqrt, invert, flatten to a 1-D array
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.  # zero-degree nodes produce inf; zero them out
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
'''
    :param adj: raw adjacency matrix. Self-loops are added (A = A + I), the
    result is symmetrically normalized (preprocessing for a simple GCN model),
    and the output is converted to tuple representation.
example [[1,0,0],[0,1,0],[0,0,1]]
:return:
'''
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
print('adj_normalized shape is : ', adj_normalized.shape)
return sparse_to_tuple(adj_normalized)
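# A small self-contained check (assumed example): a 3-node chain graph pushed
# through the same preprocessing; A + I has 7 non-zeros, so coords is (7, 2).
if __name__ == '__main__':
    demo_adj = np.array([[0, 1, 0],
                         [1, 0, 1],
                         [0, 1, 0]])
    coords, values, shape = preprocess_adj(demo_adj)
    print(coords.shape, values.shape, shape)  # (7, 2) (7,) (3, 3)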
def adjecent(adj_file='adjacent.csv', num_roads=49):
'''
:return: adj matrix
'''
data = pd.read_csv(filepath_or_buffer=adj_file)
adj = np.zeros(shape=[num_roads, num_roads])
for line in data[['src_FID', 'nbr_FID']].values:
adj[line[0]][line[1]] = 1
return adj
def get_max_min():
    '''
    :return: the per-feature max values and min values of the input features
    '''
    max_list = [106007, 106007, 2020, 8, 31, 23, 148.021, 360]
    min_list = [2005, 2005, 2020, 5, 1, 0, 0.0, 0]
    return max_list, min_list
def normalization(data=None):
'''
:param data:
:return:
'''
    min_value = 0.000000000001  # epsilon guard against division by zero
    max, min = get_max_min()  # note: shadows the builtins within this function
    shape = data.shape
    for i in range(1, 3):
        data[:, shape[-1]-i] = (data[:, shape[-1]-i] - np.array(min[shape[-1]-i])) / \
                               (np.array(max[shape[-1]-i]) - np.array(min[shape[-1]-i]) + min_value)
return data
def re_recover(a, max=148.021, min=0.0):
'''
:param a: represent a list or one-dimension array.
:param max: max number
:param min: min number
:return: op-normalization list or one-dimension array.
'''
return [num * (max - min) + min for num in a]
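# Quick sanity check (assumed values): with the default max/min, a normalized
# value of 0.5 maps back to 0.5 * (148.021 - 0.0) + 0.0 = 74.0105, e.g.
# re_recover([0.0, 0.5, 1.0]) -> [0.0, 74.0105, 148.021]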
def feature_split(data, time_size=6, prediction_size=1, num_roads=49):
'''
:return: features: [batch size * input time size, num roads, features],
days: [input time size + prediction time size, num roads],
hours: [input time size + prediction time size, num roads].
'''
low=int(data.shape[0]//num_roads * 0.9) * num_roads
(features, days, hours) = (data[:time_size * num_roads, 6:7],
data[:(time_size + prediction_size) * num_roads, 4],
data[:(time_size + prediction_size) * num_roads, 5])
features=np.reshape(features,newshape=[time_size, num_roads, 1])
days=np.reshape(days,newshape=[time_size+prediction_size,num_roads])
hours=np.reshape(hours,newshape=[time_size+prediction_size,num_roads])
return features,days,hours
def prediction(features=None, days=None, hours=None, num_roads=49):
'''
input_features shape is [batch size * input time size, num roads, features],
for example,(1, 49,1), dtype: float64.
input_position shape is [1, num roads],
for example,(1, 49), dtype: int32.
input_day shape is [input time size + prediction time size, num roads],
for example,(7, 49), dtype: int32. 6 + 1 = 7
input_hour shape is [input time size + prediction time size, num roads],
for example,(7, 49), dtype: int32. 6 + 1 = 7
input_indices shape is [None, 2], dtype : int 32.
input_values shape is [None], dtype: float64.
input_dense_shape shape is (num roads, num roads)
:return: pred shape is [batch size, num roads, prediction time size],
example (1, 49, 1), dtype: float.
'''
with tf.gfile.GFile('model.pb', 'rb') as fgraph:
graph_def = tf.GraphDef()
graph_def.ParseFromString(fgraph.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name='')
        input_position = graph.get_tensor_by_name('input_position:0')
input_day = graph.get_tensor_by_name('input_day:0')
input_hour = graph.get_tensor_by_name('input_hour:0')
input_indices = graph.get_tensor_by_name('input_indices:0')
input_values = graph.get_tensor_by_name('input_values:0')
input_dense_shape = graph.get_tensor_by_name('input_dense_shape:0')
input_features = graph.get_tensor_by_name('input_features:0')
pred = graph.get_tensor_by_name('output_y:0')
position=get_position(num_roads)
adj=adjecent()
adj=preprocess_adj(adj)
# print(support)
print(position.shape, position.dtype)
print(days.shape,days.dtype)
print(hours.shape,hours.dtype)
print(features.shape,features.dtype)
print(adj[0].shape, adj[0].dtype)
print(adj[1].shape, adj[1].dtype)
print(adj[2])
sess = tf.Session(graph=graph)
    feed={input_position:position,
input_day:days,
input_hour:hours,
input_features:features,
input_indices:adj[0],
input_values:adj[1],
input_dense_shape:adj[2]}
scores = sess.run(pred, feed_dict=feed)
return scores
'''input example'''
train_data=pd.read_csv('train.csv', encoding='utf-8')
features, days, hours=feature_split(data=train_data.values)
print(features.shape, days.shape, hours.shape)
# features=np.random.random([6,49,1])
# days=np.random.randint(low=1,high=20,size=[7, 49],dtype=np.int32)
# hours = np.random.randint(low=0, high=20, size=[7, 49],dtype=np.int32)
pres=prediction(features=features,days=days,hours=hours, num_roads=49)
pres=pres.transpose([1, 0, 2])
pres=np.array([re_recover(np.reshape(road_label, [-1])) for road_label in pres])
pres_dict = {(int(train_data.values[i][0]), int(train_data.values[i][1])): pres[i] for i in range(49)}
for pres_d in pres_dict:
print(pres_d, pres_dict[pres_d])
|
'''
@Title: Exploring ruins
@Problem Statement:
Edward is playing a simplified version of a game called "Dorsplen". The game is played with gems of three different colors: red, green and blue.
Initially the player has no gems, and there are infinitely many gems of each color on the table.
On each turn a player can either acquire gems or buy an artifact. Artifacts are bought using gems.
When acquiring gems, a player can take either three gems of distinct colors or two gems of the same color from the table.
Edward is planning to buy an artifact that costs r red gems, g green gems and b blue gems.
Compute the minimum number of turns Edward needs to collect at least r red, g green and b blue gems,
so that he will be able to buy the artifact.
[...]
@URI: https://www.hackerearth.com/practice/algorithms/greedy/basics-of-greedy-algorithms/practice-problems/algorithm/dorsplen/
@Courtesy: hackerearth
'''
## Greedy approach. Finding the minimum among the three possible ways of acquiring all the 3 types of gems.
def meth2(n):
    ## ceil(n/2): collecting n gems of one color takes ceil(n/2) two-at-a-time turns
    if(n%2):
        return (n+1)/2
    else:
        return n/2
nums = sorted(map(int, raw_input().split()))
h,m,l = nums[-1], nums[1], nums[0]
t1 = h
t2 = m + meth2(h-m)
t3 = l + (meth2(h-l)+meth2(m-l))
count = min(t1,t2,t3)
print count
#1 2 4
#3
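## Worked trace for the sample "1 2 4" (sorted: l=1, m=2, h=4):
##   t1 = h = 4                                  (pair-turns only)
##   t2 = m + meth2(h-m) = 2 + meth2(2) = 3      (triples up to m, then pairs)
##   t3 = l + meth2(h-l) + meth2(m-l) = 1 + 2 + 1 = 4
##   min(t1, t2, t3) = 3, matching the expected output above.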
|
# 1.15 Grouping records together based on a field
rows = [
{'address': '5412 N CLARK', 'date': '07/01/2012'},
{'address': '5148 N CLARK', 'date': '07/04/2012'},
{'address': '5800 E 58TH', 'date': '07/02/2012'},
{'address': '2122 N CLARK', 'date': '07/03/2012'},
{'address': '5645 N RAVENSWOOD', 'date': '07/02/2012'},
{'address': '1060 W ADDISON', 'date': '07/02/2012'},
{'address': '4801 N BROADWAY', 'date': '07/01/2012'},
{'address': '1039 W GRANVILLE', 'date': '07/04/2012'},
]
from operator import itemgetter
from itertools import groupby
# Sort by the desired field first
rows.sort(key=itemgetter('date'))
# Iterate in groups
for date, items in groupby(rows, key=itemgetter('date')):
print(date)
for i in items:
print(' ', i)
from collections import defaultdict
rows_by_date = defaultdict(list)
for row in rows:
rows_by_date[row['date']].append(row)
for r in rows_by_date['07/01/2012']:
print(r)
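# Note: groupby() only examines consecutive items, so skipping the sort step
# fragments the groups. A quick illustration (ordering here is an example):
by_address = sorted(rows, key=itemgetter('address'))
print(len(list(groupby(by_address, key=itemgetter('date')))))  # 7 fragmented groups
print(len(list(groupby(rows, key=itemgetter('date')))))        # 4 date groups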
|
import logging
logger = logging.getLogger(__name__)
import os
import pipeline.libs.config as cfg
import maya.mel as mel
import maya.cmds as cmds
import pymel.core as pm
# import pipeline.libs.meta as meta
import pipeline.maya_libs.maya_warpper as maya
import pymel.core.nodetypes as nt
# def create_group(name, parent='', w=False, em=True):
# if cmds.objExists(name):
# logger.info("{} - exists - skipping".format(name))
# return
# else:
# if parent:
# pm.group(n =name, parent=parent, em=True)
#
# logger.info("{} - created".format(name))
# return
# if w:
# pm.group(n = name, w=True, em=True)
#
# logger.info("{} - created".format(name))
# return
# def export_high_group(asset_name = '', path = ''):
# render_grp = '{}{}'.format(asset_name, cfg._render_grp).lower()
#
# try:
# dup = pm.duplicate(render_grp)
# pm.parent(dup, w=True)
# dup[0].rename(render_grp)
# pm.select(dup[0], hi=True)
#
# exported_file = cmds.file(path, type='mayaAscii', exportSelected=True, expressions=False, constraints=False,
# channels=False, constructionHistory=False, shader=True, force=True)
#
# pm.delete(dup[0])
#
# return os.path.normpath(exported_file)
# except:
# logger.info("failed to export selection on {}".format(maya.current_open_file()))
# return None
# def create_modeling_defaults(name = 'name'):
# name_lower = name.lower()
#
# create_group(name_lower,w=True)
#
# high_group = '{}_{}_{}'.format(name_lower, cfg.high, cfg.group)
# low_group = '{}_{}_{}'.format(name_lower, cfg.low, cfg.group)
# bsp_group = '{}_{}_{}'.format(name_lower, cfg.blendshape, cfg.group)
#
# create_group(high_group,parent=name_lower)
# create_group(low_group, parent=name_lower)
# create_group(bsp_group, parent=name_lower)
#
#
# def clean_meta_nodes(node_name = ''):
# logger.info("remove unused meta nodes")
# nodes = meta.Component_Meta.getMetaNodes()
# for node in nodes:
# if node.shortName() != node_name:
# node.delete()
def cleanup_scene():
logger.info("Cleaning up scene ---------------------------------")
# import pymel.core as pm
#
# import pymel.core.nodetypes as nt
nodes = pm.ls(type=nt.Unknown)
for n in nodes:
pm.delete(n)
mel_cleanup = '''
source cleanUpScene;
deleteUnusedTrax( "clips" );
deleteUnusedTrax( "poses" );
deleteUnusedDeformers();
deleteInvalidNurbs(0);
MLdeleteUnused();
removeDuplicateShadingNetworks(0);
RNdeleteUnused();
deleteUnusedBrushes();
deleteEmptyGroups();
deleteEmptyLayers("Display");
deleteEmptyLayers("Render");
deleteUnusedExpressions();
deleteUnusedLocators();
deleteUnusedPairBlends();
deleteUnusedSets();
deleteUnusedConstraints();
deleteUnusedInUnusedHierarchy( "nurbsCurve", 0,(uiRes("m_cleanUpScene.kDeletingUnusedNurbsCurves2")));
deleteUnusedCommon( "animCurve", 0 , (uiRes("m_cleanUpScene.kDeletingUnusedAnimationCurves")));
deleteUnusedCommon("groupId", 0, (uiRes("m_cleanUpScene.kDeletingUnusedGroupIDNodes")));
hyperShadePanelMenuCommand("hyperShadePanel1", "deleteUnusedNodes");
'''
try:
mel.eval(mel_cleanup)
except:
logger.info("Scene clean up failed")
# import maya.cmds as cmds
def ns_keys(text):
return len(text.split(':'))
def ns_sort(l):
    """ Sort the given namespaces by depth (number of ':'-separated
    levels), shallowest first.
    """
    l.sort(key=ns_keys)
    return l
def remove_ref():
logger.info("Importing references ---------------------------------")
try:
mel.eval('RNdeleteUnused();')
sorted_references = ns_sort(cmds.ls(type='reference'))
for i in sorted_references:
print i
rFile = cmds.referenceQuery(i, f=True)
cmds.file(rFile, importReference=True, mnr = True, f = True)
except Exception, err:
logger.info(err)
logger.info("Import references failed")
def delete_ns():
logger.info("Deleting namespaces ----------------------------------")
try:
defaults = ['UI', 'shared']
# Used as a sort key, this will sort namespaces by how many children they have.
def num_children(ns):
return ns.count(':')
namespaces = [ns for ns in cmds.namespaceInfo(lon=True, r=True) if ns not in defaults]
# We want to reverse the list, so that namespaces with more children are at the front of the list.
namespaces.sort(key=num_children, reverse=True)
for ns in namespaces:
if namespaces.index(ns)+1 < len(namespaces):
parent_ns = namespaces[namespaces.index(ns)+1]
cmds.namespace(mv=[ns,parent_ns], f=True)
cmds.namespace(rm=ns)
else:
cmds.namespace(mv=[ns,":"], f=True)
cmds.namespace(rm=ns)
except Exception, err:
logger.info(err)
logger.info("Delete namespaces failed")
def delete_ngSKinToolsNodes():
logger.info("Removing ngSkinsTools nodes --------------------------")
try:
# remove ngSkinTools custom nodes
from ngSkinTools.layerUtils import LayerUtils
LayerUtils.deleteCustomNodes()
except:
logger.info("ngSkinsTools not installed / no ngSkinTools nodes in the scene")
def run_script(path, script_type):
logger.info("Running script ---------------------------------------")
logger.info("{} ---------------------------------------------------".format(path))
if os.path.exists(path):
if script_type == 'py':
try:
execfile(path)
except Exception, err:
logger.info(err)
return
elif script_type == 'mel':
try:
cmd = 'source "{}";'.format(path)
mel.eval(cmd)
except Exception, err:
logger.info(err)
return
# import pymel.core as pm
#
# import pymel.core.nodetypes as nt
#
# nodes = pm.ls(type=nt.Unknown)
# for n in nodes:
# pm.delete(n)
#
# '''
|
from mongoengine import Q
from mist.api.tag.models import Tag
from mist.api.helpers import trigger_session_update
from mist.api.helpers import get_object_with_id
def get_tags_for_resource(owner, resource_obj, *args, **kwargs):
return [{'key': tag.key, 'value': tag.value} for tag in
Tag.objects(owner=owner, resource=resource_obj)]
def add_tags_to_resource(owner, resource_obj, tags, *args, **kwargs):
"""
    This function gets a list of tags in the form
    [{'key': 'joe', 'value': 'schmoe'}], scans the list, updates all tags
    whose keys are present but whose values differ, and adds all the
    missing ones.
:param owner: the resource owner
:param resource_obj: the resource object where the tags will be added
:param tags: list of tags to be added
"""
# merge all the tags in the list into one dict. this will also make sure
# that if there are duplicates they will be cleaned up
tag_dict = dict(tags)
for tag_obj in Tag.objects(owner=owner, resource=resource_obj):
        # if any of the tag keys is already present, check whether its value
        # should be changed and remove it from the tag_dict
if tag_obj.key in tag_dict:
if tag_obj.value != tag_dict[tag_obj.key]:
tag_obj.value = tag_dict[tag_obj.key]
tag_obj.save()
del tag_dict[tag_obj.key]
# remaining tags in tag_dict have not been found in the db so add them now
for key, value in tag_dict.iteritems():
Tag(owner=owner, resource=resource_obj, key=key, value=value).save()
# SEC
owner.mapper.update(resource_obj)
# FIXME: The fact that a session update is triggered at this point may
# result in re-updating the RBAC Mappings twice for the given resource
# for no f*** reason.
rtype = resource_obj._meta["collection"]
trigger_session_update(owner,
[rtype + 's' if not rtype.endswith('s') else rtype])
def remove_tags_from_resource(owner, resource_obj, tags, *args, **kwargs):
"""
    This function gets a list of tag keys and deletes the corresponding
    tags from the resource
    :param owner: the resource owner
    :param resource_obj: the resource object the tags will be removed from
    :param tags: list of tag keys to be deleted
"""
# ensure there are no duplicate tag keys because mongoengine will
# raise exception for duplicates in query
key_list = list(set(tags))
    # create a query that will return all the tags with the given keys
query = reduce(lambda q1, q2: q1.__or__(q2),
map(lambda key: Q(key=key), key_list))
Tag.objects(Q(owner=owner) & Q(resource=resource_obj) & (query)).delete()
# SEC
owner.mapper.update(resource_obj)
rtype = resource_obj._meta["collection"]
trigger_session_update(owner,
[rtype + 's' if not rtype.endswith('s') else rtype])
def resolve_id_and_get_tags(owner, rtype, rid, *args, **kwargs):
"""
This call will try to fetch the object of type rtype from the db with id
rid. If the object is of type machine, image, network or location the
cloud_id must also be provided in the kwargs. If the resource type is
machine then the machine_id must be provided and not the object id. Whether
or not the owner has the necessary credentials to get the tags of the
resource is left to the caller of this function to validate.
:param owner: the owner of the resource
:param rtype: resource type
:param rid: resource id
:return: the tags of this resource
"""
resource_obj = get_object_with_id(owner, rid, rtype, *args, **kwargs)
return get_tags_for_resource(owner, resource_obj)
def resolve_id_and_set_tags(owner, rtype, rid, tags, *args, **kwargs):
"""
:param owner: the owner of the resource
:param rtype: resource type
:param rid: resource id
:param tags: resource tags to be added or updated
:return: the tags to be added or updated to this resource
"""
resource_obj = get_object_with_id(owner, rid, rtype, *args, **kwargs)
return add_tags_to_resource(owner, resource_obj, tags, *args,
**kwargs)
def resolve_id_and_delete_tags(owner, rtype, rid, tags, *args, **kwargs):
"""
:param owner: the owner of the resource
:param rtype: resource type
:param rid: resource id
    :param tags: resource tags to be deleted
:return: the tags to be deleted from this resource
"""
resource_obj = get_object_with_id(owner, rid, rtype, *args, **kwargs)
return remove_tags_from_resource(owner, resource_obj, tags, *args, **kwargs)
def modify_security_tags(auth_context, tags, resource=None):
"""
This method splits the resources' tags in security and non-security
groups. Security tags are part of team policies. Such tags should only
be modified by organization owners in order to enforce team policies.
If a team member attempts to edit a security tag, an UnauthorizedError
will be thrown
:param tags: the new tags dict
:param resource: the resource on which the tags are going to be applied
:return: False, if a security tag has been modified in the new tags
dict by someone other than the organization owner, otherwise True
"""
# private context
if auth_context.org is None:
return True
if auth_context.is_owner():
return True
else:
rtags = Tag.objects(owner=auth_context.owner.id,
resource=resource).only('key', 'value')
rtags = {rtag.key: rtag.value for rtag in rtags}
security_tags = auth_context.get_security_tags()
        # check whether the new tags attempt to modify any of the security_tags
for security_tag in security_tags:
for key, value in security_tag.items():
if key not in rtags.keys():
if key in tags.keys():
return False
else:
if key not in tags.keys():
return False
elif value != tags[key]:
return False
return True
def delete_security_tag(auth_context, tag_key):
"""
This method checks whether the tag to be deleted belongs to the
secure tags group
:param tag_key: the key of the tag to be removed
:return: False in case a security tag is about to be deleted
"""
# private context
if auth_context.org is None:
return True
if auth_context.is_owner():
return True
else:
security_tags = auth_context.get_security_tags()
for security_tag in security_tags:
for key, value in security_tag.items():
if key == tag_key:
return False
return True
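# A hedged usage sketch (resource name is hypothetical): callers typically
# gate tag edits before invoking add_tags_to_resource(), roughly:
#   if not modify_security_tags(auth_context, new_tags, resource=machine):
#       raise UnauthorizedError()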
|
'''
shift registers
type: class
name format: shift_register_[name]
variants: SIPO, PISO, SISO, PIPO
'''
'''SIPO'''
class shift_register_SIPO():
def __init__(self,level,inputno = None):
self.level = level
self.inputno = inputno
def sr_set(self,inputno):
#list input
if (isinstance(inputno, list)):
if(len(inputno) == self.level):
for bin_in in inputno:
if bin_in not in [0,1]:
raise ValueError("Invalid value for input")
else:
raise ValueError("Number of input bits is not equal to the number of flip flops")
else:
raise ValueError("Input must be in a list format")
self.inputno = inputno
def sr_get(self,clock):
if(isinstance(clock,int)):
if (clock < 0):
raise ValueError("Clock pulses are not negative")
elif (clock >= self.level):
clock = self.level - 1
else:
raise ValueError("The Nth clock pulse should be an integer")
input_cp = self.inputno.copy()
og_list = []
        for _ in range(clock + 1):
            #start from the least significant bit
            og_list.insert(0,input_cp[-1])
            input_cp.pop()
        if(len(og_list) < self.level):
            for _ in range(self.level - len(og_list)):
og_list.append(0)
return(og_list)
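# A short usage sketch (values are assumptions): a 4-bit SIPO register loaded
# with [1, 0, 1, 1]; after the third clock pulse (clock index 2) the three
# least significant bits have shifted in, zero-padded to the register length:
#   sr = shift_register_SIPO(4)
#   sr.sr_set([1, 0, 1, 1])
#   sr.sr_get(2)   # -> [0, 1, 1, 0]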
'''PISO'''
class shift_register_PISO():
def __init__(self,level,inputno = None):
self.level = level
self.inputno = inputno
def sr_set(self,inputno):
#list input
if (isinstance(inputno, list)):
if(len(inputno) == self.level):
for bin_in in inputno:
if bin_in not in [0,1]:
raise ValueError("Invalid value for input")
else:
raise ValueError("Number of input bits is not equal to the number of flip flops")
else:
raise ValueError("Input must be in a list format")
self.inputno = inputno
def sr_get(self,clock):
if(isinstance(clock,int)):
if (clock < 0):
raise ValueError("Clock pulses are not negative")
elif (clock >= self.level):
clock = self.level - 1
else:
raise ValueError("The Nth clock pulse should be an integer")
input_cp = self.inputno.copy()
og_list = []
for _ in range(clock + 1):
#start from the least significant bit
og_list.insert(0,input_cp[-1])
input_cp.pop()
if(len(og_list) < self.level):
for _ in range(self.level - len(og_list)):
og_list.append(0)
return(og_list)
'''SISO'''
class shift_register_SISO():
def __init__(self,level,inputno = None):
self.level = level
self.inputno = inputno
def sr_set(self,inputno):
#list input
if (isinstance(inputno, list)):
if(len(inputno) == self.level):
for bin_in in inputno:
if bin_in not in [0,1]:
raise ValueError("Invalid value for input")
else:
raise ValueError("Number of input bits is not equal to the number of flip flops")
else:
raise ValueError("Input must be in a list format")
self.inputno = inputno
def sr_get(self,clock):
if(isinstance(clock,int)):
if (clock < 0):
raise ValueError("Clock pulses are not negative")
elif (clock >= self.level):
clock = self.level - 1
else:
raise ValueError("The Nth clock pulse should be an integer")
input_cp = self.inputno.copy()
og_list = []
        for _ in range(clock + 1):
            #start from the least significant bit
            og_list.insert(0,input_cp[-1])
            input_cp.pop()
        if(len(og_list) < self.level):
            for _ in range(self.level - len(og_list)):
og_list.append(0)
return(og_list)
'''PIPO'''
class shift_register_PIPO():
def __init__(self,level,inputno = None):
self.level = level
self.inputno = inputno
def sr_set(self,inputno):
#list input
if (isinstance(inputno, list)):
if(len(inputno) == self.level):
for bin_in in inputno:
if bin_in not in [0,1]:
raise ValueError("Invalid value for input")
else:
raise ValueError("Number of input bits is not equal to the number of flip flops")
else:
raise ValueError("Input must be in a list format")
self.inputno = inputno
def sr_get(self,clock):
if(isinstance(clock,int)):
if (clock < 0):
raise ValueError("Clock pulses are not negative")
else:
return(self.inputno.copy())
else:
raise ValueError("The Nth clock pulse should be an integer")
|
import numba as nb
import pytest
from numba.typed import Dict
from respy.parallelization import _infer_dense_keys_from_arguments
from respy.parallelization import _is_dense_dictionary_argument
from respy.parallelization import _is_dictionary_with_integer_keys
def _typeddict_wo_integer_keys():
dictionary = Dict.empty(
key_type=nb.types.UniTuple(nb.types.int64, 2),
value_type=nb.types.int64,
)
dictionary[(1, 2)] = 1
return dictionary
def _typeddict_w_integer_keys():
dictionary = Dict.empty(
key_type=nb.types.int64,
value_type=nb.types.int64,
)
dictionary[1] = 1
return dictionary
@pytest.mark.unit
@pytest.mark.parametrize(
"input_, expected",
[
({1: 2, 3: 4}, True),
(1, False),
([3, 4, 5], False),
(_typeddict_wo_integer_keys(), False),
(_typeddict_w_integer_keys(), False),
],
)
def test_is_dictionary_with_integer_keys(input_, expected):
assert _is_dictionary_with_integer_keys(input_) is expected
@pytest.mark.unit
@pytest.mark.parametrize(
"args, kwargs, expected",
[(({1: None, 2: None},), {"kwarg_1": {2: None, 3: None}}, {2})],
)
def test_infer_dense_keys_from_arguments(args, kwargs, expected):
result = _infer_dense_keys_from_arguments(args, kwargs)
assert result == expected
@pytest.mark.unit
@pytest.mark.parametrize(
"arg, dense_keys, expected",
[({1: None, 2: None}, {1, 2}, True), ((1,), {1, 2, 3}, False)],
)
def test_is_dense_dictionary_argument(arg, dense_keys, expected):
result = _is_dense_dictionary_argument(arg, dense_keys)
assert result is expected
|
#!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script handles all of the processing for versioning packages.
package_version.py manages all of the various operations done between
packages, including archiving, extracting, uploading, and downloading
packages. For a list of options and commands, see the help for the script.
Glossary:
Package: A list of archives, such as "nacl_x86_glibc" or "nacl_x86_newlib".
Package Archive: An archive (usually a tar file) that is part of a package.
  Package Target: Package targets consist of packages. Each package target
    has its own version of a package. An example of a package target would
    be something such as "win_x86_nacl_x86" or "mac_x86_nacl_x86". In that case,
    "win_x86_nacl_x86" and "mac_x86_nacl_x86" would each have their own version
    of "nacl_x86_glibc" and "nacl_x86_newlib" for Windows and Mac respectively.
Revision: The revision identifier of a sanctioned version.
This is used to synchronize packages to sanctioned versions.
JSON Files:
Packages File - A file which describes the various package targets for each
platform/architecture along with the packages associated with each package
target.
[Default file: build/package_version/standard_packages.json].
Package File - A file which contains the list of package archives within
a package.
[Default file: toolchain/.tars/$PACKAGE_TARGET/$PACKAGE.json]
Archive File - A file which describes an archive within a package. Each
archive description file will contain information about an archive such
as name, URL to download from, and hash.
[Default File: toolchain/.tars/$PACKAGE_TARGET/$PACKAGE/$ARCHIVE.json]
Revision File - A file which describes the sanctioned version of package
for each of the package targets associated with it.
[Default file: toolchain_revisions/$PACKAGE.json]
"""
import argparse
import collections
import logging
import os
import shutil
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import cygtar
sys.path.append(os.path.dirname(os.path.dirname(SCRIPT_DIR)))
import pynacl.file_tools
import pynacl.gsd_storage
import pynacl.log_tools
import pynacl.platform
import pynacl.working_directory
import archive_info
import error
import package_info
import package_locations
import packages_info
import revision_info
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_DIR = os.path.dirname(CURRENT_DIR)
NACL_DIR = os.path.dirname(BUILD_DIR)
TEMP_SUFFIX = '.tmp'
DEFAULT_PACKAGES_JSON = os.path.join(CURRENT_DIR, 'standard_packages.json')
DEFAULT_REVISIONS_DIR = os.path.join(NACL_DIR, 'toolchain_revisions')
DEFAULT_DEST_DIR = os.path.join(NACL_DIR, 'toolchain')
DEFAULT_CLOUD_BUCKET = 'nativeclient-archive2'
#
# These are helper functions that help each command.
#
def CleanTempFiles(directory):
"""Cleans up all temporary files ending with TEMP_SUFFIX in a directory."""
for root, dirs, files in os.walk(directory):
for file_name in files:
if file_name.endswith(TEMP_SUFFIX):
file_path = os.path.join(root, file_name)
os.unlink(file_path)
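# Example (assumed path): CleanTempFiles('toolchain/.tars') removes any
# leftover '*.tmp' files from interrupted operations under that tree.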
def GetPackageTargetPackages(custom_package_name, package_target_packages):
"""Returns a list of package target packages given a custom package name.
  A custom package name can either have a specified package target attached
  to it (i.e. $PACKAGE_TARGET/$PACKAGE_NAME) or be matched against a default
  list of package targets along with their packages.
Args:
custom_package_name: Package name with an optional package target.
package_target_packages: List of tuples (package_target, package).
Returns:
List of package target tuples matching the package name.
"""
package_path = custom_package_name.replace('\\', os.path.sep)
package_path = package_path.replace('/', os.path.sep)
if os.path.sep in package_path:
# Package target is part of the custom package name, just return it.
package_target, package_name = package_path.split(os.path.sep, 1)
return [(package_target, package_name)]
# Package target is not part of the package name, filter from list of passed
# in package targets.
return [
(package_target, package)
for package_target, package in package_target_packages
if package == custom_package_name
]
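# Example (assumed names): GetPackageTargetPackages('win_x86/nacl_x86_glibc',
# []) returns [('win_x86', 'nacl_x86_glibc')] directly, while
# GetPackageTargetPackages('nacl_x86_glibc', package_target_packages) filters
# the passed-in (package_target, package) tuples by package name.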
def DownloadPackageArchives(tar_dir, package_target, package_name, package_desc,
downloader=None, revision_num=None,
include_logs=False):
"""Downloads package archives from the cloud to the tar directory.
Args:
tar_dir: Root tar directory where archives will be downloaded to.
package_target: Package target of the package to download.
package_name: Package name of the package to download.
package_desc: package_info object of the package to download.
    downloader: function which takes a url and a file path for downloading.
    revision_num: revision identifier used when logging sync progress.
    include_logs: whether to also download archive log files when available.
  Returns:
The list of files that were downloaded.
"""
downloaded_files = []
if downloader is None:
downloader = pynacl.gsd_storage.HttpDownload
local_package_file = package_locations.GetLocalPackageFile(tar_dir,
package_target,
package_name)
# Download packages information file along with each of the package
  # archives described in the information file. Also keep track of which
  # new package names match old package names. We will have to delete
# stale package names after we are finished.
update_archives = []
for archive_obj in package_desc.GetArchiveList():
archive_desc = archive_obj.GetArchiveData()
local_archive_file = package_locations.GetLocalPackageArchiveFile(
tar_dir,
archive_desc.name,
archive_desc.hash)
old_hash = archive_info.GetArchiveHash(local_archive_file)
if archive_desc.hash == old_hash:
logging.debug('Skipping matching archive: %s', archive_desc.name)
continue
archive_tuple = (local_archive_file, archive_obj.GetArchiveData())
update_archives.append(archive_tuple)
if update_archives:
logging.info('--Syncing %s to revision %s--' % (package_name, revision_num))
num_archives = len(update_archives)
for index, archive_tuple in enumerate(update_archives):
local_archive_file, archive_desc = archive_tuple
pynacl.file_tools.MakeParentDirectoryIfAbsent(local_archive_file)
if archive_desc.url is None:
raise error.Error('Error, no URL for archive: %s' % archive_desc.name)
logging.info('Downloading package archive: %s (%d/%d)' %
(archive_desc.name, index+1, num_archives))
try:
downloader(archive_desc.url, local_archive_file)
except Exception as e:
raise error.Error('Could not download URL (%s): %s' %
(archive_desc.url, e))
# Delete any stale log files
local_archive_log = package_locations.GetLocalPackageArchiveLogFile(
local_archive_file)
if os.path.isfile(local_archive_log):
os.unlink(local_archive_log)
verified_hash = archive_info.GetArchiveHash(local_archive_file)
if verified_hash != archive_desc.hash:
raise error.Error('Package hash check failed: %s != %s' %
(verified_hash, archive_desc.hash))
downloaded_files.append(local_archive_file)
# Download any logs if include_logs is True.
if include_logs:
download_logs = []
for archive_obj in package_desc.GetArchiveList():
archive_desc = archive_obj.GetArchiveData()
if archive_desc.log_url:
local_archive_file = package_locations.GetLocalPackageArchiveFile(
tar_dir,
archive_desc.name,
archive_desc.hash)
local_archive_log = package_locations.GetLocalPackageArchiveLogFile(
local_archive_file)
if not os.path.isfile(local_archive_log):
download_log_tuple = (archive_desc.name,
archive_desc.log_url,
local_archive_log)
download_logs.append(download_log_tuple)
if download_logs:
logging.info('--Syncing %s Logs--' % (package_name))
num_logs = len(download_logs)
for index, download_log_tuple in enumerate(download_logs):
name, log_url, local_log_file = download_log_tuple
logging.info('Downloading archive log: %s (%d/%d)' %
(name, index+1, num_logs))
try:
downloader(log_url, local_log_file)
except Exception as e:
raise IOError('Could not download log URL (%s): %s' %
(log_url, e))
# Save the package file so we know what we currently have.
if not update_archives and os.path.isfile(local_package_file):
try:
local_package_desc = package_info.PackageInfo(local_package_file)
if local_package_desc == package_desc:
return downloaded_files
except:
# Something is wrong with our package file, just resave it.
pass
package_desc.SavePackageFile(local_package_file)
return downloaded_files
def ArchivePackageArchives(tar_dir, package_target, package_name, archives,
extra_archives=[]):
"""Archives local package archives to the tar directory.
Args:
tar_dir: Root tar directory where archives live.
package_target: Package target of the package to archive.
package_name: Package name of the package to archive.
archives: List of archive file paths where archives currently live.
    extra_archives: Extra archives that are expected to be built elsewhere.
Returns:
Returns the local package file that was archived.
"""
local_package_file = package_locations.GetLocalPackageFile(tar_dir,
package_target,
package_name)
valid_archive_files = set()
archive_list = []
package_desc = package_info.PackageInfo()
package_archives = ([(archive, False) for archive in archives] +
[(archive, True) for archive in extra_archives])
for archive, skip_missing in package_archives:
archive_url = None
archive_log_url = None
if '@' in archive:
archive, archive_url = archive.split('@', 1)
if ',' in archive_url:
archive_url, archive_log_url = archive_url.split(',', 1)
extract_param = ''
tar_src_dir = ''
extract_dir = ''
if ',' in archive:
archive, extract_param = archive.split(',', 1)
if ':' in extract_param:
tar_src_dir, extract_dir = extract_param.split(':', 1)
else:
tar_src_dir = extract_param
archive_hash = archive_info.GetArchiveHash(archive)
archive_name = os.path.basename(archive)
archive_desc = archive_info.ArchiveInfo(name=archive_name,
hash=archive_hash,
url=archive_url,
tar_src_dir=tar_src_dir,
extract_dir=extract_dir,
log_url=archive_log_url)
package_desc.AppendArchive(archive_desc)
if archive_hash is None:
if skip_missing:
logging.info('Skipping archival of missing file: %s', archive)
continue
raise error.Error('Invalid package: %s.' % archive)
archive_list.append(archive)
archive_basename = os.path.basename(archive)
archive_json = archive_basename + '.json'
valid_archive_files.update([archive_basename, archive_json])
# We do not need to archive the package if it already matches. But if the
# local package file is invalid or does not match, then we should recreate
# the json file.
if os.path.isfile(local_package_file):
try:
current_package_desc = package_info.PackageInfo(local_package_file,
skip_missing=True)
if current_package_desc == package_desc:
return
except ValueError:
pass
# Copy each of the packages over to the tar directory first.
for archive_file in archive_list:
archive_name = os.path.basename(archive_file)
archive_hash = archive_info.GetArchiveHash(archive_file)
local_archive_file = package_locations.GetLocalPackageArchiveFile(
tar_dir,
archive_name,
archive_hash
)
if archive_hash == archive_info.GetArchiveHash(local_archive_file):
logging.info('Skipping archive of duplicate file: %s', archive_file)
else:
logging.info('Archiving file: %s', archive_file)
pynacl.file_tools.MakeParentDirectoryIfAbsent(local_archive_file)
shutil.copyfile(archive_file, local_archive_file)
# Once all the copying is completed, update the local packages file.
logging.info('Package "%s" archived: %s', package_name, local_package_file)
pynacl.file_tools.MakeParentDirectoryIfAbsent(local_package_file)
package_desc.SavePackageFile(local_package_file)
return local_package_file
def UploadPackage(storage, revision, tar_dir, package_target, package_name,
is_shared_package, annotate=False, skip_missing=False,
custom_package_file=None):
"""Uploads a local package file to the supplied cloud storage object.
By default local package files are expected to be found in the standardized
location within the tar directory, however a custom package file may be
specified to upload from a different location. Package archives that do not
  have their URL field set will automatically have the archives uploaded so that
someone accessing the package file from the cloud storage will also have
access to the package archives.
Args:
storage: Cloud storage object which supports PutFile and GetFile.
revision: Revision identifier the package should be associated with.
tar_dir: Root tar directory where archives live.
package_target: Package target of the package to archive.
package_name: Package name of the package to archive.
is_shared_package: Is this package shared among all package targets?
annotate: Print annotations for build bots?
skip_missing: Skip missing package archive files?
custom_package_file: File location for a custom package file.
Returns:
Returns remote download key for the uploaded package file.
"""
if custom_package_file is not None:
local_package_file = custom_package_file
else:
local_package_file = package_locations.GetLocalPackageFile(
tar_dir,
package_target,
package_name)
# Upload the package file and also upload any local package archives so
# that they are downloadable.
package_desc = package_info.PackageInfo(local_package_file,
skip_missing=skip_missing)
upload_package_desc = package_info.PackageInfo()
for archive_obj in package_desc.GetArchiveList():
archive_desc = archive_obj.GetArchiveData()
url = archive_desc.url
if archive_desc.hash and url is None:
if annotate:
print '@@@BUILD_STEP Archive:%s (upload)@@@' % archive_desc.name
archive_file = package_locations.GetLocalPackageArchiveFile(
tar_dir,
archive_desc.name,
archive_desc.hash)
archive_hash = archive_info.GetArchiveHash(archive_file)
if archive_hash is None:
raise error.Error('Missing Archive File: %s' % archive_file)
elif archive_hash != archive_desc.hash:
raise error.Error(
'Archive hash does not match package hash: %s' % archive_file
+ '\n Archive Hash: %s' % archive_hash
+ '\n Package Hash: %s' % archive_desc.hash)
logging.warn('Missing archive URL: %s', archive_desc.name)
      logging.warn('Uploading archive to be publicly available...')
remote_archive_key = package_locations.GetRemotePackageArchiveKey(
archive_desc.name,
archive_desc.hash)
url = storage.PutFile(archive_file, remote_archive_key, clobber=True)
if annotate:
print '@@@STEP_LINK@download@%s@@@' % url
updated_archive_obj = archive_obj.Copy(url=url)
upload_package_desc.AppendArchive(updated_archive_obj)
upload_package_file = local_package_file + '.upload'
pynacl.file_tools.MakeParentDirectoryIfAbsent(upload_package_file)
upload_package_desc.SavePackageFile(upload_package_file)
logging.info('Uploading package information: %s', package_name)
remote_package_key = package_locations.GetRemotePackageKey(
is_shared_package,
revision,
package_target,
package_name)
package_info.UploadPackageInfoFiles(storage, package_target, package_name,
remote_package_key, upload_package_file,
skip_missing=skip_missing,
annotate=annotate)
return remote_package_key
def ExtractPackageTargets(package_target_packages, tar_dir, dest_dir,
downloader=None, skip_missing=False,
overlay_tar_dir=None, quiet=False):
"""Extracts package targets from the tar directory to the destination.
Each package archive within a package will be verified before being
extracted. If a package archive does not exist or does not match the hash
stored within the package file, it will be re-downloaded before being
extracted.
Args:
package_target_packages: List of tuples of package target and package names.
tar_dir: Source tar directory where package archives live.
dest_dir: Root destination directory where packages will be extracted to.
    downloader: function which takes a url and a file path for downloading.
    skip_missing: skip package archives that are missing rather than erroring.
    overlay_tar_dir: optional tar directory whose archives take precedence.
    quiet: suppress tar extraction output.
  """
if downloader is None:
downloader = pynacl.gsd_storage.HttpDownload
for package_target, package_name in package_target_packages:
package_file = package_locations.GetLocalPackageFile(tar_dir,
package_target,
package_name)
package_desc = package_info.PackageInfo(package_file,
skip_missing=skip_missing)
dest_package_dir = package_locations.GetFullDestDir(dest_dir,
package_target,
package_name)
dest_package_file = package_locations.GetDestPackageFile(dest_dir,
package_target,
package_name)
# Get a list of overlay archives.
overlaid_archives = set()
if overlay_tar_dir:
overlay_file = package_locations.GetLocalPackageFile(overlay_tar_dir,
package_target,
package_name)
logging.debug('Checking overlaid package file: %s', overlay_file)
if os.path.isfile(overlay_file):
logging.info('Found overlaid package file: %s', overlay_file)
overlay_package_desc = package_info.PackageInfo(overlay_file,
skip_missing=True)
combined_archives = dict([(archive_obj.GetArchiveData().name,
archive_obj)
for archive_obj
in package_desc.GetArchiveList()])
for archive_obj in overlay_package_desc.GetArchiveList():
archive_desc = archive_obj.GetArchiveData()
if archive_desc.hash:
overlaid_archives.add(archive_desc.name)
combined_archives[archive_desc.name] = archive_obj
package_desc = package_info.PackageInfo()
for archive_name, archive_obj in combined_archives.iteritems():
package_desc.AppendArchive(archive_obj)
# Only do the extraction if the extract packages do not match.
if os.path.isfile(dest_package_file):
try:
dest_package_desc = package_info.PackageInfo(dest_package_file)
if dest_package_desc == package_desc:
logging.debug('Skipping extraction for package (%s)', package_name)
continue
except:
# Destination package file cannot be trusted, if invalid re-extract.
pass
# Delete the old package file before we extract.
os.unlink(dest_package_file)
if os.path.isdir(dest_package_dir):
logging.debug('Deleting old package directory: %s', dest_package_dir)
pynacl.file_tools.RemoveDirectoryIfPresent(dest_package_dir)
logging.info('Extracting package (%s) to directory: %s',
package_name, dest_package_dir)
archive_list = package_desc.GetArchiveList()
num_archives = len(archive_list)
for index, archive_obj in enumerate(archive_list):
archive_desc = archive_obj.GetArchiveData()
archive_file = None
if archive_desc.name in overlaid_archives:
archive_file = package_locations.GetLocalPackageArchiveFile(
overlay_tar_dir,
archive_desc.name,
archive_desc.hash)
logging.info('Using overlaid tar: %s', archive_file)
else:
archive_file = package_locations.GetLocalPackageArchiveFile(
tar_dir,
archive_desc.name,
archive_desc.hash)
      # Upon extraction, some files may not have been downloaded (or may be
      # stale), so we check the hash of each file and re-download it if it
      # does not match.
archive_hash = archive_info.GetArchiveHash(archive_file)
if archive_hash != archive_desc.hash:
if archive_desc.url is None:
if skip_missing:
logging.info('Skipping extraction of missing archive: %s' %
archive_file)
continue
raise error.Error('Invalid archive file and URL: %s' % archive_file)
logging.warn('Archive missing, downloading: %s', archive_desc.name)
logging.info('Downloading %s: %s', archive_desc.name, archive_desc.url)
pynacl.file_tools.MakeParentDirectoryIfAbsent(archive_file)
downloader(archive_desc.url, archive_file)
archive_hash = archive_info.GetArchiveHash(archive_file)
if archive_hash != archive_desc.hash:
raise error.Error('Downloaded archive file does not match hash.'
' [%s] Expected %s, received %s.' %
(archive_file, archive_desc.hash, archive_hash))
destination_dir = os.path.join(dest_package_dir, archive_desc.extract_dir)
logging.info('Extracting %s (%d/%d)' %
(archive_desc.name, index+1, num_archives))
temp_dir = os.path.join(destination_dir, '.tmp')
pynacl.file_tools.RemoveDirectoryIfPresent(temp_dir)
os.makedirs(temp_dir)
tar_output = not quiet
tar = cygtar.CygTar(archive_file, 'r:*', verbose=tar_output)
curdir = os.getcwd()
os.chdir(temp_dir)
try:
tar.Extract()
tar.Close()
finally:
os.chdir(curdir)
temp_src_dir = os.path.join(temp_dir, archive_desc.tar_src_dir)
pynacl.file_tools.MoveAndMergeDirTree(temp_src_dir, destination_dir)
pynacl.file_tools.RemoveDirectoryIfPresent(temp_dir)
pynacl.file_tools.MakeParentDirectoryIfAbsent(dest_package_file)
package_desc.SavePackageFile(dest_package_file)
def CleanupTarDirectory(tar_dir):
"""Deletes any files within the tar directory that are not referenced.
Files such as package archives are shared between packages and therefore
non-trivial to delete. Package files may also change so old packages may
stay on the local hard drive even though they are not read anymore. This
function will walk through the tar directory and cleanup any stale files
it does not recognize.
Args:
tar_dir: Source tar directory where package archives live.
"""
# Keep track of the names of all known files and directories. Because of
# case insensitive file systems, we should lowercase all the paths so
# that we do not accidentally delete any files.
known_directories = set()
known_files = set()
for package_target, package_list in package_locations.WalkPackages(tar_dir):
for package_name in package_list:
package_file = package_locations.GetLocalPackageFile(tar_dir,
package_target,
package_name)
try:
package_desc = package_info.PackageInfo(package_file, skip_missing=True)
except:
continue
for archive_obj in package_desc.GetArchiveList():
archive_desc = archive_obj.GetArchiveData()
if not archive_desc.hash:
continue
archive_file = package_locations.GetLocalPackageArchiveFile(
tar_dir,
archive_desc.name,
archive_desc.hash)
log_file = package_locations.GetLocalPackageArchiveLogFile(archive_file)
known_files.add(archive_file.lower())
known_files.add(log_file.lower())
package_name = package_info.GetLocalPackageName(package_file)
package_directory = os.path.join(os.path.dirname(package_file),
package_name)
known_files.add(package_file.lower())
known_directories.add(package_directory.lower())
# We are going to be deleting all files that do not match any known files,
# so do a sanity check that this is an actual tar directory. If we have no
# known files or directories, we probably do not have a valid tar directory.
if not known_directories or not known_files:
raise error.Error('No packages found for tar directory: %s' % tar_dir)
for dirpath, dirnames, filenames in os.walk(tar_dir, topdown=False):
if dirpath.lower() in known_directories:
continue
for filename in filenames:
full_path = os.path.join(dirpath, filename)
if full_path.lower() in known_files:
continue
logging.debug('Removing stale file: %s', full_path)
os.unlink(full_path)
if not os.listdir(dirpath):
logging.debug('Removing stale directory: %s', dirpath)
os.rmdir(dirpath)
#
# Each Command has 2 functions that describes it:
# 1. A parser function which specifies the extra command options each command
# will have.
# 2. An execution function which is called when a user actually executes
# the command.
#
def _ListCmdArgParser(subparser):
subparser.description = 'Lists package information.'
def _DoListCmd(arguments):
package_targets = collections.defaultdict(list)
for package_target, package in arguments.package_target_packages:
package_targets[package_target].append(package)
modes_dict = arguments.packages_desc.GetPackageModes()
if not modes_dict:
print 'No Package Modes Found.'
else:
print 'Listing Modes:'
for mode, package_list in modes_dict.iteritems():
print ' [%s]' % mode
for package in package_list:
print ' ', package
if arguments.mode:
print
print 'Current Mode Selected:', arguments.mode
print
print 'Listing Package Targets and Packages:'
for package_target, packages in package_targets.iteritems():
print ' [%s]:' % package_target
for package in sorted(packages):
print ' ', package
def _ArchiveCmdArgParser(subparser):
subparser.description = 'Archive package archives to tar directory.'
subparser.add_argument(
'--archive-package', metavar='NAME', dest='archive__package',
required=True,
help='Package name archives will be packaged into.')
subparser.add_argument(
'--extra-archive', metavar='ARCHIVE', dest='archive__extra_archive',
action='append', default=[],
help='Extra archives that are expected to be built elsewhere.')
subparser.add_argument(
metavar='TAR(,SRCDIR(:EXTRACTDIR))(@URL,LOGURL)', dest='archive__archives',
nargs='+',
help='Package archive with an optional tar information and url.'
' SRCDIR is the root directory where files live inside of the tar.'
' EXTRACTDIR is the directory to extract files to relative to the'
' destination directory. The URL is where the package can be'
' downloaded from.')
subparser.add_argument(
'-x', '--extract', dest='archive__extract',
action='store_true', default=False,
help='Extract package archives after they have been archived.')
def _DoArchiveCmd(arguments):
package_target_packages = GetPackageTargetPackages(
arguments.archive__package,
arguments.package_target_packages
)
if not package_target_packages:
raise error.Error('Unknown package: %s.' % arguments.archive__package
+ ' Did you forget to add "$PACKAGE_TARGET/"?')
for package_target, package_name in package_target_packages:
ArchivePackageArchives(arguments.tar_dir,
package_target,
package_name,
arguments.archive__archives,
extra_archives=arguments.archive__extra_archive)
if arguments.archive__extract:
ExtractPackageTargets([(package_target, package_name)],
arguments.tar_dir,
arguments.dest_dir,
skip_missing=True,
quiet=arguments.quiet)
def _ExtractCmdArgParser(subparser):
subparser.description = 'Extract packages from tar directory.'
subparser.add_argument(
'--skip-missing', dest='extract__skip_missing',
action='store_true', default=False,
help='Skip missing archive files when extracting rather than erroring out.')
subparser.add_argument(
'--overlay-tar-dir', dest='overlay_tar_dir',
default=None,
      help='Extracts tar directories as usual, except uses any packages' +
           ' found within the overlay tar directory first.')
def _DoExtractCmd(arguments):
ExtractPackageTargets(
arguments.package_target_packages,
arguments.tar_dir,
arguments.dest_dir,
skip_missing=arguments.extract__skip_missing,
overlay_tar_dir=arguments.overlay_tar_dir,
quiet=arguments.quiet)
def _UploadCmdArgParser(subparser):
subparser.description = 'Upload a package file.'
subparser.add_argument(
'--upload-package', metavar='NAME', dest='upload__package', required=True,
help='Package to upload.')
subparser.add_argument(
'--revision', metavar='ID', dest='upload__revision', required=True,
help='Revision of the package to upload.')
subparser.add_argument(
'--package-file', metavar='FILE', dest='upload__file',
default=None,
help='Use custom package file instead of standard package file found'
' in the tar directory.')
subparser.add_argument(
'--skip-missing', dest='upload__skip_missing',
action='store_true', default=False,
help='Skip missing archive files when uploading package archives.')
def _DoUploadCmd(arguments):
package_target_packages = GetPackageTargetPackages(
arguments.upload__package,
arguments.package_target_packages
)
if not package_target_packages:
raise error.Error('Unknown package: %s.' % arguments.upload__package
+ ' Did you forget to add "$PACKAGE_TARGET/"?')
for package_target, package_name in package_target_packages:
UploadPackage(
arguments.gsd_store,
arguments.upload__revision,
arguments.tar_dir,
package_target,
package_name,
arguments.packages_desc.IsSharedPackage(package_name),
annotate=arguments.annotate,
skip_missing=arguments.upload__skip_missing,
custom_package_file=arguments.upload__file
)
def _SyncCmdArgParser(subparser):
subparser.description = 'Download package archives to the tar directory.'
subparser.add_argument(
'--revision', metavar='ID', dest='sync__revision',
default=None,
help='Revision identifier of the packages to download.')
subparser.add_argument(
'--include-logs', dest='sync__include_logs',
action='store_true', default=False,
help='Also download logs next to each archive if available.')
subparser.add_argument(
'-x', '--extract', dest='sync__extract',
action='store_true', default=False,
help='Extract package archives after they have been downloaded.')
def _DoSyncCmd(arguments):
for package_target, package_name in arguments.package_target_packages:
if arguments.sync__revision is None:
# When the sync revision number is not specified, use the set
# revision number found in the revision directory.
revision_file = package_locations.GetRevisionFile(
arguments.revisions_dir,
package_name)
revision_desc = revision_info.RevisionInfo(
arguments.packages_desc,
revision_file)
package_desc = revision_desc.GetPackageInfo(package_target)
revision_num = revision_desc.GetRevisionNumber()
else:
# When the sync revision number is specified, find the package to
# download remotely using the revision.
revision_num = arguments.sync__revision
remote_package_key = package_locations.GetRemotePackageKey(
arguments.packages_desc.IsSharedPackage(package_name),
arguments.sync__revision,
package_target,
package_name)
with pynacl.working_directory.TemporaryWorkingDirectory() as work_dir:
temp_package_file = os.path.join(
work_dir,
os.path.basename(remote_package_key) + TEMP_SUFFIX)
package_info.DownloadPackageInfoFiles(
temp_package_file,
remote_package_key,
downloader=arguments.gsd_store.GetFile)
package_desc = package_info.PackageInfo(temp_package_file)
DownloadPackageArchives(
arguments.tar_dir,
package_target,
package_name,
package_desc,
revision_num=revision_num,
include_logs=arguments.sync__include_logs)
CleanTempFiles(arguments.tar_dir)
if arguments.sync__extract:
ExtractPackageTargets(
arguments.package_target_packages,
arguments.tar_dir,
arguments.dest_dir,
quiet=arguments.quiet)
def _SetRevisionCmdArgParser(subparser):
subparser.description = 'Specify the revision of a package.'
subparser.add_argument(
'--revision-package', metavar='NAME', dest='setrevision__package',
action='append', default=[],
help='Package name to set revision of.')
subparser.add_argument(
'--revision-set', metavar='SET-NAME', dest='setrevision__revset',
action='append', default=[],
help='Revision set to set revision for.')
subparser.add_argument(
'--revision', metavar='ID', dest='setrevision__revision',
required=True,
help='Revision identifier of the package to set.')
def _DoSetRevisionCmd(arguments):
packages_list = arguments.setrevision__package
revision_sets = arguments.setrevision__revset
revision_num = arguments.setrevision__revision
for revision_set in revision_sets:
set_packages = arguments.packages_desc.GetRevisionSet(revision_set)
if set_packages is None:
raise error.Error('Invalid Revision Set: %s' % revision_set)
packages_list.extend(set_packages)
if not packages_list:
raise error.Error('No revision packages have been supplied.')
for package_name in packages_list:
revision_desc = revision_info.RevisionInfo(arguments.packages_desc)
revision_desc.SetRevisionNumber(revision_num)
custom_package_targets = GetPackageTargetPackages(package_name, [])
if not custom_package_targets:
package_targets = arguments.packages_desc.GetPackageTargetsForPackage(
package_name
)
else:
package_targets = [target[0] for target in custom_package_targets]
first_target = custom_package_targets[0]
package_name = first_target[1]
for package_target in package_targets:
with pynacl.working_directory.TemporaryWorkingDirectory() as work_dir:
remote_package_key = package_locations.GetRemotePackageKey(
arguments.packages_desc.IsSharedPackage(package_name),
revision_num,
package_target,
package_name)
temp_package_file = os.path.join(
work_dir,
os.path.basename(remote_package_key) + TEMP_SUFFIX)
package_info.DownloadPackageInfoFiles(
temp_package_file,
remote_package_key,
downloader=arguments.gsd_store.GetFile)
package_desc = package_info.PackageInfo(temp_package_file)
logging.info('Setting %s:%s to revision %s',
package_target, package_name, revision_num)
revision_desc.SetTargetRevision(
package_name,
package_target,
package_desc)
revision_file = package_locations.GetRevisionFile(
arguments.revisions_dir,
package_name)
pynacl.file_tools.MakeParentDirectoryIfAbsent(revision_file)
revision_desc.SaveRevisionFile(revision_file)
CleanTempFiles(arguments.revisions_dir)
def _GetRevisionCmdArgParser(subparser):
subparser.description = 'Get the revision of a package.'
subparser.add_argument(
'--revision-package', metavar='NAME', dest='getrevision__packages',
action='append', default=[],
help='Package name to get revision of.')
subparser.add_argument(
'--revision-set', metavar='SET-NAME', dest='getrevision__revset',
action='append', default=[],
help='Revision set to set revision for.')
def _DoGetRevisionCmd(arguments):
packages_list = arguments.getrevision__packages
revision_sets = arguments.getrevision__revset
for revision_set in revision_sets:
set_packages = arguments.packages_desc.GetRevisionSet(revision_set)
if set_packages is None:
raise error.Error('Invalid Revision Set: %s' % revision_set)
packages_list.extend(set_packages)
if not packages_list:
raise error.Error('No revision packages have been supplied.')
revision_number = None
for package_name in packages_list:
custom_package_targets = GetPackageTargetPackages(package_name, [])
if custom_package_targets:
custom_target, package_name = custom_package_targets[0]
revision_file = package_locations.GetRevisionFile(arguments.revisions_dir,
package_name)
if not os.path.isfile(revision_file):
raise error.Error('No revision set for package: %s.' % package_name)
revision_desc = revision_info.RevisionInfo(arguments.packages_desc,
revision_file)
package_revision = revision_desc.GetRevisionNumber()
if revision_number is None:
revision_number = package_revision
elif revision_number != package_revision:
logging.error('Listing Get Revision Packages:')
for package in packages_list:
logging.error(' %s', package)
raise error.Error('Package revisions are not set to the same revision.')
  print(revision_number)
def _RevPackagesCmdArgParser(subparser):
subparser.description = 'Prints list of packages for a revision set name.'
subparser.add_argument(
'--revision-set', metavar='NAME', dest='revpackages__name',
required=True,
help='Name of the package or revision set.')
def _DoRevPackagesCmd(arguments):
revision_package = arguments.revpackages__name
packages_list = [revision_package]
# Check if the package_name is a revision set.
revision_set = arguments.packages_desc.GetRevisionSet(revision_package)
if revision_set is not None:
packages_list = revision_set
for package_name in packages_list:
    print(package_name)
def _FillEmptyTarsParser(subparser):
subparser.description = 'Fill missing archives with empty ones in a package.'
subparser.add_argument(
'--fill-package', metavar='NAME', dest='fillemptytars_package',
required=True,
help='Package name to fill empty archives of.')
def _DoFillEmptyTarsCmd(arguments):
package_target_packages = GetPackageTargetPackages(
arguments.fillemptytars_package,
arguments.package_target_packages
)
if not package_target_packages:
raise error.Error('Unknown package: %s.' % arguments.fillemptytars_package
+ ' Did you forget to add "$PACKAGE_TARGET/"?')
for package_target, package_name in package_target_packages:
package_path = package_locations.GetLocalPackageFile(arguments.tar_dir,
package_target,
package_name)
package_desc = package_info.PackageInfo(package_path, skip_missing=True)
output_package_desc = package_info.PackageInfo()
for archive in package_desc.GetArchiveList():
# If archive does not exist, fill it with an empty one.
archive_data = archive.GetArchiveData()
if archive_data.hash:
output_package_desc.AppendArchive(archive)
else:
logging.info('Filling missing archive: %s.', archive_data.name)
if (archive_data.name.endswith('.tar.gz') or
archive_data.name.endswith('.tgz')):
mode = 'w:gz'
elif archive_data.name.endswith('.bz2'):
mode = 'w:bz2'
elif archive_data.name.endswith('.tar'):
mode = 'w:'
else:
raise error.Error('Unknown archive type: %s.' % archive_data.name)
temp_archive_file = os.path.join(arguments.tar_dir, archive_data.name)
tar_file = cygtar.CygTar(temp_archive_file, mode)
tar_file.Close()
tar_hash = archive_info.GetArchiveHash(temp_archive_file)
archive_file = package_locations.GetLocalPackageArchiveFile(
arguments.tar_dir,
archive_data.name,
tar_hash)
pynacl.file_tools.MakeParentDirectoryIfAbsent(archive_file)
os.rename(temp_archive_file, archive_file)
empty_archive = archive_info.ArchiveInfo(name=archive_data.name,
hash=tar_hash)
output_package_desc.AppendArchive(empty_archive)
output_package_desc.SavePackageFile(package_path)
def _RecalcRevsParser(subparser):
subparser.description = 'Recalculates hashes for files in revision directory.'
def _DoRecalcRevsCmd(arguments):
for json_file in os.listdir(arguments.revisions_dir):
if json_file.endswith('.json'):
revision_file = os.path.join(arguments.revisions_dir, json_file)
revision_desc = revision_info.RevisionInfo(arguments.packages_desc)
revision_desc.LoadRevisionFile(revision_file, skip_hash_verify=True)
revision_desc.SaveRevisionFile(revision_file)
def _CleanupParser(subparser):
subparser.description = 'Cleans up any unused package archives files.'
def _DoCleanupCmd(arguments):
CleanupTarDirectory(arguments.tar_dir)
CommandFuncs = collections.namedtuple(
'CommandFuncs',
['parse_func', 'do_cmd_func'])
COMMANDS = {
'list': CommandFuncs(_ListCmdArgParser, _DoListCmd),
'archive': CommandFuncs(_ArchiveCmdArgParser, _DoArchiveCmd),
'extract': CommandFuncs(_ExtractCmdArgParser, _DoExtractCmd),
'upload': CommandFuncs(_UploadCmdArgParser, _DoUploadCmd),
'sync': CommandFuncs(_SyncCmdArgParser, _DoSyncCmd),
'setrevision': CommandFuncs(_SetRevisionCmdArgParser, _DoSetRevisionCmd),
'getrevision': CommandFuncs(_GetRevisionCmdArgParser, _DoGetRevisionCmd),
'revpackages': CommandFuncs(_RevPackagesCmdArgParser, _DoRevPackagesCmd),
'fillemptytars': CommandFuncs(_FillEmptyTarsParser, _DoFillEmptyTarsCmd),
'recalcrevisions': CommandFuncs(_RecalcRevsParser, _DoRecalcRevsCmd),
'cleanup': CommandFuncs(_CleanupParser, _DoCleanupCmd),
}
def ParseArgs(args):
parser = argparse.ArgumentParser()
host_platform = pynacl.platform.GetOS()
host_arch = pynacl.platform.GetArch3264()
# List out global options for all commands.
parser.add_argument(
'-v', '--verbose', dest='verbose',
action='store_true', default=False,
help='Verbose output')
parser.add_argument(
'-q', '--quiet', dest='quiet',
action='store_true', default=False,
help='Quiet output')
parser.add_argument(
'--platform', dest='host_platform',
default=host_platform,
help='Custom platform other than the current (%s).' % host_platform)
parser.add_argument(
'--arch', dest='host_arch',
default=host_arch,
help='Custom architecture other than the current (%s).' % host_arch)
parser.add_argument(
'--package-targets', dest='package_targets',
default=None,
      help='Custom package targets specified as comma separated names. Defaults'
' to package targets defined for host platform and architecture inside'
' of the packages json file.')
parser.add_argument(
'--mode', dest='mode',
default=None,
help='Specify a package mode to filter by, modes are specified within'
' the packages json file. For a list of modes use the "list" command.')
parser.add_argument(
'--packages', dest='packages',
default=None,
help='Custom packages specified as comma separated package names. Custom'
' packages not defined by the packages json file must be prefixed by'
' the package_target directory (IE. $PACKAGE_TARGET/$PACKAGE).')
parser.add_argument(
'--append', metavar='PACKAGE', dest='append_packages',
action='append', default=[],
help='Append extra package to current list of packages.')
parser.add_argument(
'--exclude', metavar='PACKAGE', dest='exclude_packages',
action='append', default=[],
help='Exclude package from current list of packages.')
parser.add_argument(
'--packages-json', dest='packages_json',
default=DEFAULT_PACKAGES_JSON, type=argparse.FileType('rt'),
help='Packages description file.'
' [Default: %s]' % DEFAULT_PACKAGES_JSON)
parser.add_argument(
'--revisions-dir', dest='revisions_dir',
default=DEFAULT_REVISIONS_DIR,
help='Revisions directory where packages revisions will be found.')
parser.add_argument(
'--dest-dir', dest='dest_dir',
default=DEFAULT_DEST_DIR,
help='Destination directory where all the packages will be extracted to.')
parser.add_argument(
'--tar-dir', dest='tar_dir',
default=None,
help='Directory for package archive files. Defaults to "$DEST-DIR/.tars".')
parser.add_argument(
'--annotate', dest='annotate',
action='store_true', default=False,
help='Print out build bot annotations.')
parser.add_argument(
'--cloud-bucket', dest='cloud_bucket',
default=DEFAULT_CLOUD_BUCKET,
help='Google storage cloud bucket name.'
' [Default: %s]' % DEFAULT_CLOUD_BUCKET)
# Add subparsers for all commands. These are flags for specific commands,
# IE. [options] command [command-options]
command_parser = parser.add_subparsers(title='command', dest='command')
  for command, cmd_funcs in COMMANDS.items():
sub_parser = command_parser.add_parser(command)
cmd_funcs.parse_func(sub_parser)
arguments = parser.parse_args(args)
pynacl.log_tools.SetupLogging(
verbose=arguments.verbose, quiet=arguments.quiet)
if arguments.tar_dir is None:
arguments.tar_dir = os.path.join(arguments.dest_dir, '.tars')
# Parse the package description up front and store it into the arguments
# object. Almost all the commands need to use this information.
packages_desc = packages_info.PackagesInfo(arguments.packages_json)
arguments.packages_desc = packages_desc
# Based on the host platform and host architecture, we can determine the set
# of package targets used from the packages description. Minimize platform
# and architecture errors by standardizing the names using pynacl.platform.
if arguments.package_targets is None:
package_targets = packages_desc.GetPackageTargets(
pynacl.platform.GetOS(arguments.host_platform),
pynacl.platform.GetArch3264(arguments.host_arch))
else:
package_targets = arguments.package_targets.split(',')
# If the packages argument were not set, use the default list of packages
# for each package target.
packages_set = set()
if arguments.packages is None:
for package_target in package_targets:
packages = packages_desc.GetPackages(package_target)
if packages is None:
raise error.Error('No packages defined for Package Target: %s.' %
package_target)
packages_set.update(packages)
else:
packages_set.update(arguments.packages.split(','))
# If a mode was set, only use packages listed in the mode.
if arguments.mode:
modes_dict = packages_desc.GetPackageModes()
if arguments.mode not in modes_dict:
logging.info('Valid Package Modes:')
for mode in modes_dict:
logging.info(' %s', mode)
raise error.Error('Invalid Package Mode: %s.' % arguments.mode)
packages_set.intersection_update(modes_dict[arguments.mode])
# Append/exclude any extra packages that were specified.
packages_set.update(arguments.append_packages)
packages_set.difference_update(arguments.exclude_packages)
# Build a dictionary that organizes packages to their respective package
# targets. Packages may exist in multiple package targets so we will have
# to have the key be package and value be a list of package targets.
package_targets_dict = collections.defaultdict(list)
for package_target in package_targets:
for package in packages_desc.GetPackages(package_target):
package_targets_dict[package].append(package_target)
# Use the list of packages to determine the set of package target packages
# we are operating on, custom package targets will have the package target
# inside of the name of the package name (see help for "--packages" argument).
# The package_target_packages is a list of tuples (package_target, package),
# for every package along with the associated package target.
package_target_packages = []
for package in sorted(packages_set):
package_targets = package_targets_dict.get(package, None)
if package_targets is None:
custom_package_targets = GetPackageTargetPackages(package, [])
if not custom_package_targets:
raise error.Error('Invalid custom package: "%s".'
' Expected $PACKAGE_TARGET/$PACKAGE' % package)
package_target_packages.extend(custom_package_targets)
else:
for package_target in package_targets:
package_target_packages.append((package_target, package))
arguments.package_target_packages = package_target_packages
# Create a GSD Storage object for those who need it.
cloud_bucket = arguments.cloud_bucket
gsd_store = pynacl.gsd_storage.GSDStorage(cloud_bucket, [cloud_bucket])
arguments.gsd_store = gsd_store
return arguments
def main(args):
# If verbose is on, do not catch error.Error() exceptions separately but
# allow python to catch the errors and print out the entire callstack.
# Note that we cannot rely on ParseArgs() to parse if verbose is on, because
# ParseArgs() could throw an exception.
if '-v' in args or '--verbose' in args:
arguments = ParseArgs(args)
return COMMANDS[arguments.command].do_cmd_func(arguments)
else:
try:
arguments = ParseArgs(args)
return COMMANDS[arguments.command].do_cmd_func(arguments)
except error.Error as e:
sys.stderr.write('package_version: ' + str(e) + '\n')
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
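# Usage sketch (hypothetical package name; flags taken from the parsers
# defined above):
#   python package_version.py setrevision --revision-package my_package \
#       --revision 12345
#   python package_version.py getrevision --revision-package my_package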
|
"""
Copyright © retnikt <_@retnikt.uk> 2020
This software is licensed under the MIT Licence: https://opensource.org/licenses/MIT
"""
import secrets
from typing import TYPE_CHECKING, List, Optional, Union
from argon2 import ( # type: ignore
DEFAULT_MEMORY_COST,
DEFAULT_PARALLELISM,
DEFAULT_TIME_COST,
)
from pydantic import AnyHttpUrl, BaseSettings, Field, stricturl
if TYPE_CHECKING:
_PostgresURL = str
else:
_PostgresURL = stricturl(
strip_whitespace=True, tld_required=False, allowed_schemes={"postgresql"},
)
class _UniversalSet:
"""(psuedo)-universal set - contains everything"""
def __contains__(self, item):
return True
UNIVERSAL_SET = _UniversalSet()
class _Settings(BaseSettings):
dsn: _PostgresURL = "postgresql://db/notebook"
force_rollback: bool = False
cors_origins: List[AnyHttpUrl] = []
rocpf_origins: Union[_UniversalSet, List[str]] = Field(UNIVERSAL_SET)
secret_key: str = secrets.token_urlsafe(40)
argon2_time_cost: int = DEFAULT_TIME_COST
argon2_memory_cost: int = DEFAULT_MEMORY_COST
argon2_parallelism: int = DEFAULT_PARALLELISM
smtp_hostname: Optional[str] = None
smtp_port: Optional[int] = None
smtp_username: Optional[str] = None
smtp_password: Optional[str] = None
smtp_direct_tls: bool = False
smtp_start_tls: bool = False
smtp_from: Optional[str] = None
class Config:
env_prefix: str = "notebook_"
settings = _Settings()
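# Usage sketch (module path assumed; adjust to the real package layout):
# with env_prefix "notebook_", BaseSettings reads each field from a
# NOTEBOOK_* environment variable, e.g.
#   NOTEBOOK_DSN=postgresql://user@localhost/notebook \
#       python -c "from settings import settings; print(settings.dsn)"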
|
import time
import os
from funboost import boost
@boost('test_f1_queue', qps=0.5)
def f1(x):
time.sleep(3)
print(f'x: {x}')
for j in range(1, 5):
f2.push(x * j)
@boost('test_f2_queue', qps=2)
def f2(y):
time.sleep(5)
print(f'y: {y}')
if __name__ == '__main__':
f1.clear()
f2.clear()
for i in range(30):
f1.push(i)
f1.consume()
f2.consume()
f1.wait_for_possible_has_finish_all_tasks(4)
    print('no tasks pending in the f1 queue for 4 minutes')
f2.wait_for_possible_has_finish_all_tasks(3)
    print('no tasks pending in the f2 queue for 3 minutes')
    print('f1 and f2 tasks have all finished running...')
    print('about to end the script with os._exit(444)')
    os._exit(444)  # end the script
|
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^hello-view', views.HelloApiView.as_view()),
]
|
from dockit.backends.indexer import BaseIndexer
from backend import MongoIndexStorage
try:
from bson.objectid import ObjectId
except ImportError:
from pymongo.objectid import ObjectId
class MongoIndexer(BaseIndexer):
def _get_key_value(self):
dotpath = self.filter_operation.dotpath()
value = self.filter_operation.value
if dotpath in ('pk', '_pk', '_id') and value:
value = ObjectId(value)
if dotpath in ('pk', '_pk'):
dotpath = '_id'
return dotpath, value
class ExactIndexer(MongoIndexer):
def filter(self):
dotpath, value = self._get_key_value()
return {dotpath: value}
def values(self):
dotpath, value = self._get_key_value()
return {dotpath: value}
MongoIndexStorage.register_indexer(ExactIndexer, 'exact')
class OperationIndexer(MongoIndexer):
operation = None
def filter(self):
dotpath, value = self._get_key_value()
return {dotpath: { self.operation : value }}
def values(self):
dotpath, value = self._get_key_value()
return {dotpath: { self.operation: value }}
class GTIndexer(OperationIndexer):
operation = '$gt'
MongoIndexStorage.register_indexer(GTIndexer, 'gt')
class LTIndexer(OperationIndexer):
operation = '$lt'
MongoIndexStorage.register_indexer(LTIndexer, 'lt')
class GTEIndexer(OperationIndexer):
operation = '$gte'
MongoIndexStorage.register_indexer(GTEIndexer, 'gte')
class LTEIndexer(OperationIndexer):
operation = '$lte'
MongoIndexStorage.register_indexer(LTEIndexer, 'lte')
class INIndexer(OperationIndexer):
operation = '$in'
MongoIndexStorage.register_indexer(INIndexer, 'in')
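# Illustration (sketch, hypothetical FilterOperation stand-in): the operator
# indexers above translate a dotpath lookup into a MongoDB query fragment,
# e.g. a 'gt' filter on dotpath 'age' with value 21 yields
#   {'age': {'$gt': 21}}
# while the pk aliases ('pk', '_pk') are rewritten to '_id' with an ObjectId
# value before the fragment is built.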
|
def main():
numbers = sorted([int(x) for x in input().split()])
diffs = [numbers[i+1] - numbers[i] for i in range(len(numbers) - 1)]
if diffs[0] == diffs[1]:
print(max(numbers) + diffs[0])
else:
if diffs[0] > diffs[1]:
print(numbers[0] + diffs[1])
else:
print(numbers[1] + diffs[0])
if __name__ == '__main__':
main()
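# Worked examples (sketch): input "1 4 7" -> sorted [1, 4, 7], equal diffs
# [3, 3], so the script extends the progression and prints 7 + 3 = 10.
# Input "1 3 7" -> diffs [2, 4] with 2 < 4, so it fills the larger gap and
# prints 3 + 2 = 5, giving the progression 1, 3, 5, 7.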
|
# import setup
import os.path
import json
import sys
if __name__ == "__main__":
assert sys.version_info >= (3, 7), "Minimum Python version: 3.7.0"
"""
if not os.path.exists("./configinfo.json"):
with open("./configinfo.json", "w") as configinfo_json:
json.dump({"first_time_setup": False}, configinfo_json, indent=2)
with open("./configinfo.json", "r") as configinfo_json:
loaded_json_obj = json.load(configinfo_json)
try:
if not loaded_json_obj["first_time_setup"]:
# if input("Would you like to run the first-time setup for this program? (y/n): ").lower() == "y":
print("Running first-time-setup, ensure you have a stable internet connection.")
setup.install_from_requirements("./requirements.txt")
loaded_json_obj["first_time_setup"] = True
print("First time setup complete.")
# else:
# print("To install packages required for this program, run ./setup.py, found in this folder.")
except KeyError:
loaded_json_obj = {"first_time_setup": False}
with open("./configinfo.json", "w") as configinfo_json:
json.dump(loaded_json_obj, configinfo_json, indent=2)
"""
import gui
gui_object = gui.MainGUI()
|
int_ = 2
list_ = [1, 2, 3]
|
"""
add account_session table
"""
from yoyo import step
__depends__ = {'20210808_01_sS1X2-add-table-for-processed-sub-ids'}
steps = [
step("""
CREATE TABLE account_session (
id serial primary key,
account_id int not null references account(id),
service varchar(20) not null,
encrypted_key text not null,
session_key_sha256_hash char(64) not null,
retries_remaining int not null default 2,
created_at timestamp not null default (now() at time zone 'utc'),
last_imported_at timestamp not null default (now() at time zone 'utc')
);
CREATE UNIQUE INDEX ON account_session (service, session_key_sha256_hash);
CREATE UNIQUE INDEX ON account_session (account_id, service);
CREATE INDEX ON account_session (account_id);
""")
]
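# Note (sketch): yoyo's step() also accepts a rollback statement as a second
# argument, e.g.
#   step("CREATE TABLE account_session (...)", "DROP TABLE account_session")
# which would make this migration reversible.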
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from app.models.tables import Role as RoleTable
class Role:
pass
|
import time
import os
import math
import argparse
from glob import glob
from collections import OrderedDict
import random
import warnings
from datetime import datetime
import joblib
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
import facenet
import keras
import tensorflow as tf
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD, Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger, LearningRateScheduler, TerminateOnNaN, LambdaCallback
import archs_face
from metrics import *
from scheduler import *
from keras.preprocessing import image
from PIL import Image
import re
import cv2
from concurrent.futures import ThreadPoolExecutor
arch_names = archs_face.__dict__.keys()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--name', default=None,
help='model name: (default: arch+timestamp)')
parser.add_argument('--arch', '-a', metavar='ARCH', default='vgg8',
choices=arch_names,
help='model architecture: ' +
' | '.join(arch_names) +
' (default: vgg8)')
    parser.add_argument('--num-features', default=5, type=int,
                        help='dimension of embedded features')
    parser.add_argument('--num-embedding', default=5, type=int,
                        help='dimension of embedded features')
    parser.add_argument('--num_images', default=0, type=int,
                        help='number of images to use')
parser.add_argument('--batch_size', default=32, type=int,
help='batch size')
parser.add_argument('--nrof_classes', default=3139, type=int,
help='nrof_classes')
parser.add_argument('--test_size', default=100, type=int,
help='test size')
parser.add_argument('--steps_per_epoch', default=1, type=int,
help='steps_per_epoch')
parser.add_argument('--scheduler', default='CosineAnnealing',
choices=['CosineAnnealing', 'None'],
help='scheduler: ' +
' | '.join(['CosineAnnealing', 'None']) +
' (default: CosineAnnealing)')
parser.add_argument('--n_epochs', default=1, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--workers', default=16, type=int, metavar='N',
help='number of workers')
parser.add_argument('--epochs', default=35, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--optimizer', default='SGD',
choices=['Adam', 'SGD'],
help='loss: ' +
' | '.join(['Adam', 'SGD']) +
' (default: Adam)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--min-lr', default=1e-3, type=float,
help='minimum learning rate')
parser.add_argument('--momentum', default=0.5, type=float)
parser.add_argument('--validation_set_split_ratio', type=float,
help='The ratio of the total dataset to use for validation', default=0.0)
parser.add_argument('--min_nrof_val_images_per_class', type=float,
help='Classes with fewer images will be removed from the validation set', default=0)
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches.',
default='')
parser.add_argument('--test_data_dir', type=str,
help='Path to the test data directory containing aligned face patches.',
default='')
args = parser.parse_args()
return args
def preprocess_images(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
def main():
args = parse_args()
# add model name to args
args.name = 'mnist_%s_%dd' %(args.arch, args.num_features)
os.makedirs('models/%s' %args.name, exist_ok=True)
print('Config -----')
for arg in vars(args):
print('%s: %s' %(arg, getattr(args, arg)))
print('------------')
joblib.dump(args, 'models/%s/args.pkl' %args.name)
with open('models/%s/args.txt' %args.name, 'w') as f:
for arg in vars(args):
print('%s: %s' %(arg, getattr(args, arg)), file=f)
IMG_HEIGHT = 160
IMG_WIDTH = 160
classes, nrof_classes = facenet.get_dataset(args.data_dir)
path_exp = os.path.expanduser(args.data_dir)
test_exp = os.path.expanduser(args.test_data_dir)
if args.nrof_classes:
nrof_classes = args.nrof_classes
image_paths = [img_path for i in range(nrof_classes) for img_path in glob(os.path.join(path_exp, classes[i], "*.jpg"))[2:9]]
image_paths = np.array(image_paths).flatten()
test_image_paths = [img_path for i in range(nrof_classes) for img_path in glob(os.path.join(path_exp, classes[i], "*.jpg"))[0:1]]
test_image_paths = np.array(test_image_paths).flatten()
image_paths = image_paths[:].tolist()
test_image_paths = test_image_paths[:].tolist()
real_classes = np.array(list(map(lambda a: a.split("/")[4], image_paths)))
test_real_classes = np.array(list(map(lambda a: a.split("/")[4], test_image_paths)))
train_set = image_paths
test_set = test_image_paths
def path_to_tensor(img):
face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml')
faces_multi = face_cascade.detectMultiScale(img, 1.1, 4)
faces = [face.astype(np.int64).tolist() for face in faces_multi]
return faces
def resize(yes_face, img):
return cv2.resize(img[yes_face[1]:yes_face[1]+yes_face[3],yes_face[0]:yes_face[0]+yes_face[2]],(IMG_HEIGHT,IMG_WIDTH))
def paths_to_tensor(executor, img_paths):
def img_to_tensor(img_path, ii):
img = image.load_img(img_path)
gray = np.asarray(img.convert('L'))
img = np.asarray(img)
faces = path_to_tensor(gray)
if len(faces) > 0:
img = resize(faces[0], img)
return np.expand_dims(img,0), ii
else:
return False
list_tensors = []
list_indices = []
for result in tqdm(executor.map(img_to_tensor, img_paths, range(len(img_paths)))):
if result is not False:
list_tensors.append(result[0])
list_indices.append(result[1])
return np.vstack(list_tensors), list_indices
y_train = real_classes
y_test = test_real_classes
y_values = pd.get_dummies(y_train).values
y_test_values = pd.get_dummies(y_test).values
if args.optimizer == 'SGD':
optimizer = SGD(lr=args.lr, momentum=args.momentum)
elif args.optimizer == 'Adam':
optimizer = Adam(lr=args.lr)
model = archs_face.__dict__[args.arch](args, len(np.unique(y_train)))
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
model.summary()
model.load_weights(os.path.join('models', args.name, 'model_sm.hdf5'))
callbacks = [
ModelCheckpoint(os.path.join('models', args.name, 'model_sm.hdf5'),
verbose=1, save_best_only=False, period=1, monitor='val_acc'),
CSVLogger(os.path.join('models', args.name, 'log.csv')),
TerminateOnNaN()]
if args.scheduler == 'CosineAnnealing':
callbacks.append(CosineAnnealingScheduler(T_max=args.n_epochs, eta_max=args.lr, eta_min=args.min_lr, verbose=1))
if 'face' in args.arch:
print("Training started")
train_executor = ThreadPoolExecutor(max_workers=2)
X_train, train_non_tensors = paths_to_tensor(train_executor, train_set)
test_executor = ThreadPoolExecutor(max_workers=2)
X_test, test_non_tensors = paths_to_tensor(test_executor, test_set)
model.fit([X_train,
y_values[train_non_tensors]],
y_values[train_non_tensors],
epochs=args.n_epochs,
batch_size=args.batch_size,
workers=args.workers,
validation_data=([X_test,
y_test_values[test_non_tensors]],
y_test_values[test_non_tensors]),
callbacks=callbacks, verbose=1)
    # model.load_weights(os.path.join('models', args.name, 'model.hdf5'))
# X_test = paths_to_tensor(test_set)
# if 'face' in args.arch:
# score = model.evaluate([X_test[:args.batch_size], y_test[:args.batch_size]], y_test[:args.batch_size], verbose=1)
# else:
# score = model.evaluate(X_test, y_test, verbose=1)
# print("Test loss:", score[0])
# print("Test accuracy:", score[1])
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
@brief test log(time=60s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
from pyquickhelper.pycode import fix_tkinter_issues_virtualenv
from pyquickhelper.ipythonhelper import execute_notebook_list_finalize_ut
from ensae_projects.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_notebook
import ensae_projects
class TestNotebookHackathon(unittest.TestCase):
def test_notebook_hackathon(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if is_travis_or_appveyor():
return
fix_tkinter_issues_virtualenv()
temp = get_temp_folder(__file__, "temp_hackathon_2015")
keepnote = ls_notebooks("hackathon_2015")
self.assertTrue(len(keepnote) > 0)
keepnote = [
_ for _ in keepnote if "database_schemas" not in _ and
"process_clean_files" not in _ and
"times_series" not in _ and
"upload" not in _]
def valid_cell(cell):
if "%blob" in cell:
return False
if "blob_service" in cell:
return False
return True
res = execute_notebooks(temp, keepnote, filter=lambda i, n: True, valid_cell=valid_cell,
fLOG=fLOG, clean_function=clean_function_notebook)
execute_notebook_list_finalize_ut(
res, fLOG=fLOG, dump=ensae_projects)
if __name__ == "__main__":
unittest.main()
|
# ----------------------------------------------------------------------
# OID Rule Loader
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import threading
from contextlib import contextmanager
_tls = threading.local()
@contextmanager
def with_resolver(resolver):
"""
OIDRule resolver context.
:param resolver: callable accepting name and returning
OIDRule class with given type
:return:
"""
    _tls._oid_rule_resolver = resolver
    try:
        yield
    finally:
        del _tls._oid_rule_resolver
def load_rule(data):
"""
Create OIDRule instance from data structure.
MUST be called within resolver_context
:param data: parsed from json file
:return:
"""
resolver = getattr(_tls, "_oid_rule_resolver", None)
assert resolver, "Should be calles within with_resolver context"
if not isinstance(data, dict):
raise ValueError("object required")
if "$type" not in data:
raise ValueError("$type key is required")
t = data["$type"]
rule = resolver(t)
if not rule:
raise ValueError("Invalid $type '%s'" % t)
return rule.from_json(data)
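# Usage sketch (hypothetical resolver and rule class; load_rule() must run
# inside the with_resolver() context defined above):
#   rules = {"oid": SomeOIDRuleSubclass}  # maps $type name -> OIDRule class
#   with with_resolver(rules.get):
#       rule = load_rule({"$type": "oid", "oid": "1.3.6.1.2.1.1.3.0"})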
|
# -*- coding: utf-8 -*-
from base64 import urlsafe_b64encode, urlsafe_b64decode
from Cryptodome import Random
from Cryptodome.Util import Padding
from Cryptodome.Cipher import AES
from conf import conf
class AESCipher:
def __init__(self):
self.key = conf['SECRET_KEY']['secret_key'].encode()
def encrypt(self, plaintext: str):
padded_plaintext = Padding.pad(
data_to_pad=plaintext.encode("utf-8"),
block_size=AES.block_size
)
iv = Random.new().read(AES.block_size)
cipher = AES.new(
key=self.key,
mode=AES.MODE_CBC,
iv=iv
)
ciphertext = cipher.encrypt(padded_plaintext)
return urlsafe_b64encode(iv + ciphertext).decode("utf-8")
def decrypt(self, ciphertext: str):
ciphertext = urlsafe_b64decode(ciphertext.encode("utf-8"))
iv = ciphertext[:AES.block_size]
cipher = AES.new(
key=self.key,
mode=AES.MODE_CBC,
iv=iv
)
padded_plaintext = cipher.decrypt(ciphertext[AES.block_size:])
return Padding.unpad(
padded_data=padded_plaintext,
block_size=AES.block_size
).decode("utf-8")
|
from pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x08\x2d\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x2d\x7c\x00\x00\x00\x00\x00\x18\xa3\x54\xc0\x8b\xd9\xf4\x9f\x8f\x59\x62\xb7\xbb\xfb\x46\x82\xe7\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x88\x91\x41\xed\xf9\x2e\xeb\x46\x3f\x90\x5f\xc6\xcf\xf0\x26\x91\xb7\xcf\x44\x4c\xa2\xa3\x43\xc5\x05\x36\x52\x68\xc9\x38\xa6\x9b\xc5\x2d\xec\x3e\x6c\x2d\x75\x2b\x6b\x0b\x5c\x8a\x4c\xb6\x79\x3e\xbf\xdf\x30\x57\x3e\x01\x13\xa6\x86\x70\x7a\x2a\x6d\x70\x9f\xf0\x88\x00\x0c\xe7\x63\xcc\x8a\x08\xc8\x7d\x60\x89\x99\x45\x83\x68\x77\xde\x92\x74\x75\xae\x86\x95\x44\x42\x6d\x7c\xf5\x09\x2f\x1b\xdb\x9c\x9c\xe3\x55\xe8\x77\x29\xca\x2f\xc5\x13\x01\x97\x59\x3c\x19\x13\xef\xa8\xc1\x4a\x38\x18\xac\x58\x86\xfe\xf3\x54\xb5\x6d\x5e\x94\x37\x5a\x7a\x8b\xd5\xaf\xe9\xcb\xfe\xf3\x0a\x1f\x88\x3c\x15\x05\x0a\x21\x07\x04\x81\x93\x2c\xc3\xc3\xca\xe5\x81\x22\x62\xb6\xbb\x91\x0e\x3e\x0c\xe5\x86\x53\x91\xe1\xe1\x6d\x4a\xe4\x3a\x68\xc7\xc6\x49\x72\xe8\xc2\x9c\x49\xf0\x9e\x58\x5d\x13\x43\xfd\x2f\xc0\x0b\xa7\x71\x19\xc8\x55\x21\xbd\x49\x0a\xf6\x9c\xac\x53\x7e\xa7\x5d\x66\xd4\x5d\x72\x46\x61\xc5\xcc\x2f\x77\xd4\x40\x69\x5c\x05\x0f\x39\xc1\x74\x75\x90\x58\x1a\xe0\x7f\xc9\xfb\x36\x63\x4b\xcd\x3a\x08\xaf\xa6\x5f\xc3\xae\xcf\xb6\x75\x9f\x48\x60\x3f\x3a\x0c\x66\xea\x70\x48\xb9\x80\xa6\xa0\x31\xb4\x51\xc9\xc6\x23\x12\xc5\xa2\x52\x01\x24\x79\x59\xc3\xfe\x3c\xaf\x4a\xd8\xca\x1a\x9b\xc5\xdc\xa1\xe4\xd4\x5b\xf2\x3c\xa6\x20\x92\x0b\xb7\xe2\x61\x98\x4a\x0b\x84\xc2\x32\xad\x26\xa7\x2e\x8a\x26\xef\x00\x1a\x44\x13\x7a\x72\x13\x01\x36\x4f\xf8\xb3\x90\x4e\x31\x6d\xfa\x0d\x12\x67\x32\xcc\x28\xbe\x18\x80\xc9\xad\x98\x54\x05\xf1\x42\x69\x03\xe4\xe1\x8d\x65\x18\x92\x1b\x69\x35\xc9\x80\x09\x24\x0f\xb0\x16\x6f\x87\x7b\xf9\xae\x44\x87\x00\xf2\xc2\x56\x2b\x86\x0f\x06\x79\x11\x82\x0d\xff\xf4\xdb\x9f\xaf\x86\x2a\xc9\x53\x8a\xf2\x8d\xc9\x50\x55\x19\x8a\xe2\x03\xf5\x0e\x41\x13\x93\x27\x0b\x7e\x41\xe1\x5a\x55\xda\x72\xe0\xaf\x80\x9d\x2a\xec\xc0\x27\x79\xb5\xf6\xc2\x12\x31\xb7\x9c\xc8\xc3\x0f\x61\x67\x22\xa8\x90\x25\x36\x3d\x87\x01\x2e\x19\x95\xb0\xca\x9b\xae\xeb\x38\x7f\xd2\xaa\x84\x56\x8e\x6e\xb0\x2b\x74\x49\x48\x02\x3e\xdd\x2b\xa7\x0c\xc5\x2a\x4f\x42\x5f\x58\x99\x7b\x8f\x5c\xe5\x7c\xc9\xaf\x6e\x9d\x10\x84\xa5\xaa\x92\x46\xf7\xd7\x70\x10\x31\x5e\x06\x83\xf9\x5a\xe3\xd9\xa4\x11\xce\xd8\xbd\x80\xe6\x3f\x32\xac\x17\xa6\xf5\xeb\xf8\x13\xc1\xfd\x5b\x4e\x60\x06\xc9\x84\x0d\x3c\x12\x87\xcc\x51\xd2\x1f\x9e\x62\x69\xa4\x63\x7a\xef\xc6\x0d\x2b\xa0\x4f\x7f\x21\xf4\x3f\x75\x04\x05\xc2\x7f\x47\xd1\x76\x80\x7c\xdc\xf3\x64\x5a\x98\x9b\xf5\x34\xbd\xa1\xcf\x7b\x9a\x52\x67\x7e\xf2\xe3\x19\x13\x78\x76\x05\xe4\x2e\x5d\xdc\x64\xe3\x57\xe5\xe4\x1c\x72\x7f\xd4\xaf\xae\x42\x7d\x2d\x47\xc3\xfa\x85\x49\x29\xc4\x15\x4e\xc9\x04\xbb\x98\x64\x1c\xff\xda\xd3\x0d\x6c\xe9\xbb\x6e\xb3\xfa\x2d\xe1\xf8\x66\x50\x14\xa9\xeb\x43\x2b\x24\xf3\x4e\xf9\xa9\x81\xa8\x05\xfb\xd8\xf2\x6c\xef\x71\xac\x54\x12\xc1\x7d\xe9\x86\x99\x4b\x34\xd0\x1c\x18\x0e\x8d\x1d\x8f\x0b\x15\x07\xc8\x07\xd4\x94\x7a\x29\x50\x23\x79\x32\x6c\x6d\x74\x44\xc4\x12\x0b\x02\x9f\x73\xa8\x14\x09\xf6\xbc\xce\xf6\xf4\x25\x42\xa8\xd6\x4c\xf8\x27\x11\xa7\x45\x37\x48\xb7\x7d\x56\x57\xaf\x5b\x3d\xc2\xb0\xaf\xf1\x79\x41\x20\xde\x17\xba\x0c\xa0\x0c\x7a\x4a\xbb\x79\x86\xb6\xac\x44\x6c\x5d\x80\x3c\x49\xbc\xcb\xf6\x07\xec\xc4\xce\x1c\xae\x70\x2b\x9d\x48\x97\x25\xdc\x62\x0c\x95\x3e\x61\x1c\x98\x59\xc5\xcc\x24\x31\xa7\x90\xb5\x59\x6d\x12\x13\xf4\x01\xe0\x17\xfe\x4b\xe2\x5f\x11\x21\xb0\x1a\xd9\x70\x3e\x05\xe5\x1c\x5d\x91\xb9\x9c\xdf\x81\xa2\x35\xe9\xa3\x2a\
xd5\xb1\xeb\x32\x6e\x6e\xe8\x11\x16\xdb\x67\x76\x7c\x4c\xbc\xb3\x00\x86\x04\x9e\x63\x1d\x98\xfb\x7f\x68\x24\xb5\x61\x0c\xf1\x34\xec\x5d\xba\x27\x0d\x52\xcd\xc3\x14\x78\x5f\xcd\x4c\x6c\x7f\xee\xd2\xfd\x53\x28\xe9\xf9\x9a\x7f\xb5\x96\x83\x1c\x5c\xb4\xcc\x0b\xa9\xd0\x78\xba\xac\xc5\x22\xa3\x94\x08\x96\x70\xbe\x89\xf7\x4d\x55\x8a\x1e\x59\xb1\x21\xb6\x85\x5c\x21\x51\x27\x9e\x08\xda\xdc\x37\x11\x26\x6c\x32\x9a\x54\x9f\xc7\x28\xbc\xd6\x23\xb5\x33\x0b\x55\xf1\x2c\x57\x79\x67\xcc\x1a\x7f\xb3\xca\xeb\xbc\xe3\xbc\x02\x19\x37\x5a\xe6\x5d\xbc\xc1\x25\x9a\x5d\x39\x8c\xe8\x3e\xbe\x11\xbf\x2d\xe7\xbf\xb1\x45\x54\xfd\x9b\x46\x30\x22\xf7\x92\xbb\xe6\x08\x25\x6c\x63\xb8\x00\x05\xde\x1a\xee\x12\xbb\x92\xf2\x58\xa1\xfb\x9a\x6e\xeb\xb6\xe7\x39\x76\xfb\x14\xf4\x53\xfd\xd5\x39\x1f\x63\xba\x8b\x49\x0a\xee\x63\xbb\x56\xc9\xce\xf4\xb0\x5a\x3a\x71\xbd\x68\x54\xf2\xda\xc6\xe2\x13\x89\x71\xe8\xd4\xbe\xe9\xca\xc0\x4e\xa5\xaa\xa4\x33\xbe\xc1\x24\x88\x89\xcd\x60\x6e\x7f\xd9\x9a\x21\xcd\xc6\x88\x60\x60\x9c\xdb\x19\x79\x3e\x31\x9f\x01\xb9\x3a\x87\x6b\x38\xcf\xc7\xc1\x50\x40\xc0\x82\x09\x13\x29\x8c\x97\x19\x17\x35\x27\xb4\x77\x0f\xca\x8c\x5a\x3d\x55\x62\xd6\x00\x51\xd9\x0c\x61\x39\x8d\x48\xbe\xdd\xd3\x82\xae\x8f\xc6\xbe\xb0\xb7\x74\x6a\xfd\x42\x2b\xa8\x91\x0f\xa8\x6a\x84\x4f\x55\x34\x85\x98\x31\x50\xb6\x46\x4f\xb7\xb8\xb2\x05\xcd\x5c\x30\xf4\xd5\x31\x78\xc1\xa2\x23\x39\xe1\xfc\xcc\xf2\x42\x42\x53\x62\x4e\xa6\x96\xee\x7e\xf3\xa4\xfb\x87\x79\xef\x84\x3c\xaf\xd3\xf4\x0e\x3d\x62\x5b\xb0\x6e\xdb\x0c\x3a\x95\x95\x98\x45\xff\x00\xa2\x9e\xf5\x93\x26\xe5\x3c\xc2\xd6\x43\x5b\xe6\xa9\x70\x9a\x51\x31\xf4\x95\x37\x82\x82\xb3\xdc\x5f\xb0\x41\xf3\x26\x2d\xbb\xd5\x22\x52\x2c\x7b\x55\xb7\xc8\xae\xdd\x84\x02\xc7\xde\xb4\x9a\x6c\x38\x4c\xe8\x54\x75\x2e\xc8\x84\x22\xb8\xa4\x8c\x80\xfa\x68\x5a\x1d\x2f\x36\x6e\xf0\xb8\x13\xa4\x71\x1e\x7c\x8f\xe5\x39\x2e\x23\x6c\x8e\xd2\x97\x0a\xa2\xbe\xdb\xb4\xc1\xad\x19\xbb\x00\xec\x9c\x52\x20\x30\x0b\x8c\xcb\xad\x9f\xff\xa0\x6c\x62\x72\x81\xd1\x0e\xb7\xa2\x85\xd5\x42\xb1\xa7\x93\xc9\xa0\xe1\xa6\x4a\x4b\x5a\xe5\x04\x0b\xbd\xbc\x1c\x4c\xb8\x74\x90\x26\xb8\xfe\xef\x60\x2e\x1f\x70\xfc\xcf\x71\xb7\x01\xa1\xbc\xba\xf4\x9a\x96\xa3\xe6\x52\x72\x4b\x09\xdb\x3d\x53\xec\x00\x7b\x0e\x78\x72\x9a\x79\x9b\x58\xe5\x99\xaa\xaa\x9f\x50\xfe\xa6\x37\x08\x44\x7b\x0e\x06\xba\xc8\x99\xad\x9b\x61\xf1\x28\x27\xfa\xb7\xc5\xe2\xcb\xc0\xb5\x44\x63\x26\xbd\xda\x49\x53\xde\xd9\xbd\x0f\x14\x94\x40\xce\x03\xe6\x21\x4d\xaa\x87\x5f\x2c\x32\xc4\x4e\x1d\x1a\x3e\xad\x36\xce\x8b\x0c\x70\x84\x5e\x1e\x69\xe2\xef\xa1\x00\x47\x81\x3c\x5d\xfe\x8e\xed\x64\x56\x22\x0a\x1d\x9c\x29\xc0\xae\x55\x0d\xe8\x05\xae\x44\xee\xfe\x72\xc4\x94\xd0\x11\x19\xa0\xed\xef\xbc\xcc\x30\xd7\x27\xad\x8a\x30\x09\x1a\x53\x85\x1a\x99\x16\x3e\x52\xc3\x06\x6c\x3d\x61\x1c\xf4\xc8\xee\xc7\x70\x0e\xa2\x7f\x49\x17\xa8\x64\xb0\x6a\x90\xae\x0b\x6b\x49\xc7\xa8\x83\x6e\x98\xdd\x20\x84\xfd\xee\x3b\xf3\x49\x5e\xbb\xfb\x2f\x8e\x7b\x16\x0e\x7f\xbd\xec\xaf\x41\x97\x98\xde\xf7\x70\x93\x5b\xa3\x2b\xf9\xc1\xea\x92\x58\xf2\x09\xa6\xbe\x7e\x99\x68\xd6\x19\x7f\xbb\xb7\x8b\x79\xe1\xb2\x98\x55\xd6\x4d\xb2\x0e\x43\xb8\x4d\xca\xe5\xed\xe4\x91\x49\x1d\x59\x61\x79\x18\x99\x43\xf4\xa1\xd9\x7a\x43\x8b\x4c\x46\xdd\xbd\x00\x6b\xf7\x65\x04\x41\x0a\xac\x7a\x0c\x5f\xd0\x06\x6e\x9c\x09\x3a\xf2\x04\xd9\x59\xc8\x0e\x20\xcd\x6d\xca\x1c\x7a\x45\x82\xd5\x1b\xb5\xc2\xe7\x5a\xa8\xfc\x13\x32\x68\x7e\x32\x79\x69\xfc\x6c\xee\xd2\x52\xaf\x20\xec\xee\x19\x98\x0f\x2c\xb6\xbb\x4b\xe2\x16\x05\x03\x93\xd7\x29\x3f\x23\xc2\xac\x2b\x43\x5f\xe6\xc4\x24\xdd\x82\x0b\x14\xd4\x85\x1f\xd0\x2c\x0a\x66\xc3\xde\x60\x4e\xdb\xe1\xd8\x7a\x93\x43
\x8d\x79\x0a\x49\x1a\x73\x17\x40\x31\xca\xf0\x18\x68\x08\x73\x56\xba\x71\x78\xf9\xea\x73\xcf\xef\xa3\x3e\xb5\x36\xd2\x63\xfb\xab\xf3\x06\x2c\x6b\x8a\x9d\x8b\x02\x93\xe3\xdf\x19\xd3\x38\x87\x5c\xc6\x72\x0b\x2a\x61\x97\xd2\xe5\xd9\x90\x1f\x50\x52\x36\xe4\xb3\x37\x8a\x5a\x9d\x09\x36\xa8\x2c\xdd\x1b\xd8\x57\x38\x5f\x1c\x56\xfd\xcc\xc1\x6c\x47\xaa\x5e\x14\xda\x05\xfb\x2d\x56\x32\x97\x41\x93\x4b\xff\x06\x92\x6e\x83\xa2\x26\x22\x01\x36\xff\x02\x80\xbc\x54\xf1\x92\xdf\x78\x58\x07\x53\xc1\x11\x76\x9d\x22\xb6\xe4\x3a\x64\x8d\x80\x23\xd0\xaa\x97\xa0\x17\x9f\x84\x36\xd3\xd1\x10\x2d\x6e\x08\x42\x02\xb5\x01\x22\xae\x58\x8c\x6e\xde\xb2\x61\x25\x0b\x66\x19\x34\x16\xc7\xff\x82\x94\x3e\x46\x0f\x3e\x4e\x94\x52\xbf\x78\x97\x06\x54\xdb\xe4\xfd\x1f\xdf\x3c\x61\x8e\x73\x19\xcd\xe9\xe6\xde\x91\xd3\x2f\xf8\x4c\x0b\xef\xe3\x95\x1b\x7b\x61\x24\x21\xd8\x76\xd2\x07\x01\x5f\x7a\x5b\x34\xa5\x75\x35\x5f\x2d\x22\xb7\xb8\xe7\xe8\xe0\x57\x9f\x26\x03\x1f\x65\x6a\x6f\xf0\xf1\x7b\x5d\xdc\xc3\x29\x4e\x1a\x3b\x5a\xd1\x47\x3d\x06\x8b\x4d\x16\xd7\x22\x59\x4f\x0d\xb3\x6d\x40\xb5\x6e\x86\x40\xa3\x22\x01\xca\x3b\x8f\x01\x02\x8d\x87\x68\xd0\xdd\x7f\x4e\x44\x37\x4c\x59\x32\xa5\x42\x00\x99\x1f\x1a\xc6\x21\x22\x44\xdf\x8e\x69\xbb\xc8\x17\x0f\x0c\xc2\x78\x05\x61\x00\x35\x38\x64\xfc\x2a\x43\xc8\x9b\x25\xe5\x7d\x43\x0f\x7a\xd9\xe8\x82\xa7\x3c\x02\x32\xd0\x74\x36\xa3\x8a\x76\x88\xc6\xa0\x28\xcb\xa4\xe2\x09\xb0\xe6\x5c\x22\x45\x09\x65\x01\x93\xea\x3f\xb0\xd5\xa8\x34\x51\xe8\x1f\x5e\x1d\xac\x32\x69\x1b\x84\x2b\x72\x4f\x34\x24\x04\xfc\xba\x49\x7e\x6b\xc6\xbe\x21\xf0\x66\x94\x06\xe0\xae\x83\x08\xc0\xaf\x73\xbb\xed\x0a\x82\xf3\x47\xb3\x2a\x42\x97\x51\x76\x16\xb5\xe0\x40\xc9\x03\xc6\x15\xd1\xc8\x02\x6f\xd6\x2a\x9c\x39\xce\xd3\xf3\x85\x8d\x13\x9c\xa4\xc9\xca\x4a\xf4\x07\x9d\x9e\x87\x40\x7a\x47\xb9\x6b\xba\xcf\xbd\xc1\x4a\x4d\x11\x14\x02\x68\x91\x06\x3b\x87\x28\x0d\x8b\xf3\xf3\x38\x68\xdb\x69\xca\xa6\x63\x08\x7f\x8d\xee\xc1\x35\xef\x53\xd9\x59\xe5\xf3\xc5\x44\x9d\x27\x00\xcb\xca\x58\xc0\xbc\xc6\x7f\xc1\x8d\x41\xdc\x28\x2d\xad\x58\x16\x1b\x61\x14\x50\x7c\x9e\x36\xda\x1e\x1f\x50\x1d\xbb\x86\x72\xb2\xd3\xdd\x05\xd0\x9b\xab\x28\x6e\x89\x2e\x29\xb5\xbd\x88\x87\x81\xa5\x5a\x24\x31\x42\x8a\x82\xc8\x1f\x25\x28\x9d\x48\x85\xe9\xc6\x12\xd5\x0e\xd4\xa7\xc9\xdf\xdb\x5d\x27\x58\x76\xb8\x76\x3b\x49\xa4\x98\x49\xa8\x0b\x27\x64\x80\x65\x03\x07\xc5\xb5\x29\xe9\x7f\x68\xdf\xed\x04\xb2\x7a\x15\x6b\x35\x9a\xb8\x5e\x28\x88\xd7\x8b\xce\x3b\x09\xb7\x48\x71\x6a\x72\x52\x94\xe0\xab\xa6\x10\x64\xc2\x7d\x43\x2a\x07\xf0\xb1\x7d\x03\x39\x0c\x88\x03\x94\xe0\xee\x0f\xd4\x79\x53\x94\xb2\x0c\x6a\xc5\xb8\x1b\x27\xcf\xac\x6f\xa9\x38\x80\x99\xc5\x25\x25\x8f\xda\x75\x1e\xf2\x90\x7c\x6d\xd9\xb8\xf3\x6a\x5d\x65\x53\x50\x57\xbb\x1e\xe9\x2c\x9a\xd6\x57\xee\x24\x6e\x6a\xaf\x6c\xa4\x10\xc0\x97\xae\xf3\x2c\x74\x8e\xa3\xb0\x00\x96\x0d\x6e\xae\x94\x1c\x97\xbc\x98\x64\xf7\xe0\x6e\xd2\xf7\x4d\x08\x80\xea\x6a\xea\xea\xf9\x14\x70\xad\xcc\x34\x80\xfc\xcc\x96\x82\xe2\x86\x0f\xb4\x55\xe5\x85\xca\x7d\x7f\x80\x11\xb7\x96\x7e\x4c\xf0\x8b\x85\xce\x8f\x18\x26\xfa\x3f\x13\xe7\x16\xe9\x29\x19\xb7\x79\x18\xeb\xbb\x97\x9a\xf8\x70\x86\xf7\x7e\x42\x93\x3d\x49\x1e\x6a\x14\x2c\xc1\x3e\xef\x8c\x8c\xde\xf4\x34\x6a\x98\x7c\xf0\xd0\xab\xae\x29\x3e\xd1\x04\x9c\x9f\xc6\xea\x05\xa5\x4f\xc1\xe2\x6c\x3d\x24\x13\x90\xb9\xa9\x2c\xa6\xd8\x8b\x2d\x4f\xb3\xcf\x45\x42\x95\x15\x28\x21\xdd\xba\xd9\x33\xf2\x56\xbb\xf2\x6b\x6c\x30\xca\xd7\x5d\xf2\x86\xda\x70\x01\x41\x95\xe9\xa3\x95\xd1\x35\xb5\xbb\x19\xe5\xfe\x8a\x12\x34\xf5\x11\x07\xcb\xab\x8d\x00\x39\x04\x63\xa8\x09\x89\x44\x52\x0e\x9b\x1c\x3d\x62\x53\x20\x5d\x24\x36\xb3\x3e\x95\xe
0\xf7\xc5\x2d\x7f\xaa\x9b\xcd\xaf\x9c\x31\xbb\x23\x51\xd4\x1f\xc3\x4b\x3a\xab\x17\x63\x75\xe8\x48\xf3\xd0\x78\x41\x4b\x6a\xaa\xfe\x36\x7c\x3f\x95\xb1\x9b\x90\x5f\x9e\x40\x94\x6c\xd7\xfd\x26\xd2\xa0\x86\xa2\x18\x5e\x37\x08\x8e\xb6\x76\x6a\xea\xcf\x12\x17\xdb\xc4\x09\x49\x1c\x31\x09\xe8\xe9\xef\x17\xc6\x53\xe9\xe6\xcb\xac\x4a\x5a\x79\x72\x9a\x60\xcd\x08\x26\x00\xb1\x17\xa2\x02\x83\xc9\x33\xeb\xba\x30\x5c\xe1\x10\x5a\x87\xa8\x0a\x2f\x1e\x06\x76\xf1\xed\x97\xeb\x8c\x5e\x39\xd4\x15\x25\x5a\x17\x9b\xaa\x57\x1a\xad\x64\xd5\x96\x28\x99\xa0\x97\x75\x92\x21\x06\xd8\x5a\x89\x8d\x64\xa7\x27\x99\x46\xf4\x3a\xab\xa8\x6a\xac\xf1\x6f\x94\xdd\x87\x12\x52\x47\x2a\x9b\x29\xb9\xf5\x3e\xdc\x39\x73\x2a\x0c\xb4\xe2\x08\x90\x67\x50\x0e\x4a\x40\x78\xd0\x58\x52\xc6\x4a\xde\x32\x55\xb4\x95\xb7\xf9\x75\x1d\x3d\x61\x2e\xad\xca\x7b\xaf\x20\x70\x26\xf9\x93\x10\xa0\x82\xaa\x9b\xa8\xbb\x26\xc4\xff\x4a\xc0\x6f\xd9\x53\x11\x84\xc4\x8e\x9f\xad\xc2\xce\x6c\x0c\x35\x85\xef\xfb\x73\x15\xb1\xcc\x45\x3a\xa7\x28\x0a\xe9\x8c\x74\x29\x19\x90\x22\x2e\x74\x5b\xa5\xc5\x2a\xd8\x16\xd5\x4d\xa6\x57\x6d\x89\x83\xc3\x32\x28\x06\x19\x03\x00\x23\xa0\x2e\x68\x9f\xc4\x4e\xe4\x9e\x65\x0c\x8d\x69\xb1\x12\x24\xe7\xd8\x14\xbe\x4a\x6e\x77\x3d\xb6\xc0\x15\xe4\x56\xe7\x23\x55\xe9\x29\xa8\x88\xff\x77\x8a\x8d\x0a\xba\x97\x60\xff\xd0\xa8\x1c\x93\x14\xd9\xab\xf4\xfe\x73\x41\xba\x67\x29\x9e\xfd\x76\x65\xef\xdc\x3b\x07\x40\x91\x57\xf9\xbe\x74\xda\xf2\x2a\x48\x9a\x4e\x13\x4b\x53\x0c\xf5\xf1\xd8\x6f\x4e\x18\x6e\xee\x3e\x4a\x8d\x42\xe7\x53\x83\x32\xb5\x7d\x7f\x86\xb5\xfd\x93\xd5\x4c\x8d\x91\xcd\xa9\x2d\xa9\x96\xf0\xcf\x63\xc2\xe7\xdb\x47\x68\x5d\x76\xce\xe7\xb8\x2d\x3b\x5b\x7e\xd5\x12\x39\xa5\x6b\x10\x33\xe3\xdc\xc4\x80\x4d\xa7\x81\x46\x46\x3d\xd9\x9e\xa9\x4a\xc0\x63\x76\x48\x74\x45\x5e\xd3\xa8\xdc\x7c\x97\xce\x55\xb6\x3b\x5c\x87\xe1\xc8\x06\x2a\xa9\xee\x2c\x26\x5c\x55\xbd\x2a\x30\xba\xd6\x88\x6e\x74\x8a\xea\x9d\x3c\xa4\xec\x90\x4d\xd9\x1f\xd3\xdf\xb7\x96\x78\x5f\xfe\x5e\x18\xd8\xcf\x78\xd8\x15\xdf\x8d\x60\x85\x8c\xac\x25\x87\x58\x58\x06\x73\x14\x25\x0b\xab\xf3\xff\x4d\x45\x5f\xcb\xe6\xb5\x9a\x4d\x8e\xdd\x5d\x89\x8b\x8e\xc3\x51\x5c\x9d\x9a\x3a\x03\x16\xbc\xb4\x98\xe3\x0b\x43\xf4\x27\xf8\xca\x89\xa5\xd8\x26\x9f\x11\xd3\xf6\xc5\x0c\x85\x08\xa2\x50\x4e\x47\xd1\x49\x92\x21\xc8\x90\xeb\x8a\x23\xec\x59\xb5\x96\x35\xa0\x5d\xa0\x09\xbd\x20\x11\x30\x6a\xcd\x87\x5b\xf7\x6b\x7c\x2c\xf3\x78\x0f\xd3\x87\x75\x90\x04\xd7\x72\x03\xbe\x02\x78\x41\x04\xb4\x64\x46\x81\xdc\x3e\x4d\x79\x1b\xb7\x6a\x95\xca\x2b\xab\x19\x5e\xbf\xdd\x89\xee\xca\xf3\x36\xc9\x9c\xce\xdf\x9e\x36\xd3\xd7\x94\x31\xe2\x9c\x03\x83\x4d\xa4\x49\x84\x12\x31\x50\x52\x36\xb9\x58\xe1\x49\x7d\xca\x63\x2b\x7b\xf4\xfe\xd2\x09\x6c\x9b\x7a\x8a\x7a\x13\xda\xf2\xf8\x64\xaa\x1d\x70\x68\x51\x7b\xc9\x4c\xb3\xcb\xe7\xd1\x38\xf5\x22\x41\x01\x8e\x3c\xdb\x95\xe9\xd2\x42\xd6\x01\x4d\x02\xd8\x97\xfe\x1d\x91\x28\x87\xc8\x6b\x57\x79\x51\x14\x38\x02\xd0\x93\xa6\x90\x98\xc5\xba\x3f\x3d\xcf\xef\x64\x5b\x1d\xd9\x2a\xfb\xf0\x31\xf1\x8f\xd0\xbb\x52\x5d\x31\x87\xdf\xb6\xa5\xcf\x55\xad\x0e\x4c\x2a\x4c\x96\x22\x17\xa1\x2d\x08\x94\xc4\x96\x2f\xe0\x15\x73\xfa\xd6\xb2\x0f\x44\x89\x99\x8e\x56\xde\xab\x6e\x42\x87\xee\x0a\x41\x08\x47\xec\x85\x4d\x98\xae\x7e\xf8\x65\x65\x5f\xb7\x63\x06\x15\x37\x62\x2b\x5c\xbd\x89\x42\x30\xa5\xab\x99\x74\xdd\x9c\xe2\x89\x5c\x23\xc9\x2e\x07\x7d\x8d\x85\x73\xfd\xc2\x06\x82\x59\x7a\x6f\x1f\x87\x3a\x63\xa9\x97\xc4\xf7\x37\x28\xfe\xe2\xc9\x93\xfa\x1c\x8b\xc7\x06\x4f\x6d\x30\x81\xfc\x05\x5e\xb9\xbf\x28\x95\xda\x59\x8d\x93\xf8\xd9\xa9\xaf\xc6\x6f\xc8\x3d\x91\xa5\x99\xff\x87\x82\x08\x08\x14\xec\xc9\x53\xe9\x24\xc6\x40\x1c\x
2e\xeb\x59\x3f\x45\x13\xb1\x54\x6f\x17\xd5\x32\xd0\x00\xae\xf4\x6c\xd0\xc0\xc6\xcf\xcc\x10\x36\x8d\x83\x58\xd6\xa2\x7f\x09\x57\xfd\x13\x0e\x4e\xd8\xbe\x39\xf8\x0a\xbf\xec\x86\x04\x3d\x52\x8e\xea\xf2\xa0\xc1\x6c\xcd\x01\xc6\x3c\xcb\x2a\x20\x72\x2b\x0f\xbb\xf1\xd3\x06\x48\xe1\xd2\x43\xb9\x64\xf1\x73\xb7\x84\x56\x43\x2c\x2e\xa4\x7c\xe2\xaa\xd0\x8c\xb6\x3a\x0d\xe0\x97\x0b\xd2\xc9\x7b\x54\xbb\xbf\xa5\x7f\xd8\xd4\xdf\x41\xee\xad\x15\xb8\xe0\x03\x57\x67\xc6\xfc\x55\xdb\xb0\xbe\x74\xb4\x4b\x48\xf3\x36\x8b\xf5\x5b\xd4\x37\xc2\x92\x70\x23\x02\x1f\xfb\xb4\x91\xa9\xd0\x7e\x12\x0b\x71\x66\xca\x29\x9c\x97\x1f\xda\xb1\xf6\x43\x5b\x81\x60\xf5\x87\xaf\xaf\xef\x69\xd5\x84\x4c\x39\x03\x2e\x76\x04\x6b\x44\x13\xbe\x2b\x6e\xed\xbf\x7b\x76\x5b\x3e\x81\xa6\x31\x70\x3f\x0f\xa4\xb4\x44\xf7\x54\x87\xeb\x1c\x4f\xc5\x49\xfe\xd3\xb7\x30\x49\xf8\x72\xc9\x81\xf4\xd0\x56\x45\x2d\x55\x12\xe7\x34\x2e\x89\x9e\x14\xef\xc1\xed\x32\x72\xdd\x84\xbb\xff\xf5\x0e\xc7\x08\x15\xce\x57\xbb\x5b\xb4\x78\x43\x83\xcb\x70\x80\xea\x9d\x29\xdc\x13\x85\xd4\x12\x09\x1e\x94\x36\x3a\xae\x3b\x76\xf6\x72\xa5\x2e\xfa\x6a\xba\x1e\x8a\x2d\x99\x80\xbc\xc0\x35\xed\x1e\xc9\x66\x09\xe1\x26\x42\x8c\x68\xa7\xaa\x47\xc7\xf8\xc5\x75\x84\x8f\x54\x09\x63\xdf\x27\x63\x17\xc4\x13\x66\xf2\x6f\x85\xa5\x47\x29\xa6\xa5\xc1\xf9\x74\x1d\xfc\x52\xee\xf8\x5f\x8f\xa5\xe6\xe4\x3e\xa9\x2e\x9b\xce\xac\x64\xb6\x89\x30\x2c\x29\xde\x39\x73\x13\x39\x13\xea\x89\xb6\x57\x61\x0a\x55\x4e\x8d\x5f\x7c\xdf\x8b\xb4\xdf\x76\x9f\x38\xaf\xbe\xa8\xb0\xb6\xba\x82\x80\x67\xf3\x25\x53\x75\x59\x7c\xb2\x7b\xae\x4d\x99\x79\xe8\x72\x49\x4e\x4c\x54\x13\x9a\xba\xd2\x57\xdf\x4d\x92\x52\x12\x02\xfe\x83\x07\x55\x99\xd7\xa6\x7a\x12\xe3\x65\x82\x89\x48\x2a\x58\x0c\x41\x85\x0c\x91\x1b\x60\xa3\x11\xbb\xd8\xf8\x64\xe5\xec\x2b\x24\x28\x77\x03\x62\xfa\xaa\x6f\x4b\x93\xec\xbb\xbb\x1a\x72\xf0\xf7\x8e\x11\x2d\x8a\x8e\x05\x62\xc4\x98\x27\x83\xf6\xfc\xbe\x1a\xb5\xad\xa7\xf2\xfc\xdc\x51\x0e\x95\x5c\xc4\xc0\xd6\xc8\x12\x62\x9b\x52\xfd\x01\x6a\x1f\xd4\xd9\xcf\x15\xd8\x8a\x07\xe6\xe4\xf3\x60\x2f\xef\xbc\x8f\xd6\x4c\xc0\x6e\xbd\x36\x9d\xbd\x24\x2a\x85\x0f\x9c\x1c\xd6\xed\x79\xae\xef\x46\xde\x8d\x86\xc3\x32\x06\xfd\xbf\xe0\xd6\x95\xad\xa0\x64\x68\x74\x44\xda\x4a\xe0\x6f\xfe\x3b\x88\xfb\x8a\x02\x2f\xc9\x69\x7f\xa2\x06\xf5\xbf\x09\x34\x64\x5b\x67\x44\x2e\x44\x2e\xbf\xea\xc4\x53\x36\x3a\x45\xfd\x73\x01\x93\x40\xef\xad\xe8\x59\xfe\x19\xba\x9e\x67\xdd\xea\xf2\x4e\x7a\x0d\x31\xc2\x57\x11\x2e\x4d\x84\x78\x98\x51\xfe\xa4\x6a\x46\x4b\x5a\xc8\xf0\xb5\xa6\xf6\x8a\x3e\xab\xf2\xcc\x41\xd3\x94\x64\xcf\xb3\xca\x57\x7e\x08\x4a\x99\x88\xce\xb4\xe0\xfc\xa2\x1f\xd6\x29\x49\x15\x0d\xdd\xb9\x19\x3a\x02\x75\x44\xbd\xd4\xaa\xb0\xbb\xbd\x36\xeb\xa0\x2c\xa7\x93\x27\x91\x0e\x26\x0d\x1c\x32\xae\xf8\x72\x4d\x9c\xd2\x4a\x2e\x1d\xec\x67\xca\xd6\xe6\xbd\x7e\xa6\xcf\x0a\x2d\xa0\x9f\xcf\x66\x9d\x88\x7b\x87\x4b\x83\x43\x66\x37\x8d\x9f\xab\x2b\xb5\x4a\x4f\xe3\x97\x8c\x91\x51\x27\x62\xc9\x94\x93\xbf\x44\x5d\x2e\x00\x4d\x33\xeb\xa4\x4f\x3f\xc1\xcc\x41\x1c\x42\x2e\xea\x83\x9a\x69\x37\xe1\xeb\x34\x70\xcd\x13\x19\x90\x0b\xb4\x8d\xad\x7b\xdc\x26\x3f\xe0\x63\xb2\xb3\x05\xe5\xa6\x6c\x5b\x2f\xd4\x33\x21\x33\x84\xa5\x11\xdf\xa6\x52\x2d\xe6\x4c\x50\x01\x21\x10\xbe\x88\xb2\xe9\x06\x95\x5b\xdc\xd3\x51\x46\xa6\xd9\xc4\xa8\x16\xa8\xe6\xa5\xd7\xaf\x17\x93\x00\xde\xd3\x78\x78\xfb\x5f\x1a\x21\xaf\xa2\x6b\x4d\x99\x01\xba\x34\xb3\x30\x21\xb3\xee\x4e\x19\x6b\x79\x22\x05\xdc\x96\x95\x69\x01\xd2\x20\xfa\x6e\xac\x39\x59\x11\xa1\xc4\x93\x12\x70\x3f\x57\x34\x9a\xb1\x0b\x6c\x0a\xaa\xe6\x1a\x78\x11\x60\x94\x6e\x6c\x76\x63\x00\x5a\x2a\x63\x50\x4b\xe9\xff\x7b\xb4\x52\x39\xe5\x67\x4d\
x9b\x60\xe1\x29\x9f\x06\xb7\x0a\xf3\xdb\xfe\x73\x07\x13\xad\x7e\x75\xb6\x21\xa7\x80\xa7\xb9\x96\xc8\x4c\xc2\xce\x47\x83\x1d\x64\x1e\x1a\x31\xad\x89\xc6\x94\xf0\x71\x63\x82\xe3\x2a\xc4\x67\xe3\x7e\xa1\x40\x1d\x5c\xa7\x3d\x2c\xa9\xf5\xe2\x2d\x58\x5b\x1d\x00\x54\x9c\xa7\x09\x90\xcb\x9e\x43\xec\xbe\x75\x48\x95\xb4\x2e\xe3\x5b\x69\xa3\xd0\xb6\xf5\xd4\xa9\x5f\x32\x1c\x23\x6e\x2c\x13\x67\xaa\x0f\x13\xb0\x3d\x38\x23\x60\x6a\xbd\xe6\x33\x96\xd7\x99\xa8\x4e\x36\xc2\xb5\x51\x12\xc0\x19\x5c\x6c\x24\x8a\x8c\xee\xbe\x19\x9f\x76\xf6\xee\x38\xc9\x58\xaa\x23\x22\xdc\xeb\xb9\x88\xe9\xe7\x72\x2c\x51\x0d\x1c\x9d\xc3\xcf\xf1\xad\x85\xb2\x3a\x85\x41\xf3\xce\x68\xb1\xec\x08\xc9\xc6\x0a\xf1\x55\x36\x30\x7d\x94\x4c\xc2\x3f\x57\x5d\x2f\xd8\x61\xdb\xbd\x20\x35\xb4\x31\x23\x7f\xfe\xc3\xd0\x71\xce\x38\x72\xde\xc0\x97\x4f\x92\xd4\xa3\xe8\x72\xf1\x14\x46\x1b\xed\xba\x39\x7c\x33\x29\xce\xa1\xb9\xb5\x99\xca\x41\xbc\xc5\xb5\xc5\x66\xc5\xc3\x5a\x0b\x1d\xfd\xfd\x8c\x62\x94\x3f\x20\xe8\x29\x73\x11\x2b\x08\xfa\xd4\x38\x63\xc0\x1d\x0e\x98\xc8\xb0\xf3\x3f\x32\x08\x6b\x8c\x39\x6a\xb3\x86\xe0\x9f\x3f\x79\x9f\x51\x5d\x01\xcc\x9d\xe0\x9e\xf2\xba\x68\x43\x44\x64\x5c\x3a\xff\x24\x31\xef\x26\xc0\x2e\x25\x87\x6d\x90\xa1\xdd\xec\xf6\x5c\x49\x51\xa9\x09\x47\xd9\x37\xf4\x9f\x78\xac\x95\x4e\x35\xbb\x3e\xa5\x1e\x3a\x17\xbb\x64\xa7\xe3\x15\xc3\x69\xdc\xfd\x41\xc7\xfd\x92\xfb\x18\x01\xde\x33\x95\x08\x46\x6b\xb4\x8d\xae\xd1\x34\x61\xd1\x56\xb5\xed\x67\xaf\x8f\x54\xf2\x01\x84\x6a\x0f\x16\xbc\x1b\xbb\x3a\x2b\xeb\xb2\xcd\xb3\xbf\xcb\x3a\x78\x38\xa7\x78\x0b\x4c\x03\x59\xe5\x73\xdd\x06\xc1\x1f\x4f\x03\x8a\x0b\x79\x28\xb5\x08\xac\xaf\xda\x05\xac\x7c\xf0\x81\xf9\xc0\x6c\xeb\x56\xce\xc1\xd6\x71\x83\x44\xd5\x94\xf1\xf7\x4a\xf6\x87\xfd\x1d\xa8\xb3\x55\xae\xfa\x79\x86\xa3\xb1\x65\x04\x66\x2b\x0f\x66\x52\xef\x6f\x22\xeb\xf8\x8f\xaf\x7b\x2f\x7e\x13\x8d\xcf\xa0\xc3\x94\xae\x40\xe3\x9a\xbb\x72\x15\x72\xba\x33\x95\xb8\xac\x0f\x1f\x9b\xb5\xf6\xdb\x72\xbc\x67\x89\xf8\xe7\xbb\xf3\x90\xac\x22\x76\x1f\x76\xec\x04\x52\x1b\x39\x74\xf6\xbf\x77\x52\x84\x97\x9b\x8e\x35\xfe\x83\xf1\xaa\x91\x18\xde\xdf\xaf\x6c\x00\x08\x1a\x82\xe2\x22\x62\x89\x48\xaf\x08\xc7\x33\xab\x82\x57\x88\x78\x1e\xe1\xa7\xad\xe3\xf7\x33\xfd\x6b\x3c\xf3\xde\x5b\x53\xc2\x84\xe0\xd3\x25\xd2\xc1\xa8\xbb\xd1\x41\x55\xf0\x52\x12\x7e\xed\x60\xb5\x16\x16\xe6\x33\xba\xc4\xf4\xeb\x11\xdf\xf5\x92\x3e\x20\x6e\xca\x2b\x0a\x71\x2c\x1d\x8d\x01\x8a\x9b\x14\x62\x31\x64\x51\xff\xa1\xea\x29\xa6\x61\x48\x47\xf0\xa2\xd8\xe5\x17\x36\x0a\xe2\x38\x02\x6f\xcc\xe0\x25\x36\x00\xac\x1e\x53\xa6\x36\x7e\xd9\x22\x1b\xcc\x94\x4e\xca\x64\x0b\xe6\xc2\x81\x6b\x40\x42\xe3\x5e\x9b\x5c\xa9\x58\xa8\x81\x34\xbd\x2f\x23\xcc\x72\xff\x19\xa8\x74\xb4\xf0\xaa\xb6\x01\xe7\xcf\x46\x78\x6d\x9e\x5e\xb8\x45\x5b\x8f\x35\xaa\x8e\x26\xe2\x2a\x20\x99\x86\x91\x45\x5a\xca\xe0\xbb\xb4\x99\x20\x2d\x80\xf8\xd0\xb2\x56\x0e\x15\x56\x2d\x5f\xf6\x83\xfd\x93\x92\x39\x01\x0c\xca\x70\x2a\x4b\xa3\x31\x4a\xb5\x90\xe0\xec\x2c\x3f\x04\x13\x98\xd4\x93\xd6\xcb\xf9\x4d\x8e\x82\xa4\xc3\x90\x95\xd6\xbd\xc6\x57\x13\xb0\xce\xa7\xdc\xae\x99\xac\xb3\xa6\xe2\xcf\xeb\x78\x57\x89\x5b\x94\x62\x0f\x68\x9c\x13\xbb\x95\x46\xe0\x1b\x9d\x9c\xf3\x11\x82\xb8\x58\x6b\x85\x68\x61\xc3\xf0\x05\xd1\x49\x4c\x88\xaf\xf5\x97\x36\x37\xa4\xd9\xc9\x91\xab\x84\xbe\x99\x2e\x37\xa2\x4e\x4a\x11\x02\xb8\x33\x13\x6c\x9c\xc6\x6d\x10\xe2\x9e\x47\x43\x61\xc7\xcc\x17\xe6\x64\x13\x64\xc9\x8c\x35\x66\x69\xa5\x23\x04\x7e\xb1\x74\x81\xa5\x70\x22\xf1\xf2\xbc\x30\xd5\xef\x47\x0a\xf6\xff\xb8\xcd\xee\xa9\x1c\xf5\x57\xc7\x85\xa2\x4c\x2f\xb5\xcf\x78\x75\xba\xa7\x46\x68\x47\x52\x76\xa5\x60\x87\x30\x96\x36\xdd\xe1\xf3\x15
\x15\x5b\x09\x94\x55\x4c\x74\xbf\xe4\x90\x58\x21\x4d\x18\xe0\xbf\xc1\xfa\x0b\xda\x87\xe6\x07\x61\x1c\x43\x55\x45\x12\xce\xea\xb4\xa0\xe4\x38\x72\x06\x58\x25\xa7\x84\x5f\x76\x61\x24\xd4\xa3\x08\xfc\xa7\xda\x96\xc2\x03\xa4\x79\x7e\xd1\x6e\xc8\xcd\xa3\x99\xd2\xe2\x1f\x1a\xd9\x06\x6c\x77\x40\x73\x66\x71\x9f\x77\xc5\x09\x4c\x9b\xa1\x2b\xf2\xc5\x8f\xd8\x9e\x62\x4e\xf2\x7e\x08\x0a\x24\xd5\x72\xc7\xef\xc8\xe4\x06\xe1\x03\xe0\x1f\xb4\x09\xd4\x92\x2f\x26\x38\x3c\x7a\xe4\xa7\x52\xba\x4b\x9d\x6b\x2f\x44\x64\x81\xe4\x1f\x7f\xc1\xff\xca\x7d\x98\x55\xaf\x83\xf8\xbd\x4a\x62\xf7\x49\xe5\x32\xc1\xcb\x6f\x7c\x49\x79\x22\x65\x94\x1b\xba\xa5\xef\x0c\xc9\x1f\x60\xff\xc3\x3c\x7b\x52\x4d\x51\x41\x76\xc9\x76\xb7\x65\x9c\xfa\xdd\xe5\x18\xbd\x6a\x9a\x87\x5e\x53\x01\x5a\x20\xd0\x91\xfa\x51\xb4\xfb\xc9\x44\x6f\x10\xea\xa2\x49\x05\x7e\xab\xc6\x66\x78\x1a\x64\xab\xd0\xb3\x55\xde\x0b\xc6\xb4\xe6\xfb\xa8\xbd\xc1\x3a\xe0\xc3\xfc\x0d\x55\xbf\xca\xa2\xa0\x9a\x50\x47\xff\x3a\xb0\xc6\xbe\xff\x66\xef\x12\x92\x7f\x9d\x98\x5e\x87\x5c\x9d\xae\xed\x10\xad\x30\xd3\xbe\xdc\xbc\xf7\x03\x55\xa9\x43\x5f\x88\x8c\x8c\xfc\x12\x66\xf1\xa6\xa5\x14\xbe\xbc\xc0\xb7\x67\xdd\x1a\x41\x87\xb2\x0b\x0a\x04\xda\x1f\x1a\x0a\x44\xd5\xab\x0f\x6e\xa7\x53\xe6\xe0\xaf\xaf\x4d\x6f\x18\x4c\xfc\x5c\x9e\x84\x2d\xa9\xf3\xdd\x85\x41\x7c\x9a\x0f\x9e\xff\x65\xa1\x91\x26\x60\x35\xeb\xc7\x14\xcb\xf3\x31\xb5\xff\x10\x26\xd8\x8f\xe2\x6a\xb7\x63\xc6\xe7\xda\x63\xeb\xf4\x6b\x87\x38\xa5\x89\xb1\x67\xa4\x1a\xc4\xad\xf9\xb8\x8f\x09\xd2\xe0\xd5\xc5\x24\x8b\xb3\x34\xa4\x7c\x37\xe9\x48\x1a\x92\x9b\x39\x96\xc1\xb5\x98\x54\x98\x4a\x53\xbb\xdf\x21\x6e\x62\x1b\x9b\xaa\x0c\x1a\x89\xa7\xef\xf5\x7b\xfd\xf9\xad\xec\xae\x89\xcd\x42\x13\xbe\x54\x7b\x1d\x1c\x6e\x9a\x88\xbd\x1e\x2f\xf1\x86\x41\xc3\xf9\x20\x3b\x8c\x8e\x9c\x6b\x13\x78\x9c\x2e\x0b\xb9\x28\x4f\x7a\xd0\xd2\x8a\x33\x96\xc2\x84\xec\x97\x07\x1a\x95\x52\x48\x6f\xac\xff\x7e\x20\x9e\x2d\x4d\x54\xb2\x3b\xc4\x20\xcd\x45\x86\x61\x32\x01\x2d\xe6\xb2\xe9\x1c\x14\xe7\x32\xd3\x09\x82\x2b\xff\xf3\x5d\xa8\x47\xc2\x40\x83\x4b\x45\x0a\xe9\xcf\x4f\x4a\xb7\x36\x44\x46\x6f\xdb\xfb\xe2\x36\xd7\x2b\x9b\x8e\x76\xf0\x11\x6b\x3b\x62\xc9\xfd\x47\x55\x46\x29\x4e\xd8\x6e\x7e\xad\x8a\xc0\xbc\xd4\x0b\x3b\x4e\xac\x43\xbb\x5d\x11\x53\xac\xbf\x8c\xd6\xaf\xce\x34\xd9\xb3\x90\x72\x02\xc8\x7e\x14\xcb\x9c\x48\xbf\xff\xd9\x50\x2a\x53\x93\xdd\xa4\x00\x7c\x4f\xdc\x7c\x10\xe7\x42\x00\x8b\xff\xea\xb7\x10\x2c\xe4\x71\xeb\x32\x08\x05\xf6\x44\x3b\x98\x6b\xec\x1a\x1f\x7c\x62\x39\xe2\x0a\x9d\x60\xb8\x3a\x59\x1b\x32\xeb\x81\xe4\x3f\x88\xe7\x67\x5f\x4c\xcd\xac\x3c\xe1\xfc\x87\x77\x90\x79\x74\x18\xc2\x45\xed\x7b\x52\xa4\xae\x59\x31\xd0\x98\x6e\xae\x24\xf3\xc7\x5b\x28\x6a\x98\xd1\x18\x36\x39\xc5\x9c\x58\x32\xcc\xef\x9a\x31\xe6\xcb\x27\x45\xca\x00\x3c\x4b\x88\xbc\xc5\x71\x50\xf9\xb2\x77\x5e\xdd\x12\x43\x16\x84\xdb\xf0\xaf\xec\xdd\xb8\xae\x71\xc6\x99\xa3\x7c\x54\x8a\x43\x83\xa5\xd2\x91\x45\x18\x1f\xf7\x10\xcf\x18\x3b\xe0\x67\xa5\xa5\xa2\xb2\xe7\x99\x37\x89\x88\xe8\x3a\x07\x75\xcc\xc3\xac\x8e\xab\x54\xc8\x37\x64\x7c\x79\x69\xf0\x76\x59\xda\x79\x98\x7d\x80\xd7\xdd\x83\xdb\x9d\xc2\x74\xdb\x8f\x0e\xab\xca\x03\x8e\xd8\xe0\x2c\x33\x12\x0d\x0b\x1b\x79\xf5\x0f\x01\xd0\x45\x9f\x94\xc6\x21\x07\xe2\x18\xb9\x40\x0f\xa1\x21\x6e\x6f\xf5\x72\xce\x18\x9a\x92\x58\x76\xfc\x0f\x79\xd6\xa4\x67\x0a\x8c\xb1\xf3\x9e\x0f\x1e\xd4\xa0\xa3\xae\xed\xfd\x10\x37\xbb\x64\xcb\xed\x3b\x7a\xef\x6a\xc3\x8a\x48\x12\x7c\xec\x69\x06\x3e\x5c\x4f\xfe\x61\xd7\x2f\x2e\x8a\xf7\xea\x6e\x32\xb0\x72\xe7\xd8\x6c\x19\x6f\x8d\xe0\x37\x41\xd1\xdf\x3c\x79\x22\x78\x8e\x46\xc5\xe0\xd7\x58\x65\xf2\x11\xb7\x39\x08\x04\x5a\x87\x59\x0
xb8\xe7\x40\x1e\x4b\xd4\xd8\x8a\xaf\x48\x83\x02\x3a\x8b\x2d\xdf\xbf\x2b\x97\xc7\x82\x74\x03\x69\x06\xe0\x49\x09\xc3\xd5\x8d\x43\x91\xbc\x73\x3d\x71\x68\x2a\xc8\xfa\x07\x4f\xb8\xca\x6c\x67\xcc\x87\x29\xa1\x8d\xdc\xbb\x7e\xcc\x58\xb1\x6c\x96\x07\xc1\x08\x88\xbb\xf4\x31\xca\xcf\x10\x7c\x2a\xb8\xf8\x02\x40\xa0\xbc\x48\xf1\x14\x08\x32\xbf\x09\xcb\xdf\x7f\xaa\xcc\x77\x2a\x01\xf0\xcb\x89\xfa\xe7\x84\xca\x5d\x1a\x8c\xfa\xb8\x12\x7b\x63\xe5\x15\xe7\x4e\x12\x94\x40\xab\x26\xdc\xa3\xf5\x42\x0b\xd7\xe7\x08\x6b\x05\xb5\x92\x41\xe1\x8f\x74\x28\x2d\x0e\xb3\xaa\x92\x4c\x55\x5b\xaf\xcc\x05\x28\x1b\x2f\x3b\x9a\xcc\x9f\x00\xee\xb8\x88\x85\x99\x26\x5f\x96\x03\x6f\xe7\x94\x53\x9e\x99\x1a\xab\xc6\xdd\x68\x50\x9a\xf0\x6f\xaa\x63\x74\xdb\x6e\x2c\x0b\xe0\xaf\xe2\x0d\xba\xca\xba\xe9\xe0\xe1\x06\xe3\x99\x51\x1d\xea\xdc\x64\x7d\x71\xa8\xf3\xb4\xfd\x73\x3f\xbf\x7a\xfa\x42\xa4\xa6\x04\x65\xc3\x97\x3e\xd9\x82\xc5\x54\xb5\x37\x72\x55\xb7\xa3\xfc\x0a\xed\xd1\xde\x0d\x15\x80\xd7\x93\x5e\x59\xf9\x3d\xaf\x17\x92\x02\x16\x65\x81\x93\x7a\x8d\xbf\x6b\x99\x80\x64\x43\x62\x9c\xe1\xa5\x78\xca\xc6\x2f\xb5\xe6\xb3\x71\xdf\x55\x43\xfb\x2a\xd3\x34\xbd\x38\x43\x40\x7c\x66\xfb\xfd\x1b\x0f\xbe\x4d\xce\x19\xf5\xe1\xb3\x3a\x1b\xf3\xe7\x89\xa1\xb6\xb5\x41\x3c\xfa\x55\x3c\x81\x8d\xe1\xd7\xa4\xb8\x23\xe8\x40\x3b\xea\x48\x3b\xf0\x74\xd6\x6b\x5c\x41\x6a\x02\xa8\x0f\xb9\xc4\x20\x2b\xab\xf2\xdf\xe0\x62\x0c\x31\xc0\xb2\x84\x23\xe2\x10\x2d\x95\x21\xc2\x78\xb7\x2d\xb6\x42\x99\xe0\x2c\xb4\x5f\x86\x09\x7a\x2c\x33\xc8\x01\xfd\xee\x64\x17\x44\xdd\xba\xec\x16\xc6\x5c\x82\x51\x1d\xe9\x53\x19\x1a\x43\x47\x4d\xdb\x3d\x68\xda\xa5\x6b\x6a\xa2\x9a\x76\x4a\x24\x5a\x9b\xa3\x31\x32\xcc\xc7\x3f\x67\x3a\xa4\x7a\x2b\xeb\x0a\x37\x8d\xe8\x4c\x2b\x4f\x20\x45\x94\x54\x88\x70\xef\x44\x36\xcf\xfc\xd9\xa7\x6b\x25\xb8\x69\xea\x80\x4b\x6b\xdc\x98\x87\x1d\x27\xb3\x5c\x6b\xdc\x57\xa9\xaf\xb3\x43\x80\x79\x02\xdb\xc1\xf0\xd4\x06\xd7\xb2\xdf\xd6\x95\x10\xcf\x9d\x90\x8b\xbf\x52\xe4\x18\xef\xaf\x07\x3c\x34\x7f\x4d\x6d\xa1\x62\xb7\x67\x45\x94\x9d\x15\x4c\xbc\xdb\x93\x31\xec\x2b\x9e\x8a\xea\x02\xc2\xf1\xa3\x66\x6d\xd2\x15\x93\xe7\x71\xad\x44\x8d\xf6\xf7\x2f\x10\x03\xc3\x26\x52\x84\x09\x7d\x09\x76\x60\x43\xde\xee\x16\xb6\x37\x90\xe9\xbb\x6d\x18\x32\xac\xec\xf0\xb1\x82\x3c\x3b\x02\x90\x53\x8c\xaa\xbd\x3d\x5a\x45\x77\xcc\xf1\x50\x05\xb9\x9c\xba\xc0\xf7\x30\x9f\x3b\xb1\x84\x17\x2b\xc7\x75\x17\xc5\x2d\xf8\xea\x91\x20\x7e\xc9\xab\x72\xbb\x00\x40\x9a\x87\xe5\xa6\x66\xc5\xc0\x18\x9d\xcd\x06\xcb\x3a\x47\xce\x68\x4e\x70\x47\x9a\xb3\xc1\x6a\xb8\x1c\xaa\x35\x29\xf9\xe7\x7d\x08\x58\xb9\x14\xc8\x40\x9c\xc0\x94\x99\xf5\x22\xe7\x7b\x1b\xad\x94\x07\x30\xa0\x8b\x16\x00\x28\x63\x1b\x3f\x9e\xf5\x42\xe5\x01\x9f\x2f\x88\xf2\xc8\xe0\xe9\x6b\xd2\x18\xf1\xf4\x1a\xd2\xd5\x07\x1b\x08\x7a\xde\xab\x33\x7e\xed\xbd\x5e\x06\x32\x27\x12\x1f\x0d\xa6\xcf\x72\xeb\xa5\x96\x00\xc6\xb1\x4d\xd0\x72\x27\xca\x8d\xfe\x37\xab\xf9\x51\x83\xb9\xfa\xbd\x7a\x75\x58\x50\x4e\x0e\x86\x83\x7b\xf7\xa4\x91\xc6\x4e\xa0\xa1\x2c\x7b\x57\x3b\xa2\x76\xe9\x91\x71\xce\x14\x51\xe2\x25\x09\xb1\x6b\xad\x42\xa2\xcb\x73\xa4\x4f\x21\x9f\xf5\x21\x82\x33\x35\xf4\xf1\xfc\x7b\xc3\x00\x2c\xc1\x25\xf9\x30\xca\x76\x15\x6c\x64\x9c\xb0\x2b\x3b\x70\xb8\x27\xdd\x34\x6b\x39\xa9\xb1\xba\xee\x4c\xf7\xe3\x51\x36\x65\x17\xc2\x45\x92\xdf\x90\x4c\xc5\xa3\xb2\xc7\x48\xc3\x6e\xf6\xf0\x4c\xfb\xb4\xd0\x51\xc8\xef\x2f\xd2\xf0\x9a\x3e\xe0\x08\xa1\xf4\xe6\x82\xdc\x0d\x88\xaa\x63\x71\x34\xb3\xfb\x09\x4f\x95\xdf\x88\xd4\x2b\xf4\xac\xe7\xf4\xcb\x11\xde\xf5\x5e\x3c\xef\x06\x35\x60\x76\x2e\x47\x80\x28\x18\xd1\xa0\x38\x62\xc7\x31\x87\x65\x8a\x9c\x90\xb4\x6f\x46\xf0\xb1\x8d\x53\x52
\xff\x68\x69\x82\x57\xd9\x8d\xa3\x28\xa4\x9c\xed\xd5\x69\xc1\xfb\x54\x8e\x34\xf0\x17\xf8\xdc\xf2\xe6\x8b\x8b\xdc\x70\x63\x0d\x80\x6c\x47\xa0\x24\x95\xb1\x56\xec\x75\xf8\x45\xb1\x5b\x26\x64\x7a\xc4\x03\xef\x15\x3f\x8a\x6b\x32\xe1\x96\x97\xc4\x94\x90\x75\xcf\xf5\x6a\x88\xae\xbe\x39\xca\x5e\x7b\x16\x3a\x22\xe0\x96\xc7\x17\x20\xdf\x09\x99\xca\xf1\xa2\x0f\x8e\x07\x48\x5e\xb6\xab\x9f\xb7\x5e\x59\x2b\x6b\xc3\x88\x09\x47\xcd\xad\x7c\x46\x7b\x08\xe1\xc5\xb5\xc2\xb2\x00\x5b\x10\xcb\x70\x5d\xf0\xc4\x29\x6d\x4c\xfe\xfb\x79\xb0\x9f\x56\x86\x59\x84\xe4\x67\x02\xbd\xbb\x82\x03\x94\x36\x68\x62\xfb\xeb\x6d\x85\xa8\xec\xb1\xdd\x95\x7c\x91\xe2\x31\x2a\x7e\x62\xba\xdf\x4d\x92\xd5\x8b\x14\x93\x25\x32\xfc\x7d\x5f\xdd\x8b\x8a\x60\xa6\xea\xbf\x7b\x9a\x83\xfd\x36\xdc\x60\xb2\xd9\x01\x1b\xa9\xaa\xc5\x8d\x6d\xc4\xd7\x81\xfd\x4a\x26\xc2\x58\x00\x02\xff\x70\x82\x01\x93\x46\x04\x6b\x77\x23\x09\x64\x4d\x6b\x69\x1f\xbf\x1c\xe3\x49\xe5\xab\xcb\x81\x1b\xfa\x72\x45\x05\xb8\x80\x0f\x5e\x2e\x5f\x77\xdd\xf7\x53\xd1\x46\x48\x84\xb1\x03\xb8\x86\xfd\xb0\xa1\xb2\xd7\xfd\x56\x66\x2a\x1c\x43\x6a\x0c\x12\x70\xd1\x2f\x0a\x3b\x18\xbc\x6b\xe5\x14\xb8\xb4\xef\x41\x18\xe5\x05\x74\x1f\xb6\xb2\x5e\xf0\x20\x38\xcd\x46\x60\x96\x64\x18\x85\xdc\x23\x67\xe1\x70\xf4\x43\x0c\x9f\x95\x6e\xdb\x9b\x9c\x75\x59\x35\xf5\x24\x50\x3e\x17\x7b\xde\x08\x7e\x32\xf1\x8f\xf6\x4c\xe9\xb8\xb2\x25\xd5\x6b\xb2\x37\x93\x14\x4f\x54\x5d\xe9\x10\xa6\x88\xbe\x02\x59\xd4\x08\xd3\xdb\xa8\x50\x7b\xd2\xb2\x20\x7b\x94\x46\x4c\x5e\x22\xc8\x11\xc3\x2a\x73\xcb\x05\x40\x8b\x62\x59\x3d\xa4\x68\x31\x9d\xd6\xa2\x9d\x06\x46\x08\x97\xe0\xd3\x10\xdd\xa2\x51\x01\xaf\xf4\x24\x2f\x0b\x4d\xad\x18\x8d\x93\xc5\x27\x45\xd3\xea\xf0\x04\x0b\x88\xfc\x0a\xee\x82\xe7\xb3\x73\x16\xb3\x25\x57\xa6\x4d\x4c\x02\x07\xcb\xc9\x93\x14\x3b\xb8\xab\x61\xbe\x5e\xa9\xef\x73\x80\x7f\xa8\xb5\x7d\x32\xe2\xce\x54\xae\xef\xb5\x32\x5e\xe8\xcd\x10\xc8\x76\x37\x36\x22\x5e\xba\x07\x9e\x32\x39\x97\x5c\x4b\xde\xc8\x73\x13\x53\xe0\x2a\xe5\x34\x93\x01\xec\xdd\x67\x90\x90\x2b\x96\xea\x01\xb9\xb7\x65\xce\xbd\xeb\xfb\x0f\x90\x8d\xc0\x62\xe7\x56\x53\xba\xfc\x60\x61\x1f\x47\x1d\x04\xce\x57\xd1\x24\x96\xb6\x3c\x21\x44\x21\x6b\x55\xec\x8d\x75\x35\xfd\x61\xd0\x92\x1d\x1f\xe6\xf8\xe9\xee\x90\x1e\xeb\x81\xf5\x08\x9c\x2c\xde\xeb\x81\x38\x15\xe2\xeb\x19\xc5\xc0\x12\x4a\x5e\xfc\x52\x05\x3c\xca\xf5\x77\xe3\x7b\xef\xbd\x64\x1b\x62\x9c\xbb\x33\xb8\x98\x75\xf8\x42\xbd\x96\x01\x4c\xb6\xc7\x72\x2c\xa9\xd4\xd5\x37\x28\xf4\x54\x07\x57\x00\x38\x9d\x8c\x39\xfe\x64\xd9\xac\xab\x19\x84\x71\x2d\xb1\xe3\xc7\x33\x64\xb4\x3d\xcb\xef\x21\xf4\x2c\x6f\xfc\x94\x28\xfe\xb5\x48\x37\xee\x5e\x8c\x96\x78\x76\xc6\xfb\x22\xe3\x76\x48\xdc\x62\x1f\xa7\x29\xdb\x3a\x89\x18\xbc\x3a\xfb\xb7\xf9\x1e\x33\x5e\x09\x32\xb5\x44\x50\x50\xee\x98\xce\x5a\xa6\xe7\x3f\xf2\xc1\xcd\x8c\x5b\x3b\x49\xa0\x6a\x30\x6f\x6e\xb1\xee\x5f\xf9\x33\xfa\x50\xf2\xb3\xf1\x65\x68\xf0\xcb\xad\x56\x27\xe5\x5f\x5b\x8e\xe4\xf8\xec\xd3\x4c\xba\x0b\xb9\xe8\xed\xa2\x6c\xcd\x9f\x26\x78\x45\x6a\x5f\xf3\x6b\x82\xbf\x0a\x16\x50\x2f\x09\xd1\xba\x9e\xa1\x8a\x43\x96\x1b\x70\xac\x7f\x79\x42\x6a\xcc\x6a\x9f\x1f\xb1\xac\xb0\xef\x2a\x3f\x91\xe9\x41\xa4\x5f\xe1\x86\x14\x68\x6a\x11\xe4\x45\x31\xb6\x78\x1f\x02\xf8\xb8\xf5\x63\x89\xa5\xf3\x19\xe4\x82\xbb\xfc\x7f\xfe\x91\x1b\x69\x7e\x75\x84\x79\xaa\x07\xa4\x87\x82\xd1\x39\x7f\xf6\x8c\xeb\x3e\x17\xc5\x90\xa5\x8f\xd2\x17\xe6\x1f\xfe\xc2\x93\x51\xe2\xc8\x3e\x3f\xad\x7f\x90\x09\x92\x15\x5a\x8c\xa8\x84\xba\xfe\xcb\xfd\xe5\x82\x45\x3d\x5d\x61\x15\xee\x55\xda\x97\xe5\x07\x3b\xb2\x29\x11\x6b\x85\x69\x79\xf6\xcf\xe0\x06\x8b\x61\xcf\x73\x41\x97\x5d\x02\x17\x0d\x73\x68\x73\xd8\x7
a\x5f\x73\xf8\xec\xce\xb5\xfd\xc2\x1a\x91\xb0\x0c\xf7\xe0\x17\x6c\x09\x83\xff\x3c\x04\x20\x5c\x71\x25\x44\x5a\xc5\x4d\x71\xec\x23\x42\x85\xf1\xe4\x85\x6c\x07\x83\xda\x22\x3b\x6d\x51\x36\xf6\x63\x4f\x3b\xdc\x7f\x80\x16\x14\x0a\x53\x42\x9a\x3c\x0c\x08\x36\xa3\x38\x9d\x2f\x81\x7a\x50\xea\xac\x60\x81\xb4\x38\x16\xaf\xad\x1b\xb7\x31\x20\xe5\x24\xfb\x68\xfc\x98\xd8\xaf\xd0\x24\x69\x19\xda\x4f\xe0\x20\xbe\xfc\x93\x9e\x56\xb1\x7e\xa1\x9b\x3f\x1b\xba\x70\x77\x34\xb0\x23\x45\xba\x8b\xcc\x2d\x89\x65\xc3\x6a\xb3\xeb\x8f\x94\xd7\x72\xc4\x80\x39\xce\x8e\x58\x7c\x5e\xd9\x18\x0a\xda\xce\x3c\xcf\xa0\x98\x00\x6b\x5a\x6d\xba\x9b\xf3\x8e\xdc\x23\xeb\x8a\x79\x29\xef\x9c\xfd\x91\xef\x7d\xa5\xb4\x32\x05\xc1\xc8\x7f\xa4\xd2\xf5\xe8\x49\x48\xcc\x1f\x39\x6c\xa3\x63\x1d\x58\xd8\x95\x38\x62\xe3\xe7\xbb\x4d\x3b\x53\x6d\x4f\xfb\xda\x4b\xbf\x95\xdb\x84\xa0\xf6\x8a\xcb\x70\xff\xa7\x16\x45\x8e\x8b\x3d\x71\x09\xb2\x54\x22\x45\xfb\xac\x1d\xd4\x74\x6f\xdd\xba\x10\x28\x1a\x8d\x4e\x52\x73\x47\x03\x75\xf2\xae\xa2\x2a\x0a\xf3\xd4\x4f\xd8\x56\xab\xf4\x5e\x89\x96\xf1\xde\x10\x62\x89\x6c\xf1\x15\xaf\xf7\x35\x31\xd1\x3a\xd4\xf7\xee\x8e\xa3\x51\x85\xf7\x04\x2b\x40\xe1\xd7\xdb\x78\xf1\xf7\x41\x3f\x67\x27\xd5\x47\x62\x6b\xdb\x2b\x15\x6b\x7d\x63\x47\x8f\xca\x1b\xbc\x82\x95\x47\x03\x87\xd2\x71\x11\x05\x94\xcc\x5a\x4a\xa5\x84\x2d\x40\x73\x3b\x85\x6b\xbc\xc4\xf5\xa9\xcb\xd8\x94\xbb\x08\x6f\xd2\x01\x86\x2a\x53\x95\x29\x4e\xcf\xed\x56\x1c\x3a\x94\x49\x1b\x6f\x2e\x52\x77\xbc\xdd\x21\x2c\x54\x1d\xab\x6c\x87\x19\x35\xfe\x43\x83\xdc\xd8\x38\xb9\xc7\x3f\xcf\xed\x55\x06\x4e\x12\x29\x13\xa5\x38\x62\xc8\x8f\x34\x43\x8d\x04\xff\x4e\xdd\xaa\xcb\x15\xea\xa8\xae\x44\x8e\x1f\xf1\xe0\xa4\xe7\x53\x22\xaf\x07\x05\xf4\x06\x99\x78\xf2\x0a\x7e\x3a\xeb\x83\x0e\x14\x58\x14\x06\xfa\x5e\xf2\x2a\xf2\x58\x8d\xfa\x96\x86\x04\x99\x09\xc1\x00\x7c\x0b\x7c\x09\x53\x32\x30\xd4\x07\x40\xf3\x7e\xe8\x19\x91\x46\x6a\xb9\xef\x00\xe3\xfb\x84\x2b\x95\xf0\x68\xb7\x76\x1b\x78\x93\x33\xff\x22\x70\xc6\xf1\xb4\x0e\x8c\x3a\x9a\x00\x70\xe5\x85\x43\x80\x12\xd8\x5e\xef\x7c\xaa\x66\x93\xbb\x33\x00\xca\x65\x57\x62\xd6\xf1\x3d\x7a\x21\x61\x06\x10\x29\x1a\x14\x6a\xed\xfa\x9e\x79\x6a\xd7\x51\x84\x9a\x72\x77\x99\x90\xcd\xae\x21\x62\xed\x46\x3f\x08\xf9\x85\x86\xbe\xcb\x27\x3a\xfb\xf1\x81\x22\x60\x64\xca\x8c\x89\xe8\x70\xbc\x34\xf6\xc9\x0e\x91\x8c\x3d\xc2\xa4\x78\xac\x6f\x55\xeb\x15\x2f\xd8\x4a\x49\x2f\x20\x79\x23\xb9\x20\x81\xe9\x50\x5f\xa3\x2c\xb5\x5a\xaf\x31\x6c\x76\x8e\xa3\x07\x1b\x2a\x68\x6f\x71\x4e\x98\x35\x8c\x07\x4e\x2f\xe3\xcc\x6b\x2c\xed\xed\x85\x7e\xbe\xca\x16\x8c\x07\x5b\x58\xc7\x75\x63\xa9\x92\x33\x99\x1f\x33\x95\xbb\x9d\xe4\xe9\xa5\xaf\x2d\x59\xd0\xc6\x27\xa9\x56\x6d\x97\xfd\xdc\x16\x0b\x3b\xbd\xf9\x43\xce\x2a\x40\xe7\x99\x55\xb4\xf1\xc3\xba\xfe\xb8\x60\x85\x27\x42\x8c\xbe\xc6\x6f\x63\xc6\x83\x59\x4f\x45\xe5\x84\x30\x33\xbe\xc8\xce\xb2\xba\xcc\xa7\xa7\x19\x71\xbe\xcb\x18\xdb\x4c\x67\xe0\xba\x76\x86\x0c\xa9\xf8\xe3\x28\x04\x0a\xde\xd9\xa2\xa7\x4d\x8b\x97\x7a\xe9\xea\xa1\xbd\xc2\x8f\x9d\x8c\x9b\x2b\xf4\x03\x92\x71\xdd\x18\x49\x75\xe1\x9d\x59\x90\x56\xda\x2f\x82\x0f\xbf\xe3\x00\x11\x22\x20\xd6\xcc\x5a\xe3\x3d\x0f\xf4\xa5\x61\x11\x3c\x26\xdb\x71\x6e\x2c\x18\x94\xe9\x9f\xc4\x6d\x61\xc3\x62\x24\x20\xf4\xec\x8c\x2e\xf0\xaa\x69\x57\x08\x4f\x9d\x03\x15\xea\xca\xdc\xd0\xa0\x10\xe5\x7b\xd0\x1b\xe2\xc5\x72\xbf\x3f\x9f\x15\x6e\x66\xf4\x9a\xd1\xb0\x8a\x65\x64\x0e\x85\xbd\x4b\xda\x91\x77\xec\x25\x82\x3b\x39\xc6\x89\xf4\xa1\xa1\xe5\xd9\x7e\x11\xd1\xb6\xe9\x1a\xe3\xe7\x38\x72\x27\x94\xaf\xfa\x4e\x0a\x8d\x11\x6a\x13\xe3\xaa\x58\x9c\x09\x87\xca\x51\x17\x45\x82\xb5\x16\xac\x9b\xe5\x2e\xae\x53\x2a\x21\x
d7\xe3\xd8\x0b\xaa\x25\x13\xab\x3a\xf2\x69\xfb\x5a\x3e\x0e\x20\xd3\x23\x91\x32\x76\x41\xac\x58\xee\x42\x0b\xb6\x8c\x44\xdf\x97\xe9\x4f\x95\xe2\xf4\xec\x7d\x98\xc8\x3a\xde\x35\xd4\xe6\x67\xe6\xbd\x86\x93\x13\x35\xc7\x4f\x4e\x0c\xb6\x40\x4a\xe2\x78\x61\x30\x1c\x57\x2d\xd6\xa5\xb0\xb4\x4b\x07\xcf\x6d\x52\xec\x72\x8d\x8f\x19\x34\x7d\x7c\x76\x15\x72\x10\x2d\xb3\xa6\x76\xfb\xb0\x96\xbc\x17\xf1\x13\x10\x87\xb1\xe7\x20\x92\x2b\xeb\xf1\x46\xc3\xa9\xfc\xa0\xa9\x54\x44\xe7\x41\xe2\xe2\x7b\xd8\x5f\x2d\xc0\x3c\x1a\xa4\x23\x5d\xa0\x04\xd1\xc7\x62\xc4\x6e\xef\x4b\x3e\x0d\x2d\xbd\xac\x98\x8b\x28\x05\xd8\x10\x2b\xda\x96\xaa\x9d\x81\xec\x66\xc4\xee\x45\x3b\x25\xcf\xa1\x41\x82\x1e\x6c\xca\x74\x8a\xcc\xac\x55\xe4\x46\xda\x4b\xff\x0c\xe2\x43\xfb\x14\xb6\x95\x11\xee\x58\xbf\xee\x40\xee\x5c\xb8\x38\x88\xad\x94\xad\xcd\x13\x9c\x62\xdc\x8c\x1b\xdd\xb1\x49\x1d\xdf\x4d\x05\x02\xdf\x3e\x4b\x81\xb4\x9e\x4f\x33\x02\xc2\x76\x95\x57\xd4\xd3\x40\x19\x48\x70\xd2\xb2\x7d\x68\x41\x67\xb2\xa2\x9c\xd4\xb3\xb7\x5f\x04\xb3\x44\x43\x21\xe7\x78\x44\x94\xec\x65\x23\x0a\xa3\x58\x6f\x47\xbc\x48\x8a\xb7\x66\xb1\x94\xe1\x75\xf5\x79\xcf\x89\x25\x17\xc7\x90\xbe\x06\xff\xc9\x3f\x07\xeb\xb6\x8c\x1a\x32\x50\xbb\xfa\x66\x15\x2f\x52\x16\x76\x6f\x85\x8f\xb5\x75\xda\xa1\x58\x3e\x71\x56\x10\x22\x5b\xb2\xbd\x46\x80\x26\xef\x60\xfd\x15\xdd\xe7\xcd\xa2\x7a\x00\x89\xd7\x0a\xf1\xe8\x81\x3f\xef\xd4\x4e\xaa\x35\xfc\x9b\xcf\x8f\x49\x42\x27\x4f\x20\x29\x58\x83\x51\xa4\x7b\xd9\x67\x0b\xa5\x58\xae\xaa\x90\x72\x3d\xa1\x7d\x51\x68\xe6\xf6\x41\x11\x08\xc0\x84\xf5\xa0\x32\x16\x44\x12\x96\xfb\x2a\x83\x46\x01\x52\xaa\x63\x82\x9a\xc4\x3f\x79\x0f\x64\xa5\x25\x70\x12\x38\x58\x1f\xf8\xc5\xc5\x64\xfa\x19\x31\x36\x1e\xa0\xbc\x1e\x02\x5b\xae\xd4\x4e\x73\x3f\x44\x03\x4c\x65\xf7\x4d\x70\x2c\x6d\x1b\x4d\x6f\xf0\x61\xc5\x49\x6f\xb1\xa3\x02\xec\x41\xe7\xe7\x4a\xb7\x54\xf3\xd2\x12\xc1\xc5\x01\x9a\xf0\x45\x4d\x96\x12\x70\x5d\x56\x2c\x9e\x43\x28\xff\x42\xe1\x40\x79\xac\xaf\xab\xc7\x9c\x2b\x7f\x8c\x28\x10\xee\xa2\x3f\x06\x71\x61\x96\xaf\x1d\x05\x3b\xe4\x3f\xe4\x0f\xc0\x55\xd5\x6e\x8d\xec\xb9\xc0\xc2\xc4\x70\xa1\x9b\xd6\x66\x64\x06\x06\x48\x43\xa9\x2b\x52\x9b\x53\x59\x69\xf0\x6f\x0b\xb6\x41\xc9\x6b\x83\xba\x16\xd7\x36\x4b\x68\x8d\xbb\x01\xd0\xe2\xc8\x6f\xd3\x12\xc5\x08\x73\x6a\x22\x8a\xbb\xb2\x63\x15\x65\x5d\x78\xd6\x30\xcb\x58\x3d\x63\xd4\x7b\x19\xb5\x6f\xf9\xf7\xba\xc1\xca\x13\x51\xb1\xa2\x60\x68\xf3\x78\xa2\x50\x90\xcd\x42\x45\x8d\x86\x8f\xa3\x74\xee\xb8\x33\x4e\x57\x5a\x8c\xf4\x39\xf3\x95\x7b\xa5\x23\x2e\xb3\x54\xf4\x64\xf5\x87\x38\x16\xeb\x12\x01\x29\xdd\x5f\x39\x25\x3a\xc9\xe2\x79\x4d\xcb\xd3\xd3\x63\x02\x92\x33\xf4\xe6\x31\x2d\xea\x77\xca\x1f\xa6\xde\x36\x80\xab\x59\xa3\x59\xe1\x3a\x1c\x47\x7f\x74\xcb\xb4\x9a\x5c\xe7\xa3\x8d\x4a\x02\xb4\x96\x5f\x46\xb1\x42\xb3\x6c\xdd\x7e\x7d\x47\xcf\xb3\x0b\x46\x3b\x9c\xb2\x0a\xbf\x4a\xe0\x36\x24\x24\x14\x82\x94\xc3\xfc\x75\x25\x44\x49\x1b\xfc\x92\xc0\xdf\x40\x26\x4b\x8d\x61\xfb\x47\x9b\x9a\x67\x35\x66\xfb\x53\x04\x6f\x15\x96\x11\x11\x98\x32\xff\xea\xb5\xa1\x05\xcd\xb3\xc5\x02\xe0\x70\xeb\x03\x7d\x20\x78\xaa\xab\xdf\xad\xa9\xc7\x47\xfa\xb8\xb2\x41\x98\x62\x90\xce\x7c\x6f\xef\x6a\xa9\x19\x18\x90\x75\xef\x51\x4e\x4f\xfd\x9f\x00\xb2\x95\x53\xb8\xf2\x2a\x4d\xf9\xe1\x98\x69\x2a\x0a\x3c\x78\x37\xa5\x64\x8c\x93\x6e\x77\xc6\x2a\xb4\xa9\x79\x6b\x8d\xd6\x7a\x5b\xcb\xf0\x15\x86\xc7\xd6\x1b\x11\xcf\x67\x2c\xe1\xaf\x0c\x65\xfd\x55\xd5\x23\xf5\x09\x1a\xd6\x2a\x30\x0f\xec\x5e\xef\x0b\x97\x66\x51\xeb\x26\x99\x67\x06\x80\xba\x1d\xe4\xca\x4d\x19\x17\xcd\x62\x4a\x5e\x9b\x16\xaa\x80\xb1\xbe\xe4\x8c\xc0\xc7\x8e\x4b\xb5\xa9\x80\xb3\x85\xc0\x70\xc7\xed\x5a\x71\
x34\x37\xdb\x7b\xc2\xe2\x2b\xb2\x1d\xac\xb3\x7b\x21\xa2\xc6\xfa\xf5\xc0\xbe\x20\x62\xa7\xe8\xcd\x29\x97\x4a\x09\x80\x69\xde\x0a\xfd\x34\xd8\xc3\xc1\x21\x9c\x08\xf7\x96\xd8\xa9\xe8\x31\x31\xc6\x95\x04\xa5\xa2\xfb\x7e\x12\xce\x5a\xd2\xe8\x44\x1d\x79\xa1\x34\xa9\x52\x79\x79\x3c\x90\x97\xe8\x75\x86\x45\x9a\x1f\x1e\xe0\xe5\xf3\x54\x5e\x92\xad\x97\x43\x6a\x1b\xd5\x9d\x14\x2d\x33\x6f\x38\xf1\xfa\xd7\xf7\xe9\xfd\x00\xdb\x1b\x3c\xb4\x73\x9e\x59\x34\x09\xdf\x7f\xc0\x57\x3d\x5a\x6b\x8a\x01\x88\x4f\x0a\x04\x83\x21\xaf\xc9\x4b\x28\x23\xf5\x18\xc8\xc3\xde\x44\x6e\x16\xeb\xb4\x8b\xc7\x65\x2d\x52\x81\x45\x31\x8a\x3e\x17\xe0\x9c\x27\x18\x7d\x1c\xef\xc2\x85\xe1\x02\x96\x87\x46\xdb\xe5\x2f\xf3\x1a\x63\xd0\x66\x6a\xd7\x98\xc5\xe6\x16\x39\x2a\x3a\xda\x18\x48\x2b\xd3\xf0\x06\x43\x3e\x69\x99\xfe\xe0\x01\xdd\x55\xd2\x51\xf6\x61\x91\x92\x8c\x0b\x10\xd6\x30\x39\x0f\x40\x91\x44\x9f\xfc\x54\x37\x99\x7e\xfe\xb6\x02\x6a\xb4\x91\xe5\x66\xc7\xc9\x7f\x4a\xe6\x42\xca\x84\xe4\x7c\x4b\xcb\x89\xfc\xde\xca\x9f\x48\x83\x05\x37\xbc\xf6\xf4\x39\xf7\xbb\x03\xa8\x07\x3a\xf9\x98\xef\xcc\x23\xc1\x44\x80\x85\x62\xb7\x87\xac\xa2\xb7\xbf\xcc\x8b\xce\x6c\xc8\xf0\x50\x08\x20\x92\xb3\xdc\x93\x51\x0c\x47\xfa\x13\xe1\x95\xb4\x32\x8a\x8a\x02\xf0\xa6\x96\xa4\xd2\x76\xce\x15\xee\x7e\xab\x5f\xfb\x8e\xbb\xfc\xa7\xa0\x94\x2f\xe6\xaa\x85\x92\x42\x44\x3a\x40\x3c\xd4\x2d\x08\x8d\x2d\x90\x9a\x5e\x5f\xdc\xe4\x77\x48\xa7\xb8\x13\xb4\x50\x73\x68\xba\x0f\x98\xc3\xe2\xe9\x33\x6c\x0b\x87\x7f\x33\x60\x8a\x12\x4b\x4c\x9e\xaa\xc0\xbc\xe5\xfa\x93\x5b\xe8\x3c\x38\x8a\xfb\x13\xa2\x7a\x97\xb9\x33\xa7\x35\x43\x6d\x18\xa8\x90\x8c\xf1\xa9\xe8\x75\x10\x9b\x8c\x70\x63\x5c\xa0\xb2\xb9\x2b\xc3\x0a\xa7\x8c\xe0\x85\x7e\x17\x59\x22\x35\x41\x54\x7b\x3f\xe5\x7d\x99\x84\xa1\xc6\xd6\xdd\x59\xda\x06\xb0\x1a\x28\x57\x86\x31\x92\xa7\x18\x4d\x5b\xf1\xca\x3d\x8e\xf5\xa3\x81\x60\x04\x4e\x40\xf3\xa2\xde\x7e\x86\x90\x8f\x8a\x19\x6c\x07\xcc\xaf\x39\x54\xed\xeb\x7f\x8d\x9c\x51\x32\xab\xf5\x50\xd5\xeb\x67\x93\x6e\xe8\xde\xd1\x10\x35\xb3\x93\xda\x38\x72\xde\x36\xb8\x90\x57\x58\xbd\xcd\x19\x32\x28\xb2\xc9\xf8\x34\xe6\xb5\xd3\x89\x06\xdb\xc8\x05\x32\x48\x54\x89\x6c\xcd\x8c\xf2\x83\x58\xed\x18\x73\xa8\x15\xb2\x61\x16\x00\xc0\x3a\xd6\x23\xc0\x60\xb1\xb2\x90\xbd\x4d\xbb\x0e\x30\x28\x16\x1b\x8f\xda\x3c\x42\x7c\xe2\x3a\x63\x83\x94\x7b\xd7\x88\x01\x0c\x48\x6e\xec\x3d\xff\xa9\xeb\xa8\xf1\xc6\xc1\x9c\x06\x05\x96\xca\x09\xb8\x91\x7a\xcc\x89\x6a\x41\xa3\x15\x6e\x06\xc8\x44\x66\x58\x9f\xdb\x0b\xd7\xa8\x32\x63\x5e\x94\xa3\x84\x9f\xf9\xc1\x75\x57\x4f\x82\x14\xcc\x51\x88\xb6\xf8\x09\x65\xc9\x01\xc8\x69\xf5\xf2\xf4\x8b\x88\x94\x5a\xbf\xf3\x4d\x83\xc2\x6b\x42\x25\x7d\xf8\x28\x27\x1b\xe6\xa4\x29\xe2\xc2\x75\x11\x75\x7c\x18\x8a\xa9\xa9\x62\x32\x08\xa0\xea\x51\x0e\xb4\x0d\xee\x49\xde\x06\xce\xd2\x84\x3d\x32\x94\x8f\x70\x7d\xe6\xd8\xb7\x61\x33\xc6\x57\x86\x69\xfc\xa4\x13\x39\x53\xde\x7f\x5e\x41\x9e\xfd\xaa\x14\x3c\xb7\xa4\xde\x99\x06\x6a\x88\xe2\x1c\xce\x97\x07\xd8\xc1\x38\xd6\x78\xa7\xf7\xd6\xbe\xfe\xff\x28\x6e\x53\x21\x98\x08\xf3\x87\x6d\x4b\x9b\x72\x79\x8b\x6c\xce\x7b\xd8\x1e\x98\x8b\x1a\xb8\xd2\xe3\x23\x8f\x43\x09\xdb\x70\x2e\x44\x9e\x7a\x6e\xec\xed\xa5\xc9\x26\xbe\x61\xb9\x94\xf6\xf6\x67\xd3\xe9\xbb\x55\xbc\x1f\x8b\x86\x09\xa8\xc4\x51\xd1\xd3\x85\x45\x4e\xac\x5c\x8e\x95\x4c\xba\x36\xb7\xd2\x5a\x54\x69\xe2\x10\xeb\xbb\x3a\xd9\x49\xae\xf7\x66\x2e\xf7\x8e\xb1\xf7\x2b\xa8\x7c\x0c\xe3\xea\x1e\x0d\xd9\x0b\x3c\x9a\x1c\x50\x7d\xcb\xa0\x6f\x12\xd9\x14\x8e\xa4\x99\x9a\x7a\xe6\xb2\xc0\x9f\xac\x01\x75\xcb\x56\xa9\x96\xa0\xf1\x3e\xd5\x3f\x17\x0e\xf1\x79\xb1\xfe\x4f\x4f\xfc\x03\x13\xea\x7f\xcc\xbb\xe8\xde\x29\x08\x37\x8b
\x89\x95\x12\x86\xde\x05\x26\xdb\x55\x86\x10\x59\x3d\xc6\x3e\x86\x61\x61\x1f\xbe\xbe\x4d\x92\x87\xf6\x8b\xa1\x7f\x05\x01\x8c\xbe\xc8\xa4\x94\x8c\xa2\x32\xe0\x49\x4e\xdf\x03\x92\x17\x8e\x91\xb3\xcc\x18\xd5\x45\xf6\xa2\x34\x29\x89\xf7\x35\xf6\x47\x37\x5d\x5e\x7d\x97\x09\xba\x9d\xe6\x4e\xe5\x2f\xd9\xe7\xf0\x1d\xac\x37\xcc\xe5\x47\x31\x93\xf3\x81\x19\xf9\xaf\x47\xaa\x68\xa2\x83\x3e\xad\x55\x53\x0c\x14\xfc\x12\xbd\x5b\x77\x8c\x81\x9c\x77\x2b\x94\x54\xb2\x55\x00\x65\x72\x4f\x72\x6a\x36\x1e\x2d\x70\xf2\x34\x24\xb8\x58\xcf\x9f\x32\xea\xb2\xa3\x25\xd3\x16\xdf\x2f\x01\x69\xda\xf8\x26\xb8\x83\x92\x39\x95\xd3\xb7\x65\x90\xcf\xf7\xd5\x96\x02\xd8\x6a\xf3\xb1\x62\xcf\x6d\xe6\x4e\x2e\x5a\x9d\xa2\xc6\x3a\x8d\xce\xc4\xf7\xcf\x1c\x71\x30\x1d\x73\x9b\xef\xde\x1b\x8a\x85\x2c\x7d\xac\x4a\x15\x79\xfe\x18\x13\x0e\x38\x9a\x6c\x7e\x64\xb2\x17\x01\x68\xb6\xa0\xab\xa4\x01\xb8\x9d\x26\x23\x93\x18\x38\x48\x1d\x1d\x00\x56\x9f\xa0\x08\xf7\x5a\x0e\x73\xf4\x8f\xbb\xa8\xdf\xca\x64\x6f\x5f\x1f\x20\x2e\x75\xad\xe9\xf6\xf1\x53\x89\x23\x97\x13\x09\xab\xd4\xf6\xca\xc4\xe1\x69\x1e\x8a\xf3\xc1\x54\x96\x09\x6e\xbb\x19\x56\x4a\x14\x2b\x0c\xbc\x03\x81\x6a\xb6\x1b\xfb\xa4\x8d\x83\xb8\xd2\xd8\x73\xb2\x48\x3d\x12\x19\xf0\xab\xfa\x04\xf5\x1b\x68\x6b\x95\xd8\x4f\x83\x23\xc1\x83\x14\x82\xdf\xd4\x0e\x68\x9c\x37\x96\xf6\x17\x92\x82\xd4\x98\xd5\x51\x1e\xfb\xca\xc2\xe2\xde\x91\xb1\x3e\x44\x7a\x83\x73\xfa\x93\xc8\x2c\xb5\x03\x30\x24\x9e\xb0\xcd\x29\xe2\x74\x71\x95\x48\x25\x69\x99\x5e\x1d\x44\x70\x65\x16\x87\xf4\x73\x83\x76\x3a\x75\x19\x97\x16\x47\x81\x5c\x10\x75\x43\x9a\x8b\x74\xf1\xc8\xc1\x82\x9a\x69\x08\xf9\xbd\x01\x7f\xf0\x2c\xd2\x77\x05\xc6\x26\xe4\x7f\x2f\x66\x19\x74\x95\x50\x9c\x2a\x2b\xc6\x78\x3a\x63\xb1\x33\x8c\x83\x50\xdf\xe7\x33\x3f\x1a\x2d\x85\x99\x5a\x5c\x4b\x85\xca\xe5\x5e\x53\x0f\x7b\x2f\x89\x63\x32\x05\x53\xef\x3b\x36\xf7\x79\xf6\xec\x14\x05\x1d\x9a\xba\xa4\x8f\x90\x36\xd0\x8d\x60\x7d\xf5\x7d\x40\x7b\xbf\x0a\x5a\x1b\xb0\x0f\x5a\x65\x00\xa6\xa9\x77\x53\x86\xec\x2a\x71\x15\xf1\xe1\x73\x2e\x9c\x50\xd7\x37\xf8\x58\x3c\xd9\x1a\x23\x1a\x0c\xda\x0e\xb6\x53\xdc\x79\x43\xa0\x82\xdc\xca\xf3\x18\xf1\xf0\x23\xd0\xd4\x8b\x25\x3b\xbd\x7a\x03\x5d\x3f\x91\x05\xf9\xaf\x1f\x89\xf2\x5d\x8e\x6d\x6b\xb1\xc5\x45\x57\x3b\xf5\x02\x40\x03\x7c\x6f\xa6\xad\xa0\xc2\x0d\x52\x5b\x9c\x72\x7d\x3c\xa9\x29\x30\x36\x9f\x8b\x5d\x6f\x54\x20\xee\xac\x1a\x55\xba\x7a\x88\x6c\xf3\x29\xc7\x88\xbe\x82\xcd\x3f\x1b\xf8\x07\x7a\x57\xdd\x04\xc8\x43\x31\xd3\x3b\xca\x8a\x23\x6e\x85\x0c\x8d\x25\xe4\x98\xdd\x7c\xe0\xa2\x99\x90\xb0\xc2\xc6\xa1\x24\x44\x04\x6a\x6c\x59\xd7\x2b\xb0\x95\x20\x6b\xe3\x76\x30\xf4\x00\x29\x6f\x63\x9c\xae\x2b\x46\xae\x18\x50\x82\x05\x44\x5a\x7b\x8b\x40\xbf\x78\xa2\x2c\x5b\x7b\x65\x7c\x38\xb5\xa7\xc9\x82\x0f\x0d\x8d\x48\x42\x4f\x1e\x41\x5c\x3c\x73\x62\xc5\xdb\xb1\xb4\x04\xb3\x28\xa1\x24\x16\x98\x6f\xdd\x5b\x1f\x20\xcd\x57\xac\x5c\x35\x55\xea\xcc\xfe\xc2\x46\xaa\xad\xaf\xc9\xfc\x65\xcc\xf8\xc9\x4c\x3e\xe6\xfa\x62\x4c\xf2\x20\x48\xce\xba\x9d\x2d\x82\xe0\x4e\xb6\x0e\xa1\xe5\x0c\xe4\x69\xef\xa3\x00\xee\x45\xb3\xac\x7e\xe9\x35\x86\x25\x94\x78\x5f\xa2\x58\x60\xc2\x82\x87\x01\xbd\xe1\x65\x8e\x6e\x9a\xad\x54\x37\xf7\x12\x14\x70\xb5\x38\xba\xf6\x7b\x0a\xce\x16\xed\x79\x36\x97\xd2\xe3\x4e\xbe\x9c\xe4\xbb\x12\x6b\xf5\x93\xce\x31\xb7\xb1\xd5\x8c\x5d\xaa\x87\xcf\xb0\x58\xc2\x9f\x22\xfa\x65\x12\xdd\xbe\xf9\xcc\x95\x08\xc9\xa0\xfa\x10\x86\x1d\x27\x92\x23\x81\x4b\x45\xb8\xd0\xe4\x5e\xee\x1a\x14\xba\x0f\xed\xbe\x28\x8f\xee\x36\x20\x91\xf3\x0e\x54\x5a\xe0\x48\x63\xaa\x69\x5d\x08\x26\x70\xba\x03\x57\xf4\x75\x28\x66\xcd\xcb\x8d\xf2\x67\x21\x96\x2c\x9c\xef\xc3\xcf\x4e\xd9\x1a\xe
4\x7d\x95\x56\x25\xa4\xd8\x41\x4b\x5f\xd7\xc4\xbf\x37\xf7\xa2\xe6\x09\x17\xbb\xaf\x28\x7a\x99\x12\xf5\x77\xb2\x66\x3b\x4d\xed\x29\x77\x39\xb7\x95\xb0\x6c\x35\xce\x31\x59\x94\x12\x45\x14\x6e\xd5\xc8\x04\x19\x2a\x55\xe9\xfe\x20\xa0\x7a\xf2\x19\x4f\x78\xce\x87\x1e\x41\xc1\x1d\x56\xdf\xa0\xec\xbd\xaf\x7b\x82\x62\x24\x64\xcf\xaa\xaf\x31\x8b\x08\x7d\xfb\xe2\xfe\x71\x6b\xf5\x0f\x96\x0f\x8c\x53\x96\x16\x85\xcf\x90\xb4\x49\xff\xa1\xe6\x20\xb6\x22\x46\x13\x0f\x8e\x00\xa7\x9d\x9d\x5a\xc1\x98\x97\x07\x10\xb3\xaf\x95\xe5\x2d\x6c\x0a\x4f\xfe\x76\x8e\x9a\xc0\xf9\xd4\xee\x51\x6e\x36\xd9\xd3\x2a\xd4\xdd\xe1\xe3\x53\xc0\xe5\x2b\x17\x23\x38\x8d\xa1\xec\x21\x82\x2f\x40\x48\x8e\xbd\x16\x89\x1a\x28\x79\x13\x23\x70\x2b\xcf\x36\x3a\x9d\xdd\x6c\xae\xcd\x77\xa1\xec\x26\x77\x74\xd8\xc0\xf6\xfe\xb7\x1d\xab\xef\x37\xaf\x18\x2a\x6c\x39\xb0\x5e\x72\xdb\x69\x8c\xfc\xdb\xe2\x90\x80\xae\x97\xcc\x3c\xb5\x4d\xb7\x9e\xf0\x68\x4a\xe1\x0c\xe6\x1c\x56\x18\x47\xfd\x7a\x71\x38\x83\x64\xf0\x5c\x7b\xd6\x94\x94\xba\x94\x18\xff\x80\x91\x17\x6b\x9a\xaf\x67\x01\x74\x3f\x6b\xc2\x4d\x35\xdb\x31\x80\x73\x1f\xa6\xec\xca\x3e\xfb\xe3\x1d\xd3\xa4\x44\x78\x67\x00\xd6\x62\x0c\x63\x1a\x6e\x20\x6b\x1e\x90\x88\x24\xf7\x47\x6c\x56\x59\xfa\x8b\xb3\xd7\x83\x2d\x4a\xc2\x89\x3e\x34\x70\x22\x3d\x08\x0d\xf9\xa5\x1d\x12\x8b\x91\x54\x5b\xa6\x77\xba\xec\x4b\x76\x20\x88\xd7\x86\x90\xbc\xef\x9f\x9b\x55\x5f\xa0\x4b\x40\x0e\x31\xa5\xc5\x28\x70\x59\x82\xea\x6e\x9c\x62\x60\x8e\x05\xcb\xdb\x3c\xa8\xe2\x95\x22\x78\x6a\x8b\x85\xbf\x7e\x7d\xb6\xad\x97\x6b\x1a\x25\x08\x3f\xc4\xfd\x14\x15\x94\xc7\x3a\xbc\x8c\xe5\xca\x58\x24\xe8\xc5\xbd\x46\xe6\x6a\x31\xdd\x9d\xcb\x60\xc1\x57\xc7\x3f\xa0\x94\x81\x50\xe9\x08\x84\x45\x7f\xa9\xea\x3e\x11\xd5\x8a\x1a\x51\x6a\xac\x3f\x3f\x99\xeb\x80\x9c\x5a\xd2\xe7\xe0\x18\xe6\x59\x3f\x60\x61\xbb\x36\xfd\x13\x0d\xaa\xe2\x64\x2d\x20\x85\x21\x16\xa3\x50\x2f\x32\x64\xd9\xd3\x0e\x48\xa6\x1e\x65\xe0\xd2\xc0\xf8\x1a\x0f\xd1\x3f\x31\xfe\xb8\x9a\x9a\x15\xcd\x93\x4b\x51\xb4\xd3\x42\x2b\x7c\x3a\x70\xeb\xac\x05\xd0\x99\xc1\x36\x17\xa6\x48\xf6\x1f\xf4\x5a\x18\x54\x96\x02\x9f\xdc\xfa\x64\xeb\xe0\xd5\x47\xed\x75\x30\xcc\xdd\x07\x88\xd8\xb7\x0c\x11\x81\xd2\xb7\x22\x5a\xcc\xc1\x6e\x3b\x78\xc4\x96\x67\x50\x6f\x6c\xfa\xd6\xe5\x5e\x4a\xf3\xea\x0d\x77\x22\x43\xc9\xc2\x69\xe8\xe8\x83\x9c\x83\x74\x09\x02\x9d\x73\xd2\xb8\x8c\xb1\xb8\xb5\x5a\x15\x88\x9a\xb1\xfe\xa3\xce\xc6\x37\x19\xbf\x1a\x3d\x60\x17\x2a\x4b\x8d\x93\x74\xaf\xaf\xe9\xa8\xb6\x87\x5d\x7d\x6b\xcb\x00\x6a\xe4\x78\x4a\x4b\x32\x25\x49\xec\x0c\x7b\xe1\x52\x1c\x3b\x74\xe4\x66\x56\x66\x03\xf6\xef\xb9\x6d\xe2\x34\x4f\x6b\xeb\xb7\xf3\xbb\xd8\xc7\x65\x27\x4d\x2a\x01\xe6\x92\xad\x22\x8b\x9b\xca\x58\x22\xcf\x3d\x94\x31\x11\xac\x2c\x38\x0c\xe0\xb5\xe0\x53\x47\xc7\x1e\x5e\x92\x1c\x6e\xb5\x17\xc7\x6f\x8d\x36\x09\x13\xf6\x33\xcd\xbd\xe9\xd6\x46\xf4\x54\x0d\xcc\xc6\x2e\xb6\xf7\xde\xec\xf9\x98\x0c\x80\xcf\x5a\xbc\x27\x8c\x4c\x8c\x5d\x0e\xa1\xcb\xf6\x7b\x7a\xd1\xa9\x82\x72\x35\x45\x25\xbd\x28\x0b\xdd\x9b\x59\x33\xf6\x2e\x12\x25\xc3\x60\xaf\xde\x91\x7c\x9a\x95\x5d\xc7\x2b\x62\x60\xb0\x4d\x89\x91\x2f\xf5\x43\x41\x26\xd4\xf5\x4f\x0c\x57\x88\xee\x43\x0b\xf0\xbd\x34\x64\x6a\xb1\x56\xff\x13\x66\x21\x5e\x98\x7b\xfd\x11\x39\xb9\xaa\xb6\x6e\xd6\x6e\x54\x19\xe8\x1c\xc5\x78\xec\x78\xb0\xbe\x43\xc8\x2d\x21\x52\x42\xc0\x06\x54\xe3\xf9\xd4\x4d\x4d\x13\x13\xbc\x02\xee\xea\x19\x25\x97\x90\xe0\x35\x7c\x73\x99\x16\x31\x3b\xf2\x83\xc5\x1d\xce\xfa\x05\x03\x85\x6e\x31\x45\xed\xb9\x46\xad\xf7\x3a\x9c\xa4\x02\xc5\x40\xec\xeb\xa6\xaa\x6b\x8d\x47\x26\xed\xef\x0d\xd3\x50\xff\xeb\x28\x8c\xd4\xaa\xc3\xa7\x8b\xe7\xef\xc6\x86\xad\x4c\xb0\xad\x04\xd2\x
5a\x9d\x33\x87\x73\xe3\xe9\x9c\x54\x1f\x84\x08\xda\x1d\x53\x32\xf5\xe2\xec\x7b\x45\xb4\x46\x09\x10\xfc\x38\x1f\x01\xde\xf1\x84\x3f\x9b\xd3\x20\xc9\xc1\x2d\xb6\xa0\x83\xc2\xa4\xd7\xc7\x88\xa9\x9c\x66\x1c\x89\x85\x2e\xc9\xf0\xb9\x66\xf6\x48\xb3\xe3\xb8\x52\x7e\xd9\x9c\x78\x0e\x73\x3a\xa0\x07\x71\x98\x31\x13\x1f\xe4\xd2\x28\x91\xd1\x92\x05\x12\xc3\x7d\x4d\xb6\xac\x8c\x03\xab\x0f\x24\x36\xce\xf5\xbe\x50\x09\x1d\x7b\xa5\xf6\x42\x55\x93\x12\xb9\x18\xcc\xbd\x35\x4c\x61\xdb\xeb\x0b\xca\x76\x52\x50\xa6\x08\x0e\x35\x8b\xc8\x57\x53\x91\xc9\x2e\x1f\x0e\x91\xe0\x03\x6c\x5c\xe0\x50\xc5\xe2\xac\x6e\xc1\x20\xe0\x67\xac\x0f\xcd\x81\x55\x93\xab\x87\x56\xb3\x78\x1d\x14\x9b\xbe\x12\x6f\xa3\x97\x7d\x47\x19\x73\x81\xcb\xc1\x67\x29\x51\x00\x10\x5f\xf5\xc5\xe2\xcf\xa0\x47\x19\xd3\xb8\xe5\x56\xd0\x93\x25\x77\xf6\x22\xb3\xe9\x7a\x27\x22\x45\xb0\x86\x3b\x47\x69\x7d\x6e\x8c\xe8\x71\x35\x8c\xba\x27\x57\x3f\x90\x2a\x5c\xcc\x0b\xa7\xdb\xbd\xf9\xb9\x25\x94\x3c\x6d\x43\x90\x78\x16\x96\x7f\x4e\x64\xe2\x1c\x96\xa7\xfd\xe8\x55\xd6\x97\xbc\x26\x43\x6f\x93\xa2\xe2\x17\xe9\xb2\xc5\xa1\x8d\xf4\x8b\xfc\x5b\x7c\x1d\xd9\xf6\x81\xbe\x15\x12\xd7\x5c\xab\x70\x9d\x31\x15\x6e\xc7\xb6\x88\x56\x22\xd2\x0f\xe3\x56\x48\x06\xd2\xcd\x4e\xda\x6d\x69\x4f\xe8\x17\x2c\x1f\x33\x4e\xbf\xac\x56\x42\xc5\x9a\x34\xd7\x50\x19\x80\x30\x96\x07\x90\x92\x76\xa7\x03\xfe\x9b\xec\x2d\x38\x39\x8f\x4a\x47\x9b\xc0\xdd\x19\xe0\xa7\xc9\x58\x3a\x64\x9a\x36\x78\xc1\x6f\x00\x38\x15\x27\x53\xd6\x5e\xfd\x22\xb2\x5a\xa5\xcb\xee\x98\x84\x95\x00\xbc\x9f\xeb\xac\xe2\x48\xa5\x35\x14\x9b\x98\x10\x22\x06\x78\x88\xee\x58\x88\x07\x59\x06\x92\x45\x02\xbe\x27\xf8\x85\xfe\x95\xda\x18\x75\x67\xba\x47\x96\x67\x62\x22\x4d\xe2\xb3\x5d\x22\xfa\x0b\xda\xc7\x07\x70\x32\x6a\xf0\x76\x00\x65\x6b\xe7\x8c\xce\xf4\x11\xe5\xe5\x87\x0f\x66\x3d\x22\x45\xd0\xb2\x8b\x82\xe5\x46\xf8\xdf\xe8\x8f\x0c\xf4\x8d\xf1\xf7\xad\x82\xf0\xc0\xdc\xf8\xb1\xec\xa4\x39\xc1\xb6\xdb\x19\x83\x27\x58\x5a\xee\xf9\x94\xd9\x35\x6a\xa3\x44\xdb\x42\xe9\x78\xc3\xe4\xf5\x8b\xb1\x7c\xa2\xc4\xa1\x94\x49\x23\x0c\x74\x27\x4a\x0f\x90\x52\x42\xf5\x03\xc3\x4a\x56\x68\xfa\x04\x61\x6b\xcb\xc1\xb6\x06\xf8\xf2\x5a\x56\x13\x76\x10\x9e\x36\xe7\x3c\x56\xc0\x56\x8d\x64\x54\xd4\xce\x2e\x84\x97\xed\xf8\xb5\x2a\x36\x80\xf6\xb9\x1c\x52\xa7\xd3\x0b\xd6\xd1\xa9\x8f\x15\x20\x61\x32\x28\xeb\x03\x9e\x40\x33\x5a\xb9\x44\x22\x27\x1f\x7a\x30\x79\x15\xbc\xb6\xff\xe3\xe1\x7c\xa9\x3e\xcd\x89\xd6\x24\x56\x02\x6f\x01\xb1\x3f\x54\xcb\x9e\x26\xb7\x16\xe5\x77\xc4\xb4\x18\x45\x56\x4d\x6f\x31\x88\x5c\x46\x9c\x2d\xb3\x14\xe3\x48\xce\xec\x5d\x58\x41\x10\x2a\x11\x0e\x71\x3e\x09\xee\x9b\x05\x75\x79\x6e\x7b\xd9\xbe\x74\x11\x29\x02\x66\x00\x03\x64\x8b\xb5\x80\xcd\x99\x75\x50\x8d\x8d\x1b\x97\x0c\xea\x99\xfa\x9d\x93\xfb\x9e\x1e\x9a\xcb\x3d\xba\xd7\x32\x00\xf7\x59\x76\x35\xc4\xda\xa0\x80\x88\x85\xff\x96\x5e\x63\xb6\xdd\x37\x3d\x1e\x3c\x27\x3b\x1a\xa0\xbf\x07\xc2\x67\x06\x28\x43\x0d\xfa\xd8\x8b\xdf\x8c\xa3\x93\x4a\xdf\xf4\x03\x3f\x3d\xaf\x56\x9b\x3e\x2d\xf3\x88\xfb\xb1\xa9\x22\xcf\x27\x1b\xcd\xb8\xb6\xe4\xdb\xcb\x1a\x25\x00\xa3\x6a\xdd\xc4\x51\x74\x8a\x18\xca\xfc\x67\x46\x5b\x8f\x04\xde\xa8\x3a\xb3\x0c\x09\x9f\x20\x9a\xe1\xda\x73\xef\xcb\x13\x60\xee\x1c\xb3\x90\x75\x24\xb6\xe1\x27\x82\x76\x0b\xf0\x00\x3a\xf1\x94\x83\x1d\xcc\xe4\x7a\x25\xb0\x4d\xc5\xb5\xf6\x50\xb4\x18\x42\xf0\x9a\xfb\x61\x5b\x12\xbc\x6c\x85\xe7\x37\x04\xe0\xd3\xc8\x68\x03\xf8\x60\x85\xa1\x01\x20\xc7\x70\xb6\x64\x53\xa4\x8b\xc5\x88\xaf\x38\x47\x99\x76\xbb\xb3\xa9\xc5\x0b\xfc\x5c\xe3\x32\xb1\xfa\x46\xd3\x55\xbb\x03\x5a\x45\x01\x9e\x29\x97\xc5\x38\x82\xea\x61\x8a\xbf\x7a\xd9\x7a\x7a\x4a\x5d\x28\x2b\xa9\xf0\x96\
x1b\x85\x71\x5f\x72\xc2\x45\x9a\x91\x26\xdf\xd9\x24\xad\xd0\x30\x1e\x08\xab\xc1\x1f\x16\x8f\x3c\x7c\x64\x26\xf2\x18\xff\x5c\x18\xef\xc7\xda\xb9\x5d\x93\x9e\x2f\x50\x72\xef\x68\xdb\x74\x14\x84\x32\xe6\xa6\xb6\x42\x03\x1f\x08\x4a\x33\xff\x6e\x32\xe8\x04\x86\xf9\x26\xf1\x43\xf2\xd0\xb0\xb6\x0b\xec\x40\xb2\x6e\x1d\x83\x61\xcf\x48\x10\x83\x51\x25\x83\x37\x56\x59\x9b\xa9\xb6\x39\x90\xc0\xd1\xb2\x51\x4e\xdf\xcd\xae\x91\xe2\xa8\x3d\x5c\xe1\x43\x96\x12\x15\xa5\x44\xc2\xdb\xbb\x83\x9b\xac\x05\x70\xb5\xbc\x0a\x61\xde\xc4\xe7\xae\x3f\x89\x00\x5a\x66\x1e\x89\xd8\x79\xf2\xcb\xd7\x2e\x2a\x66\xdf\xa8\xd3\x49\x89\x74\x9b\x8b\xe1\x1c\xfc\xf4\x37\x2b\x8b\xe2\xaf\x03\x1c\x63\xa6\x07\x92\x25\x3b\xb3\x88\x89\x02\xb5\x71\x04\xc9\xf8\x2d\x0e\x4c\x86\x8c\xec\xc4\xc2\x9c\x8e\xa3\xd2\xbc\xc8\x6b\x94\xb0\xc6\xf9\x42\xf5\x0e\x3f\x2e\x20\xc4\x84\x28\x3a\xea\xbd\x37\xc0\xe7\x23\x19\xf9\x45\xb7\x4f\x7f\x65\x7e\x6d\x50\x6e\x4f\x16\x0f\x25\xde\xb9\xbc\xff\x46\x05\xb9\xe2\xac\x77\x64\x5e\xcd\x3d\xf6\x37\xc4\xd7\x39\x40\xdd\x7d\x9f\x2a\x55\x9a\xc1\x78\x85\x03\xb4\xb7\x43\xec\x68\xea\xba\xde\x85\xc6\xf7\xcf\x61\x8f\x00\xa4\xff\x29\x42\x0a\x94\x44\xb3\xdc\xa3\x66\x2d\x90\x4d\x2d\xeb\x12\xdb\xfa\x1c\x3f\x0c\x7b\xcd\x5d\xd6\x51\xa8\x88\x9b\x18\x34\x18\x94\x57\x2c\x5a\x07\x88\xe8\x3c\x3f\xe2\x32\x49\xf7\xa2\x2f\x5e\xac\x47\x03\xa8\x93\x54\xc0\x6c\x45\x6e\xe3\xc4\x49\xa6\x94\xf4\x86\xb6\x58\xa0\x7d\x00\x21\x9b\x0a\xd8\xaa\xc2\x39\x6f\x3f\xe3\xb9\xe4\xb7\x95\xb8\xb9\x6f\x21\x9a\x3b\x40\x77\x16\x44\x80\x94\x66\x2a\xa2\x59\x00\xca\x2c\xb1\xe1\x4b\xbf\x4e\x13\x55\xf4\xb4\x58\x72\x23\xf5\xbe\xad\x3f\x53\x03\xa4\xb4\x7e\x2c\x1b\xc9\xaa\x85\xd4\xf8\x45\x14\x6b\x74\x5e\xc6\x53\x87\xf8\x1c\x92\xbc\x23\xc3\x03\x0c\x3e\xe3\x7d\x89\x87\x25\x56\x46\x89\x3a\x40\xe6\x16\x31\x93\xbe\x60\xe0\x40\x7c\x88\xe1\x51\x65\x36\x9d\xb3\xaa\x92\x96\x03\x6f\x41\x89\xfa\x57\xec\x52\xae\x89\x86\x33\x81\x88\xb7\xbf\x8b\x70\xed\xbf\x4f\xb4\x1a\xac\xd1\x47\x0c\x2d\xd0\xd0\x4c\x42\x0e\xe5\x73\xda\xa2\x9e\xf5\x48\x4a\xc7\x2d\x4a\x1f\x47\xd9\x2a\x75\xd9\xb8\x84\x0d\x31\x97\x5b\xe1\xf5\x70\x0b\x61\xad\x77\x9c\x63\xaf\x9c\xb1\x38\xbf\xa5\x66\x55\x3c\x64\x4f\x60\x1b\x85\x51\x8b\x2d\xea\xe7\x00\xea\x2d\xca\x0d\xc3\x1d\x31\x44\xba\x3f\x32\x42\xd0\xae\x39\x03\xcd\x9f\x14\x40\x90\xa0\x88\x97\x17\x92\xcc\x29\x84\x46\x14\x46\xb5\x2e\xb2\x2a\xc5\x6a\x4d\x13\x1a\x7b\xf4\xe3\xdf\xdc\x14\x71\xa2\x3b\x06\x01\x9e\x72\xdd\xff\x9f\x7c\xbe\x91\xcd\x13\x5e\x9a\x43\x99\x73\x1b\x8f\x38\x99\x90\x0a\x99\x5a\x43\x26\x55\xd8\xba\xc3\xb4\x45\x73\x42\x99\xa3\xbe\x4d\xcb\xb7\x07\xdd\xcc\x32\x8c\x81\xf3\xac\xc6\xbd\x25\xf2\x89\x26\x91\x1a\xeb\xf6\x8e\x4a\xee\xc8\xc7\xf1\xe0\x20\xd6\x6e\x14\x4a\x49\x55\xe4\xdc\x69\x3d\x58\xa4\xff\x4e\xd6\x7d\x54\x3c\x1d\xde\x2a\xfb\x2f\xa3\x92\xa6\x9f\xeb\xe9\x0f\x70\x64\x73\xd1\xc3\xed\x45\xa0\xda\x3e\x93\x02\x51\xba\x71\x9c\x76\xba\x2c\x46\x20\x14\xd0\x62\x88\xfd\xdc\x0c\xb7\xb3\x14\x53\x3b\xbd\xe9\xd9\x5f\x14\x31\xd9\x39\x73\x82\x69\x5b\xb3\xf2\x35\xc2\x2e\xe2\x95\x94\xce\x75\x36\x5b\xca\x54\xee\x86\x0b\x22\x56\x75\x14\x49\xf6\xde\xd4\x8d\xa2\xb2\xfc\x69\x7e\xd1\x28\xdb\x7e\x49\xb8\xe6\x09\x31\x79\xce\x29\xb9\x4d\x49\xd2\xd9\x29\x84\x7b\x17\xc0\xff\x92\x26\x61\xee\x9a\x1c\x91\x4f\xcc\xa6\xcd\x77\x7c\x70\x8b\xa9\x01\x6e\x38\x71\x94\x67\x02\x26\xfb\x74\xa6\x19\x3b\x62\xa1\x7f\xf4\x40\xc9\x1d\x92\x96\x5d\x7d\x02\x45\xa5\x44\xbd\xc4\x75\xd4\x13\x52\x4c\xde\x65\x01\xd4\xa3\x1a\xd7\x83\x8e\xf0\x64\x00\xd1\x02\xf9\x1a\xbd\x11\x5c\xe8\xef\x60\x43\x27\x35\x7c\x00\x82\x53\xf9\x52\xe9\x04\xcc\xbf\xff\x3b\xe5\x36\x22\xc5\xb3\x67\x3a\x1c\xac\x88\x05\xef\xee\x84\x26\xe5\xf0
\xf1\x48\xa4\xac\x71\xe4\x08\x9f\x4f\x2c\xdb\xda\xb4\x5f\xb2\x78\xf8\x7b\x37\x24\xa3\xfc\xad\x30\xe0\x70\x23\xf8\x3d\x03\x76\x81\x1f\x74\x0f\x8c\x31\x21\xed\x23\xd4\xcc\x49\xc5\x7f\x94\x64\x69\x95\x30\xe9\xd8\x54\x9d\xee\xe7\x18\x5c\xd9\xea\x5c\xaf\x4d\xfd\x5b\x27\x02\x0a\x11\x2f\x4c\x82\x00\x54\x32\xd3\xe0\xc0\xc1\xd5\xec\x5e\x55\x2b\x60\x52\xa6\x07\x2d\xb2\x28\x1a\x3d\xf0\x96\xfe\x7e\x73\xa4\xb4\xe5\xeb\x1c\x7c\x38\x8d\x06\xac\x92\x6b\x0b\xd9\xa0\x13\x95\xa9\xfd\x73\xe3\x61\x07\xbd\x8c\xcd\x33\x32\x8a\x86\xd5\x0a\xd6\x18\x9d\xf1\x9b\xeb\x9d\x38\xb6\x88\x38\xc7\xb8\xee\x84\x65\x4c\x39\x9f\xe7\x13\xad\x29\x28\xe1\x87\xc8\x91\x6e\x2b\x6f\x06\x38\x04\xf1\xb6\xb1\x21\xa3\xd2\xa8\x48\x6d\x45\x7d\xed\x16\x97\x9b\x7f\x0c\xa2\x6d\x86\x63\xc5\x04\xf4\xc8\x43\xac\xba\x30\xf1\xcd\xfc\x6b\x90\x89\x83\xda\x9c\xf7\xb9\x15\x32\x45\x9a\x38\xdd\x17\x71\x30\x38\x0c\x71\xa1\xec\x6f\x64\xc9\x71\x32\xb7\x8e\xef\x21\xa5\xd2\x05\xf0\x37\x5b\x5e\x97\x0b\x7e\x71\xdf\x05\xec\xe8\x10\x10\x73\xd9\xf0\x35\x0b\x91\xea\xdf\x60\x9a\xe0\xb0\x5b\x91\x64\x6b\x24\x6b\x7a\x6f\x5d\x4c\xf6\xf9\x67\x84\x7c\xc9\x49\xb1\x83\xfa\xf9\xf7\xe2\x26\xe9\x3a\x18\x0c\xc8\x6f\xe8\x2c\xca\xc2\xa8\x59\x8c\xf0\x50\xb9\x5f\x13\x28\x70\x2f\xce\xe7\xae\xa4\xb7\xfe\x6c\xd8\x1b\x9c\x8f\xa7\x0a\x12\xd8\x4e\xdb\x6b\xa3\x8b\xce\xc2\x73\xf3\x09\x25\x6f\xd3\x08\xf0\xb9\x14\x80\x15\x6e\x84\xab\xf6\x0f\x59\xe9\x1f\xd0\xfc\xe3\x31\xf5\x91\x20\x7a\x1f\x8a\x7e\xec\x9d\x93\x6a\x71\x9f\xc9\xde\x53\x77\x2d\x9c\xa8\x10\x5c\xf7\x82\x98\x21\x24\x42\x00\xd6\xd2\x9b\x2c\x77\xe3\x3e\x0e\x36\xb3\xa1\xe3\x4a\x30\x77\x06\xde\x3d\x7a\x77\x6e\xac\xee\x2c\x1e\xdd\x9d\xb6\xee\x47\x01\x61\xbc\x19\x82\x8a\x70\x49\x6d\x66\x22\xae\xdf\x59\x6c\x4f\x5f\x9e\x4f\x9b\x4f\x7f\x00\x3a\xd2\x12\xff\xd8\x4c\x0b\x1d\x47\x4d\x23\xfb\x1a\x7f\xd7\x20\x7c\x03\x7f\x83\xae\x0e\x78\xf0\xe7\x7f\x52\xe2\x6c\xf5\x3c\xbe\xc8\x95\xaa\xce\x28\x88\x34\x2f\xe6\x3a\x9b\x51\x95\xe0\x36\xc5\xe1\x63\x93\xc6\x1f\xcb\x09\xaf\x01\x6a\xb8\xb1\x38\x8e\xe9\x05\x19\xd8\xc2\x9a\x61\xa7\xc0\x65\x1c\xdc\xc4\x51\x84\x46\x5e\x51\x97\x1f\xa1\x67\x95\xc7\x62\x58\x8b\x99\x8a\x07\x9d\xba\xae\xa3\xc3\x6e\x98\x91\xbc\xbc\xc9\x14\xde\xf0\x15\xb0\x93\x8c\x8a\xc5\x69\xff\x75\xfc\xe4\xbc\xd2\x30\xad\x6e\x91\xd8\x0f\x7e\xbc\xea\x2f\x04\x58\x2d\x95\x62\x84\x11\x31\x33\x13\x25\x88\x22\xcc\x7d\x6f\x8c\x8f\xd3\xf9\x02\x70\x26\x7f\xad\x8c\x1b\x43\x74\xa1\x6b\x9b\x75\x36\xee\xd0\xe4\xfa\xf2\x2c\x9c\x35\x1a\xdf\x5a\x7a\xb3\xdc\x83\x97\xa7\x59\x8e\x38\x14\x63\x61\xb9\x55\x82\x80\xfa\x4a\xb3\x4e\xf9\x1a\x36\x43\x5e\x56\xa2\x5e\xb6\x1a\x92\x42\xab\x58\x09\xdc\x70\x23\xbc\xef\x9d\x6a\x94\x44\xca\xb3\x20\x56\xf6\x19\x52\x1f\x31\xfd\x2b\x17\xd8\xa0\x4b\xc9\xb8\x69\x68\x12\x88\xd7\x54\xb0\x8f\x46\xfc\xe8\x31\x46\x77\xcf\xd1\x8a\x82\x62\x94\xde\xf0\x0f\x45\xaa\x66\x5a\x69\xba\xab\xd9\x9f\xf6\xfc\xa6\x46\x91\x67\x39\x72\x3b\x8a\x73\xfd\xb1\xea\xbf\xd2\xca\xbd\x49\x99\x7a\xc0\xde\x66\xa2\x07\x76\xb5\x47\x0c\x59\x2a\x4b\xd9\xe2\xf9\x8d\x7e\x84\x49\xf0\xa8\x04\x6c\xc0\x10\x29\x70\x54\xb7\x74\x09\x9f\xa4\xf1\xae\xdb\xe9\x14\xad\x49\x8e\xd4\x55\x1a\x38\x80\x9e\x83\x5b\x93\xb0\x85\xb8\x0d\xf1\xe1\xd3\x78\x85\x46\x73\xc5\xde\xc8\x62\x3b\xa5\x85\x06\x94\x5c\x49\xa9\x40\x45\x0e\xf2\xfb\x16\x75\x9b\x2c\x4c\x05\xe2\x0e\x2d\x05\x4a\x40\xc4\xbb\xb4\x48\x4b\x08\xa7\xbe\xd0\xe1\x43\x3e\x54\x2f\xbb\x0a\x2c\x1b\x1f\x70\xc3\x56\x5c\xe9\x05\xf0\x67\x86\xd8\x28\x19\x6b\x1a\x87\x56\xd2\xf6\xde\xfb\xbb\xfb\x34\x50\xd4\xf7\x3f\x44\xaf\x8e\x5b\x11\xf2\x04\xf1\x3b\x28\x78\x92\x65\xcd\x9d\xa7\x9d\xf8\x4e\xfb\xb1\xe0\xde\xa3\xfd\x2c\xcc\xdc\x0f\x38\xc9\xf8\xd3\xec\x21\x7
9\xf0\x94\xd8\xf4\x88\x60\x8c\xc1\x7b\xcd\x36\xf9\xd8\x2d\xc4\x79\xf6\xa9\x03\x6b\xdd\xa0\x81\x7f\x2a\xce\x3b\xf1\x8e\xa2\x61\x04\x32\xf5\x35\xaf\xa2\x5d\xa1\x3b\xed\x1b\x77\x74\xec\x78\x38\xc5\xf9\x50\x2b\xf8\x4e\x0b\xb0\x0f\x0b\xd7\xe0\x22\xba\xbe\x1b\x4b\x02\x23\xeb\xe0\x0d\x62\x2a\x90\x77\x16\xb5\xa3\xd0\x05\x6b\x8f\xda\xaf\x49\xd9\xad\x4f\x36\x0b\xb1\xf4\x88\x66\xe5\x75\xb0\x86\x2b\xeb\x0d\xb9\xaa\x7f\x8e\x41\xf6\xc8\x5c\xb8\x26\xe9\xdb\x54\xe7\xab\xc7\xef\x6b\x7e\xf0\xa3\xf1\x2d\x6d\xbb\xbb\xe5\x47\x41\xff\xa5\xe8\x7a\x02\x1c\x45\xc8\x12\xd0\x2e\x99\x11\x19\xb5\xa1\x8a\x22\x86\xb7\xe8\xd8\xdf\x0a\xdd\x04\xf9\xaa\x45\x4f\x7b\xcb\x2b\xdc\xed\x93\x3d\x8e\xd8\xbd\x68\x57\x70\xda\x51\xa5\xf4\x76\x94\x80\xc5\x1e\x87\xcd\x12\xfc\x5a\x76\x4f\x7d\x57\x6b\x56\x81\x20\x34\xa1\x61\x61\x09\xf6\xc5\x9f\x80\x78\x3d\x04\x52\xdc\x55\x93\xa1\x9a\x3b\x4f\xef\x3e\x2c\x38\xc2\xad\x87\x7e\xbf\x67\x3c\xdc\xa5\x36\xdb\xfb\xa6\x53\xb7\xb4\x69\x8e\xe3\xd0\x1b\x5e\x74\xb0\x44\x3a\x93\x77\xdc\x41\x44\xf0\x7e\x0f\x4d\x49\x5d\x1c\xfc\x5a\xb1\x62\x55\x3e\xe3\x93\xbf\x89\x72\x2d\xc5\xd1\x88\xcc\xf3\xad\x5d\xc7\xad\xca\x94\x05\xc5\xc2\x17\x3b\x82\x0f\x87\x3f\x9e\xa6\x92\x01\xac\xae\xb0\x5b\x6b\x1e\xdc\xf8\xda\x5b\xa3\xe8\xae\x88\xed\xdd\xeb\xe2\x48\xd5\x6c\x85\xda\x64\xc8\xa8\x5a\x9d\xc2\x8b\x9d\x54\x03\xb8\x91\xdf\x14\x34\x6d\xc8\x25\xeb\x83\xae\x0a\x40\xa3\x48\x27\x6f\x10\x55\x81\x19\x66\x0a\xce\x0d\x97\x2d\x18\x74\xe4\x8d\x23\x38\x6b\xbf\x88\x0e\x0e\xa7\x5d\x18\xe6\xa9\x91\xa0\x02\x38\x82\x7a\x8c\xf2\x79\x2d\x87\x6d\x57\x57\x72\xe5\x4d\x8e\x22\xc2\xc8\x8f\x63\x29\x6b\x38\x7c\xc9\x40\xb8\xe3\xb7\xf1\x55\xb4\xc4\xbd\xaf\x5e\x79\x12\xe8\xf2\xa5\x47\x40\xb6\x71\xd5\x6b\xeb\x79\xa4\x5b\xa8\xa3\x3f\xe5\x8c\x5e\x67\x99\xfa\xb7\x66\xcf\xc4\x11\x6f\x6f\xde\x51\xac\xb4\x93\x13\x05\x5b\x73\x81\x3b\x9f\xc9\xf8\xdf\x0c\xa2\x7d\xae\x56\x6b\xe5\xf3\x92\x0b\xd2\x21\xb2\x02\xc1\xc5\xb0\xb1\x60\x48\xb5\x8d\x2b\xd5\x2c\xb9\x76\x10\x63\x61\x2d\x7c\x34\xc7\x71\xf3\xe2\xe7\xb6\xf2\xd8\xcc\x72\x4f\x64\xa3\xea\x86\xcb\xdb\xaf\x22\x05\x52\x3d\x9c\x18\x42\x49\xb9\x01\x9c\x32\xa8\x37\xb7\x52\xe8\xde\xe4\xee\x6e\x81\xfa\xa2\xdf\x0a\x95\x3d\x99\x1d\x7b\xb4\x9b\x20\xde\x7a\xf9\xec\x4e\xc6\x03\xb6\x47\xdf\x06\x30\x37\x7d\x02\x9f\x83\x12\x47\x5b\x52\xd1\x49\x1b\x3e\x38\x9e\x37\xef\x63\x22\xeb\x0b\xa4\xd8\x99\xe8\xde\xbe\x84\x8b\xb9\xe8\xd8\xbe\x53\x33\x6c\x43\x66\x8a\x57\x7c\x4a\xe3\xcc\x77\xa7\xd1\x21\xf0\x98\x1b\x10\x3c\xaa\x58\x73\x25\xd2\x86\x79\xa8\xf3\x4b\xa1\xf1\xd8\xf7\x96\x00\x08\x87\x1e\x15\x29\x43\x5b\x0a\x79\xaf\x22\x85\xe2\x0f\x6d\x5f\x05\x3e\xf7\xa0\xb4\x9f\xbb\x35\x7f\xf5\xce\xf5\xf8\xa2\x85\xdc\x55\x56\x1f\x3a\x0b\xba\xb2\xed\x96\x96\xd5\xdb\x79\xd6\xa4\x7f\x82\x27\x61\x16\x4d\x67\x08\x15\xec\x0d\x5c\x1d\x52\xf2\x3e\xc3\xcb\xdc\xa2\x53\xa1\x65\xcd\x6f\x55\x6e\x18\x59\xea\xbc\x4f\xe9\x07\xea\x93\x1f\x80\xcf\x6e\x2f\x49\xdc\xa2\x67\x3b\x1e\x02\x50\xf7\xbb\x91\x82\xb9\x9a\x90\x86\x28\x3a\x9e\x0c\x97\xf1\x8e\x21\x6b\x92\xc9\x5e\x2a\x2d\x97\x9c\xab\x57\xba\xeb\xf8\x5a\x4a\xee\xeb\x20\x79\x68\x78\x19\xce\x2d\x7c', 2)
|
from flask import Blueprint, render_template, request, redirect, url_for, flash
from .forms import LoginForm, RegisterForm
from flask_login import login_user, logout_user, login_required
from ..models import User
from .. import db
main = Blueprint('main', __name__)
@main.route('/', methods=['GET'])
def index():
return render_template('index.html')
@main.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
        user = User.query.filter(
            User.username == request.form['username'].strip()).first()
        # Verify the submitted password as well as the username; the User model
        # stores the password directly, so compare it as-is (see the hashing
        # sketch at the end of this module for a safer setup).
        if user is not None and user.password == request.form['password'].strip():
            login_user(user)
            flash("Logged in successfully.")
            return redirect(url_for('users.user'))
return render_template('login.html', form=form)
@main.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
        # Let the database assign the primary key (assuming an auto-incrementing
        # id column); counting rows to pick an id races under concurrent registrations.
        user = User(username=request.form['username'].strip(),
                    password=request.form['password'].strip())
db.session.add(user)
db.session.commit()
flash("Registered successfully.")
return redirect(url_for('main.login'))
return render_template('register.html', form=form)
@main.route('/logout', methods=['POST'])
@login_required
def logout():
logout_user()
return redirect(url_for('main.login'))
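# A minimal sketch, not wired into the routes above: Flask ships with werkzeug,
# whose security helpers let an app store password hashes instead of plain text.
# The helper below is illustrative only.
def _hash_password_demo(raw_password):
    from werkzeug.security import generate_password_hash, check_password_hash
    # generate_password_hash salts and hashes; check_password_hash verifies.
    hashed = generate_password_hash(raw_password)
    assert check_password_hash(hashed, raw_password)
    return hashed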
|
import numpy as np
import cv2
# create a VideoCapture object
cap = cv2.VideoCapture('kntu-computer.avi')
# sometimes this is needed:
#if not cap.isOpened():
# cap.open();
while True:
# Capture frame-by-frame
ret, I = cap.read()
    if not ret:  # end of video (perhaps)
break
# Display I
cv2.imshow('win1',I)
key = cv2.waitKey(33) # ~ 30 frames per second
if key & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
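# A companion sketch (not part of the original script): writing captured frames
# back out with cv2.VideoWriter. The XVID codec, the fallback fps and the file
# names are assumptions, not values taken from the original.
def save_video_demo(in_path='kntu-computer.avi', out_path='copy.avi'):
    cap = cv2.VideoCapture(in_path)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # some containers report 0
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    out = cv2.VideoWriter(out_path, fourcc, fps, size)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        out.write(frame)
    cap.release()
    out.release()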
|
"""
services.py: queries models to get required inputs to launch a task in the background.
- this script abstracts the functions specified in tasks.py
"""
import uuid
from io import StringIO
import os
import pandas as pd
from django.urls import reverse
from django.contrib.sites.shortcuts import get_current_site
from .tasks import run_simulation, send_mail, run_instantiate
from .helper import get_activation_url, convert
# datetime and simulationResults are referenced by the mail helpers further down;
# simulationResults is assumed to live in .models alongside the other tables.
from .models import (UserRegisterToken, UserPasswordResetToken, campusInstantiation, simulationParams, simulationResults)
import datetime
import json
from simulator.staticInst.config import configCreate
from django.contrib import messages
import logging
log = logging.getLogger('interface_log')
def send_template_email(recipient, subject, html_message, context):
# TODO: Enable this when the mail configurations are in place
# return send_mail.delay(recipient, subject, html_message, context)
return True
def send_activation_mail(request, user):
to_email = user.email
UserRegisterToken.objects.filter(user=user).delete()
    # TODO: check if it exists
user_token = UserRegisterToken.objects.create(
user=user,
token=uuid.uuid4())
subject = f"Campussim: New Account Activation for {user}"
html_message = f"""
Dear {user},
To activate your campussim user account, click on the following link:
"""
context = {
'protocol': request.is_secure() and 'https' or 'http',
'domain': get_current_site(request).domain,
'url': get_activation_url(user_token.token, request.GET.get('origin', None)),
'full_name': user.get_full_name(),
}
log.info(f'Account activation email was sent to {to_email}')
send_template_email(to_email, subject, html_message, context)
def send_forgotten_password_email(request, user):
to_email = user.email
UserPasswordResetToken.objects.filter(user=user).delete()
    # TODO: check if it exists
user_token = UserPasswordResetToken.objects.create(
user=user,
token=uuid.uuid4())
    subject = f"Campussim: Password Reset for {user}"
html_message = f"""
Dear {user},
To reset the password for your campussim user account, click on the following link:
"""
context = {
'protocol': request.is_secure() and 'https' or 'http',
'domain': get_current_site(request).domain,
'url': reverse("user_password_reset", kwargs={"token": user_token.token}),
'full_name': user.get_full_name()
}
log.info(f'Forgot password link email was sent to {to_email}')
send_template_email(to_email, subject, html_message, context)
def updateTransCoeff(campusId, BETA):
transmission_coefficients_json = json.loads(campusInstantiation.objects.get(id=campusId).trans_coeff_file)
    for b in BETA:
        for e in transmission_coefficients_json:
            if e['type'] == b['type'] and e['beta'] != float(b['beta']):
                e['beta'] = float(b['beta'])  # TODO: Add ALPHA parameter when it is available
campusInstantiation.objects.filter(id=campusId).update(
trans_coeff_file=json.dumps(
transmission_coefficients_json,
default=convert
)
)
return True
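# Illustrative call with invented values: BETA is expected to be a list of dicts
# mirroring the entries stored in trans_coeff_file, e.g.
#   updateTransCoeff(campusId=1, BETA=[{'type': 'classroom', 'beta': '0.01'}])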
def addConfigJSON(obj, outPath):
min_group_size = int(obj.min_grp_size)
max_group_size = int(obj.max_grp_size)
beta_scaling_factor = int(obj.betaScale)
avg_num_assns = int(obj.avg_associations)
periodicity = int(obj.periodicity)
minimum_hostel_time = float(obj.minimum_hostel_time)
testing_capacity = int(obj.testing_capacity)
configJSON = configCreate(min_group_size, max_group_size, beta_scaling_factor, avg_num_assns, periodicity, minimum_hostel_time, testing_capacity)
    with open(f"{ outPath }/config.json", "w") as f:
        f.write(json.dumps(configJSON))
return True
def instantiateTask(request):
user = request.user
obj = campusInstantiation.get_latest(user=user) #gives id of the object
obj = campusInstantiation.objects.filter(created_by=user, id=obj.id)[0]
inputFiles = {
'students': pd.read_csv(StringIO(obj.inst_name.students_csv.read().decode('utf-8')), delimiter=',').to_dict(),
'class': pd.read_csv(StringIO(obj.inst_name.classes_csv.read().decode('utf-8')), delimiter=',').to_dict(),
'timetable': pd.read_csv(StringIO(obj.inst_name.timetable_csv.read().decode('utf-8')), delimiter=',', header=None, names=[i for i in range(24)]).to_dict(),
'staff': pd.read_csv(StringIO(obj.inst_name.staff_csv.read().decode('utf-8')), delimiter=',').to_dict(),
'mess': pd.read_csv(StringIO(obj.inst_name.mess_csv.read().decode('utf-8')), delimiter=',').to_dict(),
'common_areas': pd.read_csv(StringIO(obj.inst_name.common_areas_csv.read().decode('utf-8')), delimiter=',').to_dict(),
'campus_setup' : pd.read_csv(StringIO(obj.inst_name.campus_setup_csv.read().decode('utf-8')), delimiter=',').to_dict(),
'objid': obj.id
}
campusInstantiation.objects.filter(created_by=user, id=obj.id).update(status='Running')
run_instantiate.apply_async(queue='instQueue', kwargs={'inputFiles': json.dumps(inputFiles)})
return True
# if res.get():
# messages.success(request, f"instantiation job name: { obj.inst_name } is complete")
# log.info(f"instantiation job name: { obj.inst_name } is complete")
# else:
# messages.error(request, f"instantiation job name: { obj.inst_name } has failed. Please check the input files used.")
# log.error(f"instantiation job name: { obj.inst_name } has failed.")
def launchSimulationTask(request, campusId, BETA):
user = request.user
obj = simulationParams.get_latest(user=user) #gives id of the object
obj = simulationParams.objects.filter(created_by=user, id=obj.id)[0]
dirName = os.path.splitext(obj.campus_instantiation.agent_json.path)[0]
dirName = dirName.rsplit('/', 1)[0]
if not os.path.exists(dirName):
os.mkdir(dirName)
updateTransCoeff(campusId, BETA)
    with open(f"{ dirName }/{ obj.intervention.intv_name }.json", "w") as f:
        f.write(json.dumps(json.loads(obj.intervention.intv_json)))
    with open(f"{ dirName }/transmission_coefficients.json", 'w') as f:
        json.dump(json.loads(obj.campus_instantiation.trans_coeff_file), f, default=convert)
    with open(f"{ dirName }/testing_protocol.json", 'w') as f:
        json.dump(obj.testing_protocol.testing_protocol_file, f, default=convert)
addConfigJSON(obj, dirName)
simulationParams.objects.filter(created_by=user, id=obj.id).update(status='Queued')
res = run_simulation.apply_async(queue='simQueue', kwargs={'id': obj.id, 'dirName': dirName, 'enable_testing': obj.enable_testing, 'intv_name': obj.intervention.intv_name})
# if res.get():
# messages.success(request, f"Simulation job name: { obj.simulation_name } is complete")
# log.info(f"Simulation job name: { obj.simulation_name } is complete")
# else:
# messages.error(request, f"Simulation job name: { obj.simulation_name } has failed. Please check the inputs used.")
# log.error(f"Simulation job name: { obj.simulation_name } has failed.")
def send_result_available_email(request, user):
to_email = user.email
simulationResults.objects.filter(user=user, completed_at=datetime.datetime.now())
    subject = "Campussim: Simulation result update"
html_message = """
<html>
<h4>Simulation Result update from campussim</h4>
</html>
"""
context = {
'protocol': request.is_secure() and 'https' or 'http',
'domain': get_current_site(request).domain,
'full_name': user.full_name
}
    log.info(f'Simulation result email was sent to {to_email}')
send_template_email(to_email, subject, html_message, context)
def send_instantion_complete_mail(request, user):
to_email = user.email
campusInstantiation.objects.filter(user=user, completed_at=datetime.datetime.now())
subject = "Campussim"
html_message = """
<html>
    <h4>Instantiation update from campussim</h4>
</html>
"""
context = {
'protocol': request.is_secure() and 'https' or 'http',
'domain': get_current_site(request).domain,
'full_name': user.full_name
}
send_template_email(to_email, subject, html_message, context)
|
import _plotly_utils.basevalidators
class FitboundsValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="fitbounds", parent_name="layout.geo", **kwargs):
super(FitboundsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", [False, "locations", "geojson"]),
**kwargs
)
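if __name__ == "__main__":
    # Hedged sketch (not part of plotly's generated module): instantiating the
    # validator directly and coercing one of the permitted values.
    v = FitboundsValidator()
    print(v.validate_coerce("locations"))  # -> "locations"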
|
from enum import Enum
from typing import cast, Union, List
from typing_extensions import Literal
from pydantic import BaseModel, Field
from .constrained_types import UnitFloat, PositiveInt
from .shared_blocking_config import BlockingConfigBase
class PSigFilterConfigBase(BaseModel):
type: str
class PSigFilterRatioConfig(PSigFilterConfigBase):
type: Literal['ratio']
max: UnitFloat
min: UnitFloat = cast(UnitFloat, 0.0)
class PSigFilterCountConfig(PSigFilterConfigBase):
type: Literal['count']
max: PositiveInt
min: PositiveInt
class PSigBlockingBFFilterConfig(BaseModel):
type: Literal['bloom filter']
number_of_hash_functions: int = Field(..., alias='number-hash-functions')
bloom_filter_length: int = Field(..., alias='bf-len')
class PSigSignatureTypes(str, Enum):
chars_at = 'characters-at'
feature_value = 'feature-value'
metaphone = 'metaphone'
class PSigSignatureSpecBase(BaseModel):
type: str
feature: Union[int, str]
class PSigCharsAtSignatureConfig(BaseModel):
pos: List[Union[PositiveInt, str]]
class PSigCharsAtSignatureSpec(PSigSignatureSpecBase):
type: Literal[PSigSignatureTypes.chars_at] = Field(..., name='type', alias='type')
config: PSigCharsAtSignatureConfig
class PSigMetaphoneSignatureSpec(PSigSignatureSpecBase):
type: Literal[PSigSignatureTypes.metaphone]
class PSigFeatureValueSignatureSpec(PSigSignatureSpecBase):
type: Literal[PSigSignatureTypes.feature_value] = Field(..., name='type', alias='type')
PSigSignatureModel = List[Union[
PSigCharsAtSignatureSpec,
PSigMetaphoneSignatureSpec,
PSigFeatureValueSignatureSpec
]]
class PSigConfig(BlockingConfigBase):
filter: Union[PSigFilterRatioConfig, PSigFilterCountConfig]
blocking_filter: PSigBlockingBFFilterConfig = Field(..., alias='blocking-filter')
signatures: List[PSigSignatureModel] = Field(..., alias='signatureSpecs')
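if __name__ == '__main__':
    # Hedged usage sketch with invented values: pydantic fills the alias'd fields
    # from their dashed names, and the Literal `type` discriminates the spec classes.
    spec = PSigCharsAtSignatureSpec.parse_obj({
        'type': PSigSignatureTypes.chars_at,  # equals the string 'characters-at'
        'feature': 'given name',
        'config': {'pos': [1, 'last']},
    })
    print(spec.config.pos)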
|
"""
spider to get practices of china.cssc.org
"""
import requests
from bs4 import BeautifulSoup
from flask import Flask, request
app = Flask(__name__)
# module-level `global` statements are no-ops; plain assignments suffice here
href_list = []
base_url = ""
def get_raw_html(url):
"""get raw html from url"""
response = requests.get(url)
response.encoding = 'utf-8'
return response.text
def reconstruct_html(html):
"""reconstruct html, add base url to href"""
soup = BeautifulSoup(html, 'lxml')
head = soup.select('head')[0]
chapter_list_html = soup.find(class_='listmain')
global href_list
href_list = [i.get('href') for i in chapter_list_html.find_all('a')]
chapter_name_list = [i.get_text() for i in chapter_list_html.find_all('a')]
# print(chapter_name_list)
new_html = ""
for i in range(len(chapter_name_list)):
# href_tag = "<dd><a href=\"" + base_url.rstrip("index.html") + str(href_list[i]) + "\">"
        # link each chapter to this app's /<chapter_index> route
        href_tag = "<dd><a href=\"/{}\">".format(i)
new_html += href_tag + chapter_name_list[i] + "</a></dd>"
return str(head) + new_html
def get_content_html(html):
"""select content html from html"""
soup = BeautifulSoup(html, 'lxml')
content_html = soup.find(class_='showtxt')
print(type(content_html))
return content_html
@app.route('/')
def index():
html = """
<!DOCTYPE html>
<html>
<body>
<form action="/url" method="post">
URL:<br>
<input type="text" name="url" >
<br>
<input type="submit" value="Submit">
</form>
</body>
</html>
"""
return html
@app.route('/url', methods=["POST", "GET"])
def get_url():
url = request.form.get('url')
global base_url
    # str.rstrip strips a trailing character set, not a suffix, so slice instead
    base_url = url[:-len('index.html')] if url.endswith('index.html') else url
html = get_raw_html(url)
new_html = reconstruct_html(html)
# print(new_html)
return new_html
@app.route('/<int:chapter_index>')
def link2chapter(chapter_index):
# base_url = "http://www.shuquge.com/txt/8400/"
global base_url
url = base_url + href_list[chapter_index]
print(url)
html = get_raw_html(url)
content_html = get_content_html(html)
return str(content_html)
if __name__ == "__main__":
# pre_url = "http://www.shuquge.com/txt/63542/"
# for i in range(1, 3):
# print(i)
# full_url = pre_url + "-{}.html".format(i)
# html = get_html(full_url)
# content = get_certain_joke(html)
# url = "http://www.shuquge.com/txt/63542/index.html"
# html = get_html(url)
# content = get_chapter_list(html)
app.run()
|
""" Tests for calculations
"""
from __future__ import print_function
from __future__ import absolute_import
import aiida_qeq.tests as tests
from aiida.engine import run
# pylint: disable=too-many-arguments,unused-argument
def test_submit(aiida_profile, clear_database, ionization_file, charge_file,
hkust1_cif, qeq_parameters, basic_options):
"""Test submitting a calculation"""
from aiida.plugins import CalculationFactory
EqeqCalculation = CalculationFactory('qeq.qeq')
inputs = {
'code': tests.get_code(entry_point='qeq.qeq'),
'structure': hkust1_cif,
'parameters': qeq_parameters,
'metadata': {
'options': basic_options,
'label': "aiida_qeq QEQ test",
'description': "Test QEQ job submission with the aiida_qeq plugin",
},
}
result = run(EqeqCalculation, **inputs)
cif_content = result['structure_with_charges'].get_content()
cu_line = 'Cu Cu 0.2850000 0.2850000 0.0000000 0.6334535'
assert cu_line in cif_content
|
"""
Docstring
.. pii: A long description that
spans multiple
lines
.. pii_types: id, name
"""
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 10:20:10 2019

@author: lwg
"""
# http://www.numpy.org/
import numpy as np
import matplotlib.pyplot as plt


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)  # y-axis range
plt.show()
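

# Note: 1 / (1 + np.exp(-x)) can overflow in np.exp for large negative x.
# A numerically stable variant (a sketch; scipy.special.expit does the same
# thing) splits the formula by sign:
def stable_sigmoid(x):
    """Sigmoid that avoids overflow in np.exp for large |x| (array input)."""
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1 / (1 + np.exp(-x[pos]))
    expx = np.exp(x[~pos])  # safe: x < 0 here, so exp(x) <= 1
    out[~pos] = expx / (1 + expx)
    return out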
|
from forcuanteller.main import loader, transformer, reporter, sender
from forcuanteller.main.utils.config import config
from forcuanteller.main.utils.gmail import validate_gmail
from forcuanteller.main.utils.logger import logger
from forcuanteller.main.utils.runner import runner
import schedule
import time
from forcuanteller.main.utils.schedule import get_scheduler


def task(sender_address, sender_password, receiver_address):
    run_id = runner.run_id
    logger.info("Run ID : {}".format(run_id))
    logger.info("Running loader...")
    loader.main(run_id)
    logger.info("Finishing loader...")
    logger.info("Running transformer...")
    transformer.main(run_id)
    logger.info("Finishing transformer...")
    logger.info("Running reporter...")
    reporter.main(run_id)
    logger.info("Finishing reporter...")
    logger.info("Running sender...")
    sender.main(run_id, sender_address, sender_password, receiver_address)
    logger.info("Finishing sender...")


def main():
    sender_address = input("Input your sender gmail address: ")
    sender_password = input("Input your sender gmail password: ")
    receiver_address = input("Input your receiver gmail address: ")
    validate_gmail(sender_address)
    validate_gmail(receiver_address)
    if config.schedule is None:
        # No schedule configured: run the pipeline once.
        task(sender_address, sender_password, receiver_address)
    else:
        # Register the pipeline with the scheduler and keep polling it.
        scheduler = get_scheduler(config.schedule)
        scheduler.do(
            task, sender_address=sender_address, sender_password=sender_password, receiver_address=receiver_address
        )
        while True:
            schedule.run_pending()
            time.sleep(1)


if __name__ == "__main__":
    main()
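

# Note: input() echoes the password to the terminal. The standard library's
# getpass module avoids that; a drop-in replacement for the password prompt
# in main() could look like this:
def prompt_password(prompt="Input your sender gmail password: "):
    """Read a password without echoing it (stdlib getpass)."""
    import getpass
    return getpass.getpass(prompt)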
|
#!/usr/bin/env python3
import sys
import os.path
from setuptools import setup
from subprocess import check_output
import platform
import warnings

SDL_VERSION_NEEDED = (2, 0, 5)


def get_version():
    """Get the current version from a git tag, or by reading tcod/version.py"""
    try:
        tag = check_output(
            ["git", "describe", "--abbrev=0"], universal_newlines=True
        ).strip()
        assert not tag.startswith("v")
        version = tag

        # Add .devNN if there are commits past the latest tag.
        log = check_output(
            ["git", "log", "%s..HEAD" % tag, "--oneline"],
            universal_newlines=True,
        )
        commits_since_tag = log.count("\n")
        if commits_since_tag:
            version += ".dev%i" % commits_since_tag

        # Update tcod/version.py.
        with open("tcod/version.py", "w") as f:
            f.write('__version__ = "%s"\n' % version)
        return version
    except Exception:
        try:
            exec(open("tcod/version.py").read(), globals())
            return __version__
        except FileNotFoundError:
            warnings.warn(
                "Unknown version: "
                "Not in a Git repository and not from a sdist bundle or wheel."
            )
            return "0.0.0"


is_pypy = platform.python_implementation() == "PyPy"


def get_package_data():
    """Get data files which will be included in the main tcod/ directory."""
    BITSIZE, LINKAGE = platform.architecture()
    files = [
        "py.typed",
        "lib/LIBTCOD-CREDITS.txt",
        "lib/LIBTCOD-LICENSE.txt",
        "lib/README-SDL.txt",
    ]
    if "win32" in sys.platform:
        if BITSIZE == "32bit":
            files += ["x86/SDL2.dll"]
        else:
            files += ["x64/SDL2.dll"]
    if sys.platform == "darwin":
        files += ["SDL2.framework/Versions/A/SDL2"]
    return files


def get_long_description():
    """Return this project's description."""
    with open("README.rst", "r") as f:
        readme = f.read()
    with open("CHANGELOG.rst", "r") as f:
        changelog = f.read()
    changelog = changelog.replace("\nUnreleased\n------------------", "")
    return "\n".join([readme, changelog])


def check_sdl_version():
    """Check the local SDL version on Linux distributions."""
    if not sys.platform.startswith("linux"):
        return
    needed_version = "%i.%i.%i" % SDL_VERSION_NEEDED
    try:
        sdl_version_str = check_output(
            ["sdl2-config", "--version"], universal_newlines=True
        ).strip()
    except FileNotFoundError:
        raise RuntimeError(
            "libsdl2-dev or equivalent must be installed on your system"
            " and must be at least version %s."
            "\nsdl2-config must be on PATH." % (needed_version,)
        )
    print("Found SDL %s." % (sdl_version_str,))
    sdl_version = tuple(int(s) for s in sdl_version_str.split("."))
    if sdl_version < SDL_VERSION_NEEDED:
        raise RuntimeError(
            "SDL version must be at least %s, (found %s)"
            % (needed_version, sdl_version_str)
        )


if sys.version_info < (3, 5):
    error = """
This version of python-tcod only supports Python 3.5 and above.
The last version supporting Python 2.7/3.4 was 'tcod==6.0.7'.

The end-of-life for Python 2 is the year 2020.
https://pythonclock.org/

Python {py} detected.
""".format(
        py=".".join([str(v) for v in sys.version_info[:3]])
    )
    print(error)
    sys.exit(1)

if not os.path.exists("libtcod/src"):
    print("Libtcod submodule is uninitialized.")
    print("Did you forget to run 'git submodule update --init'?")
    sys.exit(1)

check_sdl_version()

needs_pytest = {"pytest", "test", "ptr"}.intersection(sys.argv)
pytest_runner = ["pytest-runner"] if needs_pytest else []

setup(
    name="tcod",
    version=get_version(),
    author="Kyle Stewart",
    author_email="4B796C65+tdl@gmail.com",
    description="Pythonic cffi port of libtcod.",
    long_description=get_long_description(),
    url="https://github.com/libtcod/python-tcod",
    project_urls={
        "Documentation": "https://python-tcod.readthedocs.io",
        "Changelog": "https://github.com/libtcod/python-tcod/blob/develop/CHANGELOG.rst",
        "Source": "https://github.com/libtcod/python-tcod",
        "Tracker": "https://github.com/libtcod/python-tcod/issues",
    },
    py_modules=["libtcodpy"],
    packages=["tdl", "tcod"],
    package_data={"tdl": ["*.png"], "tcod": get_package_data()},
    python_requires=">=3.5",
    install_requires=[
        "cffi~=1.13",  # Also required by pyproject.toml.
        "numpy~=1.10" if not is_pypy else "",
    ],
    cffi_modules=["build_libtcod.py:ffi"],
    setup_requires=pytest_runner,
    tests_require=["pytest", "pytest-cov", "pytest-benchmark"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Win32 (MS Windows)",
        "Environment :: MacOS X",
        "Environment :: X11 Applications",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Operating System :: POSIX",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Games/Entertainment",
        "Topic :: Multimedia :: Graphics",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords="roguelike cffi Unicode libtcod fov heightmap namegen",
    platforms=["Windows", "MacOS", "Linux"],
    license="Simplified BSD License",
)
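

# Sketch of the ".devN" suffix rule used in get_version() above, with
# hypothetical git output baked in. Never called during setup; illustration only.
def _dev_version_example():
    tag = "11.15"                            # e.g. `git describe --abbrev=0`
    log = "abc123 fix bug\ndef456 tweak\n"   # `git log 11.15..HEAD --oneline`
    commits_since_tag = log.count("\n")      # 2 commits past the tag
    return tag + (".dev%i" % commits_since_tag if commits_since_tag else "")
    # -> "11.15.dev2"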
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound
from django.shortcuts import render
import redis
from django.views.decorators.csrf import csrf_exempt

r_db = redis.StrictRedis(host='localhost', port=6379, db=0)


# Create your views here.
def index(request):
    result = r_db.hgetall('movies')
    output = '<body>'
    for item in result:
        output += '<p>Movie: ' + item + '<br />Actors: ' + r_db.hget('movies', item) + '</p>'
    output += '</body>'
    return HttpResponse(output)


@csrf_exempt
def post(request):
    # Accept the parameters from either the query string or the POST body.
    movie_name = request.GET.get('movie')
    actors = request.GET.get('actors')
    if movie_name is None:
        movie_name = request.POST.get('movie')
        actors = request.POST.get('actors')
    if movie_name is None or actors is None:
        return HttpResponseBadRequest('<body>'
                                      '<h1>Missing parameter(s)!</h1>'
                                      '<p>Requires request parameters \'movie\' and \'actors\'</p>'
                                      '</body>')
    r_db.hset('movies', movie_name, actors)
    return HttpResponse('<body><h1>added!</h1></body>')


def init(request):
    r_db.hset('movies', 'The Godfather', 'Al Pacino, Marlon Brando, Robert Duvall')
    r_db.hset('movies', 'Schindler\'s List', 'Liam Neeson, Ralph Fiennes, Ben Kingsley')
    r_db.hset('movies', 'Saving Private Ryan', 'Tom Hanks, Matt Damon, Vin Diesel')
    r_db.hset('movies', 'Back to the Future', 'Michael J. Fox, Christopher Lloyd, Lea Thompson')
    r_db.hset('movies', 'Casablanca', 'Ingrid Bergman, Humphrey Bogart, Peter Lorre')
    r_db.hset('movies', 'The Big Lebowski', 'Julianne Moore, Jeff Bridges, Tara Reid')
    return HttpResponse('<body><h1>Default values added!</h1></body>')


@csrf_exempt
def search(request):
    if request.method == 'POST':
        movie = request.POST.get("movie")
        movie_actors = r_db.hget('movies', movie)
        if not movie_actors:
            return HttpResponseNotFound('<body><p>Movie not known</p></body>')
        return HttpResponse('<body>'
                            '<p>'
                            'movie: ' + movie + '<br />'
                            'actors: ' + movie_actors +
                            '</p>'
                            '</body>')
    else:
        return HttpResponseBadRequest('POST REQUEST EXPECTED')
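

# Note: under Python 3, redis-py returns bytes from hget()/hgetall(), which
# would break the string concatenation above. Constructing the client with
# decode_responses=True makes it return str instead -- e.g.:
def make_text_client(host='localhost', port=6379, db=0):
    """Redis client whose replies are decoded to str (Python 3 friendly)."""
    return redis.StrictRedis(host=host, port=port, db=db, decode_responses=True)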
|
# A small script which shows some of the possibilities of the
# LKOpticalTrack filter.
# LKOpticalTrack is a good low-resource algorithm which is good at tracking
# points through a video stream.
from org.myrobotlab.opencv import OpenCVFilterLKOpticalTrack

# create services
opencv = Runtime.createAndStart("opencv", "OpenCV")
# add listener so data comes back to python
opencv.addListener("publishOpenCVData", "python", "input")

lkfilter = opencv.getFilter("LKOpticalTrack")
if lkfilter is None:
    lkfilter = OpenCVFilterLKOpticalTrack()
    opencv.addFilter(lkfilter)

# other options:
# if you want to get pixel values instead of floats --
# floats are nice because the value doesn't change even if the
# resolution does
# lkfilter.useFloatValues = False  # default is True
# lkfilter.needTrackingPoints = True  # default is False

lkfilter.samplePoint(0.5, 0.5)  # programmatically sets a point


# a set of points can come back from LKOptical
def input():
    points = msg_opencv_publishOpenCVData.data[0].getPoints()
    if points is not None:
        print points
        if points.size() > 0:
            print points.get(0).x, points.get(0).y


opencv.capture()
|