| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
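Each record below follows this schema. As a quick orientation (an editorial sketch, not part of the dump): rows of this shape can be streamed and filtered with the `datasets` library, assuming the dump is hosted on the Hugging Face Hub; the path "user/dataset" is a placeholder, not the real dataset name.

# Editorial sketch: stream rows with the schema above and filter on metadata.
# "user/dataset" is a placeholder path, not the actual dataset name.
from datasets import load_dataset

rows = load_dataset("user/dataset", split="train", streaming=True)
for row in rows:
    if row["language"] == "Python" and not row["is_vendor"] and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])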
blob_id: ef98c254494f521be7c9abced196323637b851d9 | directory_id: ea0829e544b87fe6b58ef84b96ba90e036eda438 | content_id: 077cc0f3268c27a0066e60b7b7584b1526186ed3
path: /Quicksort.py | repo_name: Najones19746/Quicksort | branch_name: refs/heads/master
snapshot_id: 71bdf0fd0b770c6c1fef469c81a2dbbd3b19599c | revision_id: 0de906218d3ae1b0078b1abd2e4ea802b409a292
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-04-24T16:02:23.828684 | revision_date: 2015-05-15T04:57:56 | committer_date: 2015-05-15T04:57:56
github_id: 35,654,340 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 556 | extension: py
content:
__author__ = 'Nick Jones'


def quick(x):
    # Recursive quicksort, partitioning around the first element as pivot.
    less = []
    pivotList = []
    more = []
    if len(x) <= 1:
        return x
    else:
        pivot = x[0]
        for number in x:
            if number < pivot:
                less.append(number)
            elif number > pivot:
                more.append(number)
            else:
                pivotList.append(number)
        less = quick(less)
        more = quick(more)
        return less + pivotList + more


# Read whitespace-separated integers from stdin, sort them, and print (Python 3).
numbers = [int(n) for n in input().split()]
print(quick(numbers))
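A quick check of the partition step above (an editorial aside, not part of the repository file):

# quick([3, 1, 2, 3]) partitions around pivot 3:
# less=[1, 2], pivotList=[3, 3], more=[] -> [1, 2, 3, 3]
assert quick([3, 1, 2, 3]) == [1, 2, 3, 3]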
authors: ["najones19746@pluto.dsu.edu"] | author_id: najones19746@pluto.dsu.edu

blob_id: a08c74244ee8e9ac57ff5813274dc481cce384cf | directory_id: 8cdb186042f7169053c663a197e5dc6540658b29 | content_id: 20e2ae1645465d44a560df4e6e9da8e0d0cd2769
path: /forms.py | repo_name: jmc1284/flask-app | branch_name: refs/heads/master
snapshot_id: 5b95a55ace458b9c6b65929f852d29ae26f250cd | revision_id: 5507a810ee6e64084d548bff4b8902d80f54656d
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-05-10T18:50:07.871494 | revision_date: 2020-04-04T18:53:41 | committer_date: 2020-04-04T18:53:41
github_id: 252,254,726 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: 2023-05-01T21:23:09 | gha_created_at: 2020-04-01T18:24:37 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,199 | extension: py
content:
from flask_wtf import FlaskForm
from wtforms import IntegerField, SubmitField
from wtforms.validators import InputRequired, NumberRange


class BMIForm(FlaskForm):
    height_ft = IntegerField('Height Feet', validators=[InputRequired(message='Please enter a valid integer'), NumberRange(min=1)])
    height_in = IntegerField('Height Inches', validators=[InputRequired(message='Please enter a valid integer'), NumberRange(min=0, max=11)])
    weight = IntegerField('Weight', validators=[InputRequired(message='Please enter a valid integer'), NumberRange(min=1)])
    submit = SubmitField('Submit')


class RetirementForm(FlaskForm):
    age = IntegerField('Age', validators=[InputRequired(message='Please enter a valid integer'), NumberRange(min=1)])
    salary = IntegerField('Annual Salary', validators=[InputRequired(message='Please enter a valid integer'), NumberRange(min=1)])
    percentage = IntegerField('Percentage Saved Annually', validators=[InputRequired(message='Please enter a valid integer'), NumberRange(min=1, max=100)])
    goal = IntegerField('Savings Goal', validators=[InputRequired(message='Please enter a valid integer'), NumberRange(min=1)])
    submit = SubmitField('Submit')
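For context, a minimal sketch (not from this repo) of how a FlaskForm like BMIForm is typically consumed in a view; the route, template name, and secret key are assumptions:

# Hypothetical Flask view using BMIForm above; route/template names are assumed.
from flask import Flask, render_template

app = Flask(__name__)
app.config["SECRET_KEY"] = "dev"  # required by Flask-WTF's CSRF protection

@app.route("/bmi", methods=["GET", "POST"])
def bmi():
    form = BMIForm()
    if form.validate_on_submit():
        inches = form.height_ft.data * 12 + form.height_in.data
        bmi_value = 703 * form.weight.data / inches**2  # imperial BMI formula
        return render_template("bmi.html", form=form, bmi=round(bmi_value, 1))
    return render_template("bmi.html", form=form)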
authors: ["jmc1284@msstate.edu"] | author_id: jmc1284@msstate.edu

blob_id: 2f377b00c1bfc78229f470886bc75aa3f8c9e3c5 | directory_id: 6c13c12bc531992716aa4e291a9ddef4dcb65131 | content_id: 2cfc05ad2505404e8638e29462a5df1d07c601d1
path: /motsfinder/metric/discrete/metric.py | repo_name: daniel-dpk/distorted-motsfinder-public | branch_name: refs/heads/master
snapshot_id: 9846ed0eb7977b3d0c7ab8649b79eb64c9611e14 | revision_id: 8c2eec174c755c55b26b568243e58c2956a35257
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2021-06-11T07:13:21.288873 | revision_date: 2021-04-14T14:28:53 | committer_date: 2021-04-14T14:28:53
github_id: 159,724,600 | star_events_count: 4 | fork_events_count: 1
gha_event_created_at: 2021-04-01T08:41:18 | gha_created_at: 2018-11-29T20:40:00 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13,522 | extension: py
content:
r"""@package motsfinder.metric.discrete.metric
Base class for discrete metrics.
The DiscreteMetric class is an abstract class implementing most of the
functionality required for a ..base._ThreeMetric. The missing part is the
definition of actual numerical data. There are currently two implementations
of this abstract class, serving at the same time as examples:
* .discretize.DiscretizedMetric
* ..simulation.siometric.SioMetric
"""
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from six import add_metaclass
import numpy as np
from ...utils import save_to_file, load_from_file
from ..base import _ThreeMetric
__all__ = [
"DiscreteMetric",
]
@add_metaclass(ABCMeta)
class DiscreteMetric(_ThreeMetric):
r"""Base class for discrete axisymmetric metrics.
This subclass of ..base._ThreeMetric implements the memory management of
numerical data for a metric and other fields defining the geometry and
embedding of the slice into spacetime.
Subclasses should implement:
* _get_metric() - constructing the metric as a .tensors.DiscreteSym2TensorField
* all_field_objects() - return all fields as a list
The reason to have the latter of the two is to allow metrics that don't
supply a lapse and/or shift field but still have an easy way to keep track
of all fields (for caching and memory management purposes).
Optionally, subclasses may implement:
* get_curv() - extrinsic curvature (.tensors.DiscreteSym2TensorField)
* get_lapse() - lapse function (.tensors.DiscreteScalarField)
* get_shift() - lapse function (.tensors.DiscreteVectorField)
* get_dtlapse() - (.tensors.DiscreteScalarField)
* get_dtshift() - (.tensors.DiscreteVectorField)
If all of these are supplied, then the full 4-metric can be evaluated on
the slice.
"""
def __init__(self):
r"""This base constructor initializes the properties."""
super(DiscreteMetric, self).__init__()
## The metric discrete field object.
self._metric_field = None
## Whether all matrices should be saved.
self._save_full_data = False
@abstractmethod
def _get_metric(self):
r"""Abstract method to create/load numerical metric data.
This method should return a .tensors.DiscreteSym2TensorField built
from the component matrices of the metric. It is called only once
(lazily) and the object is *not* destroyed even when calling
unload_data(). Instead, the field's unload_data() method is called.
This method is the hook for subclasses to implement their method of
generating/loading the numerical data.
"""
pass
@abstractmethod
def all_field_objects(self):
r"""Abstract method supplying all defined field objects as a list.
See .discretize.DiscretizedMetric for a simple example.
"""
pass
@property
def field(self):
r"""Field attribute containing the actual field object.
The object is lazily loaded (i.e. on first access) and kept as
instance attribute. Note that this access does not imply that data is
loaded or generated. This is handled by the field object itself.
The result is a .tensors.DiscreteSym2TensorField.
"""
if self._metric_field is None:
self._metric_field = self._get_metric()
return self._metric_field
def set_interpolation(self, interpolation):
r"""Set the interpolation for all fields/components.
Refer to .patch.DataPatch.set_interpolation() for details.
"""
for field in self.all_field_objects():
if field:
field.set_interpolation(interpolation)
def set_fd_order(self, fd_order):
r"""Set the finite difference derivative order of accuracy."""
for field in self.all_field_objects():
if field:
field.set_fd_order(fd_order)
@contextmanager
def temp_interpolation(self, interpolation, fd_order=None):
prev_interp = self.field.components[0].get_interpolation()
prev_fd_order = self.field.components[0].fd_order
try:
self.set_interpolation(interpolation)
if fd_order is not None:
self.set_fd_order(fd_order)
yield
finally:
self.set_interpolation(prev_interp)
if fd_order is not None:
self.set_fd_order(prev_fd_order)
@property
def save_full_data(self):
r"""Read/write property specifying whether the full grid data should
be stored.
This is `False` by default. If set to `True` instead, saving this
object (or more generally "pickling" it) will include the numerical
data on the full grid. For large slice data, this will basically store
the full slice.
"""
return self._save_full_data
@save_full_data.setter
def save_full_data(self, value):
self._save_full_data = value
def save(self, filename, full_data=False, overwrite=False, verbose=True):
r"""Save the metric to disk.
@param filename
The file to store the data in. The extension ``'.npy'`` will be
added if not already there.
@param full_data
Whether to store all the matrix data (if `True`) or regenerate it
on-demand after loading (if `False`). Default is `False`.
@param overwrite
Whether to overwrite an existing file with the same name. If
`False` (default) and such a file exists, a `RuntimeError` is
raised.
@param verbose
Whether to print a message upon success. Default is `True`.
"""
prev_value = self.save_full_data
try:
self.__dict__['_prev_save_full_data'] = prev_value
self.save_full_data = full_data
if full_data:
self.load_data()
save_to_file(
filename, self, overwrite=overwrite, verbose=verbose,
showname=self.__class__.__name__
)
finally:
self.save_full_data = prev_value
self.__dict__.pop('_prev_save_full_data')
@staticmethod
def load(filename):
r"""Static function to load a metric from disk."""
metric = load_from_file(filename)
metric.save_full_data = metric.__dict__.pop(
'_prev_save_full_data', False
)
return metric
def constraints(self, point, norm=False):
r"""Compute the Hamiltonian and momentum constraints.
If the constraints are satisfied exactly, the returned values will be
zero. However, for numerical simulation data this will rarely ever be
the case. The returned values can then be used to determine the
accuracy of the simulation, e.g. by comparing them at different
spatial resolutions (grid densities).
Note that this function is not optimized in any way and thus will
perform poorly when evaluated on a large number of points.
@return A 2-tuple ``(scal_constr, vec_constr)``, where `scal_constr`
is a float representing the Hamiltonian constraint
\f$K_{ab}K^{ab} - (K^c_{\ c})^2 - R\f$ and `vec_constr` the
momentum constraint \f$D^a(K_{ab} - g_{ab} K^c_{\ c})\f$. The
latter is a (3-D) covector (indices downstairs). Here, `D` is the
Levi-Civita covariant derivative compatible with `g`, the slice's
Riemannian 3-metric, `K` is the extrinsic curvature of the
slice and `R` the scalar curvature of the slice. If ``norm=True``,
the second element is a float, the `g`-norm of the momentum
constraint.
@param point
The point at which to compute the constraints.
@param norm
If `True`, compute the `g`-norm of the momentum constraint instead
of the covector. Default is `False`.
"""
g_inv = self.diff(point, inverse=True, diff=0)
curv = self.get_curv() # pylint: disable=assignment-from-none
K = curv(point)
R = self.ricci_scalar(point)
K_up = g_inv.dot(g_inv.dot(K).T).T
KK = K_up.dot(K.T).trace()
trK = g_inv.dot(K).trace()
scal_constr = KK - trK**2 - R
dg = self.diff(point, diff=1)
dK = np.asarray(curv(point, diff=1))
G = self.christoffel(point)
vec_constr = (
np.einsum('ac,cab', g_inv, dK)
- np.einsum('ac,ica,ib', g_inv, G, K)
- np.einsum('ac,icb,ai', g_inv, G, K)
- np.einsum('ac,bac', g_inv, dK)
+ np.einsum('bac,ac', dg, K_up)
)
if norm:
vec_constr = np.sqrt(g_inv.dot(vec_constr).dot(vec_constr))
return scal_constr, vec_constr
def load_data(self, *which):
r"""Load/generate the full numerical data.
Without arguments, all fields are loaded to memory. If one or more
arguments are given, only those fields are loaded. Possible arguments
are: ``metric, curv, lapse, shift, dtlapse, dtshift``.
"""
if len(which) > 1:
for field_name in which:
self.load_data(field_name)
return
field_name, = which if which else [None]
def _load(field):
if field:
field.load_data()
if field_name is None:
for field in self.all_field_objects():
_load(field)
elif field_name == "metric":
_load(self.field)
elif field_name == "curv":
_load(self.get_curv())
elif field_name == "lapse":
_load(self.get_lapse())
elif field_name == "shift":
_load(self.get_shift())
elif field_name == "dtlapse":
_load(self.get_dtlapse())
elif field_name == "dtshift":
_load(self.get_dtshift())
else:
raise ValueError("Unknown field: %s" % field_name)
def unload_data(self):
r"""Free memory from all numerical field matrix data."""
for field in self.all_field_objects():
if field:
field.unload_data()
self.reset_cache()
def release_file_handle(self):
r"""Convenience method to signal child classes they should release any file handles.
This does nothing by default. Subclasses may implement this method to
free access to any files currently opened by this class. It may be
called by users of this class in case they deem file access to be
finished (e.g. after loading all required data).
"""
pass
def reset_cache(self):
r"""Reset the cache of all fields."""
for field in self.all_field_objects():
if field:
field.reset_cache()
def grid(self, xz_plane=True, ghost=0, full_output=False):
r"""Convenience method delegating to .patch.DataPatch.grid()."""
return self.field.components[0].grid(
xz_plane=xz_plane, ghost=ghost, full_output=full_output
)
def snap_to_grid(self, point):
r"""Convenience method delegating to .patch.DataPatch.snap_to_grid()."""
return self.field.components[0].snap_to_grid(point)
@property
def shape(self):
r"""Shape of the domain, \ie of individual component matrices."""
return self.field.components[0].shape
@property
def box(self):
r"""Index bounding box of the full domain."""
return self.field.components[0].box
@property
def domain(self):
return self.field.components[0].domain
@property
def safe_domain(self):
return self.field.components[0].safe_domain
def component_matrix(self, i, j):
r"""Return the DataPatch of a component of the metric."""
gij = self.field.components
if i == j == 0:
return gij[0]
if i == j == 1:
return gij[3]
if i == j == 2:
return gij[5]
if j < i:
i, j = j, i
if i == 0 and j == 1:
return gij[1]
if i == 0 and j == 2:
return gij[2]
if i == 1 and j == 2:
return gij[4]
raise ValueError("Unknown component: (%s, %s)" % (i, j))
def _mat_at(self, point):
return self.diff(point, diff=0)
def diff(self, point, inverse=False, diff=1):
r"""Compute (derivatives of) the metric tensor at a given point.
@return For ``diff=0``, returns the 3x3 matrix representing the metric
interpolated at `point`, i.e. \f$g_{ij}\f$.
If ``diff=1``, returns ``dg[i,j,k]``, where the indices mean
\f$\partial_i g_{jk}\f$ and if ``diff=2``, returns
``ddg[i,j,k,l]`` with indices \f$\partial_i\partial_j g_{kl}\f$.
In each case, if ``inverse=True`` the inverse metric is used with
indices upstairs.
@param point
The point at which to compute.
@param inverse
Whether to compute (derivatives of) the inverse metric. Default is
`False`.
@param diff
Derivative order to compute. Default is `1`.
"""
if inverse:
return self._compute_inverse_diff(point, diff=diff)
return self.field(point, diff=diff)
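For orientation, an editorial sketch of the subclass contract described in the class docstring above; the constructor argument is a placeholder, not the project's real API (see .discretize.DiscretizedMetric for the actual example):

# Hypothetical minimal subclass; how the DiscreteSym2TensorField is built is
# a placeholder detail, not taken from the repository.
class MyDiscreteMetric(DiscreteMetric):
    def __init__(self, metric_field):
        super(MyDiscreteMetric, self).__init__()
        self._supplied_field = metric_field  # a .tensors.DiscreteSym2TensorField

    def _get_metric(self):
        # Called once, lazily, by the `field` property.
        return self._supplied_field

    def all_field_objects(self):
        # No lapse/shift/curvature supplied; only the metric field itself.
        return [self.field]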
authors: ["daniel.dpk@gmail.com"] | author_id: daniel.dpk@gmail.com

blob_id: 479f0b1077982bb0d93dca0a6bf8804036a0938c | directory_id: f35d8c6617822bbde2769b268e9b7b0a533c0c53 | content_id: f2540a4d999560074280fe56436804c0a29f5d22
path: /main.py | repo_name: DylanLaw03/CDDBot | branch_name: refs/heads/master
snapshot_id: decac51a936e921f9e95181bfdf5108cffd7f45f | revision_id: f9ac8bf67c9374e65cbf5cd47783f814a28e44f5
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-05-30T16:19:20.465471 | revision_date: 2021-06-13T03:54:30 | committer_date: 2021-06-13T03:54:30
github_id: 375,544,789 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,629 | extension: py
content:
'''
This program will scrape a page from the website cheap digital download, to find potentially profitable games.
Started on 6/9/21
'''
# import dependencies
from selenium import webdriver
import os
from datetime import date

row_xpath = '//*[@id="offer_offer"]'
HOME_STORE = 'G2A'
WORKING_DIRECTORY = 'c://Users//18022//Desktop//Python//CDD'
MIN_PROFIT = 1.7  # the minimum value for profit_margin to put a report in the flagged folder
BANNED_STORES = ['Microsoft', 'Epic Games', 'Steam', 'Gog.com', 'Kinguin', 'G2A']


def main():
    os.chdir(WORKING_DIRECTORY)
    # open link_getter file, and append that to urls
    urls = []
    link_getter_file_name = 'link_getter - ' + str(date.today()) + '.txt'
    with open(link_getter_file_name, 'r') as link_file:
        Lines = link_file.readlines()
        for line in Lines:
            urls.append(line)
    os.chdir('./reports')
    # create a folder for the date, and within that have two folders, one called flagged to hold listing_pages with a value >= MIN_PROFIT and one for all other reports
    os.mkdir(str(date.today()))
    # move to new directory
    os.chdir(str(date.today()))
    # make new folders
    os.mkdir("flagged")
    os.mkdir("all")
    # iterate through all urls
    for url in urls:
        url = url[:-2]
        dir_control = 0  # if set to 1, do not go back one directory at the end of the loop
        # use get_content to get a list of lists containing links and web elements
        content = get_content(url, row_xpath)
        game_name = url[33:]  # will be used for making folder
        text_doc_name = game_name + ".txt"
        # get price info
        price_info = get_price_info(content[0][1])  # first return item, second item in that list.
        # close web driver
        content[1].close()
        # verify that HOME_STORE is in price_info[0]
        if HOME_STORE in price_info[0]:
            # create listing page
            listing_page = create_listing_page(price_info)
            # If profit margin > min profit, move to flagged, if not move to all.
            if listing_page.profit_margin > MIN_PROFIT:
                os.chdir('flagged')
            else:
                os.chdir('all')
            # now create a text document with game_name as the name
        else:
            print(f'{HOME_STORE} listing was not found for {game_name}')
            dir_control = 1
            continue
        # open file and print info
        text_document = open(text_doc_name, 'w+')
        text_document.write(f'Report for {game_name}, created on {date.today()}\n\n\n')
        text_document.write(f'Profit Margin: {round(listing_page.get_profit_margin(), 2)}\n\n')
        # print info for home listing
        text_document.write('Home Store: \n')
        text_document.write('Store: ')
        text_document.write(listing_page.get_home_listing().get_store())
        text_document.write('\nVersion: ')
        text_document.write(listing_page.get_home_listing().get_version())
        text_document.write('\nRegion: ')
        text_document.write(listing_page.get_home_listing().get_region())
        text_document.write('\nPrice: ')
        text_document.write(str(listing_page.get_home_listing().get_price()))
        # print info for cheapest listing
        text_document.write('\n\nCheapest Store: \n')
        text_document.write('Store: ')
        text_document.write(listing_page.get_cheapest_listing().get_store())
        text_document.write('\nVersion: ')
        text_document.write(listing_page.get_cheapest_listing().get_version())
        text_document.write('\nRegion: ')
        text_document.write(listing_page.get_cheapest_listing().get_region())
        text_document.write('\nPrice: ')
        text_document.write(str(listing_page.get_cheapest_listing().get_price()))
        # Create header for all listings
        text_document.write('\n\n\nListings:\n\n')
        text_document.write(format('Store', '<20'))
        text_document.write(format('Version', '<20'))
        text_document.write(format('Region', '<20'))
        text_document.write(format('Price', '<20'))
        text_document.write(format('Profit Ratio', '<20'))
        text_document.write(format('\n'))
        for listing in listing_page.get_listings():
            text_document.write(format(listing.get_store(), '<20'))
            text_document.write(format(listing.get_version(), '<20'))
            text_document.write(format(listing.get_region(), '<20'))
            text_document.write('$')
            text_document.write(format(listing.get_price(), '<19'))
            text_document.write(format(round(listing_page.get_home_listing().get_price() / listing.get_price(), 2), '<15'))
            text_document.write('\n')
        text_document.close()
        # go back to dir for the day
        if dir_control == 0:
            os.chdir('..')
        print(f'Process Completed for {game_name}! The profit ratio was {round(listing_page.get_profit_margin(), 2)}')


def get_content(url, x_path):
    driver = webdriver.Firefox()
    url_info = []  # index 0 holds url, 1 holds the list of web elements
    driver.get(url)
    listings = driver.find_elements_by_xpath(x_path)
    url_info.append(url)
    url_info.append(listings)
    return url_info, driver
'''
This function opens the given url with webdriver, and gets the content for the specified xpath
@param url to open, and x_path to look for
@returns a list of WebElement object lists where index 0 is the link, and driver in index one, so that it can be closed
'''


def get_price_info(web_element_list):
    return_list = []
    store = []
    region = []
    version = []
    price = []
    for listing in web_element_list:
        loop_control = 0  # set to 1 when the price is found
        listing_text = listing.text
        listing_text = listing_text.split('\n')
        # append store, region, and version to their respective lists. located at index 0, 3, and 4.
        store.append(listing_text[0])
        region.append(listing_text[3])
        version.append(listing_text[4])
        for item in listing_text[5:]:
            if item[0] == '$' and loop_control == 0:
                loop_control = 1
                price.append(item[1:])
    return_list.append(store)
    return_list.append(region)
    return_list.append(version)
    return_list.append(price)
    return return_list
'''
This function takes a list of WebElements and parses their .text to find the store, region, version, and price
@param a list of WebElements
@return a list where index 0 is store, 1 is region, 2 is version, and 3 is price
'''


def create_listing_page(listing_info_list):
    loop_control = 0
    listings = []
    for listing in listing_info_list[0]:
        store = listing_info_list[0][loop_control]
        region = listing_info_list[1][loop_control]
        version = listing_info_list[2][loop_control]
        price = listing_info_list[3][loop_control]
        listings.append(Listing(store, region, version, price))
        loop_control += 1
    return Listing_Page(listings)
'''
This function creates a Listing_Page object
@param it takes a list from get_price_info where index 0 is store, 1 is region, 2 is version, and 3 is price
@return Listing_Page class
'''


class Listing():
    # The listing class will be used to make up the listing_page class. It contains price, store, region, and version for each listing.
    def __init__(self, store, region, version, price):
        self.store = store
        self.region = region
        self.version = version
        self.price = float(price)

    def get_store(self):
        return self.store

    def get_region(self):
        return self.region

    def get_version(self):
        return self.version

    def get_price(self):
        return self.price

    def equals(self, object2):
        if object2.get_region() == self.region and object2.get_version() == self.version:
            return True
        return False


class Listing_Page():
    # This class takes a list of listings that make up a whole page, and will contain metrics for determining if it is a good deal
    def __init__(self, listings):
        self.home_listing = None
        self.listings = listings
        for listing in self.listings:
            if listing.get_store() == HOME_STORE and self.home_listing == None:
                self.home_listing = listing
        # Go through each listing to find the lowest
        if self.home_listing != None:
            self.cheapest_listing = self.home_listing
            for listing in self.listings:
                # verify that price is lower than current cheapest, that store is not banned, and that the listing is equal to home store.
                if listing.get_price() < self.cheapest_listing.get_price() and listing.get_store() not in BANNED_STORES and listing.equals(self.home_listing):
                    self.cheapest_listing = listing
            self.profit_margin = self.home_listing.get_price() / self.cheapest_listing.get_price()
        # Find the listing that matches the name for HOME_STORE. Set it equal to self.home_listing

    def get_home_listing(self):
        return self.home_listing

    def get_home_price(self):
        # price of the HOME_STORE listing (self.home_price was never set)
        return self.home_listing.get_price()

    def get_cheapest_listing(self):
        return self.cheapest_listing

    def get_profit_margin(self):
        return self.profit_margin

    def get_listings(self):
        return self.listings


main()
authors: ["Dylan"] | author_id: Dylan

blob_id: 934f1a319faf97d1f5baf73106ca6dc15b89d1e5 | directory_id: dceda528f50ecdb744cc4a83f0ed53aead10ec88 | content_id: bc3dc680c3aaf755bf4585fbdb88653b9011ccea
path: /Scraper.py | repo_name: alrightalrightalrightalrightalright/etsy-scraper | branch_name: refs/heads/master
snapshot_id: 4a03819d0b6245706ecdd8dcf61b0e616447eb7b | revision_id: e77acbda8cee210ca95f08eafee5203e8d445026
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2023-04-05T11:25:02.637038 | revision_date: 2021-04-07T22:18:39 | committer_date: 2021-04-07T22:18:39
github_id: 353,975,787 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,231 | extension: py
content:
import requests
from bs4 import BeautifulSoup
import re
import db
import psycopg2
import Product

proxies = {
    'http': 'http://178.62.95.218:8080',
    'https': 'https://178.62.95.218:8080',
}


class Scraper:
    _session = requests.Session()
    _baseUrl = "https://www.etsy.com"
    _partialUrl = "/uk/listing/593579116/plant-enthusiast-bookmarks"
    _requestUrl = "https://www.etsy.com/uk/listing/772695061/brass-or-silver-leaf-bookmark-set"
    _headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "en-GB,en;q=0.9,en-US;q=0.8,en;q=0.7",
        'Host': 'www.etsy.com',
        'Connection': 'keep-alive',
        "Upgrade-Insecure-Requests": "1",
        "Cache-Control": "max-age=0",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "cross-site",
        "Sec-Fetch-User": "?1",
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36 OPR/71.0.3770.441"
    }

    def __init__(self):
        self._session.headers = self._headers
        # use GB proxies in order to get price currency right
        # or run app in GB.
        # self._session.proxies = proxies
        r = self._session.get(self._baseUrl)
        print('initial get: ', r.status_code)

    def __scrapeProduct(self, soup):
        '''Internal method of the scraping method where the pure scraping
        process happens. It returns the product object.

        :param soup: A bs4 object for scraping source
        :return: The product object whose data has been scraped. '''
        # more pictures of the product can be scraped easily using find_all.
        # there are also 2 versions of the image, one is normal the other is 2x zoomed
        link = soup.find_all("img", class_="wt-max-width-full" +
                             " wt-horizontal-center wt-vertical-center carousel-image wt-rounded")[0]["src"]
        title = soup.find("h1", {"data-listing-id": True}).text.replace("\n", "").strip()
        price = soup.find("p", class_="wt-text-title-03 wt-mr-xs-2").text.replace("\n", "").strip()
        raw_price = re.findall("\\d*\\.\\d*", price)[0].replace(".", ",")
        product = Product.Product(title, raw_price, link)
        return product

    def scrape(self, fullUrl):
        '''Scrapes a product given its URL. Scrapes the image link, title, and
        price of the product and stores them in a "Product" object. The object
        is then stored in the database with the existing configuration.

        :param fullUrl: The full url of the product to be scraped.
        '''
        fullUrl = fullUrl if fullUrl.startswith('https') else ('https://' + fullUrl)
        r = self._session.get(fullUrl)
        print('get: ', r.status_code)
        soup = BeautifulSoup(r.text, 'html.parser')
        product = self.__scrapeProduct(soup)
        db.insertProduct(product)
        # expensive, slow and not thread-safe
        product.id = db.getNextIndex()  # can use UUID for id and get rid of this
        return product
authors: ["33198855+alrightalrightalrightalrightalright@users.noreply.github.com"] | author_id: 33198855+alrightalrightalrightalrightalright@users.noreply.github.com

blob_id: c61c6f504d865b4d6caa828ddb7ebb7b0697cb9e | directory_id: 15bb8feae971512613c27ea755068dcb78a4af74 | content_id: 2fbea7b973d5348933e04cd295c514dd73c494dd
path: /locations/migrations/0001_initial.py | repo_name: CalebNash/ccs-final-project | branch_name: refs/heads/main
snapshot_id: a20dfe4e95c5f60730c9255ecbe7965865b03cd1 | revision_id: 83d94f58a14c07dc82f5e2aa47588b633b0b80bb
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-01-15T07:30:01.935037 | revision_date: 2020-11-20T15:45:20 | committer_date: 2020-11-20T15:45:20
github_id: 307,705,488 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 873 | extension: py
content:
# Generated by Django 3.1.2 on 2020-11-03 19:12

import django.contrib.postgres.fields
from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('address', models.CharField(max_length=255)),
                ('lat', models.CharField(max_length=255)),
                ('lng', models.CharField(max_length=255)),
                ('categories', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=100), blank=True, null=True, size=None)),
            ],
        ),
    ]
authors: ["calebnash@Calebs-MacBook-Pro.local"] | author_id: calebnash@Calebs-MacBook-Pro.local

blob_id: aef17ef6d1fcf70551b5b6d5eb30f70313fabed8 | directory_id: 9d497fa82d4fe570c5c637dde3b1d0f4c8956c35 | content_id: 4781290ae61ec1bed9c4b49047117cfd7c307110
path: /api/core/views.py | repo_name: mmourafiq/polyaxon-api | branch_name: refs/heads/master
snapshot_id: 1160b4e9cef3de8975b36c23971e67e1453dc3b9 | revision_id: 97eeb99b173f60593bb2302efe2bbcb11a82dc38
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2022-09-02T04:16:25.459919 | revision_date: 2017-07-14T10:03:32 | committer_date: 2017-07-14T10:03:32
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,664 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from rest_framework import status
from rest_framework.generics import ListAPIView, RetrieveAPIView, GenericAPIView, CreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView

from core.models import Experiment, PolyaxonModel, Estimator
from core.serialiazers import (
    ExperimentSerializer,
    ExperimentDetailSerializer,
    PolyaxonModelSerializer,
    PolyaxonModelDetailSerializer,
    EstimatorSerializer,
    EstimatorDetailSerializer,
    StatusSerializer)
from core.tasks import start_experiment, get_experiment_run_status


class ExperimentListView(ListAPIView):
    queryset = Experiment.objects.all()
    serializer_class = ExperimentSerializer


class ExperimentDetailView(RetrieveAPIView):
    queryset = Experiment.objects.all()
    serializer_class = ExperimentDetailSerializer


class ExperimentEstimatorDetailView(RetrieveAPIView):
    queryset = Experiment.objects.all()
    serializer_class = EstimatorDetailSerializer

    def get_object(self):
        obj = super(ExperimentEstimatorDetailView, self).get_object()
        return obj.estimator


class ExperimentModelDetailView(RetrieveAPIView):
    queryset = Experiment.objects.all()
    serializer_class = PolyaxonModelDetailSerializer

    def get_object(self):
        obj = super(ExperimentModelDetailView, self).get_object()
        return obj.model


class ExperimentStartView(CreateAPIView, RetrieveAPIView):
    queryset = Experiment.objects.all()
    serializer_class = StatusSerializer

    def retrieve(self, request, *args, **kwargs):
        obj = self.get_object()
        serializer = self.get_serializer(get_experiment_run_status(obj))
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        job_info = start_experiment(obj)
        if job_info['status'] == 'PENDING':
            return Response(status=status.HTTP_201_CREATED, data=job_info)
        return Response(job_info, status=status.HTTP_200_OK)


class EstimatorListView(ListAPIView):
    queryset = Estimator.objects.all()
    serializer_class = EstimatorSerializer


class EstimatorDetailView(RetrieveAPIView):
    queryset = Estimator.objects.all()
    serializer_class = EstimatorDetailSerializer


class PolyaxonModelListView(ListAPIView):
    queryset = PolyaxonModel.objects.all()
    serializer_class = PolyaxonModelSerializer


class PolyaxonModelDetailView(RetrieveAPIView):
    queryset = PolyaxonModel.objects.all()
    serializer_class = PolyaxonModelDetailSerializer
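To show how views like these are typically exposed, a hypothetical URL configuration (not from this repo; the route paths and names are assumptions, using the django.conf.urls API of that era):

# Hypothetical URL wiring for the views above; paths/names are illustrative.
from django.conf.urls import url

urlpatterns = [
    url(r'^experiments/$', ExperimentListView.as_view(), name='experiment-list'),
    url(r'^experiments/(?P<pk>\d+)/$', ExperimentDetailView.as_view(), name='experiment-detail'),
    url(r'^experiments/(?P<pk>\d+)/start/$', ExperimentStartView.as_view(), name='experiment-start'),
]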
authors: ["mouradmourafiq@gmail.com"] | author_id: mouradmourafiq@gmail.com

blob_id: 9ed19b923204882ca4ac3b4e52c5170da91b35b8 | directory_id: 7e07e6ab5144324dd3ff7e8be2c2085aa9c390d0 | content_id: 18898580119555b6316532870675931082a2dfe3
path: /titanic.py | repo_name: todavis/kaggle-titanic | branch_name: refs/heads/main
snapshot_id: 65f66dbc3cd9c4191032a79b559d25d0044c0465 | revision_id: 1d7ca537d46eb37363dc9bd043d6dd33e6cd379c
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-05-09T04:15:39.684032 | revision_date: 2021-05-26T17:46:25 | committer_date: 2021-05-26T17:46:25
github_id: 371,120,023 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,310 | extension: py
content:
# %% [code] {"jupyter":{"outputs_hidden":false},"execution":{"iopub.status.busy":"2021-05-26T17:39:55.046848Z","iopub.execute_input":"2021-05-26T17:39:55.047357Z","iopub.status.idle":"2021-05-26T17:39:55.058384Z","shell.execute_reply.started":"2021-05-26T17:39:55.047247Z","shell.execute_reply":"2021-05-26T17:39:55.057313Z"}}
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# %% [code] {"jupyter":{"outputs_hidden":false},"execution":{"iopub.status.busy":"2021-05-26T17:40:09.531361Z","iopub.execute_input":"2021-05-26T17:40:09.533419Z","iopub.status.idle":"2021-05-26T17:40:09.563806Z","shell.execute_reply.started":"2021-05-26T17:40:09.533377Z","shell.execute_reply":"2021-05-26T17:40:09.563066Z"}}
# data loader
class dataLoader:
    """
    Load data
    """
    def __init__(self, train_filename, test_filename, normalize=['Age', 'Fare']):
        self.train = pd.read_csv(train_filename)
        self.test = pd.read_csv(test_filename)
        for cts_variable in normalize:
            self.normalize(cts_variable)
        #self.remove_na()
        #self.train_label = self.train.pop('Survived')

    def remove_na(self):
        """ Remove empty or invalid elements"""
        self.train.fillna(0, inplace=True)
        self.test.fillna(0, inplace=True)

    def normalize(self, column_name):
        """ Normalize continuous data to zero mean and standard deviation of 1"""
        mu = self.train[column_name].mean()
        std = self.train[column_name].std()
        self.train[column_name] = (self.train[column_name] - mu) / std
        self.test[column_name] = (self.test[column_name] - mu) / std

    def summary(self):
        """ Output summary of data and first few rows"""
        print('Training set:')
        #print(self.train.head())
        print(self.train.describe())
        print('Testing set:')
        #print(self.test.head())
        print(self.test.describe())

raw_data = dataLoader('/kaggle/input/titanic/train.csv', '/kaggle/input/titanic/test.csv')
raw_data.train
#raw_data.test
test_ids = raw_data.test[['PassengerId']]
#test_ids

# %% [code] {"jupyter":{"outputs_hidden":false},"execution":{"iopub.status.busy":"2021-05-20T00:59:37.97603Z","iopub.execute_input":"2021-05-20T00:59:37.976434Z","iopub.status.idle":"2021-05-20T00:59:38.010467Z","shell.execute_reply.started":"2021-05-20T00:59:37.976398Z","shell.execute_reply":"2021-05-20T00:59:38.009226Z"}}
# remove features that are not useful for now
raw_data.train.drop(columns=['PassengerId', 'Ticket', 'Name'], inplace=True)
raw_data.test.drop(columns=['PassengerId', 'Ticket', 'Name'], inplace=True)
# take the first element of the cabin feature
raw_data.train['Cabin'] = raw_data.train['Cabin'].str[0]
raw_data.test['Cabin'] = raw_data.test['Cabin'].str[0]
# encode class features using one hot encoding - try combining test to get same number of splits
raw_data.train = pd.get_dummies(raw_data.train, columns=['Sex', 'Embarked', 'Cabin'], drop_first=True)
raw_data.test = pd.get_dummies(raw_data.test, columns=['Sex', 'Embarked', 'Cabin'], drop_first=True)
# remove nan values from age, ...
raw_data.remove_na()
train_label = raw_data.train.pop('Survived')
# ensure train and test sets have the same number of columns
missing_cols = set(raw_data.train.columns) - set(raw_data.test.columns)
# Add a missing column in test set with default value equal to 0
for c in missing_cols:
    raw_data.test[c] = 0
# Ensure the columns of the test set are in the same order as in the train set
raw_data.test = raw_data.test[raw_data.train.columns]

# %% [code] {"jupyter":{"outputs_hidden":false},"execution":{"iopub.status.busy":"2021-05-20T00:59:41.228705Z","iopub.execute_input":"2021-05-20T00:59:41.229075Z","iopub.status.idle":"2021-05-20T00:59:41.323234Z","shell.execute_reply.started":"2021-05-20T00:59:41.229043Z","shell.execute_reply":"2021-05-20T00:59:41.322127Z"}}
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(raw_data.train, train_label, test_size=0.3, random_state=0)

# create model LR
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=0)
model = model.fit(X_train, y_train)
test_acc = model.score(X_test, y_test)
test_acc

# %% [code] {"execution":{"iopub.status.busy":"2021-05-20T00:59:43.904763Z","iopub.execute_input":"2021-05-20T00:59:43.905167Z","iopub.status.idle":"2021-05-20T00:59:43.941731Z","shell.execute_reply.started":"2021-05-20T00:59:43.905124Z","shell.execute_reply":"2021-05-20T00:59:43.940554Z"}}
# create svm model
from sklearn import svm
model = svm.SVC(kernel='rbf', degree=2, verbose=True)
model = model.fit(X_train, y_train)
test_acc = model.score(X_test, y_test)
test_acc

# %% [code] {"execution":{"iopub.status.busy":"2021-05-20T00:59:46.161654Z","iopub.execute_input":"2021-05-20T00:59:46.162058Z","iopub.status.idle":"2021-05-20T00:59:46.350023Z","shell.execute_reply.started":"2021-05-20T00:59:46.162021Z","shell.execute_reply":"2021-05-20T00:59:46.348974Z"}}
# create RF model
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=80, max_depth=9, random_state=0)
model = model.fit(X_train, y_train)
test_acc = model.score(X_test, y_test)
test_acc

# %% [code] {"execution":{"iopub.status.busy":"2021-05-20T01:13:46.645901Z","iopub.execute_input":"2021-05-20T01:13:46.646345Z","iopub.status.idle":"2021-05-20T01:13:46.677923Z","shell.execute_reply.started":"2021-05-20T01:13:46.646278Z","shell.execute_reply":"2021-05-20T01:13:46.67689Z"}}
# save predictions from most recent model
test_pred = model.predict(raw_data.test)
#np.shape(test_pred)
submission = test_ids
submission["Survived"] = test_pred
submission.to_csv('/kaggle/working/submission.csv', index=False)
submission
authors: ["noreply@github.com"] | author_id: todavis.noreply@github.com

blob_id: 2dbfce2b253fb4765bfa5b96f64df1e8997b18ff | directory_id: a2c9edf20a9db52347e7aec7b6646bf110f4b922 | content_id: 9404ba5b99cf503cd185ba8e7b84d4ff43bdbcf9
path: /scripts/factorio-dump-blueprint | repo_name: dmitmel/dotfiles | branch_name: refs/heads/master
snapshot_id: 02e7289efacd4da21f01bb3fca27f93f06b51ab2 | revision_id: 9a8c24ff76b4c1ca13549d9ff383ca189ef00f5f
detected_licenses: ["MIT", "LicenseRef-scancode-warranty-disclaimer"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-03T09:35:02.331334 | revision_date: 2023-08-20T07:01:32 | committer_date: 2023-08-20T07:01:32
github_id: 117,337,066 | star_events_count: 88 | fork_events_count: 17
gha_event_created_at: 2023-08-20T06:50:17 | gha_created_at: 2018-01-13T11:10:25 | gha_language: Lua
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 445 | extension:
content:
#!/usr/bin/env python3
# <https://wiki.factorio.com/Blueprint_string_format>
import base64
import json
import sys
import zlib

if __name__ == "__main__":
    data: bytes = sys.stdin.buffer.read()
    version, data = data[:1], data[1:]
    if version != b"0":
        raise Exception("unsupported blueprint string version")
    data = base64.b64decode(data)
    data = zlib.decompress(data)
    print(json.dumps(json.loads(data), ensure_ascii=False, indent=2))
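Per the wiki page linked in the script, a blueprint string is the version byte "0" followed by base64(zlib-deflate(JSON)). A small editorial sketch of the inverse operation (not part of the dotfiles script):

# Editorial sketch: re-encode a decoded blueprint dict into a blueprint string.
import base64, json, zlib

def encode_blueprint(obj) -> str:
    payload = json.dumps(obj, ensure_ascii=False).encode("utf-8")
    return "0" + base64.b64encode(zlib.compress(payload, 9)).decode("ascii")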
authors: ["dmytro.meleshko@gmail.com"] | author_id: dmytro.meleshko@gmail.com

blob_id: 8914fa8c2aa7f4228c98ce1c0dd7ea0ba06f6b6f | directory_id: cf0b976c4a07e57c74174909fc6507a5701f5b67 | content_id: 5f6634f8d602ceb31331b9e7ba38de123749ce98
path: /mkw_ghosts.py | repo_name: william-texas/rksys-ghost-extractor | branch_name: refs/heads/main
snapshot_id: 820b4b166567bcf10152b3bf8a6cf4bb06e33a01 | revision_id: bdf43c4560a3c0be7481e928c15226d9059e9a48
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-05-01T00:05:27.643848 | revision_date: 2021-05-11T13:52:03 | committer_date: 2021-05-11T13:52:03
github_id: 355,788,146 | star_events_count: 1 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,207 | extension: py
content:
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild

import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import binascii
from enum import Enum


class CTGPGhosts(KaitaiStruct):

    class Controllers(Enum):
        wii_wheel = 0
        wii_remote = 1
        classic_controller = 2
        gamecube_controller = 3

    class Ghost(Enum):
        players_best_time = 1
        world_record_ghost = 2
        continental_record_ghost = 3
        flag_challenge_ghost = 4
        ghost_race = 6
        friend_ghost_01 = 7
        friend_ghost_02 = 8
        friend_ghost_03 = 9
        friend_ghost_04 = 10
        friend_ghost_05 = 11
        friend_ghost_06 = 12
        friend_ghost_07 = 13
        friend_ghost_08 = 14
        friend_ghost_09 = 15
        friend_ghost_10 = 16
        friend_ghost_11 = 17
        friend_ghost_12 = 18
        friend_ghost_13 = 19
        friend_ghost_14 = 20
        friend_ghost_15 = 21
        friend_ghost_16 = 22
        friend_ghost_17 = 23
        friend_ghost_18 = 24
        friend_ghost_19 = 25
        friend_ghost_20 = 26
        friend_ghost_21 = 27
        friend_ghost_22 = 28
        friend_ghost_23 = 29
        friend_ghost_24 = 30
        friend_ghost_25 = 31
        friend_ghost_26 = 32
        friend_ghost_27 = 33
        friend_ghost_28 = 34
        friend_ghost_29 = 35
        friend_ghost_30 = 36
        normal_staff_ghost = 37
        expert_staff_ghost = 38

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._read()

    def _read(self):
        self.magic = (self._io.read_bytes(4)).decode(u"utf-8")
        self.finishing_time_minutes = self._io.read_bits_int_be(7)
        self.finishing_time_seconds = self._io.read_bits_int_be(7)
        self.finishing_time_milliseconds = self._io.read_bits_int_be(10)
        self.track_id = self._io.read_bits_int_be(6)
        self.unknown_1 = self._io.read_bits_int_be(2)
        self.vehicle_id = self._io.read_bits_int_be(6)
        self.character_id = self._io.read_bits_int_be(6)
        self.ghost_sent_year = self._io.read_bits_int_be(7)
        self.ghost_sent_month = self._io.read_bits_int_be(4)
        self.ghost_sent_day = self._io.read_bits_int_be(5)
        self.controller_id = KaitaiStream.resolve_enum(MkwGhosts.Controllers, self._io.read_bits_int_be(4))
        self.unknown_2 = self._io.read_bits_int_be(4)
        self.compressed_flag = self._io.read_bits_int_be(1) != 0
        self.unknown_3 = self._io.read_bits_int_be(2)
        self.ghost_type = KaitaiStream.resolve_enum(MkwGhosts.Ghost, self._io.read_bits_int_be(7))
        self.drift_type = self._io.read_bits_int_be(1) != 0
        self.unknown_4 = self._io.read_bits_int_be(1) != 0
        self._io.align_to_byte()
        self.input_data_length = self._io.read_u2be()
        self.lap_count = self._io.read_u1()
        self.lap_split_time = [None] * (5)
        for i in range(5):
            self.lap_split_time[i] = MkwGhosts.LapSplit(self._io, self, self._root)
        self.unknown_5 = self._io.read_bytes(20)
        self.country_code = self._io.read_u1()
        self.region_code = self._io.read_u1()
        self.location_code = self._io.read_u2be()
        self.unknown_6 = self._io.read_u4be()
        self.driver_mii_data = self._io.read_bytes(74)
        self.crc16_mii = self._io.read_u2be()
        self.data = self._io.read_bytes(((self._io.size() - self._io.pos()) - 216))
        self.security_data = self._io.read_bytes(76)
        self.track_sha1 = str(binascii.hexlify(self._io.read_bytes(20)).upper())[2:-1]
        self.ctgp_pid = str(binascii.hexlify(self._io.read_bytes(8)).upper())[2:-1]
        self.truetime_float = self._io.read_bytes(4)
        self.ctgp_ver = self._io.read_bytes(4)

    class LapSplit(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.finishing_time_minutes = self._io.read_bits_int_be(7)
            self.finishing_time_seconds = self._io.read_bits_int_be(7)
            self.finishing_time_milliseconds = self._io.read_bits_int_be(10)


class MkwGhosts(KaitaiStruct):

    class Controllers(Enum):
        wii_wheel = 0
        wii_remote = 1
        classic_controller = 2
        gamecube_controller = 3

    class Ghost(Enum):
        players_best_time = 1
        world_record_ghost = 2
        continental_record_ghost = 3
        flag_challenge_ghost = 4
        ghost_race = 6
        friend_ghost_01 = 7
        friend_ghost_02 = 8
        friend_ghost_03 = 9
        friend_ghost_04 = 10
        friend_ghost_05 = 11
        friend_ghost_06 = 12
        friend_ghost_07 = 13
        friend_ghost_08 = 14
        friend_ghost_09 = 15
        friend_ghost_10 = 16
        friend_ghost_11 = 17
        friend_ghost_12 = 18
        friend_ghost_13 = 19
        friend_ghost_14 = 20
        friend_ghost_15 = 21
        friend_ghost_16 = 22
        friend_ghost_17 = 23
        friend_ghost_18 = 24
        friend_ghost_19 = 25
        friend_ghost_20 = 26
        friend_ghost_21 = 27
        friend_ghost_22 = 28
        friend_ghost_23 = 29
        friend_ghost_24 = 30
        friend_ghost_25 = 31
        friend_ghost_26 = 32
        friend_ghost_27 = 33
        friend_ghost_28 = 34
        friend_ghost_29 = 35
        friend_ghost_30 = 36
        normal_staff_ghost = 37
        expert_staff_ghost = 38

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._read()

    def _read(self):
        self.magic = (self._io.read_bytes(4)).decode(u"utf-8")
        self.finishing_time_minutes = self._io.read_bits_int_be(7)
        self.finishing_time_seconds = self._io.read_bits_int_be(7)
        self.finishing_time_milliseconds = self._io.read_bits_int_be(10)
        self.track_id = self._io.read_bits_int_be(6)
        self.unknown_1 = self._io.read_bits_int_be(2)
        self.vehicle_id = self._io.read_bits_int_be(6)
        self.character_id = self._io.read_bits_int_be(6)
        self.ghost_sent_year = self._io.read_bits_int_be(7)
        self.ghost_sent_month = self._io.read_bits_int_be(4)
        self.ghost_sent_day = self._io.read_bits_int_be(5)
        self.controller_id = KaitaiStream.resolve_enum(MkwGhosts.Controllers, self._io.read_bits_int_be(4))
        self.unknown_2 = self._io.read_bits_int_be(4)
        self.compressed_flag = self._io.read_bits_int_be(1) != 0
        self.unknown_3 = self._io.read_bits_int_be(2)
        self.ghost_type = KaitaiStream.resolve_enum(MkwGhosts.Ghost, self._io.read_bits_int_be(7))
        self.drift_type = self._io.read_bits_int_be(1) != 0
        self.unknown_4 = self._io.read_bits_int_be(1) != 0
        self._io.align_to_byte()
        self.input_data_length = self._io.read_u2be()
        self.lap_count = self._io.read_u1()
        self.lap_split_time = [None] * (5)
        for i in range(5):
            self.lap_split_time[i] = MkwGhosts.LapSplit(self._io, self, self._root)
        self.unknown_5 = self._io.read_bytes(20)
        self.country_code = self._io.read_u1()
        self.region_code = self._io.read_u1()
        self.location_code = self._io.read_u2be()
        self.unknown_6 = self._io.read_u4be()
        self.driver_mii_data = self._io.read_bytes(74)
        self.crc16_mii = self._io.read_u2be()
        self.data = self._io.read_bytes(self._io.size() - self._io.pos())

    class LapSplit(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()

        def _read(self):
            self.finishing_time_minutes = self._io.read_bits_int_be(7)
            self.finishing_time_seconds = self._io.read_bits_int_be(7)
            self.finishing_time_milliseconds = self._io.read_bits_int_be(10)
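A brief usage sketch (editorial, not from the repo) for the generated parser above, using the kaitaistruct runtime's KaitaiStruct.from_file(); the file name is a placeholder:

# Parse a Mario Kart Wii ghost file with the generated class above.
# "ghost.rkg" is a placeholder file name.
g = MkwGhosts.from_file("ghost.rkg")
print(g.magic, g.finishing_time_minutes, g.finishing_time_seconds, g.finishing_time_milliseconds)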
authors: ["noreply@github.com"] | author_id: william-texas.noreply@github.com

blob_id: 82ca775d5001e2ba8db6afdbf60d535f7fe92d4e | directory_id: ef8bd7e05d35e915b81338a7397447288a6fe703 | content_id: 740a1459a24fc1ce39fea7fdca8ea92a63d33a65
path: /ClassicalMechanics/solvers/rk.py | repo_name: phy6boy/pyphy6 | branch_name: refs/heads/master
snapshot_id: 57bfd441a5461b115f7d9100d51e61ee1f6e7715 | revision_id: 46818eb405c283c563231d1816eab1f60f39b898
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2023-03-06T01:15:27.660429 | revision_date: 2021-02-26T14:12:13 | committer_date: 2021-02-26T14:12:13
github_id: 165,656,231 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 689 | extension: py
content:
"""
This script impliments Butcher tableu of different rk methods
available at :
https://en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods
"""
import numpy as np
def rk4():
"""
classical Runge Kutta 4th order
Butcher tableau
"""
c = np.array([0, 1/2, 1/2, 1])
A = np.array([[0., 0., 0., 0.],
[1/2, 0., 0., 0.],
[0., 1/2, 0., 0.],
[0., 0., 1., 0.]])
b = np.array([1/6, 1/3, 1/3, 1/6])
return (c, A, b)
def euler():
"""
The old fasioned euler method
Only for testing purposes.
"""
c = np.array([0.])
A = np.array([[0.]])
b = np.array([1.])
return (c, A, b)
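To show how a (c, A, b) tableau from this file drives an integrator, an editorial sketch of a generic explicit Runge-Kutta step; rk_step and f are illustrative names, not part of the repo:

# Generic explicit RK step: y_{n+1} = y_n + h * sum_i b_i k_i, with
# k_i = f(t + c_i h, y + h * sum_{j<i} A_ij k_j). Assumes f(t, y) -> dy/dt.
def rk_step(f, t, y, h, tableau):
    c, A, b = tableau
    k = []
    for i in range(len(c)):
        y_i = y + h * sum(A[i][j] * k[j] for j in range(i))
        k.append(f(t + c[i] * h, y_i))
    return y + h * sum(b[i] * k[i] for i in range(len(b)))

# e.g. one rk4() step of y' = y from y(0) = 1 with h = 0.1:
# rk_step(lambda t, y: y, 0.0, 1.0, 0.1, rk4()) ~= 1.10517 (close to e**0.1)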
authors: ["muhsinibnalazeez@gmail.com"] | author_id: muhsinibnalazeez@gmail.com

blob_id: bb81afe9b8dcf40c77f92402b61ba4bdfe5f19cc | directory_id: d62460a108115ff6b1be0a8972c3dfb284b54d3c | content_id: 1efa0660b1a11ed6f5692332618b402d614acfa6
path: /Main.py | repo_name: JJhuk/PythonGame | branch_name: refs/heads/master
snapshot_id: 66900d9881e4613089a75974a7b0c77d61fbd114 | revision_id: 9cc338ec54f9b51182183fcd60b359e6473a2a74
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-08-20T05:03:41.539823 | revision_date: 2020-05-26T16:23:41 | committer_date: 2020-05-26T16:23:41
github_id: 185,246,673 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 11,935 | extension: py
content:
import pygame
from pygame.color import Color
import random
from UFO_class import UFO
from Boss_class import BOSS
from time import sleep
from UFO_Monster import UFO_MONSTER
from Meteor import METEOR
from Boss_class_Attak import BOSS_ATTACK

FPS = 28
pad_width = 800
pad_height = 200
background_width = 800
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLACK = (0, 0, 0)
meteor1_width = 34
meteor1_height = 36
meteor2_width = 86
meteor2_height = 60


def textObj(text, font):
    textSurface = font.render(text, True, RED)
    return textSurface, textSurface.get_rect()


def dispMessage(text):
    global gamepad
    global crashed
    largeText = pygame.font.Font("D2coding.ttf", 40)
    TextSurf, TextRect = textObj(text, largeText)
    TextRect.center = ((pad_width / 2), (pad_height / 2))
    gamepad.blit(TextSurf, TextRect)
    pygame.display.update()
    sleep(10)
    crashed = True


def crash(x):
    global gamepad
    global explosion_sound, bgm_sound
    pygame.mixer_music.stop()
    pygame.mixer.Sound.play(explosion_sound)
    if x == 0:
        dispMessage('Juhyuk was hit by a meteor and died.')
    elif x == 1:
        dispMessage('Juhyuk collided with a UFO and died.')
    elif x == 2:
        dispMessage('Juhyuk was killed by a high-voltage current.')


def drawObject(obj, x, y):
    global gamepad
    gamepad.blit(obj, (x, y))


def endgame():
    global gamepad
    gamepad.fill(BLACK)
    dispMessage('Successfully found a new planet.')
    sleep(10)
    pygame.quit()
    quit()


def runGame():
    global background1, background2, UFO1, clock, boss, UFO_Monster
    global bullet, meteors, boss_bullet
    global crashed, shot_sound
    global Score
    font = pygame.font.Font("D2coding.ttf", 20)
    bullet_xy = []
    isShotBoss = False
    isShotUFO = False
    isShotMeteor = False
    crashed = False
    clock = pygame.time.Clock()
    # UFO1.rect.x = pad_width * 0.05
    UFO1.rect.y = 120
    UFO1.rect.x = 50
    boss.rect.x = 550
    boss.rect.y = 0
    Score = 0
    boss_bullet.rect.x = boss.rect.x
    boss_bullet.rect.y = random.randrange(0, boss.sprite_height - boss_bullet.sprite_height)
    meteor_x = pad_width
    meteor_y = random.randrange(0, pad_height - 80)
    random.shuffle(meteors)
    meteor = meteors[0]
    UFO_Monster.rect.x = pad_width
    UFO_Monster.rect.y = random.randrange(0, pad_height - UFO_Monster.sprite_height)
    UFO1_y_change = 0
    UFO1_x_change = 0
    background1_x = 0
    background2_x = background_width
    # game loop
    while not crashed:
        # time-based score + font rendering
        time = pygame.time.get_ticks() // 1000
        time_textSurface = font.render("Time : " + str(time), True, (216, 216, 216))
        time_textRect = time_textSurface.get_rect()
        time_textRect.center = (80, 20)
        # Score display
        Score_textSurface = font.render("Score : " + str(Score), True, (200, 0, 0))
        Score_textRect = Score_textSurface.get_rect()
        Score_textRect.center = (200, 20)
        plus_score = 0
        # 1) handle user input
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                crashed = True  # exit the game loop
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    UFO1_y_change = -UFO1.speed
                elif event.key == pygame.K_DOWN:
                    UFO1_y_change = UFO1.speed
                elif event.key == pygame.K_RIGHT:
                    UFO1_x_change = UFO1.speed
                elif event.key == pygame.K_LEFT:
                    UFO1_x_change = -UFO1.speed
                elif event.key == pygame.K_SPACE:  ## fire a bullet
                    pygame.mixer.Sound.play(shot_sound)
                    bullet_x = UFO1.rect.x + UFO1.sprite_width
                    bullet_y = UFO1.rect.y + UFO1.sprite_height / 2
                    bullet_xy.append([bullet_x, bullet_y])
            if event.type == pygame.KEYUP:
                # stop moving on key release (each axis independently)
                if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
                    UFO1_y_change = 0
                elif event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    UFO1_x_change = 0
        gamepad.fill(WHITE)
        background1_x -= 5
        background2_x -= 5
        boss.rect.x = random.randrange(580, 600)
        if background1_x <= -background_width:
            background1_x = background_width
        if background2_x <= -background_width:
            background2_x = background_width
        drawObject(background1, background1_x, 0)
        drawObject(background2, background2_x, 0)
        drawObject(time_textSurface, time_textRect.x, time_textRect.y)
        drawObject(Score_textSurface, Score_textRect.x, Score_textRect.y)
        # UFO_Monster position
        UFO_Monster.rect.x -= 7
        if UFO_Monster.rect.x <= 0:
            UFO_Monster.rect.x = pad_width
            UFO_Monster.rect.y = random.randrange(0, pad_height)
        ## meteor movement
        if meteor == None:
            meteor_x -= 30
        else:
            meteor_x -= 15
        if meteor_x <= 0:
            meteor_x = pad_width
            meteor_y = random.randrange(0, pad_height)
            random.shuffle(meteors)
            meteor = meteors[0]
        if boss_bullet.rect.x >= 0 and boss.Attack == True:
            boss_bullet.rect.x -= 20
        else:
            boss_bullet.rect.x = boss.rect.x
            boss_bullet.rect.y = random.randrange(0, boss.sprite_height)
        # UFO position
        UFO1.rect.y += UFO1_y_change
        UFO1.rect.x += UFO1_x_change
        if UFO1.rect.y < 0:
            UFO1.rect.y = 0
        elif UFO1.rect.y > pad_height - UFO1.sprite_height:
            UFO1.rect.y = pad_height - UFO1.sprite_height
        if UFO1.rect.x < 0:
            UFO1.rect.x = 0
        elif UFO1.rect.x > 500:
            UFO1.rect.x = 500
        # bullet position
        if len(bullet_xy) != 0:
            for i, bxy in enumerate(bullet_xy):
                bxy[0] += 15
                bullet_xy[i][0] = bxy[0]
                if bxy[0] > boss.rect.x and boss.IsAlive and time >= 10:
                    if bxy[1] > boss.rect.y and bxy[1] < boss.rect.y + boss.sprite_height:
                        bullet_xy.remove(bxy)
                        Score += 1
                        isShotBoss = True
                if bxy[0] > meteor_x and meteor != None and isShotMeteor == False:
                    if meteor != None and bxy[1] > meteor_y and bxy[1] < meteor_y + meteor.sprite_height:
                        try:  # exception handling: an error came up here, so just pass...
                            bullet_xy.remove(bxy)
                        except:
                            pass
                        Score += 1
                        meteor_x = pad_width
                        meteor_y = random.randrange(0, pad_height)
                        random.shuffle(meteors)
                        meteor = meteors[0]
                        isShotMeteor = True
                if bxy[0] > UFO_Monster.rect.x and isShotUFO == False:  ## errors here too: xy coordinates show up even though it hasn't spawned
                    if bxy[1] < UFO_Monster.rect.bottomleft[1] and bxy[1] > UFO_Monster.rect.topleft[1]:
                        try:
                            bullet_xy.remove(bxy)
                        except:
                            pass
                        Score += 1
                        UFO_Monster.rect.x = pad_width
                        UFO_Monster.rect.y = random.randrange(0, pad_height)
                        isShotUFO = True
                if bxy[0] >= pad_width:
                    try:
                        bullet_xy.remove(bxy)
                    except:
                        pass
        # collision check
        if UFO1.rect.x + UFO1.sprite_width > meteor_x and meteor != None:
            if (UFO1.rect.y > meteor_y and UFO1.rect.y < meteor_y + meteor.sprite_height) or (UFO1.rect.y + UFO1.sprite_height > meteor_y and UFO1.rect.y + UFO1.sprite_height < meteor_y + meteor.sprite_height):
                crash(0)
        if UFO1.rect.x + UFO1.sprite_width > UFO_Monster.rect.x:
            if (UFO1.rect.y > UFO_Monster.rect.y and UFO1.rect.y < UFO_Monster.rect.y + UFO_Monster.sprite_height) or (UFO1.rect.y + UFO1.sprite_height > UFO_Monster.rect.y and UFO1.rect.y + UFO1.sprite_height < UFO_Monster.rect.y + UFO_Monster.sprite_height):
                crash(1)
        if UFO1.rect.x + UFO1.sprite_width > boss_bullet.rect.x:
            if (UFO1.rect.y > boss_bullet.rect.y and UFO1.rect.y < boss_bullet.rect.y + boss_bullet.sprite_height) or (UFO1.rect.y + UFO1.sprite_height > boss_bullet.rect.y and UFO1.rect.y + UFO1.sprite_height < boss_bullet.rect.y + boss_bullet.sprite_height):
                crash(2)
        # 2) update game state
        UFO1.update()
        boss_bullet.update()
        if UFO_Monster.IsAlive and isShotUFO == False:  # make the UFO disappear once shot
            UFO_Monster.update()
        if isShotUFO == True:
            isShotUFO = False
        if time >= 10 and boss.IsAlive:
            boss.update()
        if meteor != None and isShotMeteor == False:
            meteor.update()
        if isShotMeteor == True:
            isShotMeteor = False
        # 3) draw game state
        if boss.Attack:
            drawObject(boss_bullet.image, boss_bullet.rect.x, boss_bullet.rect.y)
        drawObject(UFO_Monster.image, UFO_Monster.rect.x, UFO_Monster.rect.y)
        drawObject(UFO1.image, UFO1.rect.x, UFO1.rect.y)
        if meteor != None and not isShotMeteor:
            if not isShotMeteor:
                drawObject(meteor.image, meteor_x, meteor_y)
            else:
                drawObject(meteor.image, meteor_x, meteor_y)
                meteor.HP -= 1
                isShotMeteor = False
        if len(bullet_xy) != 0:
            for bx, by in bullet_xy:
                drawObject(bullet, bx, by)
        if time >= 50:  # boss spawn time (time must reach 10 for it to appear)
            if not boss.HP <= 0:  # while the boss's HP is not 0
                drawObject(boss.image, boss.rect.x, boss.rect.y)  # draw it first
                if not isShotBoss:  # if the boss was not shot
                    if boss.Attack == True and not boss.HP <= 0:
                        drawObject(boss_bullet.image, boss_bullet.rect.x, boss_bullet.rect.y)
                    else:
                        boss.Attack = not boss.Attack
                else:  # if the boss was shot
                    boss.HP -= 1
                    isShotBoss = False
            else:
                boss.IsAlive = False
                boss.Attack = False
                endgame()
        if meteor == None:
            isShotMeteor = False
            random.shuffle(meteors)
            meteor = meteors[0]
        pygame.display.update()
        clock.tick(FPS)
    pygame.quit()
    quit()


def initGame():
    global gamepad, clock, UFO1, background1, background2, bullet, boss, boom, UFO_Monster, meteors, boss_bullet
    global shot_sound, explosion_sound, bgm_sound
    pygame.init()
    gamepad = pygame.display.set_mode((pad_width, pad_height))
    pygame.display.set_caption("UFO game")
    UFO1 = UFO()
    boss = BOSS()
    meteors = []
    for i in range(3):
        meteors.append(None)
    meteors.append(METEOR())
    meteors.append(METEOR())
    UFO_Monster = UFO_MONSTER()
    background1 = pygame.image.load("background_1.png")
    background2 = background1.copy()
    bullet = pygame.image.load("bullet.png")
    boom = pygame.image.load("boom.png")
    clock = pygame.time.Clock()
    boss_bullet = BOSS_ATTACK()
    shot_sound = pygame.mixer.Sound('shot.wav')
    explosion_sound = pygame.mixer.Sound('explosion.wav')
    bgm_sound = pygame.mixer_music.load('mybgm.wav')
    bgm_sound = pygame.mixer_music.play(-1)
    runGame()


initGame()
authors: ["ly0738@naver.com"] | author_id: ly0738@naver.com

blob_id: 6d45510b9974c91305c7cdc3f646051fb94cf87d | directory_id: 809467f757aa55165fc1337bb7c169cc4d6a852b | content_id: 2be96d567b9cc94c419cf1f29fef611307060651
path: /day09/Tencent/Tencent/settings.py | repo_name: rhlp/Python_Spider | branch_name: refs/heads/master
snapshot_id: 91e544593c598d1f6ce85f23131705bf034d8fd7 | revision_id: 534161bf20b9d65434bebf3c1d851fce0c74bce5
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-09-02T12:01:08.428834 | revision_date: 2018-01-02T12:02:20 | committer_date: 2018-01-02T12:02:20
github_id: 115,380,328 | star_events_count: 0 | fork_events_count: 0
gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,464 | extension: py
content:
# -*- coding: utf-8 -*-
# Scrapy settings for Tencent project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Tencent'
SPIDER_MODULES = ['Tencent.spiders']
NEWSPIDER_MODULE = 'Tencent.spiders'
# Save all log output to a local file
LOG_FILE = "baidu.log"
# Only record messages at this level or above
LOG_LEVEL = "INFO"
# DEBUG is the lowest level
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Tencent (+http://www.yourdomain.com)'
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 320
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Tencent.middlewares.TencentSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'Tencent.middlewares.UserAgentMiddleware': 643,
# 'Tencent.middlewares.ProxyMiddleware': 543,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'Tencent.pipelines.TencentPipeline': 300,
# 'Tencent.pipelines.PositionPipeline': 400,
}
USER_AGENT_LIST = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"]
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"renhl1991@163.com"
] |
renhl1991@163.com
|
60d05e5f995152d5461a260c213345887637357c
|
329cc042bb5829ab26a51d0b3a0bd310f05e0671
|
/credentials.py
|
680276870d334e695779b99d754d01d7480a139d
|
[] |
no_license
|
bkhoward/WLC-PSK-Change
|
53afe64e767889ce967679d8aeb798745166fa72
|
1b92fd1d5afae4bc64bfc61bc4935c635cca12f0
|
refs/heads/master
| 2023-03-25T01:33:53.765751
| 2021-03-11T18:59:03
| 2021-03-11T18:59:03
| 345,891,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
credentials = {
'username': 'bhoward99', 'password': 'Sh0tei02'
}
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
7ba089890ac03183b71de1929bde2b4e220606ac
|
84f873e80bbe741aede99d42b3fe8ed79a354086
|
/002_fun_coding_time/edunext_challenge_django_project/edunext_challenge_django_project/urls.py
|
7af8affc2dd2bbc99cdb9b81882992417499c450
|
[] |
no_license
|
yeguacelestial/edunext-challenge
|
bd8f2e60ffbafcf6f48ac20695ea4cb503f50dbe
|
113c6a556e0658a23194ef03aea74dc082ee26fa
|
refs/heads/master
| 2022-12-05T14:29:28.963110
| 2020-08-06T04:13:03
| 2020-08-06T04:13:03
| 283,603,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
"""edunext_challenge_django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from edunext_paypal_service.router import router
urlpatterns = [
path('admin/', admin.site.urls),
# REST FRAMEWORK URLS
path('payments/', include('edunext_paypal_service.urls')),
]
|
[
"yeguacelestial@gmail.com"
] |
yeguacelestial@gmail.com
|
3012e9a0a6447b928a7cc74995256be4f5fe81df
|
8d0f964650cb0e6a738b62771d1a69b14f24aec3
|
/setup.py
|
29395977261da34d89db48c441c1911e4a6bcab6
|
[
"BSD-3-Clause"
] |
permissive
|
InfinityMod/easyRedisCache
|
ed8aeec184fbea9dfbba2bcae35ce79c9afc702c
|
b79747e0134a3b1916a6b42bbdc1cd50aa8c8ff6
|
refs/heads/master
| 2021-01-11T15:11:45.263321
| 2017-01-30T11:11:13
| 2017-01-30T11:11:13
| 80,313,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
setup(
name='easyRedisCache',
version='0.15',
author=u'David Ziegler',
author_email='webmaster@SpreadPost.de',
packages=find_packages(),
include_package_data=True,
url='https://github.com/InfinityMod/easyRedisCache',
license='BSD',
description='Easy lock secured cache for redis',
zip_safe=False,
keywords=['cache', 'lock', 'redis', 'easy'],
dependency_links=[
"git+ssh:git@github.com:InfinityMod/easyRedisCache.git#egg=redisSherlock-0.3.0"
],
install_requires=[
'redisSherlock',
'redis'
],
)
|
[
"solino.webtech@gmail.com"
] |
solino.webtech@gmail.com
|
69056781c3aa3764dffb60ad08403e8de4b6a3ee
|
f5b80ea1e60bae54985ade2c6a1d1d583e8d3040
|
/binary-trees/is_balanced_binary_tree.py
|
6c7ed26c31bdb4d7c36bd776ef75b678ba00f4d0
|
[] |
no_license
|
FicciTong/elements-of-programming-interviews
|
4eda6b4568281a080ade9ddc5d10009bd250b86d
|
9f8d52c85f1bef0feb20213798019594ee219a7d
|
refs/heads/master
| 2021-07-05T00:35:37.505560
| 2017-09-27T19:42:57
| 2017-09-27T19:42:57
| 104,961,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
class BinaryTreeNode:
def __init__(self, data=0, left=None, right=None):
self.data = data
self.left = left
self.right = right
def is_balanced_binary_tree(tree):
    # post-order walk returning (is_balanced, height); an empty subtree has height -1
    def check(node):
        if node is None:
            return True, -1
        left_balanced, left_height = check(node.left)
        right_balanced, right_height = check(node.right)
        balanced = (left_balanced and right_balanced
                    and abs(left_height - right_height) <= 1)
        return balanced, max(left_height, right_height) + 1
    return check(tree)[0]
def main():
root = BinaryTreeNode('A')
node_b = BinaryTreeNode('B')
node_c = BinaryTreeNode('C')
node_d = BinaryTreeNode('D')
node_e = BinaryTreeNode('E')
node_f = BinaryTreeNode('F')
node_g = BinaryTreeNode('G')
node_h = BinaryTreeNode('H')
node_i = BinaryTreeNode('I')
node_j = BinaryTreeNode('J')
node_k = BinaryTreeNode('K')
node_l = BinaryTreeNode('L')
node_m = BinaryTreeNode('M')
node_n = BinaryTreeNode('N')
node_o = BinaryTreeNode('O')
root.left = node_b
root.right = node_c
node_b.left = node_d
node_b.right = node_e
node_c.left = node_f
node_c.right = node_g
node_d.left = node_h
node_d.right = node_i
node_e.left = node_j
node_e.right = node_k
node_f.left = node_l
node_f.right = node_m
node_g.left = node_n
    node_g.right = node_o
    # the 15 nodes above form a perfect binary tree, so this prints True
    print(is_balanced_binary_tree(root))
if __name__ == '__main__':
main()
|
[
"FicciTong@me.com"
] |
FicciTong@me.com
|
701dd8791ae9747f4bc55c4bbcd9a6abd8d28bd9
|
e77dcd2b991adbb204683fc76c655ffb90cebc6a
|
/archive/boj/21-30/9461.py
|
e9b9d8e3d3988fbf39fd6d9d4837e99e96fa5fa7
|
[] |
no_license
|
circlezero/Algorithm
|
369d66e74fbab7e7c3dab065d4d602e2ee7e1f27
|
046ba418a612ee4f37ba1c6ff5a180f04b412deb
|
refs/heads/master
| 2022-03-08T03:26:44.105406
| 2022-02-28T23:02:41
| 2022-02-28T23:02:41
| 155,205,100
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
import sys
input = sys.stdin.readline
t = int(input())
d = [0, 1, 1, 1] + [0] * 100
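# BOJ 9461 "Padovan sequence": P(n) = P(n-2) + P(n-3), with P(1) = P(2) = P(3) = 1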
for i in range(4, 101):
d[i] = d[i-2] + d[i-3]
for i in range(t):
n = int(input())
print(d[n])
|
[
"jwy0565@gmail.com"
] |
jwy0565@gmail.com
|
1483e7f92fdb8100aee88c12fdd838099e982d37
|
07e36f2548040e53bdf7bb418ff3f94081f42cb5
|
/dataset-versioning/sim_modify_labels.py
|
ab3d734bebf9e95336cccdaebdcfd0fd8e3aa311
|
[] |
no_license
|
wandb/artifacts-examples
|
99bd7e4c046d10a70c2861a7eb867617e1264cb9
|
9f38abfcd2b48af5c314f2346920abb99f1bb81f
|
refs/heads/master
| 2023-02-08T14:18:15.813319
| 2023-02-02T02:01:00
| 2023-02-02T02:01:00
| 254,713,385
| 12
| 2
| null | 2023-02-02T00:38:29
| 2020-04-10T18:59:40
|
Python
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
import argparse
from datetime import datetime
import glob
import json
import os
import sys
import data_library
parser = argparse.ArgumentParser(description='')
parser.add_argument('label_file', type=str, help='')
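# Expected label_file contents (assumption, COCO-style): a JSON list of label dicts,
# e.g. [{"id": 17, "segmentation": [...]}, {"id": 9, "bbox": [x, y, w, h]}]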
def main(argv):
args = parser.parse_args()
seg_labels = data_library.get_seg_labels()
box_labels = data_library.get_box_labels()
new_labels = json.load(open(args.label_file))
for label in new_labels:
label_id = label['id']
if 'segmentation' in label:
seg_labels[label_id] = label
elif 'bbox' in label:
box_labels[label_id] = label
data_library.save_seg_labels(seg_labels)
data_library.save_box_labels(box_labels)
if __name__ == '__main__':
main(sys.argv)
|
[
"shlewis@gmail.com"
] |
shlewis@gmail.com
|
4aaf6014bb2c69d740bc278b1697defe93f9b565
|
a127ac15dc03271edfba3ee8a931718d70c452ad
|
/test/tests/decorators.py
|
1eda3bec351b28f7da64016ccaf723447f2f1556
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
josephwinston/pyston
|
7e354580b3a011ebf768ef14d5b2d298a3a48427
|
7d703b0214ffc1c61598ba4e689abfc8c1658486
|
refs/heads/master
| 2021-01-11T10:05:19.995366
| 2014-07-23T12:08:57
| 2014-07-23T12:08:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
# expected: fail
# - decorators
def print_when_eval(x, msg):
print msg
return x
# Test the order that decorators and function defaults get evaluated
@print_when_eval(lambda f: print_when_eval(f, "calling decorator"), "evaluating decorator")
def f(x=print_when_eval(1, "evaluating default")):
pass
# Result: looks like it's decorator first, then defaults, then the decorator is called.
|
[
"kmod@dropbox.com"
] |
kmod@dropbox.com
|
4c6ae23fcdd718fc99eafebc9f28c35e27dbb990
|
d0391c4f00ff41503489a45dc4ad0a3b4d084258
|
/main.py
|
facf3565a096f08413e216bff31922fdfd782ac8
|
[] |
no_license
|
Pandawastaken1/LowestOpenArray
|
5ea7532fb92dac6bfd7340d345f39130cf140de4
|
1642185c0868c17de0a0c43f9ed1a15003a37ba9
|
refs/heads/master
| 2022-12-06T09:17:13.148088
| 2020-08-28T23:26:57
| 2020-08-28T23:26:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
class bus:
#Constructor
def __init__(self,colorInput,numberOfSeats,engineHP,isbus):
self.bodyColor=colorInput
self.seats=numberOfSeats
self.engineSize=engineHP
self.bus=isbus
def driveoffintothesunset(self):
print("and this is just like a movie")
mybus=bus("black",40,3050.1,True)
print(mybus.bodyColor)
print(mybus.seats)
mybus.driveoffintothesunset()
#epicbus
|
[
"aaakashgamer45@outlook.com"
] |
aaakashgamer45@outlook.com
|
6a1f5008c2673861e1856c147fa293aceea2108a
|
68486e67ac8fbf233d0c12f3b13412abe63d607d
|
/htmltest/views.py
|
f23ad9fe627fa2a15997dfe29b35e783d8875198
|
[] |
no_license
|
khs0806/the_project
|
f1632da64ddc198bb136601ba6dc515ed401fe9f
|
41187c4ad81bb56f3efbdee22f49afddbe33ba07
|
refs/heads/master
| 2020-12-22T07:45:07.705328
| 2020-02-11T06:28:18
| 2020-02-11T06:28:18
| 236,713,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,218
|
py
|
from django.shortcuts import render, get_object_or_404
import pandas as pd
import folium
import json
from folium.features import DivIcon
from folium.plugins import MarkerCluster
from blog.models import Post
from django.utils import timezone
from django.shortcuts import redirect
from .forms import PostForm
# Create your views here.
def post_list(request):
crime_seoul = pd.read_csv("static/seoul_crime_data.csv")
#c_list=[]
#for i in range(len(crime_seoul.columns))
# if i[-2:]=='_p':
# c_list.append(i)
#sc_clist = [i for i in c_list]
sc_clist = [i for i in crime_seoul.columns]
clist=[]
for i in sc_clist:
if i[-2:] == '_p':
            clist.append(i[:-2])  # drop the '_p' suffix (strip('_p') would also eat letters from the name)
sc_ylist = [int(i) for i in crime_seoul['year'].unique()]
print(clist,sc_ylist)
return render(request, 'htmltest/post_list.html', {'sc_ylist':sc_ylist,'sc_clist':clist,'l_year':sc_ylist[0],'l_rate':clist[0]})
def make_map(request,year_r,rate_r,poli_loc):#, year, columns,poli_loc
#year=2018
year=int(year_r)
rate=rate_r+'_p'
poli_loc = int(poli_loc)
crime_seoul = pd.read_csv("static/seoul_crime_data.csv")
year_data=crime_seoul[crime_seoul.year==year]
year_data=year_data.set_index('state')
geo_path = 'static/seoul_state_map.json'
geo_str = json.load(open(geo_path, encoding='utf-8'))
f = folium.Figure (width = 600, height = 530)
map = folium.Map(location=[37.5592, 126.982], zoom_start=11,
tiles='Stamen Toner').add_to (f)
map.choropleth(geo_data=geo_str,
data=year_data[rate],
columns=[year_data.index, year_data[rate]],
fill_color='YlOrRd', #Reds,YlOrRd,PuRd,Oranges
key_on='feature.id',
legend_name=rate[:-2]
)
gps_data = pd.read_csv('static/seoul_state_gps.csv',encoding = 'utf-8')
for i in gps_data.index:
x=gps_data.loc[i,'state_x']
y=gps_data.loc[i,'state_y']-0.015
kor_s=gps_data.loc[i,'state_kor']
folium.map.Marker(
[x,y],
icon=DivIcon(
icon_size=(150,36),
icon_anchor=(0,0),
html='<div style="font-size: 12pt">'+gps_data.loc[i,'state_kor']+'</div>',
)
).add_to(map)
data=pd.read_excel('static/seoul_marker.xlsx')
if poli_loc == 1:
icon_create_function = """\
function(cluster) {
return L.divIcon({
html: '<b style="font-size:18px"> ' + cluster.getChildCount() + '</b>',
className: 'marker-cluster marker-cluster-large',
iconSize: new L.Point(30, 30)
});
}"""
y = data.geo_y.values
x = data.geo_x.values
locations = list(zip(y,x))
popups = list(data.title.values)
marker_cluster = MarkerCluster(
locations=locations, popups=popups,
name='seoul',
overlay=True,
control=True,
icon_create_function=icon_create_function
)
marker_cluster.add_to(map)
sc_clist = [i for i in crime_seoul.columns]
clist=[]
for i in sc_clist:
if i[-2:] == '_p':
            clist.append(i[:-2])  # drop the '_p' suffix (strip('_p') would also eat letters from the name)
sc_ylist = [int(i) for i in crime_seoul['year'].unique()]
#dic_m={'year':sc_ylist,'columns':sc_clist}
map.save('htmltest/templates/htmltest/s_map.html')
    return render(request, 'htmltest/post_list.html', {'sc_ylist':sc_ylist,'sc_clist':clist,'l_year':year,'l_rate':rate[:-2]})
# def s_map(request):
# return render(request, 'htmltest/s_map.html', {})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'htmltest/post_list.html',{'post':post})
def test_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'htmltest/list.html', {'posts': posts})
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_list')
else:
form = PostForm()
return render(request, 'htmltest/post_edit.html', {'form': form})
def index(request):
return render(request, 'htmltest/index.html')
def indextwo(request):
return render(request, 'htmltest/indextwo.html')
def tozi(request):
return render(request, 'htmltest/200122.html')
def gutozi(request):
return render(request, 'htmltest/gutozi.html')
def piechart(request):
return render(request, 'htmltest/piechart.html')
def satis(request):
return render(request, 'htmltest/satis.html')
def lat(request):
return render(request, 'htmltest/lat.html')
def marker(request):
return render(request, 'htmltest/marker.html')
def center(request):
return render(request, 'htmltest/center.html')
|
[
"asdasqwex63hd65@naver.com"
] |
asdasqwex63hd65@naver.com
|
02ca018a3e7a5f451cf9c339a3b1dfa6e4534e59
|
b95df751b39533cf4b194d47587210e226350087
|
/led.py
|
7d4247159609440387b8f8f2e2104c88f7f5f8ea
|
[] |
no_license
|
gandipg/micropy_esp
|
dbbeaa214810f213a3ac05555515ae9c43bcab06
|
c570a10689747812bacb0fb5bd8c64feee593cf6
|
refs/heads/master
| 2021-09-01T13:49:35.395670
| 2017-12-27T09:09:26
| 2017-12-27T09:09:26
| 115,424,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
import socket
import machine
#HTML to send to browsers
html = """<!DOCTYPE html>
<html>
<head> <title>ESP8266 LED ON/OFF</title> </head>
<center><h2>A simple webserver for turning LEDs on and off with Micropython</h2></center>
<center><h3>(for noobs to both the ESP8266 and Micropython)</h3></center>
<form>
LED0:
<button name="LED" value="ON0" type="submit">LED ON</button>
<button name="LED" value="OFF0" type="submit">LED OFF</button><br><br>
LED2:
<button name="LED" value="ON2" type="submit">LED ON</button>
<button name="LED" value="OFF2" type="submit">LED OFF</button>
</form>
</html>
"""
#Setup PINS
LED0 = machine.Pin(0, machine.Pin.OUT)
LED2 = machine.Pin(2, machine.Pin.OUT)
LED2.on()
LED0.on()
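# assumption: the on-board LEDs are active-low (typical for ESP8266 boards), so
# pin.on() turns a LED OFF and pin.off() turns it ON -- see the handlers below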
#Setup Socket WebServer
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 80))
s.listen(5)
while True:
conn, addr = s.accept()
print("Got a connection from %s" % str(addr))
request = conn.recv(1024)
print("Content = %s" % str(request))
request = str(request)
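    # str(request) renders the bytes as "b'GET /?LED=ON0 ...'", so the query string
    # begins at index 6 -- which is why the find() results below are compared to 6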
LEDON0 = request.find('/?LED=ON0')
LEDOFF0 = request.find('/?LED=OFF0')
LEDON2 = request.find('/?LED=ON2')
LEDOFF2 = request.find('/?LED=OFF2')
#print("Data: " + str(LEDON0))
#print("Data2: " + str(LEDOFF0))
if LEDON0 == 6:
print('TURN LED0 ON')
LED0.off()
if LEDOFF0 == 6:
print('TURN LED0 OFF')
LED0.on()
if LEDON2 == 6:
print('TURN LED2 ON')
LED2.off()
if LEDOFF2 == 6:
print('TURN LED2 OFF')
LED2.on()
response = html
conn.send(response)
conn.close()
|
[
"g.tsankov93@gmail.com"
] |
g.tsankov93@gmail.com
|
c03315fd9e075cf8664700e10adfaa229031f664
|
8110adff2bcb8ceb946682ad37fe3575ecdf3d01
|
/website/urls.py
|
c08a1d07378381fe7a48a6da83e862b31a4b4932
|
[] |
no_license
|
captain-jbit/template_django_dentist
|
2791505ddcb5748a170c64457175e9df79497b08
|
3fff49a6f0cde0c32edba5305fc784ebddcc85e2
|
refs/heads/master
| 2022-12-07T11:12:24.064023
| 2020-08-29T22:54:06
| 2020-08-29T22:54:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
"""djangoProject4 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
#url paths
urlpatterns = [
path('', views.home, name="home"),
path('contact.html', views.contact, name="contact")
]
|
[
"jake.yanov@gmail.com"
] |
jake.yanov@gmail.com
|
602406ee410725341876913303a2fb511aebf002
|
ac1f286fb73e3fa866de88bc8b08cb9be02357f2
|
/arena/tests/test_spatial_glimpse.py
|
8a93223090b60716e57ac816883210182b8b29d2
|
[] |
no_license
|
flyers/Arena
|
8a717184f37cd36b07ec28dbe9d667bc8f9fa8c2
|
6ed303c10eaefd7e4ee52ab56b1ced6b773de244
|
refs/heads/master
| 2020-04-27T20:44:58.091106
| 2016-10-10T08:58:53
| 2016-10-10T08:58:53
| 174,668,982
| 0
| 0
| null | 2019-03-09T08:37:46
| 2019-03-09T08:37:46
| null |
UTF-8
|
Python
| false
| false
| 3,402
|
py
|
import mxnet as mx
import mxnet.ndarray as nd
import numpy
from arena import Base
import cv2
import time
from arena.utils import *
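# load_roi: turn polygon ground truth into (cx, cy, sx, sy) boxes normalized to [-1, 1]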
def load_roi(path, height=360, width=480, num=100):
a = numpy.loadtxt(path, delimiter=',')
cx = a[:, ::2].mean(axis=1)
cy = a[:, 1::2].mean(axis=1)
sx = a[:, ::2].max(axis=1) - a[:, ::2].min(axis=1) + 1
sy = a[:, 1::2].max(axis=1) - a[:, 1::2].min(axis=1) + 1
cx = cx / width * 2 -1
cy = cy / height * 2 - 1
sx = sx / width * 2 - 1
sy = sy / height * 2 - 1
rois = numpy.vstack((cx, cy, sx, sy)).T
return rois[:num, :]
def load_image(path, height=360, width=480, num=100):
data_npy = numpy.zeros((num, 3, height, width), dtype=numpy.float32)
for i in range(num):
image_path = path + "\\%08d.jpg" % (i+1)
print image_path
bgr_img = cv2.imread(image_path)
b, g, r = cv2.split(bgr_img) # get b,g,r
data_npy[i, :, :, :] = numpy.rollaxis(cv2.merge([r, g, b]), 2, 0)
return data_npy
def pyramid_glimpse(data, roi, depth, scale, output_shape, name):
l = []
curr_scale = 1.0
if type(roi) is tuple:
roi = mx.symbol.Concat(*roi, num_args=depth)
for i in range(depth):
l.append(mx.symbol.SpatialGlimpse(data=data, roi=roi,
output_shape=output_shape,
scale=curr_scale, name="%s-%d" %(name, i)))
curr_scale *= scale
ret = mx.symbol.Concat(*l, num_args=depth, name="%s-concat" %name)
return ret
ctx = mx.cpu()
data = mx.symbol.Variable('data')
center = mx.symbol.Variable('center')
size = mx.symbol.Variable('size')
roi = mx.symbol.Variable('roi')
print type(data)
depth = 3
scale = 1.5
rows = 720
cols = 1280
path= "D:\\HKUST\\advanced\\vot-workshop\\sequences\\sequences\\ball1"
net = pyramid_glimpse(data=data, roi=roi, depth=depth, scale=scale, output_shape=(107, 107),
name='spatial_glimpse')
batch_size = 50
data_arr = nd.array(load_image(path=path,
num=batch_size, height=rows, width=cols), ctx=ctx)
roi_arr = nd.array(load_roi(path=path + "\\groundtruth.txt",
num=batch_size, height=rows, width=cols), ctx=ctx)
print data_arr.shape
print roi_arr.shape
data_shapes = {'data': (batch_size, 3, rows, cols),
'roi': (batch_size, 4)}
glimpse_test_net = Base(data_shapes=data_shapes, sym=net, name='GlimpseTest', ctx=ctx)
start = time.time()
out_imgs = glimpse_test_net.forward(batch_size=batch_size, data=data_arr, roi=roi_arr)[0].asnumpy()
end = time.time()
print 'Time:', end-start
print out_imgs.shape
for i in range(batch_size):
for j in range(depth):
r, g, b = cv2.split(numpy.rollaxis(out_imgs[i, j*3:(j+1)*3], 0, 3))
reshaped_img = cv2.merge([b,g,r])
cv2.imshow('image', reshaped_img/255.0)
cv2.waitKey(0)
#
# shapes = []
#
# arr = {'arg_%d' % i: mx.random.uniform(-10.0, 10.0, shape) for i, shape in
# zip(range(len(shapes)), shapes)}
# arr_grad = {'arg_%d' % i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
#
# up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d' % i) for i in range(len(shapes))],
# sample_type='nearest', scale=root_scale)
# exe = up.bind(mx.cpu(), args=arr, args_grad=arr_grad)
# exe.forward(is_train=True)
# exe.backward(exe.outputs)
|
[
"xshiab@ust.hk"
] |
xshiab@ust.hk
|
fa3d11538f126d7e3f6428c997a19b45c6004eb3
|
a19778f85fe30cc50f125049d3ef8928e3bf9fde
|
/backend/elittech_20991/wsgi.py
|
ec7d72a67fd3b1d8aa1d88fdac4c76a94f36b9c3
|
[] |
no_license
|
crowdbotics-apps/elittech-20991
|
6665ccb7c26cc7c10c82c7d3346abbd322212d2b
|
75b5644a3ad9b1dca526c2b58beccaa50f1fd9a7
|
refs/heads/master
| 2022-12-24T14:52:47.574719
| 2020-10-02T21:52:36
| 2020-10-02T21:52:36
| 300,742,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for elittech_20991 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "elittech_20991.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
ee31da0617681fbaf31fb79f4d0c7cf013b9a1b5
|
2f82c81b5c6d1033d5d79a1f41a853c84669d4d4
|
/restaurant/restaurantApp/forms.py
|
968b983255c425b08e285a78393fbd7a0c0644ae
|
[] |
no_license
|
DarshanN1/UTSTuesdayG52021Aut
|
ba241c88126574cfdd39451468e19a2d68043966
|
345025e675f638e2d1b0a7419996e32c58d6e81a
|
refs/heads/main
| 2023-06-16T08:05:48.230774
| 2021-07-15T02:49:39
| 2021-07-15T02:49:39
| 341,414,511
| 0
| 0
| null | 2021-07-08T12:11:00
| 2021-02-23T03:22:03
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
from django.forms import ModelForm
from django.contrib.auth.models import User
from .models import StaffMember
from .models import Booking
from django.contrib.auth.forms import UserCreationForm
from datetimepicker.widgets import DateTimePicker
from django import forms
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = ['username', 'email', 'password1','password2']
class StaffForm(ModelForm):
phone = forms.CharField(max_length=10,min_length=10)
class Meta:
model = StaffMember
fields = ['first_name','last_name','phone','email']
class DateInput(forms.DateInput):
input_type = 'date'
class TimeInput(forms.TimeInput):
input_type = 'time'
class BookingForm(ModelForm):
phone = forms.CharField(max_length=10,min_length=10)
name = forms.CharField(max_length=100)
class Meta:
model = Booking
fields = ['booking_date','booking_time','number_of_guests','name','phone']
widgets = {'booking_date':DateInput(),'booking_time':TimeInput()}
def __init__(self, *args, **kwargs):
super(BookingForm,self).__init__(*args, **kwargs)
self.fields['number_of_guests'].widget.attrs.update({'min': 1, 'max':20})
|
[
"dulyacsm15@gmail.com"
] |
dulyacsm15@gmail.com
|
92ca1c3921ccec559cdcd2de490cee6594ab9e6b
|
431ff3094d36404793f9bbdec364e3f356fd259a
|
/env/bin/pip3
|
6c07383965d0228b315099e16950dcb07b37a0d3
|
[] |
no_license
|
Deaconjoe/djoePython
|
cd875e864a9c6d2a7d4f3a290fda58b24d15c681
|
99f4978954fb2e352841aac77e0f0407c25b5ff0
|
refs/heads/master
| 2020-08-06T09:09:57.954451
| 2019-11-15T19:50:01
| 2019-11-15T19:50:01
| 212,918,646
| 0
| 0
| null | 2019-11-15T20:10:03
| 2019-10-04T23:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 234
|
#!/home/yaw/flask-by-example/env/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
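    # setuptools console-script shim: strip a trailing "-script.py"/".exe" from argv[0]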
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"clouddjoe@gmail.com"
] |
clouddjoe@gmail.com
|
|
30868fa93642295347293682bd6a79606261a641
|
28383bd346f6e5f1c499796fc39f14ae83b119e7
|
/check_ckpt.py
|
c51cf30974934828e1423fcd2641c49872a87f90
|
[] |
no_license
|
Interactive-Context-Lab/Deeply-Supervised-Part-Feature
|
25adfe201a4a7ff8f79a2730063eb25e84396a1d
|
83245faea23c5984c83687cac3ec5b27a0b17882
|
refs/heads/master
| 2022-12-23T19:25:35.743628
| 2019-10-30T07:05:01
| 2019-10-30T07:05:01
| 215,671,821
| 1
| 0
| null | 2022-12-08T06:43:58
| 2019-10-17T00:49:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,071
|
py
|
import tensorflow as tf
# from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
#
# print_tensors_in_checkpoint_file("senetmodel/Inception_resnet_v2.ckpt", all_tensors=False,
# all_tensor_names=True, tensor_name="")
# #
# print_tensors_in_checkpoint_file("pretrain/all_baseline/model.ckpt-100", all_tensors=False,
# all_tensor_names=True, tensor_name="")
# print_tensors_in_checkpoint_file("pretrain/model.ckpt-60", all_tensors=True,
# all_tensor_names=True, tensor_name="stem")
from tensorflow.python import pywrap_tensorflow
import os
#
# # checkpoint_path = os.path.join(model_dir, "model.ckpt")
reader = pywrap_tensorflow.NewCheckpointReader('model/190608_164544/model.ckpt-77')
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
# if "stem" in key and "Adam" not in key:
print("tensor_name: ", key)
    # print(reader.get_tensor(key))  # uncomment to print the tensor values as well
|
[
"55469294+Interactive-Context-Lab@users.noreply.github.com"
] |
55469294+Interactive-Context-Lab@users.noreply.github.com
|
02f13775b3c78236ce2ab80545def64ab6c36542
|
c214ad7a04e581d81b42fec19d68f999169bfc33
|
/EX_EstruturaDeDecisao/ex15.py
|
f542a34e11a1b9fdd6434f759cd998e323eb20ee
|
[] |
no_license
|
leohck/python_exercises
|
34f06df60d0a40f50466f0b2da3e91ec7026a47b
|
85aafaa04ae395b3c722a7ad20aeb16de8c1eff4
|
refs/heads/master
| 2021-01-01T04:08:06.336740
| 2017-07-26T13:48:48
| 2017-07-26T13:48:48
| 97,128,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,349
|
py
|
__author__ = 'leohck'
print('>.....----- EX 15 ----.....< ')
"""
Write a program that asks for the 3 sides of a triangle.
The program must report whether the values can form a triangle.
If the sides do form a triangle, indicate whether it is: equilateral (equilátero),
isosceles (isósceles) or scalene (escaleno).
Hints:
Three sides form a triangle when the sum of any two sides is greater than the third;
Equilateral triangle: three equal sides;
Isosceles triangle: any two equal sides;
Scalene triangle: three different sides;
"""
def Triangulo(l1,l2,l3):
triangulo = ''
if l1 == l2 and l2 == l3:
triangulo = 'Equilátero'
elif l1 == l2 or l2 == l3 or l1 == l3:
triangulo = 'Isósceles'
elif l1 != l2 and l2 != l3:
triangulo = 'Escaleno'
return triangulo
is_triangulo = False
while True:
lt1 = int(input('digite o valor do primeiro lado do triangulo: '))
lt2 = int(input('digite o valor do segundo lado do triangulo: '))
lt3 = int(input('digite o valor do terceiro lado do triangulo: '))
    if lt1 + lt2 > lt3 and lt1 + lt3 > lt2 and lt2 + lt3 > lt1:  # every pair of sides must sum to more than the third
        is_triangulo = True
    if is_triangulo:
triangulo = Triangulo(lt1, lt2, lt3)
break
else:
print('os lados nao criam um triangulo')
print('Triangulo',triangulo)
|
[
"noreply@github.com"
] |
leohck.noreply@github.com
|
ca4bf6c6f26579e696c768fafa5f991e395020a4
|
7fb469e93ff89b1c697d5a53a39188127e50d272
|
/handler_analysis_mem.py
|
d8350f51c122249c5e1ee4d64b224614e132e714
|
[] |
no_license
|
seekplum/seekplum
|
fde98f93145a78fc030032a4499090583aba154a
|
9e66f5e62214e566528003d434ef2b74877419fd
|
refs/heads/master
| 2023-02-13T19:00:49.866130
| 2023-01-31T08:55:19
| 2023-02-02T04:33:45
| 182,075,292
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,178
|
py
|
# -*- coding: utf-8 -*-
import json
import time
import logging
import gc
import random
import commands
import traceback
import objgraph
from schema import SchemaError
from tornado.options import options
from tornado.web import Finish, HTTPError
from tornado.web import RequestHandler
logger = logging.getLogger(__name__)
class BaseRequestHandler(RequestHandler):
"""Base RequestHandler"""
def on_finish(self):
self.do_finish()
def do_finish(self, *args, **kwargs):
pass
def get_url_data(self):
"""
返回get请求的数据
获取query_arguments,同一key有重复值时只取值列表最后一个
"""
return {key: value[-1] for key, value in self.request.query_arguments.iteritems()}
def get_body_data(self, name=None):
"""
        For POST requests, parse and return the JSON body.
"""
try:
if name:
data = json.loads(self.get_body_argument(name))
else:
                # `strict=False` allows control characters to appear inside values
# reference: https://docs.python.org/2/library/json.html
data = json.loads(self.request.body, strict=False)
return data
except ValueError as e:
logger.exception(e)
self.error_response(error_code=1,
message="get json in body error: {}".format(e.message))
def get_data(self, schema=None):
"""
        Validate request data for post/get/put/delete handlers.
        `schema` is an instance of Schema (from `from schema import Schema`).
func_type: get, post, put, delete
"""
stack = traceback.extract_stack()
func_type = stack[-2][2]
if func_type in ["post", "put", "delete"]:
data = self.get_body_data()
elif func_type == "get":
data = self.get_url_data()
else:
raise Exception("unsurported function type: {}".format(func_type))
try:
if schema:
data = schema.validate(data)
return data
except SchemaError as e:
logger.exception(e)
self.error_response(error_code=2, message=e.message)
def write_json(self, data):
real_ip = self.request.headers.get('X-Real-Ip', "unknown")
logger.info(
"method: {}, uri: {}, real ip: {}, remote ip: {}, start time: {}, finish_time: {}, error_code: {}".format(
self.request.method, self.request.uri, real_ip, self.request.remote_ip, self.request._start_time,
time.time(), data["error_code"]))
self.set_header("Content-Type", "application/json")
if options.debug:
self.write(json.dumps(data, indent=2))
else:
self.write(json.dumps(data))
def success_response(self, data=None, message="", finish=True):
response = {
"error_code": 0,
"message": message,
"data": data
}
self.write_json(response)
if finish:
raise Finish
def error_response(self, error_code, message="", data=None, status_code=202, finish=True):
self.set_status(status_code)
response = {
"error_code": error_code,
"data": data,
"message": message,
}
self.write_json(response)
if finish:
raise Finish
def options(self, *args, **kwargs):
"""
        Allow cross-origin OPTIONS preflight requests from the frontend without errors.
"""
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Methods",
"POST, GET, PUT, DELETE, OPTIONS")
self.set_header("Access-Control-Max-Age", 1000)
self.set_header("Access-Control-Allow-Headers",
"CONTENT-TYPE, Access-Control-Allow-Origin, cache-control, Cache-Control, x-access-token")
self.set_header("Access-Control-Expose-Headers", "X-Resource-Count")
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Methods",
"POST, GET, PUT, DELETE, OPTIONS")
self.set_header("Access-Control-Max-Age", 1000)
self.set_header("Access-Control-Allow-Headers",
"CONTENT-TYPE, Access-Control-Allow-Origin, cache-control, Cache-Control, x-access-token")
self.set_header("Access-Control-Expose-Headers", "X-Resource-Count")
def write_error(self, status_code, **kwargs):
"""Custrom error response
"""
http_status_code = status_code
http_reason = self._reason
if "exc_info" in kwargs:
exception = kwargs["exc_info"][1]
if isinstance(exception, Exception) and not isinstance(exception, HTTPError):
code = hasattr(exception, "code")
if code:
http_status_code = exception.code
else:
http_status_code = 1
http_reason = exception.message
if isinstance(exception, HTTPError):
finish = False
else:
finish = True
else:
finish = True
self.set_status(200)
self.error_response(http_status_code, http_reason, finish=finish)
class AnalysisHandler(BaseRequestHandler):
def show_leak_increase(self):
        objgraph.show_most_common_types()  # prints its table directly and returns None
print "===" * 20
objgraph.show_growth()
def show_chain(self, obj_type):
# obj_type: Myobj_Type, type:string
ref_chain = objgraph.find_backref_chain(
random.choice(objgraph.by_type(obj_type)),
objgraph.is_proper_module,
max_depth=5)
objgraph.show_chain(ref_chain, filename='chain.dot')
cmd = "dot -Tpng chain.dot -o chain.png"
commands.getstatusoutput(cmd)
def show_leak_obj(self):
root = objgraph.get_leaking_objects()
logging.error("leak object: {}".format(len(root)))
import pdb;
pdb.set_trace()
# objgraph.show_refs(root[:3], refcounts=True, filename='/tmp/leak.dot')
# # yum install graphviz
# cmd = "dot -Tpng /tmp/leak.dot -o /tmp/leak.png"
# logging.error("result picture is: /tmp/leak.png")
# commands.getstatusoutput(cmd)
def gc(self):
        # set GC debug flags so unreachable-but-uncollectable objects are kept in gc.garbage
        gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_SAVEALL)
        # collect_garbage = gc.collect()
        # logging.error("unreachable: {}".format(collect_garbage))
        # gc.garbage is a list of objects that the collector found unreachable (i.e.
        # garbage) but could not free (uncollectable). Python does not know in what
        # order to call their __del__ methods, so such objects stay alive in
        # gc.garbage forever, causing a memory leak.
garbage = gc.garbage
logging.error("uncollectabel: {}".format(len(garbage)))
mash = 0
sql = 0
with open("/tmp/garbage.txt", "w") as f:
for g in garbage:
line = "{}".format(garbage)
# print linem
if "marshmallow" in line:
mash += 1
if "Column" in line:
sql += 1
f.write("{}\n\n".format(line[0:150]))
logging.error("total: {}".format(len(garbage)))
logging.error("mash: {}".format(mash))
logging.error("sql: {}".format(sql))
import pdb
pdb.set_trace()
with open("/tmp/garbage.txt", "a") as f:
f.write("total: {}, mash: {}, sql: {}".format(len(garbage), mash, sql))
del garbage
# gc.set_threshold(700, 10, 5)
logging.error("threshold: {}".format(gc.get_threshold()))
def leak(self):
self.gc()
self.show_leak_increase()
# self.show_leak_obj()
def get(self):
key = self.get_argument("key")
if key == "leak":
self.leak()
self.success_response("OK")
|
[
"1131909224@qq.com"
] |
1131909224@qq.com
|
3e991bfa498e16d0e475e2e791c91b0429f81aee
|
7b197a7202325764578c4b75d40620953619656f
|
/p4u/2_dz/seo_marks.py
|
d31ee2a4a5fee52f4be39683e16f777f82ccf4db
|
[] |
no_license
|
alekswatts/python
|
c2988eada23a5d110ffffc2e5c0223c2a6ad01ce
|
4181d5261b22bb85a81a3c559bf506f3a56b37b7
|
refs/heads/master
| 2021-11-25T17:26:41.225469
| 2021-11-12T13:49:45
| 2021-11-12T13:49:45
| 220,013,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,466
|
py
|
from pprint import pprint
from requests_html import HTMLSession
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
#
# url = input("Enter url: ")
# keyword = input("Enter keyword: ")
def validate(url):
    val = URLValidator()
    while True:
        try:
            val(url)  # raises ValidationError when the URL is malformed
            return url
        except ValidationError:
            url = input("Re-Enter url: ")
url = validate(input("Enter url: "))
keyword = input("Enter keyword: ")
with HTMLSession() as session:
resp = session.get(url)
title = resp.html.xpath('//title')[0].text
description = resp.html.xpath('//meta [@name="description"]/@content')
h1 = resp.html.xpath('//h1/text()')
h2 = resp.html.xpath('//h2/text()')
print('*' * 20, 'title', '*' * 20)
pprint(title)
print('*' * 20, 'description', '*' * 20)
pprint(description)
print('*' * 20, 'H1', '*' * 20)
pprint(h1)
print('*' * 50)
title_words = len(title.split())
title_symbols = len(title)
desc_list = ''.join(description)
desc_symbols = len(desc_list)
h1_number = len(h1)
h1_list = ''.join(h1)
h1_symbols = len(h1_list)
title_value = 60
title_mark = 30
key_title_mark = 5
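# grade formula: symbols * max_mark / ideal_length, plus a 5-point bonus when the
# keyword appears in the element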
title_grade = title_symbols * title_mark / title_value
if keyword in title:
final_title_grade = key_title_mark + title_grade
print("Title grade: " + str(round(final_title_grade)))
else:
print("Title grade: " + str(round(title_grade)))
desc_value = 160
desc_mark = 30
key_desc_mark = 5
desc_grade = desc_symbols * desc_mark / desc_value
if keyword in desc_list:
final_desc_grade = key_desc_mark + desc_grade
print("Description grade: " + str(round(final_desc_grade)))
else:
print("Description grade: " + str(round(desc_grade)))
h1_value = 70
h1_mark = 30
key_h1_mark = 5
first_h1 = h1[0]
first_len_h1 = len(first_h1)
h1_grade = first_len_h1 * h1_mark / h1_value
if h1_number > 1:
    print("Too many headlines. Graded the first one! Total count: " + str(h1_number))
if keyword in h1_list:
    final_h1_grade = key_h1_mark + h1_grade
    print("H1 grade: " + str(round(final_h1_grade)))
else:
    print("H1 grade: " + str(round(h1_grade)))
# h2 scoring is still a work in progress
|
[
"aleksmytiuk@gmail.com"
] |
aleksmytiuk@gmail.com
|
3bf9eff42eb7c3295f1de0066362595cbdbf6457
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/detailed/transf_None/model_control_one_enabled_None_Lag1Trend_Seasonal_WeekOfYear_AR.py
|
8afbe3ca79cc51f9a7c11d22afe462e2971627a4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['Lag1Trend'] , ['Seasonal_WeekOfYear'] , ['AR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
da070c94835ae1212a8890d1c38aa48ab2b71766
|
1cf527dc684a3bd45d08bf51bba58741355ae08c
|
/SafeZone/forms.py
|
c750c90f77ee660c24d9d6d2d8dd7735f6a791f5
|
[] |
no_license
|
AboveColin/SafeZone
|
e163dea4bea6af617d29d0fa837dc7b07cd2b86e
|
6691ae6bf1f690b5e9ff3d0a900a656568e37e88
|
refs/heads/main
| 2023-04-30T20:08:33.422760
| 2021-05-20T12:28:38
| 2021-05-20T12:28:38
| 346,729,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 785
|
py
|
from flask_wtf import FlaskForm
from wtforms.validators import data_required, Email, EqualTo
from wtforms import ValidationError, StringField, PasswordField, SubmitField, SelectField
class Loginform(FlaskForm):
username = StringField("Gebruikersnaam", validators=[data_required()])
password = PasswordField("Wachtwoord", validators=[data_required()])
submit = SubmitField("Inloggen")
class Registratie(FlaskForm):
username = StringField("Gebruikersnaam", validators=[data_required()])
password = PasswordField("Wachtwoord", validators=[data_required(), EqualTo("pass_confirm", message="Wachtwoorden komen niet overeen")])
pass_confirm = PasswordField("Bevestig wachtwoord", validators=[data_required()])
submit = SubmitField("Registreer")
|
[
"noreply@github.com"
] |
AboveColin.noreply@github.com
|
1366d05fccf6f662736817f516976c6e7e0a26a2
|
05cfbd1db4236507a2668d361b8027ae6d8f8f57
|
/363.矩形区域不超过K的最大数值和.py
|
4ff8c2f79bb35180aa5e14a0edccf6304e33a767
|
[] |
no_license
|
ChaosNyaruko/leetcode_cn
|
f9f27da6a1e2b078c5cd23df8d46c7bc794d8487
|
1233137cfb1196019e8d95407c2b8f18b6d6d2f8
|
refs/heads/master
| 2023-08-15T14:17:41.697164
| 2021-09-23T16:21:44
| 2021-09-23T16:21:44
| 302,867,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
from typing import List

from sortedcontainers import SortedList
class Solution:
def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:
m, n = len(matrix), len(matrix[0])
ans = float("-inf")
for i in range(m):
sum_ = [0] * n
for j in range(i, m):
for c in range(n):
sum_[c] += matrix[j][c]
set_ = SortedList([0])
sr = 0
for v in sum_:
sr += v
min_sl = set_.bisect_left(sr - k)
if min_sl != len(set_):
ans = max(ans, sr - set_[min_sl])
set_.add(sr)
return ans
|
[
"cabbageparadise@gmail.com"
] |
cabbageparadise@gmail.com
|
0b627bf66430243c5f7c50a2c931b1e3052bcdf5
|
bda3d629a37df424d736af0a19f5b991364a5048
|
/Database Systems/project 2 - implementation of queries using python GUI/codefiles/3.3.py
|
79f1718dfdee95621de8784aea499ed4e26adaf1
|
[] |
no_license
|
YashwanthThota/Academic-Projects
|
94dc194b3322e4c54175c7d8a7977128e96f420b
|
03acf561a1b59f2b1b2c781b89713d448fdf91fe
|
refs/heads/master
| 2018-12-20T19:13:15.995339
| 2018-09-18T03:59:51
| 2018-09-18T03:59:51
| 109,457,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
#Server Connection to MySQL:
import mysql.connector
cnx = mysql.connector.connect(user='root', password='yashwanth',
host='127.0.0.1',port='8080',database='final')
cursor = cnx.cursor()
print("please enter the table details \n")
print("\n")
print("enter the ownerid number: ")
ownerid= input()
print("\n")
print("enter the tablename: ")
tablename= input()
print("\n")
try:
cursor.execute("""INSERT INTO tables VALUES (%s,%s)""",(ownerid,tablename))
cnx.commit()
print("successfully entered the values")
except mysql.connector.Error as err:
    cnx.rollback()
    print("error:", err)
cnx.close()
|
[
"noreply@github.com"
] |
YashwanthThota.noreply@github.com
|
f9342a84ad9076d61d854bc3e87604340d36fb40
|
a01d7e10b70042140c401c5beb83663d958f1e69
|
/AutoRCCar-master/raspberryPi/stream_client.py
|
1cff6482c79e706109110ece4bdc8dc2c46b0e58
|
[
"BSD-2-Clause"
] |
permissive
|
remeoj/Navigational-System-For-Self-Driving-Cars
|
ac88edf119ab01abb759f06ed58d6944f20743e0
|
5aeae3f325176b21734ae18e73294a4b275852ee
|
refs/heads/master
| 2022-01-18T22:12:49.496805
| 2019-04-25T23:53:31
| 2019-04-25T23:53:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
import io
import socket
import struct
import time
import picamera
# create socket and connect to the server
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('192.168.1.105', 65432))
connection = client_socket.makefile('wb')
try:
with picamera.PiCamera() as camera:
camera.resolution = (320, 240) # pi camera resolution
camera.framerate = 15 # 15 frames/sec
        time.sleep(2)        # give 2 secs for camera to initialize
start = time.time()
stream = io.BytesIO()
# send jpeg format video stream
for foo in camera.capture_continuous(stream, 'jpeg', use_video_port = True):
connection.write(struct.pack('<L', stream.tell()))
connection.flush()
stream.seek(0)
connection.write(stream.read())
if time.time() - start > 600:
break
stream.seek(0)
stream.truncate()
connection.write(struct.pack('<L', 0))
finally:
connection.close()
client_socket.close()
|
[
"34356209+JoemerRamos@users.noreply.github.com"
] |
34356209+JoemerRamos@users.noreply.github.com
|
62883f2fe8ea1f9b035e6c12dadf2c9bb658310f
|
b505998fea8f748638da5af2ed928f0080e73b3a
|
/30_parseBLAST_biopython.py
|
16c2d856b9f0e98bc906741fb655a76095fbef01
|
[] |
no_license
|
wjidea/pythonExercise
|
eced655f9da1c42e14c8894fc7f0ea54651b2715
|
4fbc5eb7520faf8521e51ce9cab7c3a4b05c5120
|
refs/heads/master
| 2020-07-06T07:38:41.777452
| 2016-09-09T18:24:59
| 2016-09-09T18:24:59
| 67,801,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,385
|
py
|
#! /usr/bin/python
# 30_parseBLAST_biopython.py
# BLAST Populus transcripts against Arabidopsis transcripts and extract e-values
# from the BLAST results (plain text) using Biopython SearchIO.
# requirements for this script
# Write the output as Tab-delimited file, which includes:
# 1) Name of the blast database qResult.target
# 2) Name of the Query sequence qResult.id
# 3) Length of the Query sequence qResult.seq_len
# 4) name of the best hit sequence hit.
# 5) length of the best hit sequence
# 6) the number of hsps for the best hit
# 7) the score of the alignment with the best hit
# if two or more equally best hits, data from both should be written to the file
# write a list of this title line and join them with tab
import argparse
import os.path
import sys
from Bio import SearchIO
parser = argparse.ArgumentParser(description = 'parse BLAST output and write '
'to tab-delimited file')
parser.add_argument('-i', '--input', help='input BLAST file in default format',
required = True)
parser.add_argument('-o', '--output', help = 'output file in text format',
required = True)
parser.add_argument('-e', '--evalue', help = 'cutoff evalue threshold',
type = float, default = 1e-10)
parser.add_argument('-v', '--verbose', help = 'increase verbosity',
action = 'store_true')
args = parser.parse_args()
# check that the input file exists
inputFile = args.input
outputFile = args.output
evalueThreshold = float(args.evalue)
filesL = [inputFile]
if args.verbose:
# check file path one at a time and tell which one is missing
for inputFile in filesL:
if not os.path.isfile(inputFile):
print('Input file {} does not exist!'.format(inputFile))
sys.exit()
else:
# check file path in a bulk, but may not be the optimum case
if not all(map(os.path.isfile, filesL)):
print('Missing input file(s)!')
sys.exit()
# check if output file is present
if os.path.isfile(outputFile):
print('output file {0} already exists'.format(outputFile))
sys.exit()
blastParse = SearchIO.parse(inputFile, 'blast-text')
dbInfo = next(blastParse)
# hitInfo = dbInfo[1]
# hspInfo = hitInfo[0]
headerL = ['Target_database', 'Query_ID', 'Query_length', 'Best_hit_ID',
'Best_hit_length', 'Hsp_num', 'Hsp_score']
targetDB = dbInfo.target
FILE_OUT = open(outputFile, 'w')
# write header line
FILE_OUT.write('\t'.join(headerL) + '\n')
# write data
for qResult in blastParse:
highestScore = qResult[0][0].bitscore
for hit in qResult:
for hsp in hit:
if hsp.bitscore >= highestScore:
dataL = []
dataL.append(targetDB) # Target_database
dataL.append(hsp.query_id) # Query_ID
dataL.append(qResult.seq_len) # Query_length
dataL.append(hsp.hit_id) # Best_hit_ID
dataL.append(hit.seq_len) # Best_hit_length
dataL.append(len(hit)) # Hsp_num
dataL.append(hsp.bitscore) # Hsp_score
# .join is not method for type int
# FILE_OUT.write('\t'.join(str(x) for x in dataL) + '\n')
FILE_OUT.write('\t'.join(map(str, dataL)) + '\n')
else:
break
# print(counter)
FILE_OUT.close()
|
[
"wangj@Admins-MacBook-Air-2.local"
] |
wangj@Admins-MacBook-Air-2.local
|
e8df7529813c851e67d12faea01d688a29bda68e
|
a3fddbf8d953bce9b84173c1ba48780e849f86ef
|
/peaks_test.py
|
da212c465a697196c6fa1b6d15eca124ea6c4d5c
|
[] |
no_license
|
rosswhitfield/wand
|
79f99bef519ed9c334fddcb5396ab66d56f2903e
|
562b1f89acb46749e220081117e2cbda2014df36
|
refs/heads/master
| 2021-06-02T05:38:00.741277
| 2021-04-14T13:19:18
| 2021-04-14T13:19:18
| 97,755,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
# import mantid algorithms, numpy and matplotlib
from mantid.simpleapi import *
import matplotlib.pyplot as plt
import numpy as np
ws=CreateSampleWorkspace()
peaks=CreatePeaksWorkspace(ws, 0)
SetUB(peaks, a=5, b=5, c=5)
peaks2=CreatePeaksWorkspace()
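# add one peak for each corner HKL of the unit cell, skipping (0,0,0) and (1,1,1)
# (the i==j==k cases filtered out below)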
for vector in [[i,j,k] for i in (0,1) for j in (0,1) for k in (0,1) if not (i==j==k)]:
p = peaks.createPeakHKL(vector)
peaks2.addPeak(p)
|
[
"whitfieldre@ornl.gov"
] |
whitfieldre@ornl.gov
|
0c88eaa0c9cf00e19e24b4dd48263ee4d5a04664
|
c83d51af8d8036084aa96f4d8d3963b592e7b0d3
|
/bayesian_config.py
|
dd43e5655353f3f63b4614531bcf627d98dd4401
|
[
"MIT"
] |
permissive
|
SulemanKhurram/ThesisExperiments
|
4f40e52afba9eeabd02647273da41da45895cf65
|
4fdf7b6558c87a096dcdc374c35085ac946d3a58
|
refs/heads/master
| 2021-06-26T00:37:00.472747
| 2021-06-20T19:13:13
| 2021-06-20T19:13:13
| 160,214,643
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
############### Configuration file ###############
import math
start_epoch = 1
num_epochs = 60
batch_size = 8
optim_type = 'Adam'
lr = 0.00001
weight_decay = 0.0005
num_samples = 25
beta_type = "Blundell"
mean = {
'cifar10': (0.4914, 0.4822, 0.4465),
'cifar100': (0.5071, 0.4867, 0.4408),
'mnist': (0.1307,),
'stl10': (0.485, 0.456, 0.406),
'origa': (0.5, 0.5, 0.5),
}
std = {
'cifar10': (0.2023, 0.1994, 0.2010),
'cifar100': (0.2675, 0.2565, 0.2761),
'mnist': (0.3081,),
'stl10': (0.229, 0.224, 0.225),
'origa': (0.5, 0.5, 0.5),
}
# Only for cifar-10
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def learning_rate(init, epoch):
    optim_factor = 0
    if(epoch > 160):
        optim_factor = 3
    elif(epoch > 120):
        optim_factor = 2
    elif(epoch > 60):
        optim_factor = 1
    # assumed step decay: scale the initial lr by 0.2 for each milestone passed
    return init * math.pow(0.2, optim_factor)
def dynamic_lr(init, epoch):
optim_factor = 1
if (epoch > 60):
optim_factor = 500
elif (epoch > 30):
optim_factor = 100
elif (epoch > 10):
optim_factor = 10
return init/optim_factor
def get_hms(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return h, m, s
|
[
"khurram@glasgow.kl.dfki.de"
] |
khurram@glasgow.kl.dfki.de
|
4b1a27172693270f1558822c59edcadc3229bbe7
|
45e46c8e850c98cd0ff7ec4cb7f040ead42355ae
|
/pilot/eventservice/droid.py
|
ca4f242ba68dcb7088691d2df485bfd6c1920fac
|
[
"Apache-2.0"
] |
permissive
|
mlassnig/pilot2
|
c76a24f0974b187d0514789439f0f1273fe06146
|
029e013dec888eecadb34c485f8d7498fccd000f
|
refs/heads/master
| 2022-09-02T05:26:01.292402
| 2021-11-08T13:45:46
| 2021-11-08T13:45:46
| 79,211,086
| 0
| 2
|
Apache-2.0
| 2022-07-27T11:47:21
| 2017-01-17T09:29:02
|
Python
|
UTF-8
|
Python
| false
| false
| 431
|
py
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, paul.nilsson@cern.ch, 2018
import logging
logger = logging.getLogger(__name__)
def run():
"""
Dummy run function.
:return:
"""
pass
|
[
"paul.nilsson@cern.ch"
] |
paul.nilsson@cern.ch
|
85c85da8a51b5e8013989fa9849dba4ffec1fb96
|
a90b05363448c303f4b8e47778eaa46388bc7592
|
/python/firstAndLastPositions.py
|
e31d4e84bda93ef64b69398318370237e039ecda
|
[] |
no_license
|
mongoosePK/leetcode
|
4363dd5c6fe97f6105d2ea1b7b57349dcf069eaf
|
6a7bd5db56c9c4c323ba58990e84d0ed891a8523
|
refs/heads/main
| 2023-09-01T17:11:39.558196
| 2021-10-30T16:19:21
| 2021-10-30T16:19:21
| 361,819,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
# firstAndLastPositions.py
# william pulkownik
# Given an array of integers nums sorted in ascending order,
# find the starting and ending position of a given target value.
# If target is not found in the array, return [-1, -1].
# Follow up: Could you write an algorithm with O(log n) runtime complexity?
from bisect import bisect_left, bisect
class Solution:
def searchRange(self, nums: list[int], target: int) -> list[int]:
#create a list variable to store the values
locations = [-1,-1]
leftTarget = bisect_left(nums, target)
if leftTarget == len(nums) or nums[leftTarget] != target:
return locations
return [leftTarget, bisect(nums, target)-1]
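# Quick check (hypothetical driver, not part of the original solution):
if __name__ == "__main__":
    print(Solution().searchRange([5, 7, 7, 8, 8, 10], 8))  # expected [3, 4]
    print(Solution().searchRange([5, 7, 7, 8, 8, 10], 6))  # expected [-1, -1]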
|
[
"30559692+mongoosePK@users.noreply.github.com"
] |
30559692+mongoosePK@users.noreply.github.com
|
2f0eac367ea2c16758bffafdcd0645073326ee05
|
25830ae6ebb4a9f0317fe4a15a0fc04ca284db95
|
/my_face_recognition/biometric_functions.py
|
b104fec7432a1a52ceb2177551e1171b849483a1
|
[] |
no_license
|
IradNuriel/Final-Project
|
3252bf7b10ba6284e337f113f7952ac745fbe4f2
|
4bbc72e3bb580f232b52ef053ef59c2c790385c1
|
refs/heads/master
| 2023-07-30T18:01:28.556570
| 2021-09-16T07:24:07
| 2021-09-16T07:24:07
| 396,351,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
import gc
import os
import pickle
from numpy import expand_dims
from keras.preprocessing import image
from keras_vggface.utils import preprocess_input
from homomorphic_encryption_stuff.EncryptionStuff import hamming_dist, save_encrypted_binary_vector_in_memory, load_encrypted_binary_vector_from_memory
from homomorphic_encryption_stuff.getEncryptionInfo import *
from dataset.yaleFaceBDataSet import YaleFaceBDataSet
from my_face_recognition.model.vggModel import VggModel
from util import constant
from face_detection.FaceAlignment import *
def initialize_model_params(weights_path):
dataSet = YaleFaceBDataSet(constant.YALE_FACE_B_DATA_PATH, ['pgm'], 28)
model = VggModel(dataSet, weights_path)
features_model = model.get_feature_model()
enrolled_users = get_all_enrolled_users()
return features_model, enrolled_users
def __process_img(img):
img = image.smart_resize(img, size=(constant.IMG_WIDTH, constant.IMG_HEIGHT))
img = image.img_to_array(img)
img = preprocess_input(expand_dims(img.astype(np.float64), axis=0), version=2)
return img
def predict(m, img):
pixels = __process_img(img)
preds = m.predict(pixels)
return preds[0]
def get_all_enrolled_users():
users_names = os.listdir(constant.CIPHERS_PATH)
users_names = [user_name.replace("cipher_", "") for user_name in users_names]
all_users = {}
for username in users_names:
users_features = load_encrypted_binary_vector_from_memory(context, username, constant.CIPHERS_PATH)
all_users.update({username: users_features})
return all_users
def who_is_this(features, encrypted):
if not encrypted:
features = np.where(features > 0, 1, 0)
features = list(features) + [0] * (encoder.slot_count() - len(features))
features = encryptor.encrypt(encoder.encode(features))
closest = "non-enrolled user"
closest_val = 5523
for (user, user_features) in users.items():
hd = hamming_dist(features, user_features, evaluator, relin_keys, galois_keys, context)
hd = int(decryptor.decrypt(hd).to_string(), 16)
if hd < closest_val:
closest = user
closest_val = hd
return closest
def enroll(img, name):
features = predict(feature_model, img)
features = np.where(features > 0, 1, 0)
features = list(features) + [0] * (encoder.slot_count() - len(features))
encrypted_features = encryptor.encrypt(encoder.encode(features))
del features
gc.collect()
if name in users.keys():
this_is = who_is_this(encrypted_features, encrypted=True)
if this_is == name:
print("Already enrolled!\nNot enrolling this time.")
return
save_encrypted_binary_vector_in_memory(encrypted_features, name, constant.CIPHERS_PATH)
users.update({name: encrypted_features})
return
def who_is_this_face(img):
features = predict(feature_model, img)
features = np.where(features > 0, 1, 0)
features = list(features) + [0] * (encoder.slot_count() - len(features))
encrypted_features = encryptor.encrypt(encoder.encode(features))
del features
gc.collect()
this_is = who_is_this(encrypted_features, encrypted=True)
return this_is.replace("_", " ")
feature_model, users = initialize_model_params(constant.WEIGHTS_PATH)
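# Plaintext analogue of the encrypted matching above (illustration only; the real
# pipeline compares SEAL ciphertexts via hamming_dist):
#
#   a = np.where(np.array([0.3, -0.1, 0.7]) > 0, 1, 0)  # -> [1, 0, 1]
#   b = np.where(np.array([0.2, 0.4, -0.5]) > 0, 1, 0)  # -> [1, 1, 0]
#   plain_hd = int(np.sum(a ^ b))                       # -> 2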
|
[
"irad9731@gmail.com"
] |
irad9731@gmail.com
|
025459fc2c308414b00fd045d3cb2a9f3f304fac
|
609d5408f302c9188b723998762c2c1f7b883af9
|
/.closet/jython.configurator.efr32/1.0.0.201606231656-435/rail_scripts/pyrmsvd/Package/pyrmsvd/rm/__init__.py
|
6e14bc03c0cc968e005f33473d8dd8a50ada060e
|
[] |
no_license
|
acvilla/Sundial-Beta
|
6ea4fd44cbf7c2df8100128aff5c39b6faf24a82
|
9f84e3b5a1397998dfea5287949fa5b1f4c209a6
|
refs/heads/master
| 2021-01-15T15:36:19.394640
| 2016-08-31T20:15:16
| 2016-08-31T20:15:16
| 63,294,451
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
from regmap import *
from regmap_io import *
|
[
"acvilla@bu.edu"
] |
acvilla@bu.edu
|
eca09c73403d1c2de48e6499cd1a6553e7f8a47c
|
8adf4c0c58cca870dc9e2e3fe790fb29634d00e9
|
/source/main/settings.py
|
b3b7264698f8d3a438953a002691c551965b3290
|
[] |
no_license
|
yuburov/react_python_testing_project
|
bcecc58f97dbcdb7938cd3cda845c2094cb3237e
|
e44fc9ad4d10d8763ab094a43e2f2ec85335d393
|
refs/heads/main
| 2023-04-01T16:05:57.489968
| 2021-04-27T05:15:59
| 2021-04-27T05:15:59
| 361,722,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,690
|
py
|
"""
Django settings for main project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-zgnoar01)dl4n58c8ha-cf096g*@-d@h_p%l$x6p&*5u*@jea)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'phonenumber_field',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'webapp',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'webapp-ui/build'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR / 'static'
STATICFILES_DIRS = (
    (BASE_DIR / 'webapp-ui/build/static'),
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
)
}
CORS_ORIGIN_ALLOW_ALL = True
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"yubur.aziz@gmail.com"
] |
yubur.aziz@gmail.com
|
b4ae0449e6a250c2523aa5b98718aa43082d944d
|
e85d8e1fa15cc28fe799820ef69a962822a03b1d
|
/app/__init__.py
|
d9fddb6291ac1ed1dbe6a9ac6a5f2f0d672fdf34
|
[] |
no_license
|
tolyandrov/funnyrecipes
|
5542ee89ff018e50e78001daf64799c1e2191fb9
|
81afa12b60838e9c4c9a4beb473e1965b8ca6602
|
refs/heads/master
| 2021-01-10T02:39:09.238179
| 2015-10-05T21:40:00
| 2015-10-05T21:40:00
| 43,608,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
from flask import Flask
# flask.ext.* was removed in Flask 1.0; import the extensions directly
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
from app import models, views
from app.models import User
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
|
[
"seeknowells@gmail.com"
] |
seeknowells@gmail.com
|
19b2867570f47add0d1c87112fd84c9703e937d8
|
590ff12865f0ce14594085ab7171da4d39a4a2ea
|
/polls/models.py
|
cf462f70c805fc6957e055b36cfdd609e3617a36
|
[] |
no_license
|
evan-woodworth/django_practice
|
905b5cc136c0e347f75b16e19b0741b94366ddc0
|
38251de2f1a9fc2517ff167f51879746c7607925
|
refs/heads/main
| 2023-06-05T07:15:17.281380
| 2021-06-25T03:54:46
| 2021-06-25T03:54:46
| 379,729,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
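# Example shell session (mirrors the Django tutorial these models come from):
#   >>> q = Question(question_text="What's new?", pub_date=timezone.now())
#   >>> q.was_published_recently()
#   True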
|
[
"evan.woodworth@gmail.com"
] |
evan.woodworth@gmail.com
|
a77a3f6e0a767251186344b21dbefd10dbf2193c
|
b86555764a4cde22a6dbd8cf368bc57ed96efb6c
|
/helloword/venv/Scripts/easy_install-3.8-script.py
|
7d71f7db89bbc45b1abf4e12cd5ee43978ef818f
|
[] |
no_license
|
mbebiano/exercicios-python
|
5c089e8821a672b1634554f9e1edca143d070675
|
a349d0f5c7c31f13e9fcc02a2cfdfa7b6742b54a
|
refs/heads/master
| 2021-05-24T14:56:45.922418
| 2020-06-02T01:49:26
| 2020-06-02T01:49:26
| 253,615,190
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
#!C:\Users\matth\PycharmProjects\helloword\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
|
[
"mattheus.bebiano@engenharia.ufjf.br"
] |
mattheus.bebiano@engenharia.ufjf.br
|
b5f8d0670cd215a6de141c410567a75dad38f0bd
|
62faf5b8988430190c7579769427ced27db5df13
|
/handlers/input_handler.py
|
8a68a0572ae1e4e4b1a1c1969e843d3c8dc0b0b4
|
[
"MIT"
] |
permissive
|
pyladieshamburg/games_in_python
|
06783b62962aad9c2991bc56c7e44e4a9289c0ed
|
49a84ceaa3b0a3f322b02a8fccd44560e321e821
|
refs/heads/master
| 2020-04-10T16:17:44.602465
| 2019-01-02T08:46:16
| 2019-01-02T08:46:16
| 161,140,616
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
""" input handlers: movement and others """
from pygame.locals import *
def keydown(event, game_obj1, game_obj2):
"""
Change the velocity of a game obj by pressing up or down arrows
:param event: game event loop
:param game_objX: gam object with velocity attribute
:return: None
"""
if event.key == K_UP:
game_obj1.velocity = -8
elif event.key == K_DOWN:
game_obj1.velocity = 8
elif event.key == K_w:
game_obj2.velocity = -8
elif event.key == K_s:
game_obj2.velocity = 8
def keyup(event, game_obj1, game_obj2):
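    """Stop the corresponding object's movement when its key is released."""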
if event.key in (K_w, K_s):
game_obj2.velocity = 0
elif event.key in (K_UP, K_DOWN):
game_obj1.velocity = 0
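# Sketch of wiring these handlers into an event loop (hypothetical paddle objects;
# anything with a mutable `velocity` attribute works):
#
#   for event in pygame.event.get():
#       if event.type == KEYDOWN:
#           keydown(event, paddle1, paddle2)
#       elif event.type == KEYUP:
#           keyup(event, paddle1, paddle2)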
|
[
"alisa.dammer@gmail.com"
] |
alisa.dammer@gmail.com
|
6825d2e0fce31fe005e6ecf9ee418bd77551d70a
|
a4e1093cd868cc16cf909b8f7b84832a823a97bf
|
/explore_vae.py
|
31e21694dd55f3ae5a68f22995167a4007d8e602
|
[] |
no_license
|
hermanprawiro/gan-playground
|
8fb7eed54314661d9d1b908fe2cb1695eb1e3881
|
bf4c270ad4696d61df0dbe2afb8c9ebafb9c2ba3
|
refs/heads/master
| 2020-09-06T19:43:22.685332
| 2019-12-24T10:45:12
| 2019-12-24T10:45:12
| 220,529,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,642
|
py
|
import argparse
import os
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.utils import make_grid, save_image
from models import vae
from utils.criterion import VAELoss
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=20)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--learning_rate", type=float, default=2e-4)
parser.add_argument("--beta1", type=float, default=0.5, help="Momentum term for Adam (beta1)")
parser.add_argument("--beta2", type=float, default=0.999, help="Momentum term for Adam (beta2)")
parser.add_argument("--beta", type=float, default=1, help="KL Divergence weight beta (beta-VAE)")
parser.add_argument("--ndf", type=int, default=16, help="Base features multiplier for discriminator")
parser.add_argument("--ngf", type=int, default=16, help="Base features multiplier for generator")
parser.add_argument("--n_workers", type=int, default=0)
parser.add_argument("--latent_dim", type=int, default=10)
parser.add_argument("--image_ch", type=int, default=3)
parser.add_argument("--image_res", type=int, default=128)
parser.add_argument("--checkpoint_path", type=str, default="checkpoints")
parser.add_argument("--result_path", type=str, default="results")
parser.add_argument("--save_name", type=str, default="vae")
parser.add_argument("--data_root", type=str, default=R"E:\Datasets\CelebA")
# parser.add_argument("--data_root", type=str, default=R"E:\Datasets\MNIST")
args = parser.parse_args()
args.save_name = 'vae_celeba_b256_z10'
args.ndf = 16
args.ngf = 16
args.checkpoint_path = os.path.join(args.checkpoint_path, args.save_name)
args.result_path = os.path.join(args.result_path, args.save_name)
os.makedirs(args.checkpoint_path, exist_ok=True)
os.makedirs(args.result_path, exist_ok=True)
tfs = transforms.Compose([
transforms.Resize(args.image_res),
transforms.CenterCrop(args.image_res),
transforms.ToTensor(),
transforms.Normalize([0.5]*args.image_ch, [0.5]*args.image_ch)
])
dataset = torchvision.datasets.CelebA(args.data_root, split="all", transform=tfs, download=True)
# dataset = torchvision.datasets.MNIST(args.data_root, train=True, transform=tfs, download=True)
dataloader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=False, pin_memory=True)
netE = vae.Encoder(ndf=args.ndf, img_dim=args.image_ch, resolution=args.image_res, output_dim=args.latent_dim).to(device)
netD = vae.Generator(ngf=args.ngf, img_dim=args.image_ch, resolution=args.image_res, z_dim=args.latent_dim).to(device)
optimizer = torch.optim.Adam(list(netE.parameters()) + list(netD.parameters()), lr=args.learning_rate, betas=(args.beta1, args.beta2))
checkpoint = torch.load('%s/checkpoint_%03d.pth' % (args.checkpoint_path, 20), map_location=torch.device('cpu'))
netE.load_state_dict(checkpoint['state_dict']['encoder'])
netD.load_state_dict(checkpoint['state_dict']['decoder'])
optimizer.load_state_dict(checkpoint['optimizer'])
netE.eval()
netD.eval()
inputs, labels = next(iter(dataloader))
inputs = inputs.to(device)
n_interp = 11
interp_grad = torch.linspace(0, 1, n_interp)
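# Build per-dimension interpolation weights: row i varies only latent dimension i
# from z_start to z_end (via the one-hot scatter below), leaving the rest at z_start.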
idx = torch.arange(args.latent_dim)
idx = torch.zeros((args.latent_dim, args.latent_dim)).scatter_(1, idx.view(-1, 1), 1).unsqueeze(-1).expand(-1, -1, n_interp)
interp_weight = (idx * interp_grad).to(device)
idx_start = 1
idx_end = 60
with torch.no_grad():
mu, logvar = netE.encode(inputs)
z = netE.reparameterize(mu, logvar)
# z = mu
x_recon = netD(z)
print(mu[idx_start], logvar[idx_start].exp())
print(mu[idx_end], logvar[idx_end].exp())
z_start = z[idx_start]
z_end = z[idx_end]
z_interp = torch.empty((args.latent_dim + 1, n_interp, args.latent_dim), dtype=torch.float, device=device)
for i in range(args.latent_dim):
for j in range(n_interp):
z_interp[i, j] = torch.lerp(z_start, z_end, interp_weight[i, :, j])
for j, val in enumerate(interp_grad):
z_interp[-1, j] = torch.lerp(z_start, z_end, val.item())
with torch.no_grad():
x_interp = netD(z_interp.view(-1, args.latent_dim))
print(z_interp.shape)
inputs_row = torch.zeros((n_interp,) + x_interp.shape[1:])
inputs_row[0] = inputs[idx_start]
inputs_row[-1] = inputs[idx_end]
x_interp = torch.cat([x_interp.cpu(), inputs_row], dim=0)
# plt.imshow(make_grid(inputs.cpu(), normalize=True).permute(1, 2, 0))
# plt.show()
# plt.imshow(make_grid(x_recon.cpu(), normalize=True).permute(1, 2, 0))
# plt.show()
plt.imshow(make_grid(x_interp.cpu(), normalize=True, nrow=n_interp).permute(1, 2, 0))
plt.show()
# for epoch in range(args.n_epochs):
# for i, (inputs, _) in enumerate(dataloader):
# inputs = inputs.to(device)
# optimizer.zero_grad()
# mu, logvar = netE.encode(inputs)
# z = netE.reparameterize(mu, logvar)
# x_recon = netD(z)
# loss = criterion(mu, logvar, x_recon, inputs)
# loss.backward()
# optimizer.step()
# if i % 50 == 0:
# print('[%d/%d][%d/%d] Loss: %.4f | Mu/Var: %.4f/%.4f'
# % (epoch + 1, args.n_epochs, i, len(dataloader), loss.item(), mu.mean().item(), logvar.exp().mean().item()))
# if i % 50 == 0:
# outG = torch.cat([inputs[:32].cpu(), x_recon[:32].cpu()], dim=0)
# save_image(outG, '%s/fake_epoch%03d_%04d.jpg' % (args.result_path, epoch + 1, i + 1), normalize=True)
# save_model((netE, netD), optimizer, epoch, args.checkpoint_path)
|
[
"herman.prawiro@gmail.com"
] |
herman.prawiro@gmail.com
|
ad05c42a7240dc72c2dcb38173fdee93f772ff82
|
7809d83384c286eae8fea42abbcd44471da0cf7a
|
/hello/big10.py
|
d586844cdd4a2071ef1af98acf62fc2e0d9517ab
|
[] |
no_license
|
krohak/codeIT-suisse
|
dc04be7b86150d26726e2934f1b810ae72139780
|
785bb4e6d81153b053690bab1e65df897b694104
|
refs/heads/master
| 2021-05-16T10:29:18.343558
| 2017-09-24T20:11:57
| 2017-09-24T20:11:57
| 104,672,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
from array import array
from collections import deque
try:
    import psyco  # optional Python 2 JIT speedup; safe to skip if unavailable
    psyco.full()
except ImportError:
    pass
data = []
nrows = 0
px = py = 0
sdata = ""
ddata = ""
def init(board):
global data, nrows, sdata, ddata, px, py
data = filter(None, board.splitlines())
nrows = max(len(r) for r in data)
    # Board characters: ' ' floor, '#' wall, 'b' player, 'o' box, '*' goal.
    # sdata keeps the static layout (walls/goals); ddata keeps the movable pieces.
    maps = {' ': ' ', '*': '*', 'b': ' ', '#': '#', 'o': ' '}
    mapd = {' ': ' ', '*': ' ', 'b': '@', '#': ' ', 'o': '*'}
    for r, row in enumerate(data):
        for c, ch in enumerate(row):
            sdata += maps[ch]
            ddata += mapd[ch]
            if ch == 'b':  # player start position ('b' in this board encoding)
                px = c
                py = r
def push(x, y, dx, dy, data):
if sdata[(y+2*dy) * nrows + x+2*dx] == '#' or \
data[(y+2*dy) * nrows + x+2*dx] != ' ':
return None
data2 = array("c", data)
data2[y * nrows + x] = ' '
data2[(y+dy) * nrows + x+dx] = '@'
data2[(y+2*dy) * nrows + x+2*dx] = '*'
return data2.tostring()
def is_solved(data):
    # Solved when exactly the goal squares ('*' in the static map) hold boxes.
    for i in xrange(len(data)):
        if (sdata[i] == '*') != (data[i] == '*'):
            return False
    return True
def solve():
open = deque([(ddata, "", px, py)])
visited = set([ddata])
dirs = ((0, -1, 'u', 'U'), ( 1, 0, 'r', 'R'),
(0, 1, 'd', 'D'), (-1, 0, 'l', 'L'))
lnrows = nrows
while open:
cur, csol, x, y = open.popleft()
for di in dirs:
temp = cur
dx, dy = di[0], di[1]
if temp[(y+dy) * lnrows + x+dx] == '*':
temp = push(x, y, dx, dy, temp)
if temp and temp not in visited:
if is_solved(temp):
return csol + di[3]
open.append((temp, csol + di[3], x+dx, y+dy))
visited.add(temp)
else:
if sdata[(y+dy) * lnrows + x+dx] == '#' or \
temp[(y+dy) * lnrows + x+dx] != ' ':
continue
data2 = array("c", temp)
data2[y * lnrows + x] = ' '
data2[(y+dy) * lnrows + x+dx] = '@'
temp = data2.tostring()
if temp not in visited:
if is_solved(temp):
return csol + di[2]
open.append((temp, csol + di[2], x+dx, y+dy))
visited.add(temp)
return "No solution"
|
[
"rohaksinghal14@gmail.com"
] |
rohaksinghal14@gmail.com
|
257aaddee8b51ae7ebf8f4e108a69c7203e246ec
|
811ccdd2ffe95c871842407cc44d1fa44b363e6e
|
/squad/prepro.py
|
452f53f3e5da03ca4abe71e06abd8845d14c70e6
|
[
"Apache-2.0"
] |
permissive
|
Jh-SYSU/Bi-Directional-Attention-Flow-for-Taidi
|
69979a0c09f4f6befe49664703cd8097707f9f87
|
3eb5007af82f4fbacb486315c48bde06d8dd3d00
|
refs/heads/master
| 2020-03-10T21:59:11.181222
| 2018-04-15T12:33:27
| 2018-04-15T12:33:27
| 129,608,007
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,260
|
py
|
import argparse
import json
import os
# data: q, cq, (dq), (pq), y, *x, *cx
# shared: x, cx, (dx), (px), word_counter, char_counter, word2vec
# no metadata
from collections import Counter
from tqdm import tqdm
from squad.utils import get_word_span, get_word_idx, process_tokens
def main():
args = get_args()
prepro(args)
def get_args():
parser = argparse.ArgumentParser()
home = os.path.expanduser("~")
source_dir = os.path.join("data", "squad")
target_dir = "data/squad"
glove_dir = os.path.join("data", "glove")
parser.add_argument('-s', "--source_dir", default=source_dir)
parser.add_argument('-t', "--target_dir", default=target_dir)
parser.add_argument('-d', "--debug", action='store_true')
parser.add_argument("--train_ratio", default=0.9, type=int)
parser.add_argument("--glove_corpus", default="6B")
parser.add_argument("--glove_dir", default=glove_dir)
parser.add_argument("--glove_vec_size", default=100, type=int)
parser.add_argument("--mode", default="full", type=str)
parser.add_argument("--single_path", default="", type=str)
parser.add_argument("--tokenizer", default="PTB", type=str)
parser.add_argument("--url", default="vision-server2.corp.ai2", type=str)
parser.add_argument("--port", default=8000, type=int)
parser.add_argument("--split", action='store_true')
# TODO : put more args here
return parser.parse_args()
def create_all(args):
out_path = os.path.join(args.source_dir, "all-v1.1.json")
if os.path.exists(out_path):
return
train_path = os.path.join(args.source_dir, "train-v1.1.json")
train_data = json.load(open(train_path, 'r'))
dev_path = os.path.join(args.source_dir, "dev-v1.1.json")
dev_data = json.load(open(dev_path, 'r'))
train_data['data'].extend(dev_data['data'])
print("dumping all data ...")
json.dump(train_data, open(out_path, 'w'))
def prepro(args):
if not os.path.exists(args.target_dir):
os.makedirs(args.target_dir)
if args.mode == 'full':
prepro_each(args, 'train', out_name='train')
prepro_each(args, 'dev', out_name='dev')
prepro_each(args, 'dev', out_name='test')
elif args.mode == 'all':
create_all(args)
prepro_each(args, 'dev', 0.0, 0.0, out_name='dev')
prepro_each(args, 'dev', 0.0, 0.0, out_name='test')
prepro_each(args, 'all', out_name='train')
elif args.mode == 'single':
assert len(args.single_path) > 0
prepro_each(args, "NULL", out_name="single", in_path=args.single_path)
else:
prepro_each(args, 'train', 0.0, args.train_ratio, out_name='train')
prepro_each(args, 'train', args.train_ratio, 1.0, out_name='dev')
prepro_each(args, 'dev', out_name='test')
def save(args, data, shared, data_type):
data_path = os.path.join(args.target_dir, "data_{}.json".format(data_type))
shared_path = os.path.join(args.target_dir, "shared_{}.json".format(data_type))
json.dump(data, open(data_path, 'w'))
json.dump(shared, open(shared_path, 'w'))
def get_word2vec(args, word_counter):
glove_path = os.path.join(args.glove_dir, "glove.{}.{}d.txt".format(args.glove_corpus, args.glove_vec_size))
sizes = {'6B': int(4e5), '42B': int(1.9e6), '840B': int(2.2e6), '2B': int(1.2e6)}
total = sizes[args.glove_corpus]
word2vec_dict = {}
with open(glove_path, 'r', encoding='utf-8') as fh:
for line in tqdm(fh, total=total):
array = line.lstrip().rstrip().split(" ")
word = array[0]
vector = list(map(float, array[1:]))
if word in word_counter:
word2vec_dict[word] = vector
elif word.capitalize() in word_counter:
word2vec_dict[word.capitalize()] = vector
elif word.lower() in word_counter:
word2vec_dict[word.lower()] = vector
elif word.upper() in word_counter:
word2vec_dict[word.upper()] = vector
print("{}/{} of word vocab have corresponding vectors in {}".format(len(word2vec_dict), len(word_counter), glove_path))
return word2vec_dict
def prepro_each(args, data_type, start_ratio=0.0, stop_ratio=1.0, out_name="default", in_path=None):
if args.tokenizer == "PTB":
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace("``", '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = in_path or os.path.join(args.source_dir, "{}-v1.1.json".format(data_type))
source_data = json.load(open(source_path, 'r'))
q, cq, y, rx, rcx, ids, idxs = [], [], [], [], [], [], []
cy = []
x, cx = [], []
answerss = []
p = []
word_counter, char_counter, lower_word_counter = Counter(), Counter(), Counter()
start_ai = int(round(len(source_data['data']) * start_ratio))
stop_ai = int(round(len(source_data['data']) * stop_ratio))
for ai, article in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
xp, cxp = [], []
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for pi, para in enumerate(article['paragraphs']):
# wordss
context = para['context']
context = context.replace("''", '" ')
context = context.replace("``", '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi] # process tokens
# given xi, add chars
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
# get words
qi = word_tokenize(qa['question'])
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
# TODO : put some function that gives word_start, word_stop here
yi0, yi1 = get_word_span(context, xi, answer_start, answer_stop)
# yi0 = answer['answer_word_start'] or [0, 0]
# yi1 = answer['answer_word_stop'] or [0, 1]
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1]-1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1]-1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
# print(answer_text, w0[cyi0:], w1[:cyi1+1])
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
# add context here
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy,
'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx}
shared = {'x': x, 'cx': cx, 'p': p,
'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter,
'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print("saving ...")
save(args, data, shared, out_name)
if __name__ == "__main__":
main()
|
[
"raojh6@mail2.sysu.edu.cn"
] |
raojh6@mail2.sysu.edu.cn
|
311a6aa2da76abe9146ab35a0e532833d59b34ae
|
ca506bf32d8c3380264a2fa9e9fc2950a5df5613
|
/python_arrays.py
|
68d0d98308df4d8790fd75d1347a53cdd97a5755
|
[
"MIT"
] |
permissive
|
anspears/astr-119-hw-2
|
3a69b6023b803c9d6e7832763b73d5742cf3ec67
|
964a94ef7edcb0d2d707cbcfb83e398494e2585c
|
refs/heads/main
| 2022-12-26T01:32:45.791410
| 2020-10-08T19:45:33
| 2020-10-08T19:45:33
| 302,409,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
x = [0.0, 3.0, 5.0, 2.5, 3.7] #define an array
print("When we start, x = ",x)
#remove the third element
x.pop(2)
print(x)
#remove the element equal to 2.5
x.remove(2.5)
print(x)
#add an element at the end
x.append(1.2)
print(x)
#get a copy
y = x.copy()
print(y)
#how many elements are equal to 0.0
print(y.count(0.0))
#print the index with value 3.7
print(y.index(3.7))
#sort the list
y.sort()
print(y)
#reverse sort
y.reverse()
print(y)
#remove all elements
y.clear()
print(y)
print(x)
|
[
"noreply@github.com"
] |
anspears.noreply@github.com
|
0a15bed3e14b90cd6ce31c2d80cb043468eb77c7
|
46b7d39fac4f22b04bc74acc2db61056c49db853
|
/day3_puzzle2.py
|
27001f78650cf218503bb27fb1f2abded2d64d7c
|
[] |
no_license
|
antonforsstrom/adventofcode
|
e46e630cf346d1cd7653751233cd7cc8b8f0d38e
|
e087a2143fd5f4b131b520db0ab0ca4e88ac2988
|
refs/heads/master
| 2020-11-25T09:07:32.290709
| 2019-12-18T16:04:38
| 2019-12-18T16:04:38
| 228,586,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
import day3_puzzle1
def steps(wire1, wire2):
"""Calculates most time efficient intersection, i.e. the one reached with the least total steps from origin"""
    points1 = day3_puzzle1.get_visited_points(wire1)
    points2 = day3_puzzle1.get_visited_points(wire2)
    intersections = day3_puzzle1.intersections(points1, points2)
    # Number of steps required to get to each intersection
    num_steps = []
    for intersection in intersections:
        # Correct for 0-indexing
        steps_wire1 = points1.index(intersection) + 1
        steps_wire2 = points2.index(intersection) + 1
        total_steps = steps_wire1 + steps_wire2
        num_steps.append(total_steps)
    return num_steps
def min_steps(wire1: list, wire2: list):
steps_list = steps(wire1, wire2)
min_steps_list = min(steps_list)
return min_steps_list
if __name__ == "__main__":
filename = 'inputs/day3_input.txt'
wires = day3_puzzle1.read_input(filename)
print("Min distance: ", min_steps(wires[0], wires[1]))
|
[
"antf@C02Z6367LVDL.local"
] |
antf@C02Z6367LVDL.local
|
b9ab3134e2660cbd612f2de81377cf7567e58df6
|
cff539edac9e5b2492fcbaa87b7a0f5cb5e33497
|
/csv_test.py
|
efad46a4471bcb2e0e7056e044e92ae47301acb7
|
[] |
no_license
|
Charlie-Robot/csv-card-catalog
|
1f70157764a34475ed3cca0368915ba31ac44187
|
b7427c2e0bca4fa87d3f7b0d9b7188a18de042c9
|
refs/heads/master
| 2020-05-26T17:23:36.424839
| 2017-02-19T19:32:13
| 2017-02-19T19:32:13
| 82,483,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,040
|
py
|
import unittest, os, time
from sheet import Spreadsheet
class Unitest(unittest.TestCase):
def setUp(self):
self.FILE = 'test.csv'
self.FILE_1 = 'test_2.csv'
self.FILE_2 = 'test_3.csv'
self.HEADERS = ['First',
'Middle',
'Last',
'Street',
'Apt#',
'City',
'Zip',
'Email',
'Occupation',
'Employer',
'Filed By',
'Rolodex',
'Member Signed',
'Cope Signed',
'Date Signed',
'Contribution',]
self.FULL_VALS = {'500': {'First' : 'Madison',
'Middle' : 'Katy',
'Last' : 'Wyss',
'Street' : 'Marina Blvd',
'Apt#' : '305',
'City' : 'San Leandro',
'Zip' : '90210',
'Email' : 'madison@baby.com',
'Occupation' : 'baby',
'Employer' : 'Mom',
'Filed By' : 'Igor',
'Rolodex' : '500',
'Member Signed' : 'yes',
'Cope Signed': 'yes',
'Date Signed' : '01/01/2001',
'Contribution' : '10'}}
self.MISSING_VALS = {'100': {'First' : 'Hobart',
'Middle' : 'The',
'Last' : 'Great',
'Street' : 'Awesome Ln',
'Apt#' : '',
'City' : 'Candy Land',
'Zip' : '12345',
'Email' : '',
'Occupation' : 'Magician',
'Employer' : '',
'Filed By' : 'Igor',
'Rolodex' : '100',
'Member Signed' : 'yes',
'Cope Signed': 'yes',
'Date Signed' : '02/11/2012',
'Contribution' : '0'}}
self.MORE_VALS = {'501': {'First' : 'Sheman',
'Middle' : 'Tai',
'Last' : 'Wyss',
'Street' : 'Mallard Loop',
'Apt#' : '133',
'City' : 'Whitefish',
'Zip' : '59937',
'Email' : 'stumpy@lowdog.com',
'Occupation' : 'Guard Dog',
'Employer' : 'Mom',
'Filed By' : 'Igor',
'Rolodex' : '501',
'Member Signed' : 'yes',
'Cope Signed': 'yes',
'Date Signed' : '12/10/2011',
'Contribution' : '20'}}
self.OVERLAP_VALS = {'500': {'First' : 'Franklin',
'Middle' : 'The',
'Last' : 'Lesser',
'Street' : 'Adequate Ln',
'Apt#' : '',
'City' : 'Kalamazoo',
'Zip' : '24681',
'Email' : 'not_so_great@humble.org',
'Occupation' : 'Ponsy',
'Employer' : 'Mom',
'Filed By' : 'Igor',
'Rolodex' : '500',
'Member Signed' : 'yes',
'Cope Signed': 'yes',
'Date Signed' : '12/2/2014',
'Contribution' : '0'},
'100': {'First' : 'Tech',
'Middle' : 'The',
'Last' : 'Tank',
'Street' : 'Podunk Blvd',
'Apt#' : '',
'City' : 'Haywire Gulch',
'Zip' : '11223',
'Email' : 'tractor_time@hotmail.com',
'Occupation' : 'Tractor',
'Employer' : 'Old McDonald',
'Filed By' : 'Igor',
'Rolodex' : '100',
'Member Signed' : 'yes',
'Cope Signed': 'yes',
'Date Signed' : '10/12/2004',
'Contribution' : '10'}}
self.actual_dic = {}
self.sheet_1 = Spreadsheet(self.HEADERS,self.FILE)
self.sheet_2 = Spreadsheet(self.HEADERS,self.FILE_1)
self.sheet_3 = Spreadsheet(self.HEADERS,self.FILE_2)
self.sheet_4 = Spreadsheet(self.HEADERS)
FN, ext = os.path.splitext(self.sheet_1.FN)
self.MERGED_FN = '{}_merged{}'.format(FN, ext)
def test_basic_class_properties(self):
        #Affirm separate objects
self.assertNotEqual(self.sheet_1, self.sheet_2)
self.assertNotEqual(self.sheet_1, self.sheet_3)
self.assertNotEqual(self.sheet_2, self.sheet_3)
#Test object sheet_1
self.sheet_1.row_dict = {}
self.assertEqual(self.sheet_1.FN, self.FILE)
self.assertEqual(self.sheet_1.HEADER, self.HEADERS)
#Test object sheet_2
self.sheet_2.row_dict = {}
self.assertEqual(self.sheet_2.FN, self.FILE_1)
self.assertEqual(self.sheet_2.HEADER, self.HEADERS)
#Test object sheet_3
self.sheet_3.row_dict = {}
self.assertEqual(self.sheet_3.FN, self.FILE_2)
self.assertEqual(self.sheet_3.HEADER, self.HEADERS)
#Test default file naming convention
        #A Spreadsheet created without a specified file name is auto-named with the current date, e.g. 01.01.2016
self.sheet_4.write(self.FULL_VALS)
DATE_FORMAT = '%m/%d/%Y'
TODAY = time.strftime(DATE_FORMAT)
EXPECTED_DEFAULT_FN = TODAY.replace('/', '.') + '.csv'
self.assertTrue(os.path.isfile(EXPECTED_DEFAULT_FN))
self.assertEqual(EXPECTED_DEFAULT_FN, self.sheet_4.FN)
#If sheet with current date as file name already exists,
#append (n) to default file name, first duplicate is (1), every duplicate file name
        #that follows is n+1 ==> 01.01.2010.csv => 01.01.2010(1).csv => 01.01.2010(2).csv
ACTUAL_DUPLICATES = []
for n in range(1,21):
EXPECTED_DUPLICATE_FN = TODAY.replace('/','.') + '({})'.format(n) + '.csv'
#create 20 duplicate file names
duplicate_sheet_name = Spreadsheet(self.HEADERS)
duplicate_sheet_name.write(self.FULL_VALS)
ACTUAL_DUPLICATES.append(duplicate_sheet_name.FN)
self.assertTrue(os.path.isfile(EXPECTED_DUPLICATE_FN))
self.assertNotEqual(EXPECTED_DEFAULT_FN, duplicate_sheet_name.FN)
self.assertEqual(EXPECTED_DUPLICATE_FN, duplicate_sheet_name.FN)
for FN in ACTUAL_DUPLICATES:
os.remove(FN)
#Check csv RESTVAL
self.assertEqual('-', self.sheet_1.RESTVAL)
def test_write(self):
'''
Test a single row of data
'''
self.assertTrue(len(self.sheet_1.READ_DICT) == 0)
self.assertTrue(len(self.sheet_1.WRITE_DICT) == 0)
#Write a single line of data below Headers
self.sheet_1.write(self.FULL_VALS)
#Number of Values does not exceed number of headers
self.assertTrue(len(self.FULL_VALS.values()) <= len(self.HEADERS))
#write() created a valid .csv file
FN, ext = os.path.splitext(self.sheet_1.FN)
self.assertTrue(os.path.isfile(self.FILE), '{} is not a file'.format(self.FILE))
self.assertTrue(ext == '.csv')
with open(self.FILE, 'r') as content:
for row in content:
row = row.split(',')
self.assertNotEqual(row, self.HEADERS)
row_as_dict = {header:value for header, value in zip(self.HEADERS, row)}
for key, row in self.FULL_VALS.items():
self.assertEqual(sorted(row), sorted(row_as_dict))
self.assertEqual(row_as_dict['Rolodex'], key)
#write() should remove key, value pairs from entries if value == ''
#these removed values will be replaced by csv.DictWriter's restval
missing_vals = ['Apt#', 'Employer', 'Email']
for header in missing_vals:
self.assertTrue(header in self.MISSING_VALS['100'].keys())
self.sheet_2.write(self.MISSING_VALS)
for header in missing_vals:
self.assertTrue(header not in self.MISSING_VALS['100'].keys())
def test_read(self):
'''
Read in rows of data into a dict of lists, update Spreadsheet Rolodex
to highest Rolodex.
'''
EXPECTED = {'501': ['Sheman',
'Tai',
'Wyss',
'Mallard Loop',
'133',
'Whitefish',
'59937',
'stumpy@lowdog.com',
'Guard Dog',
'Mom',
'Igor',
'501',
'yes',
'yes',
'12/10/2011',
'20'],
'500' :['Madison',
'Katy',
'Wyss',
'Marina Blvd',
'305',
'San Leandro',
'90210',
'madison@baby.com',
'baby',
'Mom',
'Igor',
'500',
'yes',
'yes',
'01/01/2001',
'10']}
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MORE_VALS)
test_sheet = Spreadsheet(self.HEADERS, 'test_sheet.csv')
self.assertTrue(os.path.isfile(self.sheet_1.FN))
default_rolodex = 100
self.assertEqual(default_rolodex, test_sheet.ROLODEX)
test_sheet.read(self.sheet_1.FN)
self.assertEqual(test_sheet.ROLODEX, 501)
for key, row in EXPECTED.items():
self.assertTrue(key in test_sheet.READ_DICT.keys())
self.assertEqual(test_sheet.READ_DICT[key], row)
self.assertEqual(EXPECTED, test_sheet.READ_DICT)
def test_empty_cell(self):
'''
        Undefined header values in the input dict should be filled with the
        restval placeholder ('-') in the corresponding spreadsheet cell
'''
EXPECTED = ['Hobart',
'The',
'Great',
'Awesome Ln',
'-',
'Candy Land',
'12345',
'-',
'Magician',
'-',
'Igor',
'100',
'yes',
'yes',
'02/11/2012',
'0']
#Write a single line of data below Headers
self.sheet_1.write(self.MISSING_VALS)
#Read Spreadsheet row
self.sheet_1.read()
for key, row in self.sheet_1.READ_DICT.items():
for indx, value in enumerate(row):
                self.assertEqual(EXPECTED[indx], value, "Index: {} was not filled with restval '-'".format(indx))
def test_find(self):
'''
spreadsheet.find(header, value), return a list of rows
where header=value in current working file
'''
#write two rows into spreadsheet
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
#simulate a loaded spreadsheet
self.sheet_1.read()
self.assertEqual(2, len(self.sheet_1.WRITE_DICT))
self.assertTrue("Last" in self.HEADERS)
#No matching value does not alter sheet.WRITE_DICT
self.sheet_1.find('Last', 'Smith')
self.assertEqual(2, len(self.sheet_1.WRITE_DICT))
#Both rows have matching value
self.assertTrue('Filed By' in self.HEADERS)
self.sheet_1.find('Filed By', 'Igor')
self.assertEqual(2, len(self.sheet_1.WRITE_DICT))
self.assertTrue('100' in self.sheet_1.WRITE_DICT.keys())
self.assertTrue('500' in self.sheet_1.WRITE_DICT.keys())
#Single row with matching value
self.sheet_1.find('Last', 'Wyss')
self.assertEqual(1, len(self.sheet_1.WRITE_DICT))
self.assertTrue(self.FULL_VALS == self.sheet_1.WRITE_DICT)
def test_remove_single_row_by_rolodex_only(self):
        #Create Spreadsheet with 3 rows of data
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_1.write(self.MORE_VALS)
self.sheet_1.read()
self.assertEqual(3, len(self.sheet_1.WRITE_DICT))
ORIGINAL_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertTrue('501' in ORIGINAL_KEYS)
#Remove a single row of data according to a Rolodex number
self.sheet_1.remove_row('501','','')
ALTERED_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertEqual(ORIGINAL_KEYS, ALTERED_KEYS)
self.assertTrue('100' in ALTERED_KEYS)
self.assertTrue('500' in ALTERED_KEYS)
self.assertTrue('501' not in ALTERED_KEYS)
def test_remove_multiple_rows_by_rolodex_only(self):
        #Create Spreadsheet with 3 rows of data
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_1.write(self.MORE_VALS)
self.sheet_1.read()
self.sheet_2.write(self.FULL_VALS)
self.sheet_2.write(self.MISSING_VALS)
self.sheet_2.write(self.MORE_VALS)
self.sheet_2.read()
self.sheet_3.write(self.FULL_VALS)
self.sheet_3.write(self.MISSING_VALS)
self.sheet_3.write(self.MORE_VALS)
self.sheet_3.read()
self.assertEqual(3, len(self.sheet_1.WRITE_DICT))
ORIGINAL_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertTrue('500' in ORIGINAL_KEYS)
self.assertTrue('501' in ORIGINAL_KEYS)
#Remove all rows in a given range of Rolodex numbers
self.sheet_1.remove_row('101-600','','')
ALTERED_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertEqual(ORIGINAL_KEYS, ALTERED_KEYS)
self.assertTrue('100' in ALTERED_KEYS)
self.assertTrue('500' not in ALTERED_KEYS)
self.assertTrue('501' not in ALTERED_KEYS)
        #Remove all rows in a range of Rolodex numbers combined with individual
#Rolodex numbers outside of range
ORIGINAL_KEYS = self.sheet_2.WRITE_DICT.keys()
self.assertTrue('100' in ORIGINAL_KEYS)
self.assertTrue('500' in ORIGINAL_KEYS)
self.sheet_2.remove_row('0-100, 500', '', '')
ALTERED_KEYS = self.sheet_2.WRITE_DICT.keys()
self.assertTrue('100' not in ALTERED_KEYS)
self.assertTrue('500' not in ALTERED_KEYS)
        #Remove all rows in a comma-separated set of Rolodex numbers
ORIGINAL_KEYS = self.sheet_3.WRITE_DICT.keys()
self.assertTrue('100' in ORIGINAL_KEYS)
self.assertTrue('500' in ORIGINAL_KEYS)
self.sheet_3.remove_row('100, 500', '', '')
ALTERED_KEYS = self.sheet_3.WRITE_DICT.keys()
self.assertTrue('100' not in ALTERED_KEYS)
self.assertTrue('500' not in ALTERED_KEYS)
def test_remove_single_row_by_rolodex_plus_header_value(self):
        #Create Spreadsheet with 3 rows of data
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_1.write(self.MORE_VALS)
self.sheet_1.read()
self.assertEqual(3, len(self.sheet_1.WRITE_DICT))
ORIGINAL_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertTrue('100' in ORIGINAL_KEYS)
self.assertTrue('500' in ORIGINAL_KEYS)
self.assertTrue('501' in ORIGINAL_KEYS)
#Remove all rows in a given range of Rolodex numbers where header == value
self.sheet_1.remove_row('000-600','Last','Great')
ALTERED_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertEqual(ORIGINAL_KEYS, ALTERED_KEYS)
self.assertTrue('100' not in ALTERED_KEYS)
self.assertTrue('500' in ALTERED_KEYS)
self.assertTrue('501' in ALTERED_KEYS)
def test_remove_multiple_rows_by_rolodex_plus_header_value(self):
        #Create Spreadsheet with 3 rows of data
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_1.write(self.MORE_VALS)
self.sheet_1.read()
self.assertEqual(3, len(self.sheet_1.WRITE_DICT))
ORIGINAL_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertTrue('100' in ORIGINAL_KEYS)
self.assertTrue('500' in ORIGINAL_KEYS)
self.assertTrue('501' in ORIGINAL_KEYS)
        #Remove all rows in a given range of Rolodex numbers where 'Last' == 'Wyss'
self.sheet_1.remove_row('000-600','Last','Wyss')
ALTERED_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertEqual(ORIGINAL_KEYS, ALTERED_KEYS)
self.assertTrue('100' in ALTERED_KEYS)
self.assertTrue('500' not in ALTERED_KEYS)
self.assertTrue('501' not in ALTERED_KEYS)
def test_remove_multiple_rows_by_header_value_only(self):
        #Create Spreadsheet with 3 rows of data
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_1.write(self.MORE_VALS)
self.sheet_1.read()
self.assertEqual(3, len(self.sheet_1.WRITE_DICT))
ORIGINAL_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertTrue('100' in ORIGINAL_KEYS)
self.assertTrue('500' in ORIGINAL_KEYS)
self.assertTrue('501' in ORIGINAL_KEYS)
        #Remove all rows where 'Last' == 'Wyss' (no Rolodex filter given)
self.sheet_1.remove_row('','Last','Wyss')
ALTERED_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertEqual(ORIGINAL_KEYS, ALTERED_KEYS)
self.assertTrue('100' in ALTERED_KEYS)
self.assertTrue('500' not in ALTERED_KEYS)
self.assertTrue('501' not in ALTERED_KEYS)
def test_remove_single_row_by_header_value_only(self):
        #Create Spreadsheet with 3 rows of data
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_1.write(self.MORE_VALS)
self.sheet_1.read()
self.assertEqual(3, len(self.sheet_1.WRITE_DICT))
ORIGINAL_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertTrue('100' in ORIGINAL_KEYS)
self.assertTrue('500' in ORIGINAL_KEYS)
self.assertTrue('501' in ORIGINAL_KEYS)
        #Remove the row where 'Contribution' == '10' (no Rolodex filter given)
self.sheet_1.remove_row('','Contribution','10')
ALTERED_KEYS = self.sheet_1.WRITE_DICT.keys()
self.assertEqual(ORIGINAL_KEYS, ALTERED_KEYS)
self.assertTrue('100' in ALTERED_KEYS)
self.assertTrue('500' not in ALTERED_KEYS)
self.assertTrue('501' in ALTERED_KEYS)
def test_edit_cell(self):
'''
edit_cell(rolodex, header, new_value, [over_ride = False])
Alter the value of a single cell in a row or rows of Data,
spreadsheet.edit_cell('100', 'Header', 'New_Value'),
-or-
spreadsheet.edit_cell('100,101,103', 'Header', 'New_Value'),
-or-
spreadsheet.edit_cell('100, 200-600', 'Header', 'New_Value')
Rolodex unalterable unless over_ride == True
'''
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_1.write(self.MORE_VALS)
self.sheet_1.read()
#Unaltered data row
self.assertEqual(self.sheet_1.WRITE_DICT['100']['Contribution'],
self.MISSING_VALS['100']['Contribution'])
#Call spreadsheet.edit_cell() on single row
self.sheet_1.edit_cell('100', 'Contribution', '30')
#Spreadsheet altered value of given cell
self.assertNotEqual(self.sheet_1.WRITE_DICT['100']['Contribution'],
self.MISSING_VALS['100']['Contribution'])
self.assertEqual(self.sheet_1.WRITE_DICT['100']['Contribution'], '30')
#Call spreadsheet.edit_cell() on multiple rows
#Test unaltered values
self.assertEqual(self.MISSING_VALS['100']['Filed By'],
self.sheet_1.WRITE_DICT['100']['Filed By'])
self.assertEqual(self.sheet_1.WRITE_DICT['100']['Filed By'],'Igor')
self.assertEqual(self.FULL_VALS['500']['Filed By'],
self.sheet_1.WRITE_DICT['500']['Filed By'])
self.assertEqual(self.sheet_1.WRITE_DICT['500']['Filed By'],'Igor')
self.assertEqual(self.MORE_VALS['501']['Filed By'],
self.sheet_1.WRITE_DICT['501']['Filed By'])
self.assertEqual(self.sheet_1.WRITE_DICT['501']['Filed By'],'Igor')
#Alter single cell in multiple Data rows
#individual rolodex ID's
self.sheet_1.edit_cell('100,500,501', 'Filed By', 'Jack')
#All 3 rows in spreadsheet altered 'Filed By' value
#Test altered values
self.assertNotEqual(self.MISSING_VALS['100']['Filed By'],
self.sheet_1.WRITE_DICT['100']['Filed By'])
self.assertEqual(self.MISSING_VALS['100']['Filed By'], 'Igor')
self.assertEqual(self.sheet_1.WRITE_DICT['100']['Filed By'],'Jack')
self.assertNotEqual(self.FULL_VALS['500']['Filed By'],
self.sheet_1.WRITE_DICT['500']['Filed By'])
self.assertEqual(self.FULL_VALS['500']['Filed By'],'Igor')
self.assertEqual(self.sheet_1.WRITE_DICT['500']['Filed By'],'Jack')
self.assertNotEqual(self.MORE_VALS['501']['Filed By'],
self.sheet_1.WRITE_DICT['501']['Filed By'])
self.assertEqual(self.MORE_VALS['501']['Filed By'],'Igor')
self.assertEqual(self.sheet_1.WRITE_DICT['501']['Filed By'],'Jack')
#range of rolodex ID's
self.sheet_1.edit_cell('0-100, 501', 'Employer', 'Dr. Frankenstein')
#test altered range
self.assertEqual(self.sheet_1.WRITE_DICT['100']['Employer'],'Dr. Frankenstein')
self.assertNotEqual(self.MORE_VALS['501']['Employer'],
self.sheet_1.WRITE_DICT['501']['Employer'])
self.assertEqual(self.MORE_VALS['501']['Employer'],'Mom')
self.assertEqual(self.sheet_1.WRITE_DICT['501']['Employer'],'Dr. Frankenstein')
#Rolodex can only be changed if over_ride == True
self.assertTrue('100' in self.sheet_1.WRITE_DICT.keys())
#edit_cell called without over_ride invoked, rolodex and WRITE_DICT[INDEX] should not change
self.sheet_1.edit_cell('100', 'Rolodex', '222')
self.assertTrue('100' in self.sheet_1.WRITE_DICT.keys())
self.assertEqual(self.sheet_1.WRITE_DICT['100']['Rolodex'], '100')
#edit_cell called with over_ride invoked, rolodex and WRITE_DICT[INDEX] should change
self.sheet_1.edit_cell('100', 'Rolodex', '222', over_ride = True)
self.assertTrue('100' not in self.sheet_1.WRITE_DICT.keys())
self.assertEqual(self.sheet_1.WRITE_DICT['222']['Rolodex'], '222')
def test_remove_column(self):
'''
        All values under a given header are removed without removing the header itself
'''
HEADER = "Apt#"
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_1.write(self.MORE_VALS)
self.sheet_1.read()
for indx, row in self.sheet_1.WRITE_DICT.items():
self.assertTrue(HEADER in row.keys())
#call spreadsheet.remove_col()
self.sheet_1.remove_col(HEADER)
for indx, row in self.sheet_1.WRITE_DICT.items():
self.assertTrue(HEADER in row.keys())
self.assertTrue(row[HEADER] == '')
def test_basic_merge(self):
'''
Combine current working spreadsheet with second existing sheet, results written to third
.csv file, original files are unaltered
'''
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_2.write(self.OVERLAP_VALS)
self.sheet_3.write(self.MORE_VALS)
self.sheet_1.read()
self.sheet_2.read()
self.sheet_3.read()
self.assertTrue(os.path.isfile(self.sheet_1.FN))
self.assertTrue(os.path.isfile(self.sheet_2.FN))
self.assertTrue(os.path.isfile(self.sheet_3.FN))
self.assertNotEqual(self.sheet_1.FN, self.sheet_2.FN)
self.assertNotEqual(self.sheet_1.FN, self.sheet_3.FN)
self.assertNotEqual(self.sheet_2.FN, self.sheet_3.FN)
sheet_1_keys = self.sheet_1.WRITE_DICT.keys()
sheet_2_keys = self.sheet_2.WRITE_DICT.keys()
sheet_3_keys = self.sheet_3.WRITE_DICT.keys()
        #Merge two files without overlapping Rolodex numbers
FN, ext = os.path.splitext(self.sheet_1.FN)
for rolodex in self.sheet_1.WRITE_DICT.keys():
self.assertTrue(rolodex not in self.sheet_3.WRITE_DICT.keys())
self.assertFalse(os.path.isfile(self.MERGED_FN))
#the merge
self.sheet_1.merge(self.sheet_3.FN)
#merged file was created
self.assertTrue(os.path.isfile(self.MERGED_FN))
self.sheet_4.read(self.MERGED_FN)
#merged file contains rows of both sheet_1 and sheet_3
for rolodex, row in self.sheet_1.WRITE_DICT.items():
self.assertTrue(rolodex in self.sheet_4.WRITE_DICT.keys())
self.assertEqual(self.sheet_1.WRITE_DICT[rolodex],
self.sheet_4.WRITE_DICT[rolodex])
for rolodex, row in self.sheet_3.WRITE_DICT.items():
self.assertTrue(rolodex in self.sheet_4.WRITE_DICT.keys())
self.assertEqual(self.sheet_3.WRITE_DICT[rolodex],
self.sheet_4.WRITE_DICT[rolodex])
#Original files are unaltered
for rolodex in self.sheet_1.WRITE_DICT.keys():
self.assertTrue(rolodex not in self.sheet_3.WRITE_DICT.keys())
self.assertEqual(self.sheet_1.WRITE_DICT.keys(), sheet_1_keys)
for rolodex in self.sheet_3.WRITE_DICT.keys():
self.assertTrue(rolodex not in self.sheet_1.WRITE_DICT.keys())
self.assertEqual(self.sheet_3.WRITE_DICT.keys(), sheet_3_keys)
def test_merge_overlapping_ID_change_all_duplicate_IDs(self):
'''
        Test overlapping Rolodex ID's.
        If duplicate ID's are found, a GUI window prompts to change the conflicting ID
        to 1 higher than the current highest known ID (or not), with the option to do the
        same for all remaining conflicting ID's. If not, a second GUI prompts to overwrite
        the secondary ID and corresponding row in the new combined file (original file
        unaltered), with the option to overwrite all remaining ID's. GUI's suppressed for testing.
'''
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_2.write(self.OVERLAP_VALS)
self.sheet_1.read()
self.sheet_2.read()
self.assertTrue(os.path.isfile(self.sheet_1.FN))
self.assertTrue(os.path.isfile(self.sheet_2.FN))
self.assertNotEqual(self.sheet_1.FN, self.sheet_2.FN)
sheet_1_keys = self.sheet_1.WRITE_DICT.keys()
sheet_2_keys = self.sheet_2.WRITE_DICT.keys()
#Conditions to pass to Spreadsheet.merge()
conditions = {'TESTING' : True,
'CHANGE_YES_ALL' : True,
'CHANGE_YES_ONE' : False,
'CHANGE_NO_ALL' : False,
'OVERWRITE_YES_ALL' : False,
                      'OVERWRITE_NO_ALL' : False}
        #Merge sheet_1 & sheet_2; sheet_2 has duplicate Rolodex values
for key in self.sheet_2.WRITE_DICT.keys():
self.assertTrue(key in self.sheet_1.WRITE_DICT.keys())
        #Max Rolodex of both sheets pre-merge
MAX_ID = max(self.sheet_1.ROLODEX, self.sheet_2.ROLODEX)
#Expected newly merged file does not exist yet
self.assertFalse(os.path.isfile(self.MERGED_FN))
#The Merge
ORIGINAL_ROLODEX = self.sheet_1.ROLODEX
msg = self.sheet_1.merge(self.sheet_2.FN, **conditions)
#Original sheets are unaltered
self.assertEqual(self.sheet_1.WRITE_DICT.keys(), sheet_1_keys)
self.assertEqual(self.sheet_2.WRITE_DICT.keys(), sheet_2_keys)
#Expected merged file was created
self.assertTrue(os.path.isfile(self.MERGED_FN))
self.sheet_1.load(self.MERGED_FN)
        #merged file should have 4 entries, with Rolodex ID's from sheet_2 bumped past
        #the highest known ID
merged = Spreadsheet(self.HEADERS, fn = self.MERGED_FN)
merged.read()
#merged file should have 4 entries
self.assertEqual(4, len(merged.WRITE_DICT.keys()))
#Test all ID's from sheet_1 are in merged file unaltered
for ID in self.sheet_1.WRITE_DICT.keys():
self.assertTrue(ID in merged.WRITE_DICT.keys())
#Test all ID's from sheet_2 are Altered and added to merged
for ID in self.sheet_2.WRITE_DICT.keys():
MAX_ID += 1
self.assertTrue(str(MAX_ID) in merged.WRITE_DICT.keys())
#Rows with Altered ID's, merged.WRITE_DICT[ID]['Rolodex'] reflects changed value
for ID in merged.WRITE_DICT.keys():
self.assertEqual(merged.WRITE_DICT[ID]['Rolodex'], ID)
#Currently working sheet's Rolodex ID is updated to reflect new higher Rolodex
self.assertNotEqual(ORIGINAL_ROLODEX, self.sheet_1.ROLODEX)
self.assertEqual(self.sheet_1.ROLODEX, merged.ROLODEX)
def test_merge_overlapping_ID_change_single_duplicate_IDs(self):
'''
        If a duplicate ID is not changed, the data row with that ID is not
        added to the newly formed file. This test is limited to changing only the
        first duplicate ID; the actual function should allow skipping the first
        found duplicate but changing the ones that follow. GUI's suppressed for testing.
'''
self.sheet_1.write(self.FULL_VALS)
self.sheet_1.write(self.MISSING_VALS)
self.sheet_2.write(self.OVERLAP_VALS)
self.sheet_1.read()
self.sheet_2.read()
self.assertTrue(os.path.isfile(self.sheet_1.FN))
self.assertTrue(os.path.isfile(self.sheet_2.FN))
self.assertNotEqual(self.sheet_1.FN, self.sheet_2.FN)
sheet_1_keys = self.sheet_1.WRITE_DICT.keys()
sheet_2_keys = self.sheet_2.WRITE_DICT.keys()
#Conditions to pass to Spreadsheet.merge()
conditions = {'TESTING' : True,
'CHANGE_YES_ALL' : False,
'CHANGE_YES_ONE' : True,
'CHANGE_NO_ALL' : False,
'OVERWRITE_YES_ALL' : False,
'OVERWRITE_YES_ONE' : False,
'OVERWRITE_NO_ALL' : False,
'OVERWRITE_NO_ONE' : False}
        #Merge sheet_1 & sheet_2; sheet_2 has duplicate Rolodex values
for key in self.sheet_2.WRITE_DICT.keys():
self.assertTrue(key in self.sheet_1.WRITE_DICT.keys())
        #Max Rolodex of both sheets pre-merge
MAX_ID = max(self.sheet_1.ROLODEX, self.sheet_2.ROLODEX)
#Expected newly merged file does not exist yet
self.assertFalse(os.path.isfile(self.MERGED_FN))
#The Merge
ORIGINAL_ROLODEX = self.sheet_1.ROLODEX
self.sheet_1.merge(self.sheet_2.FN, **conditions)
#Original sheets are unaltered
self.assertEqual(self.sheet_1.WRITE_DICT.keys(), sheet_1_keys)
self.assertEqual(self.sheet_2.WRITE_DICT.keys(), sheet_2_keys)
#Expected merged file was created
self.assertTrue(os.path.isfile(self.MERGED_FN))
self.sheet_1.load(self.MERGED_FN)
        #merged file should have 3 entries, with the Rolodex ID from sheet_2 bumped past
        #the highest known ID
merged = Spreadsheet(self.HEADERS, self.MERGED_FN)
merged.read()
self.assertEqual(max(merged.WRITE_DICT.keys()), str(MAX_ID + 1))
#merged file should have 3 entries
self.assertEqual(3, len(merged.WRITE_DICT.keys()))
#Test all ID's from sheet_1 are in merged file unaltered
for ID in self.sheet_1.WRITE_DICT.keys():
self.assertTrue(ID in merged.WRITE_DICT.keys())
        #Test single (lowest ID) row from sheet_2 is altered and added to merged
EXPECTED_ROW = self.sheet_2.WRITE_DICT['100']
EXPECTED_ROW['Rolodex'] = str(MAX_ID + 1)
self.assertTrue(EXPECTED_ROW in merged.WRITE_DICT.values())
self.assertEqual(merged.WRITE_DICT[str(MAX_ID + 1)], EXPECTED_ROW)
self.assertEqual(merged.WRITE_DICT['501']['Rolodex'], '501')
        #For rows with altered IDs, merged.WRITE_DICT[ID]['Rolodex'] reflects the new value
for ID in merged.WRITE_DICT.keys():
self.assertEqual(merged.WRITE_DICT[ID]['Rolodex'], ID)
#Currently working sheet's Rolodex ID is updated to reflect new higher Rolodex
self.assertNotEqual(ORIGINAL_ROLODEX, self.sheet_1.ROLODEX)
self.assertEqual(self.sheet_1.ROLODEX, merged.ROLODEX)
def tearDown(self):
for FN in [self.FILE, self.FILE_1, self.FILE_2, self.sheet_4.FN, self.MERGED_FN]:
if os.path.isfile(FN):
os.remove(FN)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
Charlie-Robot.noreply@github.com
|
a67b58537018827a34978cad18fde383dd6c68c8
|
050c53f4930674674a8cb22790f39b0e87bdc580
|
/lending/admin.py
|
c57f8967f6a12229031ba7aef247d250cb529ec8
|
[
"BSD-3-Clause"
] |
permissive
|
destos/toolhub.co
|
44ca3391b4a40720370985c55d54ee48fb5eb4b9
|
b51cf2cc2e0eb7c09c59684f985052c966943c0b
|
refs/heads/master
| 2021-05-28T01:02:14.426904
| 2014-08-05T05:21:08
| 2014-08-05T05:21:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
from django.contrib import admin
from .models import Transaction, LendingAction
class TransactionAdmin(admin.ModelAdmin):
raw_id_fields = ('hub', 'tool', 'lendee', 'last_action')
class LendingActionAdmin(admin.ModelAdmin):
raw_id_fields = ('transaction',)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(LendingAction, LendingActionAdmin)
|
[
"patrick@forringer.com"
] |
patrick@forringer.com
|
579abdc1f2ddc32420aa68c54bebe2a82fde7f5f
|
1adcabfb3bea9769e44306105a0ac403cca0836c
|
/smtp.py
|
a1c5548055567a8752b57dee2842ea5298db0e92
|
[] |
no_license
|
cab22/Computer-Networks
|
20f1cf6f7a8269949cfea0d8e683695cbd7707c7
|
265243b423b27f4ccfba9ac95b85280bb085ca62
|
refs/heads/master
| 2021-01-21T01:43:30.380387
| 2016-07-25T16:31:34
| 2016-07-25T16:31:34
| 64,150,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
from socket import *
import ssl
import base64
#Data needed to send the email
msg = b'\r\nI love computer networks!'
endmsg = '\r\n.\r\n'
mailserver = 'smtp.gmail.com'
login=b'************'
senha=b'**************'
destinatario=b'**************'
clientSocket = socket(AF_INET, SOCK_STREAM)
# The port number may vary depending on the mail server
clientSocket.connect((mailserver, 587))
recv = clientSocket.recv(1024)
print(recv)
clientSocket.sendall(b'EHLO 192.168.25.6\r\n')
recv = clientSocket.recv(1024)
print(recv)
command = b'STARTTLS\r\n'
clientSocket.send(command)
recvdiscard = clientSocket.recv(1024)
print (recvdiscard)
clientSocketSSL = ssl.wrap_socket(clientSocket)
# clientSocket.sendall(b'HELO 192.168.25.6\r\n')
# recv = clientSocket.recv(1024)
# print(recv)
clientSocketSSL.sendall(b'EHLO 192.168.25.6\r\n')
recv = clientSocketSSL.recv(1024)
print(recv)
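# AUTH PLAIN takes base64("\0" + username + "\0" + password), as built below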
encoded = base64.b64encode(b'\000'+login+b'\000'+senha)
clientSocketSSL.send(b'AUTH PLAIN ' +encoded + b'\r\n')
recv = clientSocketSSL.recv(1024)
print(recv)
clientSocketSSL.send(b'MAIL FROM: <'+login+b'>\r\n')
recv = clientSocketSSL.recv(1024)
print(recv)
clientSocketSSL.send(b'RCPT TO: <'+destinatario+b'>\r\n')
recv = clientSocketSSL.recv(1024)
print(recv)
clientSocketSSL.send(b'DATA\r\n')
recv = clientSocketSSL.recv(1024)
print(recv)
clientSocketSSL.send(b'From: <'+login+b'>\r\nTo: <'+destinatario+b'>\r\nSubject: Test\r\n')
clientSocketSSL.send(msg+b'\r\n')
clientSocketSSL.send(b'.\r\n')
recv = clientSocketSSL.recv(1024)
print(recv)
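# Close the session politely; the server acknowledges QUIT with a 221 reply
clientSocketSSL.send(b'QUIT\r\n')
print(clientSocketSSL.recv(1024))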
# clientSocket.sendall(b'MAIL FROM: <dossantosjorgehenrique@gmail.com>\r\n')
# recv = clientSocket.recv(1024)
# print(recv)
|
[
"noreply@github.com"
] |
cab22.noreply@github.com
|
e6f308f3e67e773bcecbfee624beff58d586a4c9
|
6409d2492f9bef0d0b7219fb067d278611be31ea
|
/setup.py
|
4781b4f3c5c0e41a1636ac1ade5e616c64e22860
|
[
"MIT"
] |
permissive
|
Eugenio-Alcalde/CodonAdaptationIndex
|
d7b4fef98836f14c8bfa6483c2400e2278621fc2
|
568ee01d438d45a4527ddb6eda5fee429be40c2a
|
refs/heads/master
| 2021-09-02T04:58:21.601563
| 2017-12-30T15:06:01
| 2017-12-30T15:06:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
from setuptools import setup
setup(
name = 'CAI',
packages = ["CAI"],
version = '0.1.8',
description = 'Python implementation of codon adaptation index',
author = 'Benjamin Lee',
author_email = 'benjamin_lee@college.harvard.edu',
url = 'https://github.com/Benjamin-Lee/CodonAdaptationIndex', # use the URL to the github repo
classifiers = ["Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Programming Language :: Python"],
install_requires=['scipy'],
license="MIT",
use_2to3=True
)
|
[
"benjamindlee@me.com"
] |
benjamindlee@me.com
|
43c5ac8e72eff42df3abd6890e31bd12da42a1c6
|
51981cdf4b36ac6bd5d2e9312c19fdd19c333c84
|
/oddnuminter.py
|
5bda0926057b5b9d686ea857216f0ee4f7e52df0
|
[] |
no_license
|
jasirknm/guvi
|
ac07b436294533dc758542a9ee4e5f6737ba63ee
|
84c77d6dbeae6642ef81d6f2bba6d7b5060f45fb
|
refs/heads/master
| 2020-04-21T16:08:23.085051
| 2019-02-08T05:35:30
| 2019-02-08T05:35:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
#srini
#Read two integers n and q, then print the odd numbers strictly between them
a = input().split()
n = int(a[0])
q = int(a[1])
for i in range(n + 1, q):
    if i % 2 != 0:
        print(i, end=" ")
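#Example: input "3 11" prints "5 7 9 " (both endpoints excluded)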
|
[
"noreply@github.com"
] |
jasirknm.noreply@github.com
|
8e032812208772cafa364fc657e132f45704b3c0
|
bde5dbf63ce34f61b0e4dbe36c4ab49e7e1d7e9c
|
/old/resnet18_ex3.py
|
3ba3595b4b4fefd44900e5eca214f59a24f7e446
|
[
"Apache-2.0"
] |
permissive
|
killf/pytorch-cifar10
|
13dda528bb8e7a3aba4107eecd9c20acc47ba304
|
2bf864eeed72fd6b2dcbc5513d83eb611d7d86ef
|
refs/heads/master
| 2021-05-27T07:50:02.847634
| 2020-04-16T23:48:21
| 2020-04-16T23:48:21
| 254,237,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,067
|
py
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torchvision.datasets.cifar import CIFAR10
from torchvision.models.resnet import resnet18
LR = 0.1
EPOCHS = 5
BATCH_SIZE = 128
NUM_WORKERS = 2
DATA_DIR = "./data"
MODEL_FILE = "models/resnet18_ex2.pkl"
def main():
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
    trainset = CIFAR10(
        root=DATA_DIR, train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
    testset = CIFAR10(
        root=DATA_DIR, train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=100, shuffle=False, num_workers=NUM_WORKERS)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = resnet18(num_classes=10).to(device)
if torch.cuda.is_available():
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
for epoch in range(EPOCHS):
net.train()
correct, total = 0., 0.
for step, (x, y_true) in enumerate(trainloader, 1):
x = x.to(device)
y_true = y_true.to(device)
optimizer.zero_grad()
y_pred = net(x)
loss = criterion(y_pred, y_true)
loss.backward()
optimizer.step()
_, predicted = y_pred.max(1)
total += y_true.size(0)
correct += predicted.eq(y_true).sum().item()
if step % 100 == 0:
print(f"Epoch:{epoch} Step:{step}, Loss:{loss.item():05f}, Acc:{correct / total:05f}")
del correct
del total
del x
del y_true
del loss
del predicted
net.eval()
with torch.no_grad():
correct, total = 0., 0.
for x, y_true in testloader:
x = x.to(device)
y_true = y_true.to(device)
y_pred = net(x)
_, predicted = y_pred.max(1)
total += y_true.size(0)
correct += predicted.eq(y_true).sum().item()
print(f"[VAL]Epoch:{epoch}, Acc:{correct / total:05f}")
print()
del correct
del total
del x
del y_true
del predicted
if __name__ == '__main__':
main()
|
[
"killf@foxmail.com"
] |
killf@foxmail.com
|
a16c93879e70cb13937180f7c8eb02f8bc56a853
|
9ee26754de512df4ce7c8e4c48c66a8ac6916f18
|
/DataLoader.py
|
707c3158683d1fa6eb24444e8c402919038084a4
|
[] |
no_license
|
ryumaggs/ComplexNN
|
4d0b0aa40aed3a59cbac473e4acd0c6d5daedbf0
|
2da891e2b2b57d860931ecdbe89c3498e5a2a095
|
refs/heads/master
| 2023-04-01T22:50:07.382224
| 2021-04-08T13:16:06
| 2021-04-08T13:16:06
| 281,153,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
import tensorflow as tf
def Load(path):
    '''
    Read a CSV file whose rows are "x_real,x_imag,y_real,y_imag" and return
    the x and y columns as two complex tensors.
    '''
    x_real = []
    x_imag = []
    y_real = []
    y_imag = []
    with open(path, 'r') as f_in:
        for line in f_in:
            parts = line.strip().split(',')
            x_real.append(float(parts[0]))
            x_imag.append(float(parts[1]))
            y_real.append(float(parts[2]))
            y_imag.append(float(parts[3]))
tensor_x = tf.complex(x_real, x_imag)
tensor_y = tf.complex(y_real, y_imag)
return tensor_x, tensor_y
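# Example usage, assuming a file "data.csv" whose lines look like "1.0,2.0,3.0,4.0":
#     x, y = Load("data.csv")  # x == [1+2j], y == [3+4j] as complex tensors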
|
[
"ryu@colgate.edu"
] |
ryu@colgate.edu
|
f3179f6636a833a4370ae01a7632477aa089ac9b
|
934678d8886d9ca2472a7a6af5cdf7654fb2487f
|
/infer.py
|
d633534b24ac3ba2d30236c4f4040dbdc5956ee9
|
[] |
no_license
|
suchuhong/paddleYOLOv3
|
560be68f07c266695f65bcd18b7bc647656b3abf
|
80942589324a23b23b9cd85a04f9ec99d770d3cb
|
refs/heads/master
| 2020-05-27T19:58:12.894050
| 2019-05-27T04:24:35
| 2019-05-27T04:24:35
| 188,770,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
import os
import numpy as np
import paddle.fluid as fluid
import box_utils
import reader
from models.yolov3 import YOLOv3
import xml.etree.ElementTree as ET
def infer():
use_gpu = False
input_size = 608
model = YOLOv3(is_train=False)
model.build_model()
outputs = model.get_pred()
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
    weights_dir = "./weights/model_iter53999"
    if os.path.exists(weights_dir):
        def if_exist(var):
            return os.path.exists(os.path.join(weights_dir, var.name))
        fluid.io.load_vars(exe, weights_dir, predicate=if_exist)
feeder = fluid.DataFeeder(place=place, feed_list=model.feeds())
fetch_list = [outputs]
image_names = []
for image_name in os.listdir("./img/"):
if image_name.split('.')[-1] in ['jpg', 'png']:
image_names.append(image_name)
for image_name in image_names:
infer_reader = reader.infer(input_size, os.path.join("./img/", image_name))
label_names, _ = reader.get_label_infos()
data = next(infer_reader())
outputs = exe.run(
fetch_list=[v.name for v in fetch_list],
feed=feeder.feed(data),
return_numpy=False)
bboxes = np.array(outputs[0])
if bboxes.shape[1] != 6:
print("No object found in {}".format(image_name))
continue
labels = bboxes[:, 0].astype('int32')
scores = bboxes[:, 1].astype('float32')
boxes = bboxes[:, 2:].astype('float32')
draw_thresh = 0.40
path = os.path.join("./img/", image_name)
box_utils.draw_boxes_on_image(path, boxes, scores, labels, label_names, draw_thresh)
if __name__ == '__main__':
    # Create the root node
    root = ET.Element("coordinate")
    # Create an ElementTree object and write it to a file
tree = ET.ElementTree(root)
tree.write("coordinate.xml")
infer()
|
[
"noreply@github.com"
] |
suchuhong.noreply@github.com
|
1a6e80b9bb2bbbf88d2ca308404b79804e540972
|
a46cd09c2b1a984fc8733beba630b32a0e7a04df
|
/pythonteste/desafio55.py
|
b1a7536dcb9a466f55e3aa434a8823880e381b9c
|
[
"MIT"
] |
permissive
|
dangiotto/Python
|
36c7a4af706fdf6e0b6f56f879b50a1f9a55506c
|
29a9d18d7595a5c21e65dafc39f7fd4c55d8971c
|
refs/heads/main
| 2022-12-29T11:50:07.470877
| 2020-09-29T22:43:06
| 2020-09-29T22:43:06
| 299,753,324
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
mais = 0
menos = 0
for c in range(1, 6):
    peso = float(input('Enter the weight of person #{}: '.format(c)))
    if c == 1:
        mais = peso
        menos = peso
    else:
        if peso > mais:
            mais = peso
        elif peso < menos:
            menos = peso
print('The heaviest person weighs {} KG.'.format(mais))
print('The lightest person weighs {} KG.'.format(menos))
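# Example: weights 70, 82.5, 65, 90, 77 -> heaviest 90.0 KG, lightest 65.0 KG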
|
[
"danilogiotto100@hotmail.com"
] |
danilogiotto100@hotmail.com
|
51ef258d29175cb7831bfaf2b78003466bcbc2ea
|
e27e9c484b90e74c5b292ac938a82d49121d683f
|
/tests/test_address.py
|
dfdb8146ff8853bcc9099e107127e294350dce5b
|
[] |
no_license
|
mickeystone/cryptotools
|
ce99ba69b29a29f64792408d63b104290150b0b0
|
d7e5e4f312776ede5f71b0ed1105e338974a5ac4
|
refs/heads/master
| 2020-03-24T12:51:55.182415
| 2018-07-06T10:55:34
| 2018-07-06T10:55:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,055
|
py
|
import unittest
import secrets
import urllib.request
import urllib.parse
from lxml import etree
from btctools.address import pubkey_to_address, script_to_address, hash160, address_to_script, address_type, ADDRESS
from btctools.script import push, OP
from ECDSA.secp256k1 import generate_keypair, PrivateKey, PublicKey
from transformations import bytes_to_hex, int_to_str
from btctools import bech32
class TestLegacyAddress(unittest.TestCase):
url = 'http://gobittest.appspot.com/Address'
def test_p2pkh(self):
"""https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses#See_Also"""
payload = {'Random': 'Random'}
data = urllib.parse.urlencode(payload).encode('ascii')
req = urllib.request.Request(self.url, data)
with urllib.request.urlopen(req) as response:
html = response.read()
tree = etree.HTML(html)
private = tree.find('.//input[@name="private"]').attrib['value']
public = tree.find('.//input[@name="public"]').attrib['value']
address = tree.find('.//input[@name="Base58"]').attrib['value']
my_pubkey = PrivateKey.from_hex(private).to_public()
self.assertEqual(public.lower(), my_pubkey.hex())
self.assertEqual(pubkey_to_address(my_pubkey), address)
self.assertEqual(address_type(address), ADDRESS.P2PKH)
def test_p2sh(self):
script = secrets.token_bytes(32)
scripthash = hash160(script)
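        # 0x05 is the mainnet P2SH version byte prepended before Base58Check encoding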
payload = {'RIPEMDWithHash': '05' + bytes_to_hex(scripthash)}
data = urllib.parse.urlencode(payload).encode('ascii')
req = urllib.request.Request(self.url, data)
with urllib.request.urlopen(req) as response:
html = response.read()
tree = etree.HTML(html)
address = tree.find('.//input[@name="Base58"]').attrib['value']
self.assertEqual(script_to_address(script, 'P2SH'), address)
self.assertEqual(address_type(address), ADDRESS.P2SH)
# def test_balance(self):
# # if satoshi moves his coins this test will fail
# addr = Address('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')
# self.assertEqual(addr.balance(), 66.65271233)
def test_address_type(self):
self.assertEqual(address_type('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa'), ADDRESS.P2PKH)
self.assertEqual(address_type('34eBzenHJEdk5PK9ojuuBZvCRtNhvvysYZ'), ADDRESS.P2SH)
class TestBech32(unittest.TestCase):
hrp = 'bc'
witver = 0x00
def test_bech32_decode(self):
private, public = generate_keypair()
witprog = hash160(public.encode(compressed=True))
address = bech32.encode(self.hrp, self.witver, witprog)
wv, decoded = bech32.decode(self.hrp, address)
self.assertEqual(wv, self.witver)
self.assertEqual(bytes(decoded), bytes(witprog))
def test_p2wpkh(self):
"""https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#examples"""
pubkey = PublicKey.from_hex('0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798')
self.assertEqual(bech32.encode(self.hrp, self.witver, hash160(pubkey.encode(compressed=True))), 'bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4')
address = pubkey_to_address(pubkey, version='P2WPKH')
self.assertEqual(address, 'bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4')
self.assertEqual(address_type(address), ADDRESS.P2WPKH)
def test_p2wsh(self):
"""https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#examples"""
pubkey = PublicKey.from_hex('0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798')
script = push(pubkey.encode(compressed=True)) + OP.CHECKSIG.byte # <key> <OP_CHECKSIG>
address = script_to_address(script, 'P2WSH')
self.assertEqual(address, 'bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3')
self.assertEqual(address_type(address), ADDRESS.P2WSH)
def test_valid_bech32(self):
"""https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#test-vectors"""
valid_strings = [
'A12UEL5L',
'a12uel5l',
'an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs',
'abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw',
'11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j',
'split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w',
'?1ezyfcl'
]
invalid_strings = [
int_to_str(0x20) + '1nwldj5',
int_to_str(0x7F) + '1axkwrx',
b'\x80'.decode('ascii', 'replace') + '1eym55h',
'an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx',
'pzry9x0s0muk',
'1pzry9x0s0muk',
'x1b4n0q5v',
'li1dgmt3',
'de1lg7wt' + b'\xff'.decode('ascii', 'replace'),
'A1G7SGD8',
'10a06t8',
'1qzzfhee'
]
# Should raise no exceptions
for string in valid_strings:
bech32.bech32_decode(string)
# Should raise Bech32DecodeError
for string in invalid_strings:
with self.assertRaises(bech32.Bech32DecodeError):
bech32.bech32_decode(string)
def test_address_to_script(self):
"""https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#test-vectors"""
valid = {
'BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4': '0014751e76e8199196d454941c45d1b3a323f1433bd6',
'tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7': '00201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262',
'bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx': '5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6',
'BC1SW50QA3JX3S': '6002751e',
'bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj': '5210751e76e8199196d454941c45d1b3a323',
'tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy': '0020000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433'
}
invalid = [
'tc1qw508d6qejxtdg4y5r3zarvary0c5xw7kg3g4ty',
'bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5',
'BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2',
'bc1rw5uspcuh',
'bc10w508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kw5rljs90',
'BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P',
'tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7',
'bc1zw508d6qejxtdg4y5r3zarvaryvqyzf3du',
'tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv',
'bc1gmk9yu'
]
for addr, script in valid.items():
self.assertEqual(bytes_to_hex(address_to_script(addr)), script)
for addr in invalid:
with self.assertRaises(bech32.Bech32DecodeError):
address_to_script(addr)
def test_address_type(self):
self.assertEqual(address_type('bc1qh2egksgfejqpktc3kkdtuqqrukrpzzp9lr0phn'), ADDRESS.P2WPKH)
self.assertEqual(address_type('bc1q8yh8l8ft3220q328hlapqhflpzy6xvkq6u36mctk8gq5pyxm3rwqv5h5dg'), ADDRESS.P2WSH)
class TestNet(unittest.TestCase):
def setUp(self):
import btctools.network
btctools.network.current_network = btctools.network.NETWORK.TEST
def tearDown(self):
import btctools.network
btctools.network.current_network = btctools.network.NETWORK.MAIN
def test_address_type(self):
self.assertEqual(address_type('mgxVT9fzHwYDsgEGJSZekKgYbAyrBkqdpi'), ADDRESS.P2PKH)
self.assertEqual(address_type('2MzAQDXGpmJyS6ybm2q57dbe8j2oxmvRDkc'), ADDRESS.P2SH)
self.assertEqual(address_type('n2NGrooSecJaiD6ssp4YqFoj9eZ7GrCJ66'), ADDRESS.P2PKH)
self.assertEqual(address_type('tb1q7w5dhw4hl5yvxvl3yvv2xxvh7jwm28p9kpelcp'), ADDRESS.P2WPKH)
|
[
"mc-dallas@hotmail.com"
] |
mc-dallas@hotmail.com
|
5e1002e62f5ddda4dd97a0fba653878027f285ab
|
d1c53def818f9c7e1bd660e3303a754f297aff43
|
/code/ch7/6_9_a.py
|
87ff65160d0731629c3f3095c98607c6d2920708
|
[] |
no_license
|
khimacademy/c104
|
dcdcae13499e5b68905f09ea009e1a2b9f552e1c
|
83443858d5b85c23c107fa09cd672d17549776ee
|
refs/heads/master
| 2020-03-26T10:57:52.536935
| 2018-08-25T06:17:04
| 2018-08-25T06:17:04
| 144,822,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
'''
6-9. Favorite Places
Make a dictionary called favorite_places. Think of three names to use as keys
in the dictionary, and store one to three favorite places for each person. To
make this exercise a bit more interesting, ask some friends to name a few of
their favorite places. Loop through the dictionary, and print each person's
name and their favorite places.
Output:
ever likes the following places:
- mt. verstovia
- the playground
- south carolina
erin likes the following places:
- hawaii
- iceland
eric likes the following places:
- bear mountain
- death valley
- tierra del Fuego
'''
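# A minimal sketch of one possible solution; the data below simply mirrors
# the expected output in the docstring above.
favorite_places = {
    'ever': ['mt. verstovia', 'the playground', 'south carolina'],
    'erin': ['hawaii', 'iceland'],
    'eric': ['bear mountain', 'death valley', 'tierra del Fuego'],
}
for name, places in favorite_places.items():
    print("{} likes the following places:".format(name))
    for place in places:
        print("- {}".format(place))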
|
[
"sarang.khim@gmail.com"
] |
sarang.khim@gmail.com
|
414ea8595217197e54c34419406b88bdadf5f87d
|
6b29dbfe527d9e1950d53fde913e05e2a58758ab
|
/server/tasks/dast.py
|
dcbc7b141dcf21d665840953ca8301bc3884d601
|
[
"Apache-2.0"
] |
permissive
|
parijatsahai/camcops
|
d7b2843b77bedee87b8298138bc8a33fe66c5178
|
09c7000060b546ad22b908e4245b1ff02940dd63
|
refs/heads/master
| 2021-01-21T09:20:43.670032
| 2015-07-02T15:42:59
| 2015-07-02T15:42:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,241
|
py
|
#!/usr/bin/python2.7
# -*- encoding: utf8 -*-
"""
Copyright (C) 2012-2015 Rudolf Cardinal (rudolf@pobox.com).
Department of Psychiatry, University of Cambridge.
Funded by the Wellcome Trust.
This file is part of CamCOPS.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cc_modules.cc_db import repeat_fieldspec
from cc_modules.cc_html import (
answer,
get_yes_no,
tr,
tr_qa,
)
from cc_modules.cc_string import WSTRING
from cc_modules.cc_task import (
CTV_DICTLIST_INCOMPLETE,
get_from_dict,
STANDARD_TASK_FIELDSPECS,
Task,
)
# =============================================================================
# DAST
# =============================================================================
class Dast(Task):
NQUESTIONS = 28
TASK_FIELDSPECS = repeat_fieldspec(
"q", 1, NQUESTIONS, "CHAR", pv=['Y', 'N'],
comment_fmt='Q{n}. {s} ("+" = Y scores 1, "-" = N scores 1)',
comment_strings=[
"non-medical drug use (+)",
"abused prescription drugs (+)",
"abused >1 drug at a time (+)",
"get through week without drugs (-)",
"stop when want to (-)",
"abuse drugs continuously (+)",
"try to limit to certain situations (-)",
"blackouts/flashbacks (+)",
"feel bad about drug abuse (-)",
"spouse/parents complain (+)",
"friends/relative know/suspect (+)",
"caused problems with spouse (+)",
"family sought help (+)",
"lost friends (+)",
"neglected family/missed work (+)",
"trouble at work (+)",
"lost job (+)",
"fights under influence (+)",
"arrested for unusual behaviour under influence (+)",
"arrested for driving under influence (+)",
"illegal activities to obtain (+)",
"arrested for possession (+)",
"withdrawal symptoms (+)",
"medical problems (+)",
"sought help (+)",
"hospital for medical problems (+)",
"drug treatment program (+)",
"outpatient treatment for drug abuse (+)",
])
TASK_FIELDS = [x["name"] for x in TASK_FIELDSPECS]
@classmethod
def get_tablename(cls):
return "dast"
@classmethod
def get_taskshortname(cls):
return "DAST"
@classmethod
def get_tasklongname(cls):
return "Drug Abuse Screening Test"
@classmethod
def get_fieldspecs(cls):
return STANDARD_TASK_FIELDSPECS + Dast.TASK_FIELDSPECS
@classmethod
def provides_trackers(cls):
return True
def get_trackers(self):
return [
{
"value": self.total_score(),
"plot_label": "DAST total score",
"axis_label": "Total score (out of 28)",
"axis_min": -0.5,
"axis_max": 28.5,
"horizontal_lines": [
10.5,
5.5,
],
}
]
def get_clinical_text(self):
if not self.is_complete():
return CTV_DICTLIST_INCOMPLETE
return [{
"content": "DAST total score {}/28".format(self.total_score())
}]
def get_summaries(self):
return [
self.is_complete_summary_field(),
dict(name="total", cctype="INT", value=self.total_score()),
]
def is_complete(self):
return (
self.are_all_fields_complete(Dast.TASK_FIELDS)
and self.field_contents_valid()
)
def get_score(self, q):
yes = "Y"
value = getattr(self, "q" + str(q))
if value is None:
return 0
if q == 4 or q == 5 or q == 7:
return 0 if value == yes else 1
else:
return 1 if value == yes else 0
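    # Example: questions 4, 5 and 7 are reverse-scored, so q4 == 'N'
    # ("cannot get through the week without drugs") scores 1, while 'Y' scores 0.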
def total_score(self):
total = 0
for q in range(1, Dast.NQUESTIONS + 1):
total += self.get_score(q)
return total
def get_task_html(self):
score = self.total_score()
exceeds_cutoff_1 = score >= 6
exceeds_cutoff_2 = score >= 11
MAIN_DICT = {
None: None,
"Y": WSTRING("Yes"),
"N": WSTRING("No")
}
h = u"""
<div class="summary">
<table class="summary">
""" + self.get_is_complete_tr()
h += tr(WSTRING("total_score"), answer(score) + " / 28")
h += tr_qa(WSTRING("dast_exceeds_standard_cutoff_1"),
get_yes_no(exceeds_cutoff_1))
h += tr_qa(WSTRING("dast_exceeds_standard_cutoff_2"),
get_yes_no(exceeds_cutoff_2))
h += u"""
</table>
</div>
<table class="taskdetail">
<tr>
<th width="80%">Question</th>
<th width="20%">Answer</th>
</tr>
"""
for q in range(1, Dast.NQUESTIONS + 1):
h += tr(
WSTRING("dast_q" + str(q)),
answer(get_from_dict(MAIN_DICT, getattr(self, "q" + str(q))))
+ u" — " + answer(str(self.get_score(q)))
)
h += u"""
</table>
<div class="copyright">
DAST: Copyright © Harvey A. Skinner and the Centre for
Addiction and Mental Health, Toronto, Canada.
Reproduced here under the permissions granted for
NON-COMMERCIAL use only. You must obtain permission from the
copyright holder for any other use.
</div>
"""
return h
|
[
"rudolf@pobox.com"
] |
rudolf@pobox.com
|
a4c252bad6d3b3e3972ec02f656853437fe2997e
|
b1599f517e62c6651c930c28c430ac3ff7d52bb9
|
/src/apps/api/tests/test_profiles.py
|
1c95fc5d70cd665d96d44fe058618e9d3fd7b8ce
|
[
"Apache-2.0"
] |
permissive
|
HunDeMingMingBaiBai/competitions-v2
|
4d9ef93f14a3bc43c582c67b62904c6bcf0c19fb
|
745b56274ada40b78cda6e91dd762f2d547cd841
|
refs/heads/develop
| 2023-08-11T06:02:49.780503
| 2021-09-09T04:39:42
| 2021-09-09T04:39:42
| 377,348,778
| 0
| 0
|
Apache-2.0
| 2021-06-16T02:32:30
| 2021-06-16T02:32:29
| null |
UTF-8
|
Python
| false
| false
| 9,799
|
py
|
from django.urls import reverse
from rest_framework.test import APITestCase
from json import dumps
from api.serializers.profiles import OrganizationSerializer
from factories import UserFactory, OrganizationFactory
from profiles.models import Membership
class OrganizationPermissionTests(APITestCase):
def setUp(self):
self.random_user = UserFactory()
# Organizations Setup
self.owner = UserFactory(username='owner')
self.manager = UserFactory(username='manager')
self.participant = UserFactory(username='participant')
self.member = UserFactory(username='member')
self.member2 = UserFactory(username='member2')
self.invited = UserFactory(username='invited')
self.organization = OrganizationFactory()
self.organization.users.add(*[
self.owner,
self.manager,
self.participant,
self.member,
self.member2,
self.invited,
])
self.organization.membership_set.filter(user=self.owner).update(group=Membership.OWNER)
self.organization.membership_set.filter(user=self.manager).update(group=Membership.MANAGER)
self.organization.membership_set.filter(user=self.participant).update(group=Membership.PARTICIPANT)
self.organization.membership_set.filter(user=self.member).update(group=Membership.MEMBER)
self.organization.membership_set.filter(user=self.member2).update(group=Membership.MEMBER)
self.organization.membership_set.filter(user=self.invited).update(group=Membership.INVITED)
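        # Group hierarchy exercised by these tests (ordering inferred from
        # the assertions below): OWNER > MANAGER > MEMBER/PARTICIPANT > INVITED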
# Urls
self.update_member_group = reverse('organizations-update-member-group', args=[self.organization.id])
self.delete_member = reverse('organizations-delete-member', args=[self.organization.id])
self.validate_invite = reverse('organizations-validate-invite')
self.invite_response = reverse('organizations-invite-response')
self.invite_user = reverse('organizations-invite-users', args=[self.organization.id])
self.url_organizations = reverse('organizations-detail', args=[self.organization.id])
def get_membership_id_for_user(self, user_id):
return self.organization.membership_set.get(user=user_id).id
def get_token_for_user(self, user_id):
return self.organization.membership_set.get(user=user_id).token
def test_non_admin_org_members_cannot_raise_permission_group_of_themselves(self):
self.client.force_login(user=self.member)
data = {
'membership': self.get_membership_id_for_user(self.member.id),
'group': Membership.MANAGER,
}
resp = self.client.post(self.update_member_group, data=data)
assert resp.status_code == 403
def test_non_admin_org_members_cannot_raise_permission_group_of_others(self):
self.client.force_login(user=self.member)
data = {
'membership': self.get_membership_id_for_user(self.member2.id),
'group': Membership.MANAGER,
}
resp = self.client.post(self.update_member_group, data=data)
assert resp.status_code == 403
def test_non_org_member_cannot_find_object_when_changing_permission_group_of_others(self):
self.client.force_login(user=self.random_user)
data = {
'membership': self.get_membership_id_for_user(self.member2.id),
'group': Membership.MANAGER,
}
resp = self.client.post(self.update_member_group, data=data)
assert resp.status_code == 404
def test_admin_org_members_can_raise_permission_group_of_others(self):
self.client.force_login(user=self.manager)
data = {
'membership': self.get_membership_id_for_user(self.member2.id),
'group': Membership.MANAGER,
}
resp = self.client.post(self.update_member_group, data=data)
assert resp.status_code == 200
def test_admin_org_members_can_lower_permission_group_of_others(self):
self.client.force_login(user=self.manager)
data = {
'membership': self.get_membership_id_for_user(self.member2.id),
'group': Membership.MEMBER,
}
resp = self.client.post(self.update_member_group, data=data)
assert resp.status_code == 200
def test_admin_org_members_cannot_lower_permission_group_of_owner(self):
self.client.force_login(user=self.manager)
data = {
'membership': self.get_membership_id_for_user(self.owner.id),
'group': Membership.MEMBER,
}
resp = self.client.post(self.update_member_group, data=data)
assert resp.status_code == 403
def test_admin_org_members_cannot_raise_permission_group_of_invited(self):
self.client.force_login(user=self.manager)
data = {
'membership': self.get_membership_id_for_user(self.invited.id),
'group': Membership.MEMBER,
}
resp = self.client.post(self.update_member_group, data=data)
assert resp.status_code == 403
def test_admin_org_members_cannot_delete_org_owner(self):
self.client.force_login(user=self.manager)
data = {
'membership': self.get_membership_id_for_user(self.owner.id)
}
resp = self.client.delete(self.delete_member, data=data)
assert resp.status_code == 403
def test_non_admin_org_members_cannot_delete_org_member(self):
self.client.force_login(user=self.participant)
data = {
'membership': self.get_membership_id_for_user(self.member.id)
}
resp = self.client.delete(self.delete_member, data=data)
assert resp.status_code == 403
def test_admin_org_members_can_delete_org_member(self):
self.client.force_login(user=self.manager)
data = {
'membership': self.get_membership_id_for_user(self.member2.id)
}
resp = self.client.delete(self.delete_member, data=data)
assert resp.status_code == 200
self.organization.users.add(self.member2)
self.organization.membership_set.filter(user=self.member2).update(group=Membership.MEMBER)
def test_admin_org_members_can_delete_org_invited(self):
self.client.force_login(user=self.manager)
data = {
'membership': self.get_membership_id_for_user(self.invited.id)
}
resp = self.client.delete(self.delete_member, data=data)
assert resp.status_code == 200
self.organization.users.add(self.invited)
self.organization.membership_set.filter(user=self.invited).update(group=Membership.INVITED)
def test_invited_org_member_invite_token_validates(self):
self.client.force_login(user=self.invited)
data = {
'token': self.get_token_for_user(self.invited)
}
resp = self.client.post(self.validate_invite, data=data)
assert resp.status_code == 200
def test_invited_org_member_can_accept_invite(self):
self.client.force_login(user=self.invited)
data = {
'token': self.get_token_for_user(self.invited)
}
resp = self.client.post(self.invite_response, data=data)
assert resp.status_code == 200
self.organization.membership_set.filter(user=self.invited).update(group=Membership.INVITED)
def test_invited_org_member_can_reject_invite(self):
self.client.force_login(user=self.invited)
data = {
'token': self.get_token_for_user(self.invited)
}
resp = self.client.delete(self.invite_response, data=data)
assert resp.status_code == 200
self.organization.membership_set.filter(user=self.invited).update(group=Membership.INVITED)
def test_org_member_cannot_accept_invite_for_other(self):
self.client.force_login(user=self.member)
data = {
'token': self.get_token_for_user(self.invited)
}
resp = self.client.post(self.invite_response, data=data)
assert resp.status_code == 403
def test_org_member_cannot_reject_invite_for_other(self):
self.client.force_login(user=self.member)
data = {
'token': self.get_token_for_user(self.invited)
}
resp = self.client.delete(self.invite_response, data=data)
assert resp.status_code == 403
def test_org_member_cannot_invite_user(self):
self.client.force_login(user=self.member)
data = {'users': [self.random_user.id]}
resp = self.client.post(self.invite_user, data=dumps(data), content_type='application/json')
assert resp.status_code == 403
def test_admin_org_member_can_invite_user(self):
self.client.force_login(user=self.manager)
data = {
'users': [self.random_user.id],
}
resp = self.client.post(self.invite_user, data=dumps(data), content_type='application/json')
assert resp.status_code == 200
def test_admin_org_member_can_edit_organization(self):
self.client.force_login(user=self.manager)
data = OrganizationSerializer(instance=self.organization).data
data['name'] = "Changed_Name"
data = {k: v for k, v in data.items() if v}
resp = self.client.put(self.url_organizations, data=dumps(data), content_type='application/json')
assert resp.status_code == 200
def test_org_member_cannot_edit_organization(self):
self.client.force_login(user=self.member)
data = OrganizationSerializer(instance=self.organization).data
data['name'] = "Changed_Name2"
data = {k: v for k, v in data.items() if v}
resp = self.client.put(self.url_organizations, data=dumps(data), content_type='application/json')
assert resp.status_code == 403
|
[
"noreply@github.com"
] |
HunDeMingMingBaiBai.noreply@github.com
|
682ee65e3b3619ab3edb8cc875be5f533710d491
|
88be8d0532c4190f386409cbdb957682537826bb
|
/a2.py
|
c9aaeb503178383fa7b784d3c64e73553aa8c601
|
[] |
no_license
|
taoxlei/pythone-chanllenge-code
|
c9e84c39a6c786082b2dbe89de285d45ec23ff4f
|
b7d837c4a2a13bbdb61c8587394d8fb8c4d16eb4
|
refs/heads/master
| 2022-04-27T00:58:12.652800
| 2013-11-28T13:43:05
| 2013-11-28T13:43:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98,921
|
py
|
orginStr='''
%%$@_$^__#)^)&!_+]!*@&^}@[@%]()%+$&[(_@%+%$*^@$^!+]!&_#)_*}{}}!}_]$[%}@[{_@#_^{*
@##&{#&{&)*%(]{{([*}@[@&]+!!*{)!}{%+{))])[!^})+)$]#{*+^((@^@}$[**$&^{$!@#$%)!@(&
+^!{%_$&@^!}$_${)$_#)!({@!)(^}!*^&!$%_&&}&_#&@{)]{+)%*{&*%*&@%$+]!*__(#!*){%&@++
!_)^$&&%#+)}!@!)&^}**#!_$([$!$}#*^}$+&#[{*{}{((#$]{[$[$$()_#}!@}^@_&%^*!){*^^_$^
]@}#%[%!^[^_})+@&}{@*!(@$%$^)}[_!}(*}#}#___}!](@_{{(*#%!%%+*)^+#%}$+_]#}%!**#!^_
)@)$%%^{_%!@(&{!}$_$[)*!^&{}*#{!)@})!*{^&[&$#@)*@#@_@^_#*!@_#})+[^&!@*}^){%%{&#@
@{%(&{+(#^{@{)%_$[+}]$]^{^#(*}%)@$@}(#{_&]#%#]{_*({(])$%[!}#@@&_)([*]}$}&${^}@(%
(%[@%!}%*$}(*@)}){+@(%@*$&]*^*}*]&$[}*]%]+*}^!}*$^^_()#$^]++@__){&&+((#%+(&+){)$
%&&#($[[+##*%${)_!+{_[})%++)$#))]]]$]@]@($+{&%&%+!!!@]_]+])^*@$(@#${}}#}{%}#+{(@
#__+{{]${]!{(%${%%^)(_*_@+)$]$#_@$)]](}{}$(}*%+!}#+)$%$}+#@*&^{##}+@(%[*@_}{(^]^
+_*{@+[$!!@%$+{_&(#^(([&[][[&@#+}_]&&]}^*&$&)#_^$@$((%)}+{}$#+{+^}&[#[#_+${#[#]{
(@@[%}[}$%+*#$+[%(**!$+@$@&+$_$#!_&&&&{***+)}][}#^!%#&$*)$!%}*&#}}##(^_%^]{+]&&]
}^]#^(}@]&$]*_][])$]{_+})^_}]))()^&)(!*![!&}{][(]})[(*^}$&$_@^$)#${%[$_]!^]}}}*+
*^_(+}^)(%(}{&)[}!$$&&+}&[{%}^+#$]@)^&*%{@}]&!%*%$*&][}&{$&*@{@#]$*_[]%%[#]#*%)@
$_^#%$!{#]^$}%^@^+{($!^($%)]+&}+$@[$*)*&)*%!_!!+@&^*{}%#&{}$!(*^*@]@@})[($!)]]})
})(&+##]##%&##$}@{#_])*%(*(@$)}[+(+_)!{{#^{_@)!&)$}@^^^[$#__+$^!*#%%]_!#$]$&+^}%
@])])%}]#$((^+{{@++^])$^*#[$}*]}}{)@+)[_}*@^%#]]#+()+)(]_[!!!)+)$+&@@])!}+*%]$[]
&&[@+$_&#[$!$${}{%[]#+@)*!#)*!{$#*$%}[(&@$&_@($$]]]_[+(#@}&_}+]@$#_+](}^})!@@}@)
}^]^]*}]+&(@@!!](*@#(++*)]!(^$})&_^@+]{#_@*%^[$[%&_%@%_![&&]&_@*#_}[{{])^$[_$_&_
@%%[@#[@_[&+]}[+)!_#_+++%)[@%$(&$[{#@(}$*![#^#{}_)[$^_$${_@&}*![#*#_+%[@{*^$){)#
#%}]{+((*^]+{})&#$!#(*%({_!^*[{%@_&#){![&]@$#[#(!{*#^*%)]!%(#]%${*_^{+}(@}{_^(](
_+!_)^&}!#([(+&[@])[_(]@]@&@{#@(%[@+[^@%@+]*_[{]$[_(_@[!]]^%+@#(@$}]@(^**+]%^)^(
@}^[]@@[@[@}^(^!]%*_]&$!!^^#*[#*[*_}+[$#(_#%@](+[^+}%{_*#]+*(]}!$(%@%#^)}]_&]{${
}$[*{+&+&}[#_#}_(}){^#{[_%*!$+[#)%]@&&_{)#[+*&+#!&)%)%++$_}){%%*@!*&%__(_!]#$*(_
$^!@@}_())%(&$%]]{{{@+!&%@(^!+*{%[*[!]){(#$@)(^{]%[&*(&!{&}!%*$)*]]$%(__[}_+&)!(
^_&*]*+#@{@[_({$*&}][(*!+$+#%&![%^)^#(#}+*+(@)&&!({^^_*($^+)&{)%$@%)&!$$&&^+#[)$
+!$^]*!%^_$}$+!!&%_&){$%{((&^{{(&_&_]{^}@[$^+]}]^{@!^@_%_{^@*)+^*#$#!+*}#)}@(}!]
_*)}$**@}[^_&*^)*+#()]&{{]*+#${@&}#)$[]_+(^_@^][]_)*^*+_!{&$##]((](}}{[!$#_{&{){
*_{^}$#!+]{[^&++*#!]*)]%$!{#^&%(%^*}@^+__])_$@_^#[{{})}$*]#%]{}{][@^!@)_[}{())%)
())&#@*[#}+#^}#%!![#&*}^{^(({+#*[!{!}){(!*@!+@[_(*^+*]$]+@+*_##)&)^(@$^]e@][#&)(
%%{})+^$))[{))}&$(^+{&(#%*@&*(^&{}+!}_!^($}!(}_@@++$)(%}{!{_]%}$!){%^%%@^%&#([+[
_+%){{}(#_}&{&++!@_)(_+}%_#+]&^)+]_[@]+$!+{@}$^!&)#%#^&+$@[+&+{^{*[@]#!{_*[)(#[[
]*!*}}*_(+&%{&#$&+*_]#+#]!&*@}$%)!})@&)*}#(@}!^(]^@}]#&%)![^!$*)&_]^%{{}(!)_&{_{
+[_*+}]$_[#@_^]*^*#@{&%})*{&**}}}!_!+{&^)__)@_#$#%{+)^!{}^@[$+^}&(%%)&!+^_^#}^({
*%]&@{]++}@$$)}#]{)!+@[^)!#[%@^!!+{(@&+++_{!$}{]_%_#^#%&{!_(#$%%&@[})]+_@!(*[_@[
*_&+][^][}^@}])!(&^*[_%+(}!!{!!^*@!({%]#[_&()$]!$]@}*][)#()})[*^[^}]#(((_^#%%]@}
^###%!{(@+]$%*^}(![$@*]_{#*!$*@%*(^+#!)$&]*%$&*@$[)_$!&+_[$)%_*((%+##*]@+#*[$$)^
@)]}!)$^%+%&_#+]&&_!(}+^*#)$%%^+&%^_]@*%^^_#]%{%[&(*_(%(*{^@[@&+!@&[+[++$})$!*}+
(_^%%*}^{+}(+]]_][_(@}^#_{_}*){*)}+*)%#%++}{}__%$$$[%%*})_#*!_!%&*$!]!}{*+{^()$}
*$%*$]][{@+*]_*&!^]_*!_{_@(}+%#$+@}_]#@$#^%((#$%+++]])#*@)&([^#]_$%$)[#)){({%@_^
@#}@*!!()[]%$*+*{*$%@**!}&#[*#[[{(@&_){{!}!)++@*{{({_!#^]}+{{#]{$^)&]%}})^@&$%@$
$!_+!{]*^_+@&@){#*!_#+{[@$^(__}*[^$&{&]!(&+++_@+)&}))$%]${+*!(#@(}&&&!)!_!$&@&{[
[@!#!]]#%)(_^!{*[{^{]})$)^&(*)%}#]#()^#+}!{_}*+{@&_^)+%@!%%${$&%}(%*_!)%$((+$&^^
}#[@%+)&^!](]%+_{{]}@]+^]{(!_*&@][]@_%}%(%&)})&!#)[_]^+$)[(%*%({]$[(#+&+[@[*([$#
^*!@{]]#![[{_]#^@])_[[+%]#[%[+_{)^+([^}[]_[}])*^!_+$}^+_)+*@$$^}(&[)_^[+})^]&)))
}*+}%){@_)]_)&)!@)*#^_%{}(]]$)+^@+}+$_*&)]%^@&)![!@$[@)@}%!)@$((^![{(%([+#&{$+#[
#&)!+{)__]+%)#@)]*%#]*{)$@*!^#[]**+]&])$@*@]{$_+]]^_*+*+)%!_!}#}^@*[
$[##&_^+&)((_$#!]}[_*]_$^_*{[^$#[{@$[()+*@_$((+}*^!]){][_}!)%{}{&#@[&#$(}#}%%{!_
@)[($}&+&$}}%[)@[{^_+%+[)[^[*{{^#]*__$^%^}#]}*{^+{!@#(+*]$)^(*!^^]^)[}@{%(($(+_#
*){@}]+}&)[(^^(*$&_$@#[#_$)^_()}{[]]{^@*)_!{@)(!))^_+_]{+_}$%(@#+{*+%@!${*&&@${]
(}&($(^*{^$])*}$(#}%}#)_@^*}#!)$)&$*__+!!+[&}])*_{+$}!@)*^{{({@}{@}+@#${$^*@^^}(
[)!^){!+@%(^_)[]@(]+&^_@[*(_@^*##*&*$!{^{!&#@(%&(@!]^[]({!+(]+^}&&${{]^!+#^*#&%{
$[}@&(]^&^@*#[&&}^[!%+#(+(%+&){_@_&%&!$}&)[$]%_^]*@^]&_}&^^^(&(${%#^(#[}}{%))&}{
%&___*&*((!#&(^)}%**$+_[!#{&_{$_))[${$*([&*%^!$%%&}$&_))}{(_]!+{}@+%{^*_[[@@)%}%
@)%*(}($$)^!#}+}#$]@})+]&@^!*{@_**{_^{@(^(@++&)!#](&#*[)+!!%{]_*$*(]%+&_^%)$$$*$
&]@}!&{@[{(+**#)}${[*@$(+%__{{}#!}@(%^+*)$+&){^(_*&}&__]^*)}]^!!%&$#[)$)+_{!$%@)
_##)#^*%#}{$}$[!!__$)}(%+[^(^$(%_)#!$[$@+]${$({)[^$+&$($]*!+$^{_(_%}(^)(!_))![*^
^%^&})[@#}#_%$*}&#!_)!)){*}!@&]*(@^_&[)]!*$&_[@&]($}!]!{)[)[{{_{#(+_!_#{]_(_(${}
%*+[}#{)@]&@($+&!^[}![}&$}*^[!)#^_#**${+(!+!#)!^%_#%[][(@(!!}&)_*{%@_^!)_!_@!^!(
{^](#%%&@#))$&#_&[[#&^&^}))([]&^+^@($!}{+^+*{%[}_*+_*^+$)+%^(}&[_%)$+{}{[*]+$]}&
@}_]_[%&)[@+}{+&^!#@_{!__!^%[(]@!+*%[!+)!{+_]*&+@*[_(*%+}*@+@&$!+_@+*&[#@%###}^^
%_@)$[&){&#&$!&}{@&&*[&!!!({)&{)+]^*&)]+[+^%^[_&+^$!$!+!+_(}+^}+&#*_&*(+_+[[)$!}
^$]}%](]]#_}[(&${{+((][_*%!)(##[@*&(^!_]**}[}{[%]*!$])[_))&(^$)%))&(+^@$&${_@![!
#(}_{$(&_&[_]%*&*@]}]}[{!&*}{{&+*$@%%}$+^[}{&$^%%^]#%{#](_){[()@@)[@]!#!%)&*+]_}
}[([#}[}&&}+{@!@+}]){_)%_+({{!]*}}^(!])#*^)(#&!)_#$[%_#{!##%%+)@{**)%+&*[*#[+*$)
@(!%)*)^]![_^%#)}*#!_@_@)(#)(_(]!%@)!_+_)]!*@{&!^%_+*(%%$!!_^}@^^{[@#*&+[(%#[{({
+)&+_$!${]+}^$[!]_#{{#]!{[&[$@*^]}@]}+{)_#}@_^%%$^{+($*[###@))]}@!@]]{^[_@)!@{@}
*+^(_]*$!_^_+[#))$$]*+%}{[&([&)@#{#)*%)])!_^+%%*#(+!}+{[#+#%%]@((]!_#@*%&(@[(@@#
^#}@}[*@})&$)@#+*!}^^()[^#^])+$$*%^*@^^!^$^#_!@^^(_&&][)@(%([[]&]]{[^}!@^[+${*%#
{[!({&_(_!+$]@}@!))$[[+#+{(@&@^{]]*][]}_$$$)##&&^#_{}}#[*%(+&]@%{@)!&{+{*^_#_$*^
[]}+^+*{[)}![}@[#$)}_^}#$!#%{([*_^+!#}$@{{&)!#*$#*@&)@&]^}!+{!}{)%}^[}]}[$&)^$)$
+){@)%$*[$_#))[({)&()[_^&^+#}%#((+@@[$+!^[%*_{]*+)}#$${+!@_)@@)@%**]_]))$$^&**!_
{!]^*+#]$!@+!$)^%)!&[$*[@!(_)[![)(}$}*)$(&%%&+^}+^%%&^_}]!(&]#+$*__*))#*{_&*]{*@
}_{%]]$)(#@![!(_]!)+&$&^(){$%_(_%+}%*%&%!!^^)(_*@{^#())[{^&@}#{{^_$[*()*$&%^_{)%
#@#{%]_{%#^)@(]#]}})#)*%)[{_^%[}][]^]^**]]}]@+%_}(])#+*^&]$^[$$%]$&+({!^{^@+]{(}
&^$@[$#(&+_%{*+%%(][%*+*{[)*$#%{({#@]_(#[{*(*$}{$^}[]{%]&{@#@^]{[)({%[)%!*$$}@&&
_+&_}@!^$}!$@_+^^]}@}%}%#+&($)]*+[%^#*@+_[((#!}%[+])[}[^)!(&*^}*+]){$#&}&*{)%^&!
]@][(&@)#{#_*^[@]$[(]{()*}$[}*{^]&]$!%)*!}}()^^(]+*!]^*[^&+$({]%!@)]^#$(^{@^!(}]
&*^_(+_&++_]])&]#%@^#%$^^_[+[&*[*^^}_**!}*}_%&([%^$%_$]]](_@$*%!)({&##([&%((}$*%
]*_${+(#^+^*!@@@![&&!}$)]%^@)#%_]&(])_@+{*#(}*_(!(}$[(_^_^]_}!&_&()(@+&_*^+)}#}[
+[[%){#[#()#_$(]!(^]{@(_%}$[))}*)(]@@#+@[+$[))[%*#%@}*_$)*@@@}{&^#@!}{+{^&))+}#]
%{%{&#(*]$}}{}&]$*%(}]{#*(+#%(@]&[^!{&}(^*[@)$^!^+$*]&])@##@*+![&+%_{+_)[[)+)(#_
&{^#}!!]_]}}}][@{^[%{*+!!!(&*$@^!_)^*$^@][+[{&#%!!_^$&{(_(^!}*@%([@^{%^%(+()(+%^
])*_)^$}_}}*{[[*{+[+*@%}!%#%%$!]]}}(^@@^!*)%^+(+!}!#$_&}_}_*#^&^)[(&^&}#$*}*#}*^
&{%{[}!{_$_[]^&%}})!}]%$+@!@(%)@[}%_@]]%!+$*_)%}#$[&_])}][@%[^_$#$((%]+})%#$)]@#
]@(!@#)[{$@^!]{#^&+)[**[}%#)+(%]#*$*&}}_%%]]&!&$[)]@{+@*#_!{@{#{_$%[!+&$[}+((_]}
^}[[+![})&+^&@_){(&_^&&]#&&))@@%[{%$+%})!_&[[$}&_@[+%}&*]$)}$]&^@{^})))_#@*@{([+
+%}^{]{]]]+}${$!^)]#()}#{_^+__&#*^&+{!{&}&&@{#&(_%#^*#+_${}!]##]*[(%])[{(+!((}&]
)!(}##{+%#_[%([}*}]!_(!^[{[}^{@*(}{@+&$)$){!^@]{](@{)#[_!*_*[#!$@)!}@}]_}[*#(}@#
%@^}&[^]$^(%(&&+!{^!)!#%{[^&+@[+*!])}^])+[&[)}!}{$)}!)^#)^+}+${][!%_%$%&%$)){*_&
*+@!}+${()}{(&$^_)&+#{)[^$*[!$]_$%)]^]@[$!#%((_&&[%]!}$%$$[$*}$(*(+&!#^^#][*{(^$
]{!(}#])#%{&!)+(({#{#*_+{[%[!#&%*])_&*)_}+@{&}#*&#[(!&]]*)#%](#^^&%]@(*]+^{@{#!*
](%$()%!]+}_([](@[^&@{][%])*]{^@@{#$*{!^#)$&!)!%_}&[(*#[[&^_&!_&!@#_$*__@{_#%&#}
@@+[+&%&$[%[@@(@!_&&%#__!@{$#&@[*($]*](&&{}#!^^$[%&{+%}]_}!#([}^{{**@*($[[#[%}*#
*^^[%)%@[&%}!+&!}*&*!)_$[^^#]$$}@(*)&)}@}^+@@!!}&%[]*#$}+}+#!&+}]&&#^))(*_][@!*{
!}*#^()_^*+((&+($]!!$){@$#$}*][]]&}(^{{]_]+^$+)[^##)})@^+$[[%[(_+@$$*#&%%_]+}[!$
@])(]){)#_$_)[$!({@#$#^#_&^(_%(*}^%$]+$%&%{{!{]}]$*@_&%(&&{)*]^}&}^&[@)++]!(#[([
@^#^$&}#)}#*@)*#&[^]{)#*@(%(%[$}!{$}_}#(_}#^^+[(#]_}#}__&^{{@$+^!#}}[#{!+#&_^!_$
+)]#%*{+((*_+_]+{]*)#*]*_!+_*$(!^%%#%*%^$%{*@@[+[((*(!([}_]}@^^+(_)@{[({_^]^)@&!
!+]^]#%]^!{(!#){_!![({)+@{&^}*[}%}(}%_%*&]+*)_!#+{^)$##_]*}@$^#()]*)@%}[^_)!%++!
_(#(&}#@%!)]$#^@&]%[[#!)(}){}}([)+%}@[![^%_**@[]@%]*)])$+_[%]#@!$^]&}*!(}(^}+)(%
})!{^^)#+%%#[^!{{&#_}^*^@@}#}$(!&#((_*_]_)$[(%}[+^(@}{+{}][#^_{]]^)!##$#&!@@%@%}
}_!@[##$@%)}&+!!@#&}$[]+%+({()@+}]#+%_#_{$_^}^}!&[^*^[&@@@^*[@}{+[[%^+]{@@}&+]](
*$**&+{%$@[%^]+#@]}@[*%*]]@*!*%[^+(&_!{[%)}}]&$^[[+_[%@!!}_@!])*@^+*&+&!#(*]!}%(
^![%$&[+*#]}&$}&_%^&}*!*^&_[@&#{^%]$^[*#]^@{#%)%{)@{)*]]!]@((![[(($$[%{(!#^^%!__
{^*!*%^[}&]_}#]{_(_&((([(]!}@(]^!+]#+!&}_@}@_)$$)}()]{_(&}(%%^}_^+[+[$}[__{(*$+^
!%${[&{@#%}^#_*$^+%&+{%)]^%##+{^@$[&_{++)_@_}#[[(_+!{&@[!%@*{_*][$$$###[!}%%&@(!
@+}{{$#%^(#@&(!!_]*$[#(&)^&$$%#{{#_*^{}@&{*@$!_(+*@]@$(+}+@%}*+]()_&_(]%)@]{(_#_
$*@]%($}}*}$}[$(!+(&@+]!#+{@@%%[[)#(+]{}+%@%+_+*+{#&(]$}})^!*%_][@{{!@)&%{@$^}!(
}&]&&[*^@#}&++#{]%*^@%)}@)]+(_^!}*^]_@#__#^#[&]&%]{_(_{)}&}}(_{}+(]&{^))}${}%]_]
%)[*+_[)^]+(+{#&_([^)^}!_*}#}^]}^]}^@&($@[!&#]{$%$}_#(^^[%@]%_}]+%&&%$}]){@}}]{@
]!%_$}&#]&+_](*_(*)*][]%%$#+!!^((})+{}%]@@_+}&_&[{]}**^$^)[&}]^%$_#{}%)]+!%@!*)}
!!{[{*!+_^+[&(_&@{]@+]{%_+%%+][*]*}{^](%*)_]!$#]#@%}!+&][!@%[$((}@[[#&{!^)%#&+*+
*#*{@)^[{_][{]!*$[#@^+#{)#%_^&*{){+[^(!^%$(&$&[{(^!%)]^{]%}}%}#)(#}#{_}!$$**{%#)
+}***@+[!)@{[+^[+[##}*%$[!!%_!!@[#@#@&#![%]@())_}!##%+#{}#(+_&{}!%_@)^}++^*!%)!_
{($#[#+_%}^)_^!#%*&#[_#_)_[#+&%_!*{[!**@}($!+(!__(^}%#{)^%}[)[){^!*{@}]!+{]){]*_
_[{!_%[!#^*[{#[!}{!($%+^$]]{]*)&@&#&+(}!*&])[%()*]}^_+@[^+%*]%^!${@*[{])}){}^$(_
!(+]{%(((&+!*%@[#*}^)_(@+*@*&(){{@}+]]+^*)({{%#})&(*$]{&$#_{{{[@](*$[!%!@%!*&!+&
^@[_&$&[_*&^}+__&%[#+#+})[+#!$*#^}[#&*}(+(]+!*]*(+(^_#_+^%]]@!][@%!+{{_%_*$!@{!^
$^%**@$[$^]@%+%__*}+$)*(*]{[$](]#)!)#@}_]+**)@!%##*^#(!*!+}^{!){}$^@_^!_%$}*&[#}
*^**{%*#@!}#+%@[&##]]%+$*@[+&%@){$%{}}$^]}&**%$(^%%@[$&)_}*)*(#_#%_+@%)]*{{!{{*}
$^(_*$_$&&%](_@}]&(&}@$+]_%+@!++_*@*@%&[*%]@{)#%_]@_@&{#!%]+^^$*{]#@[+[)^)&%{@$&
#++$+!#{([%%+)}+}_+$){{#++&%((^^@%!}^&^$($#%}+$}([]][@^_@}##&_)$##[{{@)%{+*}]{&^
)$%!#}@!*_[_%!^+[)}!#{}{)*]!@#)%{&*#(&_[$)&)@[_+][(${%%((}#+[)!+[*@+$$[^$[!]_!#&
&)^^@@[*^^_%_@*#$!}_&**}@_@__%_$*}^][(($$[(*%)}+*$((}${@^$_)$#@]!(}{^}]!%%!}}^#(
}!_($(^%^]}][{(^]@+{+%}+]!{%}%!@}&[}[&&^+!#$)]_{#$[]_@}_$${^%^^($%*]}$*_]^{%+$$_
[!+@@)^]_&&[{)+@!%[)+^![_))&_^!(^$((!_}@$+)!@]!]+${#_@^@@+_*$([##)@^^*#[]@$}^@){
*})($_()()[*!!@#(&&}]{]+{[*+}%_$(@(}&{]*$!$]_]+&!%+{}$}+_**!@]_+(&[$}__@&[!}{#&_
)(}(+(&#$($$@$)){&*#($]&(@{+*+@)}+(_]()^)}@$^&]+}#**+(%+]_*]+}_}$%]%)]}{[$*&{$_^
()(]]#^*)#(*_^}}_(*!&{@%($+[](*$+}_!))*$#@@^!#*$**%!!)+@+%!^)_[}{*@{(_^#}}+_}))[
]&[]{{](}*#^%(!@%$@)&})+#[@[(}%+&)_*}#!)+]*&}+%](++**]!(([#[$*])}{{!+_*])$${!]&%
[%]@@++#$@^_^[(+)@%_^_%+^^^*![@{+%_{[(([]@][)&^^*#&[@*^+}&$+@*_!!^}{)&_!!$]@^))]
+^]+^}&@@$!${*([}}{&{}_&]#&)!{*#}_*]_{@%}_]%#%@**}$[#%*%([$#+)^(*^^#)%}[)!+[}])[
[{)_[)*@_#*($(%${[)$[%^@+%&!}]_&(^()#()_{_&_&*&[}+!$^$!]++*}])$$]!}[+@%{!]*^}(%]
&+!]]!&^{(*+[!&]$%%_#&]+}*$_%!#&^]*!*($_@+#(#&&)#)&+![%[^{^%&}@{(&#^^^&&#@]{!!@^
{@(*_{*[}+${(!$]%![*}!#*%&)^&&@#{#&^{)#@_(%&}^[!@_^+__{_{){$_)&#(}(*+)%[)@+)}[}#
[%[!*@$${[&^[&@%&]%#+_}%##%${$)]}@&&)_)*#%#${_+}+{*^{{{$&$^[@%[[]$@]%}#$)_[^!__]
$*]&[[+([&{!}}}%[{}#@}!!}^_(}@{{$_##%}{]][!!@^[&)#*(%^_!!%^*_][_%}^%}[}]}()]}_%)
!@_}^{*!$)@){#)_*{}@&]&(@()!&!#%_(]^[@$*{{{[#)*@%!@}^}+%^$!]+}$*(_&}}{(+)%(&{!!_
(%$#!^%{[)##^]**@+]*+]_&#{{&%^&#%)^#}^)*$*&)[]**!#^*@(^*^{[$$$+$+}+[%&*%[_]^#@$(
@)*}*}(+#%{^(+@&!@%^#$&^}&}&}%{#}+!)!^}#{^_}_(%&(#_$+!%+$*@#)%#{}(($!&^%&}+&@%]!
%&*&)*$!##)%[&)(_)&}*{%{]@[#[$@^]&*&@{+{){*^^)@}$%#&)%^)@+##_]$@_{{}({+$#{[@_}()
^+@])!%}%^[)^)[(_$^@{**{(^%_[^{$)&*$^{^#%@)](}!^_#_[)!_%{[%]{#&*(^^[[{(+}^%+##]@
+^_$@{^+_{[+^#@)&+%_+)[^}}*{$)![#][!^@{}&[^]&@}@{%+@}{+([{(](}&%}%+&^]+(}{][_[&@
#_#^$$[!%}#@[@+&%&*(&&[}@)^!@[@&{*&{[{@+%)$!#!!]{@(@@&+)++&_+)[_(@{&[_@&&#})%[+@
^[${+![}[&][)(@+[+*+#{*!(^&()&#_^(%@]]&(+^)](&^]@@)%[@#*[_}$$]*][@(())#)@%*{&{!+
_[(^$@!+**!!*!#*]$*]@{%{$*$]*{#%)*^})*$[{$&^(^@)%!_!}({@#)%&$(+(^{+[%}++#]{[((^@
&])(^+$%@+$&)](](^]@^^]}%[%[**#^_$+$!_[{}#_{)]!$@]@$}+(]_}^#{%#$(!%+&*!_&%!@]*^^
($&*#*&){+@{$#@[()[*!{}_)!&$%%^@!%!&@$!&^$_}!]&!]+[_*(^)_^]}**}&%)}[&#&[[)$]!&({
}#@]_#@}@$#__%#}*}{++![#[}[+@($()){#^)(#$!^!&@}@*@{{+#(}@+^$[&&%!{_${$#@])&_[]#)
]@)+##@]^@}@[&#%_!*@]#]$+&+[%+!*}*+}_}]*^#+^#}!)#!}&#%({@[{#@$!)@!)!+%$&{[{(_$$!
(})$]{}^+_%(*]*&&))[])+(+]}@*%!%{]%@*%$_#)%(&(&%!#(&^_*&*&#]{]^][!(%&}(@&{[[]+$)
&%(*@@#+!_[}&^$)}%[$%([$)!##{@))^_}_{*@+^)[&}]*@^{*^(#!(&^@{)*$&{(]**$*+)%$($!@!
[*]!]_*%[]#__+_}}__)[{$!&&({)[#{#_&()$@{)+)#+}#&{^^{#^##_*&#}*+*%&}%@]_!}}!#**&%
$&@{$*]%[!@*&%+%&&)#_+!_&^^%+${!+++)+$#@[$)+%%!{#(&{]&){!&@$*#%&}[+^{!#+$}++$#(}
#%$(%$&#@$!))*#!_]#^@%%&!*}!)%+&@+*$#_[__^[%+(*!*)^&))(_(%&{}#%&[)+_]{#+&!#]](%!
($@&^}_@^+%$($_${$[+@]#*#[}]&#!!{&@!&@(!&#{[+]#*)@)[&[}))^[[@#!*}+(&)(*_*%%#!(]_
!_&!&_{[)%@{_){_{**!@[[$]__^([)^++_+{+^_&!&$}![#}$$]]@[(_)^}_(&+_]{&}}}{_[][##+#
{@^{+)_*%}@%*(_{^+&)[]&{*(}]]$}}^@$!&[^@@^__]}[^[((@*+%_%%@])&{^_&$]#)!+!_}{[(&(
#}{&^){{!#(+#!}[)+}]$+^{)#!_%_[@}(]}%}#&&!][[%@}^]@))+!))]#!+]*[)]_]*{][$!+*@{#{
&)&&^+_*!}!%({)}^)))$[&_%@#]]!]@)&$(^{@&@[^_#)@@+#%(]&)+!){$]}]){_{}@#%%*%#!)[]_
_[#@@$}}{^&&$^_{}%]{]&#(@_!]%_)^$$!#*@##^!($+*&$+&__@***][!@$]$)*$^[}$^&{}([+{}&
_[$[&&*#@[[@&{_$%!{)[)&[^[+^^^{#$&$_*{*^&)(+(&$}^)%+(#[%*#[*[([+[]${&({%@!](&]*[
+_^[^[#&)^[@$*+@@[!}&){}{^+@[)^&*$]]%^_!^$+%&)_}}([{$$_][(*]$&]]{^)&(^#[]%*%&}}#
+*[[@)_{}&%}_+#)!^{(}*^[@)}@(+[#+#*{$)&_!}[#[*+)!#%{%*)#@++&^]$[$&#$@}}_)*&]))#^
({^(](}+#&[][%+]}^(#^*+&[{{$_$$@^(!%#^*{()%&$))#]{(@%*}}))@+^&)+%$^)&[(]{}^]}}*+
^%&@)!_[${!)&%#)^*)#{)})@*_]){{{[$@$#{!@}_^{{!_$&$]+[[[))_]@)[{#)$_*(}*]#$#%@+]@
$(^_[[^}^&%+)([#_!*})%%)%)^!#%)]&!@^}#_!)[*@*[{!(_}{{^^}
]@*{*)!!(@+]__*@$[}&($[)#{*[}(@@%!}%[{$]&^%)@&(@][+{}*{%++}$&+!&[^^%]+%_(_!#)++(
]+)($[#]@(#$+%]+$!^_&}+[!$)][)(}((+!@{^^^*{[#$_@}$!@%{(]{+^(!$*!@@*}^+*!]])$!)*[
^%[(&[{#@}*!*$_#@@+{_&&$@(#*_]#@$}[_)*][$][_!_(_++$+)$_}^++_]$+(*+!%[}}*_^}({&[_
$[]]@@+!(_$$([#_%_$#(%!#[+)[_&!_*]+!&%}&*[{]*+[!!]+_})[)]))(}_$+{{){[#}^+[{@$[!_^]&@](^!&**^@[^($_{%{++[@[@%^[#(*[+([{}+[{%#+}{_+(%#*[&^&!)*_*+[#&)
_}_^$%#(&+_!#$($*^)@(#%}+^($**][}){+(#{}*&^!(@#&][&*$#!{_!*%$)*(&@]^_*+^^#$}^{}(
+)%&!)^{^$*{!$$[+{])%_^^%!*&@[%#*+##{#^+^(_])$(]_!{*+_)#]}%]^&*{)(+$!_#[*^)[@&@+
^&[(__+_#})_*))#%#!)(]@%{^{#^&][_[+!^&++$++_#$*(&$]))@_#+&#{!)[%!^+{%#{+(&$^_)&]
#^+%&&#(#!$}#((*_+&$_![+}+)[!!+*]@^!_#%^)}%+({![]_%@*[+(}@!$%$(@)+(#)%]}}@]#_%$@
_]^*+}!$+]{{*[{{]%$^)[]_@}#+@*[+@]^%)##[{^^(}_^(@}{*!(+}]#{+(@@@@@{+@(}*(*(%%*!@
)@^$%#+]!&^$*$#%*!+%]#$}^)[@_#%*_!&]&!{$#)$**[[*]+%#!{]^@&]#}^^%(%!*%#{@(*)![*(+
[@(++&]#!{_({%+@)}](&*^*!{$^_{)]}}[^+)&{##*!++_([}^})[]__@!]]&^{^]#}@++{&&{)][[[
}[}}*{)%&]}}+*!$%$[}[@[}%*^{%(^])&&_[*)+%*!%^[*()[)#%_!{]}%@)_&@#$%&(*+)#(]$&!_*
[#){*%+}(+#@*[[_!)^%*%&_#_(%^^$}*(_)(@+(#+*!+*+_^$&($+$&{@[@]{%*!*_{}^%$%^@%%&+}
((^+@{$}(^$$}%()({{^#{]]{{#){&%[!+*[)#%$}*]+}+%{))[((##__$^*%{#_$#(^)){%}*@#(%**
{!]^!@[$)++%@}+]]{]+@#!*]{)+!}!+_@{*@__##]&$)#%{[#![{%+_)&_#**]#$]_#!*@]*&(@})(]
^_+#+$({}@%{^%*#()(^@%$^%%]#}&^)_{%(!$)]{@(#)*@}$&(){*%+](_+}#)(_!@*$@$]$)@%{*%$
$*!{&$$@$@+&)#}}}[{){}([+__+_+](_)@++^%[!*)(+(%}}+%@%!)#$*[$@)$+){_!@*}!]]{{++[}
&@&&(%*#$!^)*_({))]*(^)_^_%_@%(@)]]!_!)&%{[(]*+^+#*#^%)*[]#[[}@$%#{{^#_+[[@+)@+)
+!+__#[]{*)^#%}()]}**$!%[$!*(+[}!)}+(_($@)[#(}*(]#{}{[!{)^@*%*[!!&$$&({%)+{#@]}}
%[)[&(*%[)!)}$&$%@{*#${{%&[#}%@#}@!!*@*)(%(!_*(+]^&{_{_(@}#)[}#%%^*%](_)+*@^{&{@
!{!_*#*[*^(*%_@&_^]^^#!((!*#{#]#(%!{)]#++@@_&}&@+_}+&!#$${^^_(^%_+&)!@*[(])])&+_
[({*%&[%!@&&&_*#_@{_*]@&$+)}(&)+(#]^}{#%&([^^%{^}){$#](^##^%*%&#%#$#}*@#!$#}+]!!
&*+!^%@]^&&++[[$}+@_%)]$_}*@*[%&*$}%&$)*#*%[^@!#@%!)}_![_%(}!$(*_$!*]+)@]}@(@{#^
[}+{[]#@)@^{!(@_]][#}+@&$$#)$*_!_[@**{^(+$$)!!$!](}!)+)^!}](+_{({!{[{}+%)$)@$%$[
$(@^*)@!^^!}#*@]%!^(@{}_!@]&^#({}{](*){+}[_}_($+@}+]@[^*!@]++_%*{^*&+[%*{})%_+&&
@{@!+%*#)@^%#$&}^){[){]}]%*{+)&+)#}*#![())@&#+!*))]%@[$$^+%#}+_!}{#((}@+]]%$)%#%
$]&]{&%^}^(&[}%]#!][}_]+$)${^%[#{)#&$+!^%@%%_]%_*&*!_!]{%+@]&%(*[_^(_[!$]!){!*[+
#$!(}$)#&}^](%!(^_$]*_@!^{]+)_(*^{^{&@(_#(!+!}+%${+_){%!@%++_&)}@}^_)+___*&](!}[
!}(%%@_+}{(]$+@[+%_+$%){#[{[++&)&&^&@&*%&&}@@{^!*^((@]^^{}($}_$_]@[&]%++$[^#]{]^
^{_@%#__%{&%]%_{}++!_]}_][$+@**$^^{_@]){@)}[)!__@_$}%_$}^&!}^@%%+{&^][})@*%(]+([
[!!){[{]]@^_)*!!%@(}}^^}!@$^_${#*_@]]}@}&[*&@%[#*$]_(*%++&$+))}_}+{_!^@&)${%*}{)
@)]+#(}(*_!*]%$@)_][)]_%#{$[!%$_@)]#@]*$$}[#$&+&%$[{*@^$_%$@([$}[%%_(_&!^$#((!(^
{!!+!^+{@$(^^@#(]$($#]_]!%[*#%&_[%]]]*@^(})){!_@_#(*![@}}}$[$]^@_%%}{(&[})!!#}!)
]*%!]!&{%+%@{_}*#_@$)^{{]&&^]{+)@(&+!&@_*@}^@}%(]@$}${_}{#*)!@!*)@%(%$*}(]#&&{&+
}!(*+[)!}}_*_$*@($+]+#+{)%_!{%^!{]^]_{([*))!^&))@&}*!_#^++{)]$#)}(#)%))+)+$})#(+
^{+))%$_%]$&{#+(+!+_&!{^(@]}(@)^$$@@+_$#@^_){%)#*]+][$&(&&&*$_*{*$#(*^&*(_%%^*++
$(&#[{@*#{]_@!(}#${)(!#@+#{^@_^${[+]*(![$(_{$%+(!+(!}[&)((((*^)^@%+![!_{][#%++*_
&[&%)$![(]#$+@@*#_}@]&^@@%+{%(+(+![@))#{$*]{}+{[*!(^_^}]%#]%+[@*_&&+#}^[@[&$_]@}
^[})![*(&{#{&+}(^[)&_%[@*_)(@()!(^)^((({})^}_&]*#[*^[@^+{$&#[{[^%&_&)*{[+!^(&{*@
!)%&}{^&[{{!%{}([+](!*$]#&+++%+($*[({_$}!*^_[%{*(+###^^{(_$}(&}@}}(@)]*%)!&_%^[)
#^+%]#*%{#[%@*@]{$*_$*!}^)}%!{)[))+@[%}$_#@+!_+^!}{{#^!)[+!&($![!@#!^}}{^@*$[]#!
%+{+*)+#@@&([[((@)##%@)!^[$^}(##[}))%%([^*+${)(@[}$[&)%@[$])](!]]{@+)(&*#*@&]+[^
)]{$%$$}^^}&&^]&(%@*!)%[!})&(!_^]%*[&)#&!^+@(#%+@+{*%}^]$!)]{}]{&@]]$]#$_[${*@%{
(^$][(@))(!{(#))%+{{{+#{]{^}&#&+%_@#$%]_&($[!!}]++{%%(#%^(%+*_#^#[*!+&$!]_(@%^_]
$!^#){]%}*_%&@$$[*[&{*@[^}+&)_{+])}][]))%%([[[}[_%}!}[^(}^{{%![@+]][*+{^[}+++![(
)$&]_+#[+}({#+}*{)[+[[([@})+^{^{*%[#{^$@#@]][}{{%&]#_{(%#@${)]]*(}(]$}&&@*&+@](#
_^({%+&^&}((_#${+](+]}@!]}#($${{!}[}}$[}{$}#*((}%[){*%+^}]%+](}&&%][!#$]#[+@&&{_
*}&!)%)%%*{#%%@__[_+%^@&$#@(%*+^$_[)%({*$()(]@*[^_*%}*%]%[%+#_))^@(+$#_+&(_@]$&@
*{}_^@){)*((](@${}[%!)_+!!%^*&${([^+$**^_{*](&({^%![&_$%!]&%%[@]}}%!^^$%@}]@%(!*
+%(*$[&@]*(&@[#{_%!^{)!*!@_]^[(}*]}(!]__{)!**}(!}++[$+([!]*()${){+%_(!&[{*]])#]&
++(_$%_!])$))((_^+[_&++@_$}%*!&}&[%@@_})@%)[{})^{*%@]$]!#%*#%%*%+*&{^*&](^}#*!*_
#_%@([]^*][%!{@)#}[])($[]*()_*[@)^&!%+]%%]&{(^{{%[!!!!_#+!@$]&#(({!_*]]+{#**^*&)
$!$(#[*}+}*__$]!])#$!}]&{]_&#*_[^&}(@*[##^*!{)[+[(}_&@+&+&_(#@[{^*[]}${^*{!@+$^$
#^]$}((&){#@^*}_#]##&@@}^@%)@}{*_{&+[&}}{@+(#+{#]@#^!(%}))}^
{__&(]&#$@&!@((${))_^!][$)%@&%(&_]]^)$@$(]}&$)}}$)(([&{){%{%{^#!%+)*}#@_%{%*#@[@
(%{^}(@$$(^_]($)]_*}&+{^$%!%@$)!#$+(!*^}&(*!(+!$_^#}!*&@_%%{#!$+)]@{}((__$}{[!(@
&^+_(}@{((][#&)*__!$#(@_^{{)}_^$[+)!}@&*$$^{&&()%[+!&#&^]}{%{!@{[}$_#[#$[(_)&%&#
{}[_(}*$$)@!{@]{+&!+#@*}_]^[}&%&_^^({}{]!![^}+^(!{+[%})@][}]}[#)}]%{{%&!#^#_^[+^
@&{!+%{&$_#}(*!@]^+!@+!&$)++@[)^^##@+@*(}))%))%{}%((!%_*]!^)(!$^[}](!+){@++]]^%@
#+(][{!##}^{)$##{*^+#}$)+(*%)&&*[!]]^(**@{+[^%^*){{&$&[{}!]_$@]!#$@[(]^%&%$]!%$+
^*_%#}%+[$^#!#*+}_)!%+[)$%}(@+**)(}{$%!{}!!{^!@__{+#+)*]+[@_}+*^(&^}]]@)($(]&_*!
**#$_##%([*#%(*+^(@&_((*)*$%%+_@])}&$%+*$$+{#^&}[#$^*_][}]@](]+#&*[#+*%(&*&(%*[]
^])^%]*+_&{)}^!!@%$#*]*^&{[$^}[$}@%[%%@$+^]#)}^&})#)@]^&+}#!{**+(@+}}&^()$)#%)]_
$()#}$%}@^_()+[!(!*($##%!)$])$+@*[{)&+)&%+}[[}^_#}#*&(^)@[_*^[%$}%)#)#!](+([%*+)
$&$!_]+&)}[_%[%(!!#*}&(][_@}+@*+&&_{_(#%(!!+{&}_@$#^!#&}}@[%_$&]*$_^}%)^_({][}$}
#]{%($@%%]&)))$*^%+]^^&{*&#[))]*(+]*{[!_#[}]{_^%!_{[{%#]}{_#]&^^^)+!^{*_{+[}*#+)
[_)^_}_]&![!+&+_#@*%_#]#!&^!*[#{+%]{{]*%$}!*}$#$_[%})##}}_#}%]_}@*^]*@^(_)}+^^!+
*^]*([&{{#%{[{&@%)%+&!^&]^**}+_!!_(#&}{@@*}({^&!^*)^]%_**((@++#&)@&*%[]]+!$_[*^]
+$!)(%[{]{((_{*}%+_$+&_)^}@@^*+!(_@($&@()]]]&!{_++(^^_{_[!$[*!%@(][(]_{!(}%[*!])
!]%+@*+@#$^)^[^^)&#}#($*#&)}!#[*]%[}#*}_@(]}+*]]^)_(%&}*^+%#*%&{^}%(}]{$+)!*(&&&
+]$^@#_@[{!+^)}}[)_([!%[{(%)@)&^*{!)%&)&!$@]}@([*(^#%@^&))%*[[}_((@&)]+]}}_))(}*
!_}}]@#&({_#@{&))$^@@*@}]*+[*%)+[[{&!!^$($+]#$@)*%*_^{_^%)__!&$+#]#**)+*@+@%#]]{
*_[)+)*((]!{@^!@]%(%@[]^%[&$+^}$$@&{_(!*}$]%_#_!%*++}_])]}$@+&#(]$*[+[)&)([@+])_
[+!%^#%)!#_}]#]]_)]^**_^)^%&$$)%!*!+@#*[^&@^}^[{(@{%(![]#{&%[$]))(}[)^(({_&*#}*}
[+(]+@){$(@!{%$%)$+{$!$$&*^+&@)%}![)*]{^%#(+{}![_&[@]+!@*^}}{})++_${_&%!@((@]{$#
{+@&[}^&%%$&())#!_##$!(&@@*}^[%@$^#*&!@@&_[(!}+{}*&+{**$)}])%()!_&_)!*((^+%)_#[(
_^&!&(%{_%*[($])}%[{@{{^_[$}@&&_)^()({%![#][(}+&^(]&}!*@#{)]{i]%_]%^(&]^{+_([)$%
{&*[$@^{(]@}]%)(@&}&)}_@$}&]{#$^}@@&[]#+!%^]@!]]$+$]#^_]{](^*&!%_!!&}$^&#!}++)_!
^@]]*$*_#+$!^{$&[$_+#^@@}#)(#*&)$#&+#}+{{&@$^%+{++[&}#}[*#]^^()+(#![]^$)^#*])%((
*[)]#!]$]{+([^)%@{^_$!(*@}#@{{^%@}#&##*!_&^{%$_{_+%)#{{!$[&&]#^)+!]*&]{[!^)%}}*%
_{{$%_+*^{+!*!$)}*)_&%@[$*!&*#][!!&#{]$}}]^_$*!*&]&(@%_$*@+!{}^^+()+_$(!]*@)&#[#
(#[@!$[+{_{^%+&}($[^^}${^_[#)({)++&_%*{@+(^+_%!_)%)^+@#(${)*]&!)^{[#%+[*))(${&{#
&$+#])%@_*}{[}$!{#}!^%*)++$]&]!_{_+_]%#&@&$&)*!(]+_+}}]_+#){[^]#^+)$#!)&)[+#[)}}
)*(!%&]*{$+(_()_%$#_{}[]_%#{![}}{(!@{#$[*&(^((*+^(^(_]%{!]}+^)%^{{$**]@$$$_!]((%
(&[${_&)}[$$_{$[]{#{%%!&@(#+%@%)+_}&*##]!&^_[^[*]([*!]]!]{#*!^&$!*)!}_#{_#*[%[^)
[!*@]%*[_^!#{{)@![$+^__[]{($+}}{}[$[]^{+(*(^)&*&#^][@{&@)+^*{%@^)+++&!&[{(@{!]{]
)%$]{()*{)[)}@&@&@%^#{*]@@]&_]!))$^(%@+@+]&}]+{*[]+(!_{]@_[(]_][}}_}@[_}##+@]]]*
+@*_*[&%%(_++!{]*+&(*%%@{!!*%!*^}]}$&}#[+*&+@((#](@(&*{+](#%!%[%]@#+[_+(^[*&[}${
($@]^)!&_*]#{#&}({@](*_{!{*})%}#&#_][%)]$_*&(]_*%]%$(&+$(#+[][*{]&+^+!&^@}$}(}]$
]*%@_+!}&+}_}[%^]#&{{+*%$&_#}#*]&%%})$+]@)+^})[&(+@$)^#)${@_(%!($]}^!{@}){+@!__)
$]&{*%#]_!$&@$(}#^{]!!%^%_#&!$+$&%^_#((#)$@($[+}^[_+$@}_)]*!^$!{^)@&*[[%!_&*$_$#
^+][](^[]^^($_#[+})__}@{{%_%*&_[)!}+[!@&*}}$#%**#}%})_^$^}}&+*}({(]_!*)#[%(!+_%*
)+@%[+#$(+%@%%%_{+&_&_]()$][)(${{@(+}+_$_!+##@@&}[#+!(#+!){))@*+((}@#![!)&(@@)(]
^#!+{_(_$+!!%##{[{]($+@)#&$%)))&[&*!^%+]!#]^#{)][()**$^_!@)^}%$}+_]+{[]*_}[[*)*{
{+]@[!%)@&^^@$%!!({##_#[}[+@_*@]&@[]}${((^[{][]+#%![(_{[*)#}]@{]#(^])_&!%{^!#%{@
_]}]!^_[!)&&&]_(#]+_!_}&&)#$*+^###[**@{}{%^[&#+&__@@@[+t]+&)^{*((@$!$)%]$[{}$}&$
%!+$[(*%](@*!*})!#+*#+(}$(*@*[#]#)[^*#@}*_#%@@+@[!!{*^})_!^&^({(%(%%@(#_(&{__[!+
(#})___!{^@*}#(%#)_]_%{{]+@%${+![^{(*$+)$[&[${)&#%+$![{}(@^+}]#(}@#]}($($[$[+{}(
&}+%*$[(_+{#!+]@)%#)}_+{%&*)#^[$@_@}[^*+_*(!%&#*^@)@%^[@%*$_{}{{%[@^+%[$])])@[!^
+#@$%^@^#%}+)*!+!$%(}](&)##$+&[[#&^*!^$]*!#}{%#{*+&[]$)]%}*[*_)*#@^{%)}{+$^)_{$(
%){!}(#]^_(!^]({%@_@$%*{*@)*#!%$(*_(]!#*#%[*[&*)^[%&$_)!$[_&($]]%{!%&)[(]&{[[[{+
{{@]+](@&@$_^^(*@})$!}{@$_^+{*)[({^}_!#[@$[*%!%^_*@@}#_{[{_@**++)!]!@{_#]&&*{+&$
**^[)[%$_^)*)_%+]&_[)&$}}_]%+%)}^_]#}]__]*!}&#[[##![$[)%+_))_&)$_(@&@}&&{)+#_%![
]}(^#*@^)$$%[%*(({(^]}_$+^%{$#*#^+({)[*}@]+![&%_%&_$#@[$^+@$(##[[$}+*$!@*!{_@})&
}![+_#}%{{_$}}+]+#{]#+$![@(!%_&$$}+^*{^#^^[&&(^^##**$_{+*!]}][}&&%]]*&(}{+@+!]({
!$@+[&@)]_!__})]+]+&{_($!$)#$)&$]&&}{!^$)$}(!@%$%(!+*!*#)+$&_&[[]})^#]{$}&@$^{##
]#%@+!^)$^+&^_{({+[}#()_(!*_@$}}!}*+_[@^{{{#+)%&&&}*{*))+}&[#++{}%@(]_@$![$$$&^*
__}$)$+%$%(*^@)++@!*%]^){]]_}]++$!()&[{*^$%+]+*_{[{$[#*[[%^}]&_[^@^+@@^)#)${$^&+
(}$)][$&}#*_&+%#)(%^){](*]}}]}!+[)##&!^!+{_!@&^[[(#{[&#%$!(#{__}#&@$*}#^*#]@!}^_
!^$!@y{$][%@+^##](_*(##^_{#)$+$*&}[#%&_!+)*@{][_($#_$*{(}_[{+)$[)+{#)+($_]{}!]+#
(#_$!@*+#%+(#@_}}@^!$_[&_&@})}}$(]^]^(_^**@%**#&^+@[!]^+}+&+&[^()+$*$(}$!%@!({^[
)]*{(%#[_%{}(+!##[)%&!((^[}&(!#^!([)[&!_)(%&#@)&*$+]&!]^#!!^$*^$!(_+![]*{!${@%+)
^#)$#{}]%%$(*}(]#&&$)@_&+)%}}*(([]![$!!^&[%!{&^(&@&%$)@{!@}!}$_*$%+#]{])@!@)@_)]
}]{_}!%{^$))&_(+}+#&+*&+!{_*^)[}(((}_@(]^)_}!]}&}{&[((}@{+(([{])_}^(@^+^+^}(!)&]
_%*}_!^#[*$_+]@&#+{*@*+{)]^^!](]_@^}#^^%(*+]@^@]$*%_$#^*@[$]]_)]$+$+@*{$[}[%*{+)
(&{@%^+*}^(^&_+$#(@$[#@@(){!($)^)!])(_&%#*&[@{]{]#@(]%@}{${[})($+++@*${+&}(^%)+*
{#]!#)]*&@)+#[+_)@&}+]+_*}}]*{{%^!+$+#$(%!^**!])%*_}$]!)({$^_^+]*#{(_*[&!(*))#@&
@^%@@}]]}%#%]{{#(#**[#(_#(#$]]*)_*#+[_#+}{&!]@&[]+{*^]!%^*_@)]^%#++$&@[)([+}!*](
&%+(&])^[)@$](**}]&}$]&%^]@)[&(*[(#^{$+^]@[%![_{[#_[){_$)!%![]^_%*$!@+{[&%)!_#((
$)[__^{%_!]_#[&_$(!)!_&}&$$}](]%{^(&{%$!]+[+_^+{*[@+*+%[$@&#+#$*}&{@%##*@(({)_(]
}_)[^$}^{[$@^$@$&##)@[#$&$&_@]@{_][{}!(+[!+@%&^&[%&${()@@_[&+^^+{)#^#)&%_]@{$&(*
{()}$]!%*+{[[}!+][_*!&]{%+)!^)!*{{})_&[*[})+[(@!__!{!]&{^@%!@]&[&^}+#[{_}@!+_*{&
^[%#!^]+(*#&))([%[%$_[#_+{{_%!#&)^&#)#!](+(@!(}}*#(&&+%!}@]%@#$*_[$](#[@#[_^+(%{
@#(*!__{)_#^!{!&%_*@+*(&[^_(*$#!@_*}#+$_*${@!}*]!}@)$^@_@{^(++(%({[#$)!}!##%]&[{
!(+}(*!(&_[}}{}+#{!#)_[!)&))%%#}_!]${*}@})_)}%+&#$]&(^*[^+&{)}+@((&]])%$@((_(^$[
_@$)[[+(!@]_()}*]*+[{&@&[##}[&]%$](+*{]!%)]_&%^$+%_@!#&+@[(&{){)(]+[]{)!^}+!}{[$
{)@_&{_[^++^{[%*!(]]@_]}_})(%+_#!+]$$_&!+*[(])$(!^($)}^+{&!&__+_{@+}[((&%)$][^{&
*{_%#&${{!@$)$(@%{{*%[+[*@#$[@_{}{[#($}}_)%)&+*]((}*)+_%#{%]_$]%][!+[+[%[@&&){!@
(&(+*[($}**}$^_!@]_{%#{](]@{!#&&&)[$!_(#(#$!*![##!$_!*{{$@@*_!#[%)}%^%(%#$@(}+}$
_#@&({+)+}^*]^!^})[(^@)*+#@]%_(**_+}###[_}]*$${]&_[&{[*}+@#}&^{]_!&#{%())](^@}%*
$[%@##)(@__+{#@^_$}}$)}]#^^@#&_^++!$^^%%#($+]&%&+]}^+_@%$})*$^*&*+[##@_{(&}@^*]_
_$_[@%#[+***&@%!^{}!$#}](_({@]]{)^$]^*[*]+}}!({)
[%%(@_${[(#@%*_+^]{}+^{}!)&#}*#%(##))%(+[+@!}$^}](!_$%}$&([#%}[##*[#*]{$^#*(^+[^
}!&]!%!+@){](^(*}^_!$%]^[&*)_^}!@]*+((!^+_$+]_%[@&+(*@}}+}!]#})!*}!)+@}^}_+*#+^!
)#$_#{&)!$@]@@[#(!]^&^+!_+@^!&[*!)(*)*&[{_@%$!__!%%[#$(%#(]$$![[@!^#%(_)#!{%]]*[
+^$@$&!^%+!^[_}&*$__@}{])%)((}_^)(%^)$@}#)]_)[)#{!}*^&&__}!{&)]#_)[$$%@{@$&*@)#{
^#{}%^&]+}(%$@+{*^})]@^#^#]@$%([[#^(%[)]%#$}}*_$]}^]*$@#%$#[^[[%__*#@)(_![{${{{$
^*{##%*!!&]{_[$#_]!&{(!@*(+%*[%_($]*#)($)%][^]#+}[+_{})@)}*&(]{(&(}%%@(++$}@(#[_
}(#[(@])[+(^$}}+!){_&*)&&$((+[+)+#&]!@^+]}[#}$!*$_}_$__@^))$*){%!@}_){(@^($)_&^%
]))^)^][&$+)[!(#!()(&[%(&[@$*!{]+{{$(#^&_!!%@)%[@_(+^]#@$]#*!$%#()@++&}+%[[_#(*]
#!&&([_}[+]]*%_$+(^[^)$*#{+{!$%}_*!%_([{*^{*(#}&[$@[[_^$&!()*(({]##$@@&@$}&#{#@!
&_@+){!($$(_}++&+*%@[%+([)(}!%%{$_{@$[*}_!^)#+)+{*&)^]+[$^))+{(++%*^!]({!&^}&_(_
[&^#)(&)[)}[}}+$]*)+)&_{%}(!}(+%(]+*#([+*##{()_(}}[%[]*]{${+(&)}]){)_^}[]()}#$@%
]_}(_]&}[&#%!{+@(##({^[+#_)]@!$]_@+[[%*_)}]([$}}*+#$+{$+_{}^][]{!^!#^{{_$}$(%)+[
[^%]]+@}_%){$%&[@!*{){)%##(_{!#(![#*(^@{$$))#}@_]{#_@{)]#!]!#&^]!@^_++(^($)^#^%}
*($%[*(++@_([!@)%&%^])&&]_%*_+)$[+)){_[)+*+**)][_@@!]&[%@$(!#__@]+_{$@+*+)_[%^}[
(++$%*@_](}_(+!}!(%!*([&#[$@]#}+@@%^[]&^[%]+[{!_#+{(*)!*+@*}+(+!*+#@[@#!)#*[]#%&
[_%^!#%_]$}#+[+&[@)_#]+$%{]*_%#}}&[}#*(!))@_+@$}$#[]}*@%!}^^&$%&]_@}!!}}{_{&#_&}
$@$+(*!{{{_}!+[}$+_)_++$+}$({$^!*_@]$&^${%$}_!%_{*_[$+)@%+{%&_^%%!+_([$_]+&&%_%[
*]+[!%[^_*+&*$(&@@(+)$!(!#)}!}{+*)_^_*^(}^}+}][&*_@#%{^!&{)%_](**_%&%!!{${#+@$#^
%)^!^$!$#*^]$*}&{]#{*]!{%%+_({%)%+}&$%+_(}_^(%{*++!@^}*_{([[_#_++@+(*&$(%+)+$}[)
[!}&#{$+_@&_!}){{$(}[{*@%[(!@]!{&&%$!#[[(){%#%_^#_{_!}$!{)$$#&_^){[(#&$_^{%$!^}!
((*&@}}&$)!*@$}*^!]+]))!!*%^[%(+[{!(_%]&^$[#!#]{+$+]*}[[*@&&!+^#%!})&$]{*(&+@&+^
{$!#&$[$}$!][@{%@$$$}([{)(]*+$!}$*$&+@%[$*)#]}_&_#@{^#@!@@%+@([)]}{!_[@^+^%]{){&
$@(%@)^]*]&%%_%*#[@(&]])#$#!$%$}@{}!}[[@}#@#](@)$%{&)}[&][[_%%(!!(}%([[){^$@[@[}
%#**%{@_)%{@{*[@#(+&+%[]{+&{}_*[%#!!**+{{_^+@[[@^}[$@(}@[${@@}!*@!(%{}!#*_[&^@[%
)]!)(*(@]#@{%_*+@_&(&*$+&$$$$)+}+@$&)@}}+_*}!(){@@@]%]$}@%@())$^]!]*{!^!$&!([*%*
{]){#}@!+*%(#((($+(_++)*$%#!)$*[]_%)]&}@_{#]]&!##&$$)&^@&{*!{{[))(*{([^*&$})@$*{
}]]}%}!!*_%_(^%{%&*)@^&]]_!*[*{[^%[(]%]*!+])[*(!}&^)]#{&&%*)$[(]#(*@^}[(!](+_[%[
%@&!&*_]^#*_$]^$}^]##+_}*@)%{@[$$#)$*_&)+()@*@&^_${[@%&&$[!+_)#^_${+!&{[#^^(*)&!
#%(^&!+$@!)_*##{[&]^+}(](+%#*%#&##!(]%)!($#!^^)!(_$*!_]&%@#}**+@&+])[%$%@$!]$[!@
%*}_@^$%^]$&#{+]!({[@}&{+@]!{&!&)#((&&(!]!_$}_!!(#}#&&[@_]+%%[_]!}%###*&}&]^^[[_
}[}^*{+]@_)]@)_#*+]+$}+]{]!+&}}}@@)&{*+&#*}#*)__*@@!]!]}}#{!$}*@@@}#^{{!}##^!&@!
)##!]#$[@!{*%+)*#+__)_(%}^*#&[*}{_@&+[]_*[[}[@{]][@)#[%(*$[*{%@]]#${)!%_!*}+]$_)
})_%())]{(]^+)[+)#^{*_^([%]&*+_)][%^&*)++^&{]+]$&_+[@!%$_([&%%!@!%*)+(}]+)()!#}{
(^*&^{[!#$](%_!_**!}$$!&[^{(!#{#@_&^]{)[*+^](!&!(@^(@!@]%+]$(#%_}+)$@}&!#&&{^*{]
+%[!{!$((^_+&]_!@^%#_+}({^^}*{{%]^@&+${%%^[*!#}_(_&&)!$}!_^{[(&$][(%_+$^&}#&((#^
!&]{[+)@}%![(#*)$+$#){)[^+@^_)]%$#}!&}&]$&{*&[$!}%$]&_}[*$^)%&{]}]![[^_}(#{^*!!&
*&[(_{{+}($[}$*()&}$&#%!%#)]{@&$)[}&{&_%_[(((%@{]_*^(!+*[[(*}@]%_}])^%+%&([]}{*&
+![}{([&]$^[+{*]()&!&@}^#+][(!^*^+^&&$#]_{@$+@[({]&)!&)))&#*(%+*&}$^_*+_&@[}{}%^
{_$^+%+@(&@[[)}*@{!_$)@((_*^_$${*#{_#{%@()[@(+$#[)!#*^!(}{!@[+%[&_%()+#%%{)}^)$*
&(@%[{^&%^({(@$*(**$}+)%}&)!+$[((&)@]@@+{{]])]]^$#(@%)!(&&]+#+[&[&}$__$$@$+]@{*#
{]@]@*)!])})!}!%[+$)@)]](}*&!}]+![##]])]_[](+${+]_*@)_^{}&^)_*{*]#[]*{+{)(*&^_#_
&[#_)$){$*!)!){$*$(]{]_&%%}$@&[**+![#{$}]@$$[@@]@{[#])^$***+_#%@$%]{_+}&*!&_%&()
$]*&[)#(%^{]%^)])@#{!^@(@)#+}{$}@_${_(_}[^[+#*&^{%!%+$)){_]*%+(@^{_*#$*(!)[*[&))
^&+^@!!!+])*@__%__@${%#_(^@@*}!&])[#%_!%}!%{@}#!)(_*^@*_)$&_+!#$)&}#_&$}%!{^!#&^
)_)$}%^+_!*$(@%}}@)})}_&{!@^^!&_]**!*[[()]*%(]}!#))+*@($%)+){!#@^+}@((*@[}&%#{_+
{@^+!([!)_!+@+}_+^!%_([(+)($@&@##($_&!@##$%+$@#[_[$!^&*])!&(_]*]}&)[(((]}%[@&[^@
]{*+&_)#!(@&#+((&!%!%}^*&[]#$*^}}$]&(&_{}+_}$%#&%[&}*)*]]&+!_)#[^+%_*}]])+$)%!]{
]##]^($^_)}[}[[)}{++}+(^%^!}}%)&[}${{*+&+@%}&(})@)+%!%_*(*[!+$_)[!#!@[%)@^}(#*%+
#]$]%^)$!{]&_[%%*}_#_)__^[^^#(})}&^%%%_&}&$_$!&{![*^}#+@!*){%)+!]_&*[$)%[)!{]!#^
[{*__(+#_)+^%(%]_%@[++[((^*($_*(_!*+$+[&!(*_[{{&}&%*%@#&%[#*_[_&@&]__+{@$)^%_#$^
@@%!+%+]_{{}*{[]+^*$!]&[$+_}{])]$]*##__##{#!!&)%!@^!!*+#}_{^)%{^*(}])]@$_!__)!#+
@%({&[^${{_{}#([{+{]!@((&*@!)*[}$}(]%+#@$%%&!%&]@$(_][#)))@$+}@%*#^^@%&]}{()%%^!
#&&)++!(}{]}*}}!}(@*@!]^%*!$_[($)!_^^$_[#!(%[!}#&$)@$}#$))&)[##**@](]]$)}}[^@@^#
}&&){[$${&}+[+%_}!#^#%{]_%#%*&_}}+]&_$*!&&][_%!]_*+#^!]{}_!@(}(*(^^*__+$#@#^]*%%
%]^}!_{}!)%!{)%+[___]]$](*^)))*^${)^^$_&[)&}*}($#{#^^#_@[[+[^{{[*__{%$^$}*{{+#{{
$%&+])(^^*$(}#*[$_#%$}!!^%&($}!!(]*{!}(!_&{##[{!+]&#(((%@]!($#%$^@(%))@_)@}*})+[
^]^(}${[(^!*{}!(_[{^*&{&})]]&}![}$!}*+])}[@(_&)[}*@_$_{%[+&#(*_#+)^)!&@!%($!}#[%
[@&[+^@$}&{]{)+^&^#{{}@!}{^{%}#)@!%([$(_!([+({)@^(#@!)$[_&](!}@$*$@!(#[$+!@][}_*
_^#&{)][@*!])^))+@+$%[%&}(^(!@}([#+***_*[^)$((^*(}([!]@##@$%^[{+^**{&[&@@##)#(@#
{+_**$%(#$&^$^]__*)%$*+{#+()[%[(}#]}*&$^%]{}%&(_([]&_}&}*@}#{((&!@!{#+__#*#))&[(
&[[*+]&{[$_}*#@!{]}__!+!!@$@#}+@!%^(^_^{}]+^({)*[**%!@[^)#$%{&[[_!(^(}}{!}!)@###
(#*+!#@&+{_{&%&$}+}]!@*$#&&({(^(_#{+$*+_)}^&$##&+$]$(&$}!}!](%#+^]*$%]%)}^}])#_!
@)*%_$^]$@*&(()&+)_[*#@&{+&}^#*+*{[)[^$+#**^^(^]}][$_+]*&&)@}{&]](_]_]^&&}%@+}[$
(+[%*^!#)#+(}!)[!^{&[*[($%{+([#[[@&})$]@{^^#!$]_}!{*{*{![@$_@]+*{}[!#%@}]&#{*%{}
!!(}&+%%{%{%({!_(%_]@}+^%@_!*[&{)$%&)@]{+&_+%&^_}([_@@)$^!^}{#[$@{(_]{))*$(#!(+%
&!+]}]__^&@)^[}]!*#@)%!(%#)_*#}%*__}_+(**{[+)]%${+@^(]$()@([[{(]}$)}#!&+{%@$+{{^
&}_!$_+$_($*{!*#&$]%_)[({}+^}[@#+**%*}#[([@$][)[&+_%!!}@@}*!@%(#[}&$#]&{+^{#_*^]
@_%(]${&*&#^#_@^{%*^%$^+*(%}[!!)(!%_}!@@#&+)^^{##%&}$+*+!#}+%{%#)^@%[{}^%#+(+$[@
#%}+%%#_&}++)}{]%#]*)]+)]+^(*({}+}@_&&!![)^_$[][@*]!%@(@*+*_]{&}%*^)+[{!_%$+%[*+
&&&!#[_]])$}%[_*!)@}[{*]%!@!_))]*%#^{{+]%]]*@%{%@+^[@}(^#%&**+)*^*[)[)%$[^#@^&+#
+++}%_@@[(}[*%$#}^#+&}+%%)$+_{^(#%*{&*%(*)+([!@)*#[&!@(&_$@%$%]^^&{]@$}[({$((^}!
&%+{#_]{{{^[*_^#^!*@}_}*}+*&(@!^#%)@%[#(&$!&%)][#{*$${(+}($*(*(*&*$^{{@^]{]&*@!)
%&)**+]][!+@]*})(@)_]@{($!%+%%]_)(@{+!*__@[(%&&^]@([@[&%$+(}{{&]]+*}($+&}%(!%*@!
(^)${)%)]]$*!++[_(_($}(@++[^{]%{{!%#!%+*$)_&@&__#([__$[&^!}%$)(]}@+]+_@*]%%{&(@@
$_[(![!)+@[]][]($@%*(}$_(!^^)]^%){{(_#)*#][}$([)[]!_![![@}}}!%^^!}!*#%&{$&!#_!}#
[^{%{$^+@}&_}*_]%(}@*^}]^@*_^&&)^^}[}@]+^*%({*$^{+])_^%*#!${!#+$](+&]{@_+&[!()@{
@)[[){&(#[#{&@&(_]@##])}#&%*)_&!(}_^^$_)))}}}+&&$%&]}}$)![&{#_!(!![[![&(+{{++!}}
%}^%%#)))!}^)}%{*@*_{}*^{&(^+$}!@$_^}{$*^#)(@^]_@@%)@_[}&[)++)$&]+!}![#]@$%@]]!@
^()&++$(_[!^#[&@*@@(){#%@((!(%@(#[&+*%+(^%{{*$%#!(#^{(&*_}!^#%({*_#)%+#{$##{!$]*
{+_!{+^$!_&}%(]+}_}@**(&}(+@^%#+!#{#*@@{+!)}^{^+#(!&}[+*%+@}_+&+&]&+(+)_)((!{%*)
([)_+_[]&}}[{{+[)]!%{&&__&$+${_+#]_$}!&#%[@^^]!)#)_+#$]((*@+#$#)@[)*{[}(()$(@{*[
]}#*_+{)%[+!^{+{(&#_[_}_!{!*{[[)]][$[}*@[$*&))+$[&]@)[*}+^___%!]()^)&!@&[*@_+{}&
[{]$#{!^]^$#+*$}#*)(]!@^&#){][$)}!+%^)@#&!%(+^^($(%}^+[*)#+{%!))}(*&]__})][_))}#
())#&##{]$#$](&$%&&$)^{(@%)$%()#)&&*{]&^^+%$##%{!(_$(**&(_]+{%[%$!_){$*@@++]&^$(
@+{+&%]$)+@({$(+{!*#(%)]+[}){]]#)*[]%&{+)$){!&$]+^++_@]#%)[&&^%]#@#@)]@}%$[_*@%)
[&*^*})@(!{&^#!([%@_![{)+)$}_+)%&^#@#$$}))^&)}({+*&_()&@]$^#(&&{){)_[}{@(}#)!)%&
({+$[!#()[]%{$_*]*^%&]@{^@{)}}_^}@!^*)_[([{}]{*#{]&}}[$_[}!%%&_{{!$[}&[[@#[&_$()
*_$+)&}*){${}!]+%[{{!+)+{!&]$!}{_]&)!!^+){&*#{@!##_(^%^$([!+&+($&)##[&[^_{##{(**
{{)#*%@*[(^(}!%}@*}@+]^_}&&&}&{[$(@[#*+%[&%{$$**]]%(!$+$!]^+[^_(&*{#_^%[[#+{]#_[
*}]#)!%!_[})^%*@{!{$)*_+$$*}%(&]%^+$@!&{[]}**})}#}[#{%{$#@##(])&)((${^]^[%^&(!_&
{@((&@&)]!&{}@#])$($}#}@)#[+$^{%%&*]&_!+{$+[*{()_&&(}%[})}!}(}[!%@$#!*(^^![+(^@{
(+]]@{++#)@@%(!&_#@^$*%^)*](^}#]@]}]@++*^+$_+]^_][]@^#$^&!_+!(^+((&@^@_^@)$[#!)*
$)_#]*^{@_*[}@}*@@^+**+[[)**{!)!{##[(*}{)+@{}}{]+!*+&*&)_^&{*+@[*_#{)#(}_*]%{+!%
(}_%$#)^*&+[([@*!!{@*[{%@[__)&*&@{__+_}[_#{]!@*%(^&^_$_+[([(($@)])]))%_{(^@!{!#@
#*%%[#&#[+)%__]{++*)]}![[_%+__{${}%!}+!!)}*)_+!#%^}[!)[@[]@@_(@&&*]^_{+[)}@#{#*{
*%!%@{$%!_^+&]+@{$)!&_}_}&!}#)#[$&_&&_)*({({$[$)]]%#^{^%}}^%#]&+^}[!&_[[[(&{@+&^
_()%@#@{%_({${*!)(*+$*!!+$&&]{^^!(#}@[&@&[}^#)]+{)__@_[+]%$)]}$[([^{)%&)@[+]!_!+
_#$))}!+&&#(^[^(}%(%%$%+}{$^^)&^[@}#$]{!}+*}]{_}}*(*@]}#+{}@@!$(}])%+^!#@(_^(@[(
_#}$[^[@^+_&}*#$}%^)(#*}%_+]@_%]%&&()[^(}[*[}#@(%%$}]_)^(!#%]#@(#+!#{#$]^!}))_]*
]+%^+%))]+%$]+!%@[#@[@@[*#!+$#}}*#()$_*$[^}+)@#^^$(^+)^@%](!+)^[#!#_*{^^]&[_[_+}
$]%@^+!##}*(*)&([]+]##%$)+$_^%^@&((+@&)%}${#&$!!($#&^](^^{{(&+]_]@&*#_^+#!(}]$*&
_+@#[})]])[((#@&]!&]*&{*&#_[#(]{(}!]_+@[{^+{{!)*{!}]@@^#*{*({(%#(@@(]{%]!@@+%!!*
%(!{&^%%&$(!#@{+*#+*{]!%&)%*]*#$]()
]!{][@$}@)$__*_]}^(#*!#!_!**@{(&![]$_+_%^#_!%!$&@](!%%((%[#]&&}_{]+[+*##])(]%^(+
#(_}((]@}#$^_})%#&&((!^![^}+!}{$(%*{*$@%][)[[%&^[{](&+^*!!!([__[{^}&%+&^(*&])*$&
$#_}*!(+([_&%{^&[([%]}*^{{([@+@]@*&@_!]_+([(#&!]]#$$#]@#{_]][_{@]{*))$({%}_![@$]
#)+[])%]$+^^(}^!([&{)!#}#}}#!}[]]{[++&!)]#]]%^%_&_}!&&$@#&!#}&+]$)^_^*$]!$)}&{#)
+[+!_${^](+([&_%&$)#{$%#[%%][($()+!*_(*&!}%@}@%_#+%{%&*$]*{$(}}{)}]%))$}*^$]$(^^
!&*[^]]&#%&%!_+#&}_#}_]&^+@]%(!+!_*](@]}__+@%+^!&[@[)@(!*[%}^$!$(]!^_])!&_!!_[{*
(+*]_}%+(%[{)]({#[+$&&[^@{&#!@%)!+&}$@+@[+&_*!$(+#*%!+$@{{^**{)(]*$(}+(#+^}%@%^^
!$%}$$}+@^$$%{}{#!(%[]$!*}+(]!%{(^{&^{$[$)]&&^+{+%!#[([%^!{]]#^@!{#(&]@_$*_&!%(!
+_+}%@#{_}^#*)%*(*}*![}[%_[[^@$&%)([*{_${)$^^_!+}{)!)@_[*$_}*}$#[+}{]*+!^])}&{+#
+!@!^*@}!}&{]*#^@}_[)}#@%!_*#!$}!)[(${+^&!{[&&&*[{}+*+(#+_[}{$$)#([*!)%@^%_]#%$$
(++^+&)}*_&%@#[^^+^&@_%]+$%$#$*)@!(]*+@]}%$$}$(#$&^(%[*([&]*^&}(!#{&_}^(*{(+$#}}
(&_+][&_@)$&$&^[_$(++$^}]&^^*(+*!&#_$]*+@!]+{%^_*+!&}@$!#^{+_#([+@(((*+)[()__}(^
@)](+[$*_(]*$[[&@^(_*#(*&!^{+]_%)_)^[}@]#]%#@+^+[%{_*{!)}#$@#)_$!_(!*+#}%%}+$&$[
%&]!{{%*_!*}&)}$**_{*!#%[[#]!](^^$![#[[*}%(_#^^!%))!_^@)@**@}}(%%{#*%@(((]^%^![&
}!)$]&($)@](+(#{$)_%^%_^^#][{*[)%}+[##(##^{$}^]#&(&*{)%)&][&{]&#]}[[^^&[!#}${@_(
#@}&$[[%]_&$+)$!%{(}$^$}*
'''
import collections
# Count every character of orginStr (the long random string above) in
# first-seen order, then print each distinct character exactly once,
# preserving that order.
dic = collections.OrderedDict()
for c in orginStr:
    dic[c] = dic.get(c, 0) + 1
print(''.join([c for c in dic]))
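# A minimal alternative sketch (assumes CPython 3.7+, where plain dicts keep
# insertion order): dict.fromkeys deduplicates while preserving first-seen
# order, making the OrderedDict bookkeeping above unnecessary.
# print(''.join(dict.fromkeys(orginStr)))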
|
[
"lingyunyumo@gmail.com"
] |
lingyunyumo@gmail.com
|
ac334cf5eb6b2d7571fd321354c1331013b2da2f
|
453c2179ebd6bfe86e30fca3924671248cbff4f8
|
/polls/views.py
|
b7473eb6320791798adf4fc14fe7e7693a0d7d5a
|
[] |
no_license
|
oyty/mysite
|
b0875c721933fdcf3630d53d50aba1148df80db5
|
486cfb044566cde710edabd1d52190e92b62b601
|
refs/heads/master
| 2023-02-23T17:46:28.567404
| 2021-01-30T10:27:11
| 2021-01-30T10:27:11
| 333,329,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.db.models import Q, Sum, Count
from polls.models import Product
def index(request):
    # # Two ways to insert data
    # Product.objects.create(name='oyty', weight='111g', size='120*75*77', type_id=1)
    #
    # p = Product(name='oyty', weight='111g', size='120*75*77', type_id=1)
    # p.save()
    #
    # # Update data by loading, changing and saving an instance
    # p = Product.objects.get(id=9)
    # p.name = 'haha'
    # p.save()
    #
    # # Update a single row (update() is a QuerySet method, so filter first)
    # Product.objects.filter(id=9).update(name='haha')
    # # Update multiple rows
    # Product.objects.filter(name='oyty').update(name='haha')
    # # No filter condition: the whole table is updated
    # Product.objects.update(name='haha')
    #
    # # Delete data
    # # Delete everything
    # Product.objects.all().delete()
    # Product.objects.get(id=9).delete()
    # # Delete multiple rows
    # Product.objects.filter(name='oyty').delete()
    #
    # # Query data
    # # Fetch everything; returns a QuerySet
    # p = Product.objects.all()
    # 'select * from product'
    #
    # # Fetch the first 5 rows
    # p = Product.objects.all()[:5]
    # 'select * from product limit 5'
    #
    # # Fetch a single column
    # p = Product.objects.values('name')
    # 'select name from product'
    #
    # # values_list returns the data as a list whose elements are tuples
    # p = Product.objects.values_list('name')[:3]
    #
    # # Query a single row with get
    # p = Product.objects.get(id=2)
    # 'select * from product where id=2'
    #
    # # Query with filter; returns a QuerySet
    # p = Product.objects.filter(id=2)
    #
    # # Query with multiple conditions (AND)
    # p = Product.objects.filter(name='oyty', id=9)
    #
    # # OR query; requires Q objects, in the form Q(field=value) | Q(field=value)
    # p = Product.objects.filter(Q(name='oyty') | Q(id=9))
    # 'select * from product where name="oyty" or id=9'
    #
    # # count() returns the number of matching rows
    # p = Product.objects.filter(name='oyty').count()
    # p = 2
    #
    # # Deduplicated query with distinct()
    # p = Product.objects.values('name').filter(name='oyty').distinct()
    # 'select DISTINCT name from product where name="oyty"'
    #
    # # Order by id descending; prefix the field in order_by with "-" for descending
    # p = Product.objects.order_by('-id')
    #
    # # Aggregation (sums, averages, etc.); Django provides annotate and aggregate
    # p = Product.objects.values('name').annotate(Sum('id'))
    # 'select name, sum(id) as "id_sum" from product group by name order by null'
    #
    # # aggregate computes over a field and returns the result
    # p = Product.objects.aggregate(id_count=Count('id'))
    # {'id_count': 11}
    # 'select COUNT(id) as "id_count" from product'
return HttpResponse("Hello, world, You're at the polls index.")
|
[
"cooloyty@gmail.com"
] |
cooloyty@gmail.com
|
db0dfd08395653a664dbd60963525a118e951acf
|
c7882a23ea89c05cd9d670ca713108da2e786f3b
|
/src/utilities/models/wrnet.py
|
2ee496be038bdd7b0b5a88363418b979ceec8e8e
|
[] |
no_license
|
c-w-m/ldpc-decoders
|
7513871f28eb34113edf129b3072ce8a60dc43ca
|
4f541efbc8dd20b59ccc0ae4284ddfc6e339f6e4
|
refs/heads/master
| 2023-04-18T05:17:23.351576
| 2021-04-16T16:50:24
| 2021-04-16T16:50:24
| 358,638,672
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,204
|
py
|
'''
code taken from: https://github.com/arberzela/ImageNet32-tensorflow/blob/master/WRN_main.py
TF implementation of results in: https://arxiv.org/pdf/1707.08819.pdf
Number of parameters in following model: imagenet32 - 1,595,320
'''
import tensorflow as tf
_BATCH_NORM_DECAY = 0.9
_BATCH_NORM_EPSILON = 1e-4
def create_plh(with_data=True):
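    '''Create the boolean is_training placeholder together with the matching
    train/eval feed dicts ({is_training: True} and {is_training: False}).'''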
is_training = tf.placeholder(tf.bool, name='is_training')
feed_dicts = {is_training:True}, {is_training:False}
kwargs = {'is_training': is_training}
return kwargs, feed_dicts
def batch_norm_relu(inputs, is_training, data_format):
"""Performs a batch normalization followed by a ReLU."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
inputs = tf.layers.batch_normalization(
inputs=inputs, axis=1 if data_format == 'channels_first' else 3,
momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
scale=True, training=is_training, fused=True)
inputs = tf.nn.relu(inputs)
return inputs
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
return tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding='SAME', use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(scale=2.0, distribution='normal'),
data_format=data_format)
def building_block(inputs, filters, is_training, projection_shortcut, strides,
dropoutrate, data_format):
"""Standard building block for residual networks with BN before convolutions.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
is_training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
    dropoutrate: The dropout rate applied between the block's two convolutions.
    data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm_relu(inputs, is_training, data_format)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = tf.layers.dropout(inputs=inputs, rate=dropoutrate, training=is_training)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=1,
data_format=data_format)
return inputs + shortcut
def bottleneck_block(inputs, filters, is_training, projection_shortcut,
strides, data_format):
"""Bottleneck block variant for residual networks with BN before convolutions.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first two convolutions. Note that the
third and final convolution will use 4 times as many filters.
is_training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm_relu(inputs, is_training, data_format)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=1, strides=1,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
data_format=data_format)
return inputs + shortcut
def block_group(inputs, filters, block_fn, blocks, strides, dropoutrate, is_training, name,
data_format):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first convolution of the layer.
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
blocks: The number of blocks contained in the layer.
strides: The stride to use for the first convolution of the layer. If
      greater than 1, this layer will ultimately downsample the input.
    dropoutrate: The dropout rate passed through to each block.
is_training: Either True or False, whether we are currently training the
model. Needed for batch norm.
name: A string name for the tensor output of the block layer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block layer.
"""
# Bottleneck blocks end with 4x the number of filters as they start with
filters_out = 4 * filters if block_fn is bottleneck_block else filters
def projection_shortcut(inputs):
return tf.layers.conv2d(
inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
padding='SAME', use_bias=False,
kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
data_format=data_format)
# Only the first block per block_layer uses projection_shortcut and strides
inputs = block_fn(inputs, filters, is_training, projection_shortcut, strides,
dropoutrate, data_format)
for _ in range(1, blocks):
inputs = block_fn(inputs, filters, is_training, None, 1,
dropoutrate, data_format)
return tf.identity(inputs, name)
# ##################### Build the neural network model #######################
def create_model(inputs, is_training, depth=28, k=2, num_classes=1000, dropoutrate=0):
"""Constructs the ResNet model given the inputs."""
img_size = inputs.shape[1]
num_blocks = (depth - 4) // 6
if depth % 6 != 4: raise ValueError('depth must be 6n + 4:', depth)
# https://stackoverflow.com/questions/41651628/negative-dimension-size-caused-by-subtracting-3-from-1-for-conv2d
data_format = 'channels_last' #('channels_first' if tf.test.is_built_with_cuda() else 'channels_last')
inputs = conv2d_fixed_padding(
inputs=inputs, filters=16, kernel_size=3, strides=1,
data_format=data_format)
inputs = tf.identity(inputs, 'initial_conv')
num_filters = int(16*k)
inputs = block_group(
inputs=inputs, filters=num_filters, block_fn=building_block, blocks=num_blocks,
strides=1, dropoutrate=dropoutrate, is_training=is_training, name='block_layer1',
data_format=data_format)
if img_size >= 16:
num_filters = int(32*k)
inputs = block_group(
inputs=inputs, filters=num_filters, block_fn=building_block, blocks=num_blocks,
strides=2, dropoutrate=dropoutrate, is_training=is_training, name='block_layer2',
data_format=data_format)
if img_size >= 32:
num_filters = int(64*k)
inputs = block_group(
inputs=inputs, filters=num_filters, block_fn=building_block, blocks=num_blocks,
strides=2, dropoutrate=dropoutrate, is_training=is_training, name='block_layer3',
data_format=data_format)
if img_size >= 64:
num_filters = int(128*k)
inputs = block_group(
inputs=inputs, filters=num_filters, block_fn=building_block, blocks=num_blocks,
strides=2, dropoutrate=dropoutrate, is_training=is_training, name='block_layer4',
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = tf.layers.average_pooling2d(
inputs=inputs, pool_size=8, strides=1, padding='VALID',
data_format=data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(inputs, [-1, num_filters])
inputs = tf.layers.dense(inputs=inputs, units=num_classes)
inputs = tf.identity(inputs, 'final_dense')
logits = inputs
return logits #, [v for v in tf.trainable_variables()]
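# A hypothetical usage sketch (not part of the original file): build the graph
# for 32x32 RGB inputs and fetch logits. `images` and the class count are
# illustrative placeholders; this assumes the TF1-style API used above.
if __name__ == '__main__':
    kwargs, (train_feed, eval_feed) = create_plh()
    images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
    logits = create_model(images, kwargs['is_training'], depth=28, k=2, num_classes=10)
    # feed train_feed / eval_feed in session.run to toggle batch norm and dropout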
|
[
"craig.william.miller@gmail.com"
] |
craig.william.miller@gmail.com
|
b7a3c7491ece3a1f29a9bd9a42c46b6fc1f0e7ee
|
03b704c6daec8113b7845ac3f11a3d56df930000
|
/1. Básico/27- PassEEllipsis.py
|
6214ecca0a43b9d282cc8e4a8df4099dd0c373aa
|
[] |
no_license
|
AmandaRH07/PythonUdemy
|
fc31eed99a42a97500462bc1507ee4215a271019
|
143fb382f55e4be7ede9bd994f442b24f8762c4e
|
refs/heads/master
| 2023-02-08T14:15:41.382018
| 2021-01-05T15:04:07
| 2021-01-05T15:04:07
| 291,131,929
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
valor = True
if valor:
# pass voltar depois e escrever o código ou
...
else:
print("tchau")
|
[
"amandarafahass13@gmail.com"
] |
amandarafahass13@gmail.com
|
95a78e0697cb46280fd58d9e07fb637d9f1a6c0b
|
8234a8657664f12831daa71cf265f6d6732010f2
|
/ilc_tools/filters.py
|
b3bc729745cf78b52bc65a46c4b2db0879ad193b
|
[] |
no_license
|
LBJ-Wade/SZ-Filtering
|
fb778fc164a981c164cf7a13bb66a5f1cca2736f
|
4bcb8a2600bec7b363c07cf6ed077b05ed73040b
|
refs/heads/main
| 2023-04-04T04:46:59.968331
| 2021-01-04T22:15:51
| 2021-01-04T22:15:51
| 356,129,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,915
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from ilc_tools import misc
from scipy import interpolate
from scipy import ndimage
from scipy import signal
from pymf import make_filter_map
from ilc_tools import data_tools
import nifty5 as ift
def radialprofile_cmb(data, rmax=100, nbins=20):
    ''' Compute radial profiles for a stack of nc cluster-centered maps
    Parameters:
    -----------
    data: ndarray (loaded from a FITS file)
        3D array whose first dimension indexes the nc maps
    rmax: int
        maximum radius in pixels. Default is 100
    nbins: int
        number of bins used for the profile. Default is 20
    Returns:
    --------
    rad_profile: float
        3D array (nc, 2, nbins-1) with the radial profile of each cluster
    error: float
        1D array with the standard deviation across clusters in each bin
    '''
npix = data.shape[1]
center = (npix/2,npix/2)
y, x = np.indices((data.shape[1], data.shape[1])) # first determine radii of all pixels
r = np.sqrt((x - center[0])**2 + (y - center[1])**2) #compute distance
bins = np.linspace(0, rmax, nbins)
nc = data.shape[0]
rad_profile = np.zeros((nc,2, nbins-1))
error = np.zeros((nbins-1))
for f in np.arange(nc):
for i in np.arange(len(bins)-1):
index = (r>bins[i]) & (r <bins[i+1])
rad_profile[f,0,i] = (bins[i] + bins[i+1])/2
rad_profile[f,1,i] = np.mean(data[f,index])
for i in np.arange(len(bins)-1):
error[i] = np.std(rad_profile[:,1,i])
return rad_profile, error
def radialprofile_ksz(data, rmax=100, nbins=20):
    ''' Compute the radial profile of a single cluster-centered map
    Parameters:
    -----------
    data: ndarray (loaded from a FITS file)
        2D array with dimensions corresponding to the number of pixels in the map
    rmax: int
        maximum radius in pixels. Default is 100
    nbins: int
        number of bins used for the profile. Default is 20
    Returns:
    --------
    rad_profile: float
        2D array (2, nbins-1) holding bin centers and mean values
    '''
npix = data.shape[1]
center = (npix/2,npix/2)
y, x = np.indices((data.shape[1], data.shape[1]))
r = np.sqrt((x - center[0])**2 + (y - center[1])**2)
bins = np.linspace(0, rmax, nbins)
rad_profile = np.zeros((2, nbins-1))
error = np.zeros((nbins-1))
for i in np.arange(len(bins)-1):
index = (r>bins[i]) & (r <bins[i+1])
rad_profile[0,i] = (bins[i] + bins[i+1])/2
rad_profile[1,i] = np.mean(data[index])
return rad_profile
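# A hypothetical usage sketch (not part of the original module): radial
# profile of a synthetic 400x400 map standing in for a real cluster cutout.
if __name__ == '__main__':
    demo_map = np.random.randn(400, 400)
    profile = radialprofile_ksz(demo_map, rmax=100, nbins=20)
    plt.plot(profile[0], profile[1])  # bin centre (pixels) vs. mean amplitude
    plt.xlabel('radius [pixels]')
    plt.ylabel('mean amplitude')
    plt.show()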
def WienerFilter(data, signal_template, pixel_size, smooth = True, FWHM = 9.68, pxsize = 0.2, npix = 400):
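    '''Wiener-filter each cluster map in `data`, using the kSZ template map as
    the signal power spectrum and each map itself as the noise estimate.
    Returns (filtered_maps, filters).'''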
hdulist = fits.open(signal_template)
ksz = hdulist[0].data
header = hdulist[0].header
hdulist.close()
if smooth is True:
sigma_beam = FWHM / (2*np.sqrt(2*np.log(2))) / pxsize
ksz = ndimage.gaussian_filter(ksz, sigma=sigma_beam, order=0, mode='wrap', truncate=20.0)
signal_fft = misc.power_spec(ksz, pxsize, True)
signal_fft_nok = misc.power_spec(ksz, pxsize, False)
hdulist = fits.open(data)
data_map = hdulist[0].data
header = hdulist[0].header
hdulist.close()
pixisize = data_tools.ascii.read(pixel_size)
pixsize = np.array(pixisize[:]['pixel_size']).tolist()
nclusters = data_map.shape[0]
filtered_maps = []
filters = []
for i in np.arange(nclusters):
noise_fft = misc.power_spec(data_map[i], pxsize = pixsize[i], return_k = True) #create power spec
noise_fft_nok = misc.power_spec(data_map[i], pxsize = pixsize[i], return_k = False)
signal_ps = np.interp(noise_fft[0], signal_fft[0], signal_fft[1]) #interpolate
noise_ps_map = make_filter_map(data_map[i], noise_fft_nok[0], noise_fft_nok[1]) #make filter maps
signal_ps_map = make_filter_map(data_map[i], noise_fft_nok[0], signal_ps)
filter_window = signal_ps_map / (signal_ps_map + noise_ps_map)
#plt.semilogy(np.real(filter_window[200,200:]))
data_fft = np.fft.fftshift(np.fft.fft2(data_map[i], norm = None)/ npix**2)
filtered_image = np.real(np.fft.ifft2(np.fft.ifftshift(filter_window*data_fft))) * npix**2
filtered_maps.append(filtered_image)
filters.append(filter_window)
return (filtered_maps, filters)
def nifty_wf(signal, noise, y_map, npix = 400, pxsize = 1.5, kernel = 9.68, n = 10, smooth = False):
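    '''NIFTy-based Wiener filter: builds signal and noise power operators,
    filters each of the cmb_mocks noisy realisations, and returns the filtered
    maps together with their squared / absolute-value stacks.'''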
cmb_mocks = noise.shape[0]
A = (2*np.sqrt(2*np.log(2)))
if smooth is True:
signal_smooth = np.zeros((cmb_mocks, npix, npix))
noise_smooth = np.zeros((cmb_mocks, npix, npix))
for i in np.arange(cmb_mocks):
noise_data = ndimage.gaussian_filter(noise[i], sigma= kernel/A/pxsize, order=0, mode = "reflect", truncate = 10)
#signal_data = ndimage.gaussian_filter(signal[i], sigma= kernel/A/pxsize, order=0, mode = "reflect", truncate = 10)
signal_data = signal[i] #uncomment here if smoothing signal and noise
noise_smooth[i,:,:] = noise_data
signal_smooth[i,:,:] = signal_data
else:
noise_smooth = noise
signal_smooth = signal
pixel_space = ift.RGSpace([npix, npix])
fourier_space = pixel_space.get_default_codomain()
s_data = np.zeros((cmb_mocks, npix, npix))
m_data = np.zeros((cmb_mocks, npix, npix))
d_data = np.zeros((cmb_mocks, npix, npix))
for i in np.arange(cmb_mocks):
signal_field = ift.Field.from_global_data(pixel_space, signal_smooth.astype(float)) #[i] for mock_data
HT = ift.HartleyOperator(fourier_space, target=pixel_space)
power_field = ift.power_analyze(HT.inverse(signal_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
Sh = ift.create_power_operator(fourier_space, power_spectrum=power_field)
R = HT
noise_field = ift.Field.from_global_data(pixel_space, noise_smooth[i].astype(float))
noise_power_field = ift.power_analyze(HT.inverse(noise_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
N = ift.create_power_operator(HT.domain, noise_power_field)
N_inverse = HT@N@HT.inverse
data = signal_field + noise_field # --->when using mock_data
# Wiener filtering the data
j = (R.adjoint @N_inverse.inverse)(data)
D_inv = R.adjoint @ N_inverse.inverse @ R + Sh.inverse
IC = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
D = ift.InversionEnabler(D_inv, IC, approximation=Sh.inverse).inverse
m = D(j)
#s_data[i,:,:] = (signal_field).to_global_data()
m_data[i,:,:] = HT(m).to_global_data()
#d_data[i,:,:] = data.to_global_data()
    # Square the filtered map and also take the absolute value of the filtered map
# uncomment here for no cross correlation
squared_m_data = np.zeros((cmb_mocks, npix, npix))
abs_m_data = np.zeros((cmb_mocks, npix, npix))
for i in np.arange(m_data.shape[0]):
squared_m_data[i,:,:] = m_data[i,:,:] * m_data[i,:,:]
abs_m_data[i,:,:] = np.abs(m_data[i,:,:])
#Stacking all filtered maps
stack1 = np.sum(squared_m_data, axis = 0)/m_data.shape[0]
stack2 = np.sum(abs_m_data, axis = 0)/m_data.shape[0]
return (m_data, squared_m_data, abs_m_data, stack1, stack2) #change here to return the right values ---->, stack_square, stack_abs
'''
#Stacking progressively
stack_maps = np.zeros((npix,npix))
stack_square = np.zeros((int(m_data.shape[0]/n), npix, npix))
k = 0
for i in np.arange(m_data.shape[0]):
stack = stack_maps + squared_m_data[i]
stack_maps[:,:] = stack
if np.mod(i,n) == 0:
stack_square[k,:,:] = stack_maps[:,:]
k = k+1
stack_abs_maps = np.zeros((npix,npix))
stack_abs = np.zeros((int(m_data.shape[0]/n), npix, npix))
l = 0
for i in np.arange(m_data.shape[0]):
stack = stack_abs_maps + abs_m_data[i]
stack_abs_maps[:,:] = stack
if np.mod(i,n) == 0:
stack_abs[l,:,:] = stack_abs_maps[:,:]
l = l+1
# cross correlating filtered_map(m_data) with y_map
squared_corr_data = np.zeros((cmb_mocks, npix, npix))
abs_corr_data = np.zeros((cmb_mocks, npix, npix))
for i in np.arange(m_data.shape[0]):
from scipy import signal
corr_data = signal.correlate(y_map, m_data[i], mode = 'same', method = 'fft')
squared_corr_data[i,:,:] = corr_data * corr_data
abs_corr_data[i,:,:] = np.abs(corr_data)
stack1_corr = np.sum(squared_corr_data, axis = 0)/m_data.shape[0]
stack2_corr = np.sum(abs_corr_data, axis = 0)/m_data.shape[0]
stack_maps = np.zeros((npix,npix))
stack_square_corr = np.zeros((int(m_data.shape[0]/n), npix, npix))
k = 0
for i in np.arange(m_data.shape[0]):
stack = stack_maps + squared_corr_data[i]
stack_maps[:,:] = stack
if np.mod(i,n) == 0:
stack_square_corr[k,:,:] = stack_maps[:,:]
k = k+1
stack_abs_maps = np.zeros((npix,npix))
stack_abs_corr = np.zeros((int(m_data.shape[0]/n), npix, npix))
l = 0
for i in np.arange(m_data.shape[0]):
stack = stack_abs_maps + abs_corr_data[i]
stack_abs_maps[:,:] = stack
if np.mod(i,n) == 0:
stack_abs_corr[l,:,:] = stack_abs_maps[:,:]
l = l+1
'''
#Simple Wiener Filter
def wf(signal, noise, signal_boost, npix = 400):
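    '''Plain Wiener filter: for each factor in signal_boost, filter
    signal * factor + noise and return (signal, filtered, data) cubes of
    shape (len(signal_boost), npix, npix).'''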
pixel_space = ift.RGSpace([npix, npix])
fourier_space = pixel_space.get_default_codomain()
signal_field = ift.Field.from_global_data(pixel_space, signal.astype(float))
HT = ift.HartleyOperator(fourier_space, target=pixel_space)
power_field = ift.power_analyze(HT.inverse(signal_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
Sh = ift.create_power_operator(fourier_space, power_spectrum=power_field)
R = HT
noise_field = ift.Field.from_global_data(pixel_space, noise.astype(float))
noise_power_field = ift.power_analyze(HT.inverse(noise_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
N = ift.create_power_operator(HT.domain, noise_power_field)
N_inverse = HT@N@HT.inverse
amplify = len(signal_boost)
s_data = np.zeros((amplify, npix, npix))
m_data = np.zeros((amplify, npix, npix))
d_data = np.zeros((amplify, npix, npix))
for i in np.arange(amplify):
data = signal_field * signal_boost[i] + noise_field #
# Wiener filtering the data
j = (R.adjoint @N_inverse.inverse)(data)
D_inv = R.adjoint @ N_inverse.inverse @ R + Sh.inverse
IC = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
D = ift.InversionEnabler(D_inv, IC, approximation=Sh.inverse).inverse
m = D(j)
s_data[i,:,:] = (signal_field * signal_boost[i]).to_global_data()
m_data[i,:,:] = HT(m).to_global_data()
d_data[i,:,:] = data.to_global_data()
return (s_data, m_data, d_data)
def wf_test(signal, noise, signal_boost, npix = 400):
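    '''Null test: same operators as wf(), but the data fed to the filter is
    pure noise, which checks how much spurious signal gets reconstructed.'''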
pixel_space = ift.RGSpace([npix, npix])
fourier_space = pixel_space.get_default_codomain()
signal_field = ift.Field.from_global_data(pixel_space, signal.astype(float))
HT = ift.HartleyOperator(fourier_space, target=pixel_space)
power_field = ift.power_analyze(HT.inverse(signal_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
Sh = ift.create_power_operator(fourier_space, power_spectrum=power_field)
R = HT
noise_field = ift.Field.from_global_data(pixel_space, noise.astype(float))
noise_power_field = ift.power_analyze(HT.inverse(noise_field), binbounds=ift.PowerSpace.useful_binbounds(fourier_space, True))
N = ift.create_power_operator(HT.domain, noise_power_field)
N_inverse = HT@N@HT.inverse
amplify = len(signal_boost)
s_data = np.zeros((amplify, npix, npix))
m_data = np.zeros((amplify, npix, npix))
d_data = np.zeros((amplify, npix, npix))
for i in np.arange(amplify):
data = noise_field
# Wiener filtering the data
j = (R.adjoint @N_inverse.inverse)(data)
D_inv = R.adjoint @ N_inverse.inverse @ R + Sh.inverse
IC = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
D = ift.InversionEnabler(D_inv, IC, approximation=Sh.inverse).inverse
m = D(j)
s_data[i,:,:] = (signal_field * signal_boost[i]).to_global_data()
m_data[i,:,:] = HT(m).to_global_data()
d_data[i,:,:] = data.to_global_data()
return (s_data, m_data, d_data)
|
[
"rpinto@smail.uni-koeln.de"
] |
rpinto@smail.uni-koeln.de
|
b655d4f9b4db58e142ef260d81f513e74d101882
|
ded2c619df771c28203fd855a337c4ccdff816a7
|
/stapled/lib/__init__.py
|
a48b95256c330b00767c57e41c1e6dca5a0d940f
|
[
"Apache-2.0"
] |
permissive
|
Surfndez/stapled
|
b65c2f02152e34a9045a1f48f84eb06641ed1ccc
|
86b9272adaf07549b7f2ecbd7950d9d327a8d06f
|
refs/heads/master
| 2023-05-10T06:28:44.374940
| 2019-10-01T08:49:54
| 2019-10-01T08:49:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,881
|
py
|
"""
Various functions to deal with locally installed libraries.__doc__
Contains functions that finds libraries in ``stapled/libs`` and return Various
listing forms and functions to add libs to the python path.
This is meant for libraries that have a free license and that are not available
as debian packages. We package them along with stapled, so we don't have to
install them using PIP.
"""
import os
import sys
def _libs():
"""
Make a dict containing the name and path of each of the libs.
:return dict: name of the lib as key, path of the lib as value
"""
exclude = ('__init__.py', '__init__.pyc', '__pycache__')
lib_dir = os.path.relpath(os.path.dirname(__file__))
# Filter out self
libs = filter(lambda p: p not in exclude, os.listdir(lib_dir))
return dict((lib, os.path.join(lib_dir, lib)) for lib in libs)
def find_lib_paths():
"""
Find libs in the paths of ``_libs`` and return only the paths.
This is meant to be used in a setup.py file.
:return list: list of lib directories to search for packages
"""
return _libs().values()
def find_lib_path_dict():
"""
Use ``_libs`` and add the name of the package to the end of the paths.
This is done because the paths are ``lib/[project]/[project]`` not
``lib/[project]``.
This is meant to be used in a setup.py file.
    :return dict: dict mapping each lib name to the path where its __init__.py lives.
"""
paths = _libs().items()
return dict(
(lib, os.path.join(path, lib)) for lib, path in paths
)
def add_paths():
"""
Add the found libs to the python path so they can be imported.
:returns list|NoneType: A list of paths added to sys.path or None.
"""
libs = _libs().values()
if libs:
for lib in libs:
sys.path.append(lib)
return libs
else:
return None
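# A minimal usage sketch (hypothetical, not part of the original module):
#
#     import stapled.lib
#     stapled.lib.add_paths()
#     # imports of the bundled libraries now resolve via sys.path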
|
[
"chris@greenhost.nl"
] |
chris@greenhost.nl
|
1970d0316668952d2087e13cade39010d449d122
|
44dc61f79ed697b1f9207aecb72e129afc80bd64
|
/kvt/losses/focal_loss.py
|
9f95b993901de03738edfde7ca1a71277ef3d1bf
|
[
"BSD-2-Clause"
] |
permissive
|
samlinxyz/kaggle-understanding-clouds
|
37711ec0a07f29a3273a050b4ad7c96b863390de
|
22213077ebe027a1cca73efd148e241f81181986
|
refs/heads/master
| 2022-11-30T09:03:25.865023
| 2020-08-14T01:23:59
| 2020-08-14T01:23:59
| 285,413,159
| 0
| 0
|
BSD-2-Clause
| 2020-08-05T21:57:38
| 2020-08-05T21:57:37
| null |
UTF-8
|
Python
| false
| false
| 2,408
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.size_average = size_average
def forward(self, input, target):
if input.dim()>2:
input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
input = input.transpose(1,2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1,1)
        logpt = F.log_softmax(input, dim=1)  # explicit dim avoids the deprecated implicit default
logpt = logpt.gather(1,target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type()!=input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0,target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average: return loss.mean()
else: return loss.sum()
class BinaryFocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=0.25, pos_weight=None, **_):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.pos_weight = pos_weight
def forward(self, input, target, reduction=True, weight=None):
target = target.float()
input = input.view(-1, 1)
target = target.view(-1, 1)
assert target.size() == input.size(), f'{target.size()} vs {input.size()}'
if weight is not None:
assert target.size() == weight.size()
        # Scalar pos_weight overrides any externally supplied weight
if isinstance(self.pos_weight, float) or isinstance(self.pos_weight, int):
weight = target * (self.pos_weight - 1.0) + 1.0
else:
if weight is None:
weight = target + 2.0
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2 - 1))
loss = (invprobs * self.gamma).exp() * loss
if weight is not None:
loss = loss * weight
if reduction:
return loss.mean()
else:
return loss
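# A hypothetical usage sketch (not part of the original file): random logits
# and binary targets, shaped to match the view(-1, 1) flattening above.
if __name__ == '__main__':
    criterion = BinaryFocalLoss(gamma=2, alpha=0.25, pos_weight=2.0)
    logits = torch.randn(8, 1)                 # raw scores, pre-sigmoid
    targets = (torch.rand(8, 1) > 0.5).long()  # 0/1 class labels
    print(criterion(logits, targets))          # scalar mean focal loss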
|
[
"pudae81@gmail.com"
] |
pudae81@gmail.com
|
47c7662f968130690ed6765123e2e6505261ef44
|
9f79a52d647ae0727b9df967bf9779acfcb9d0a5
|
/Random Drafts/prime factor.py
|
864f13a80904bf006f6b6a66352eaa3906157a67
|
[
"MIT"
] |
permissive
|
Kevinskwk/ILP
|
2c82e495a6877d4de08c80a1ecd38b8ebb3d792e
|
7a3925a22232d486a5a8f5df8255f9297fd73fec
|
refs/heads/master
| 2020-07-06T08:35:44.081946
| 2019-08-18T04:09:25
| 2019-08-18T04:09:25
| 202,956,903
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
n = int(input(':'))
listf = []
listpf = []
# collect all factors of n
for x in range(1, n+1):
    if n % x == 0:
        listf.append(x)
print(listf)
# keep the factors that are prime (exactly two divisors: 1 and itself)
for y in listf:
    l = []
    for z in range(1, y+1):
        if y % z == 0:
            l.append(z)
    if len(l) == 2:
        listpf.append(y)
print(listpf)
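# A more direct sketch (hypothetical, not in the original script): trial
# division yields the prime factorisation with multiplicity in O(sqrt(n)),
# instead of the quadratic double scan above.
def prime_factors(m):
    factors = []
    d = 2
    while d * d <= m:
        while m % d == 0:
            factors.append(d)
            m //= d
        d += 1
    if m > 1:
        factors.append(m)  # whatever remains is prime
    return factors
print(prime_factors(n))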
|
[
"noreply@github.com"
] |
Kevinskwk.noreply@github.com
|
b4cf72a707ccebd5377a2a20a4a4ce2e728c7ff6
|
e9543720e53de3e387133497e66af3b039129dd8
|
/extra_apps/DjangoUeditor/test_try.py
|
f6ca687e0165e7dc854378e96768e445628342b0
|
[] |
no_license
|
weicunheng/BookStore
|
d0e5782e45578bf84a36c98c2e029dfc10582959
|
d2fd226e130627ae3b39470260ef0961796900a4
|
refs/heads/master
| 2020-03-25T08:35:07.862245
| 2018-08-17T13:42:33
| 2018-08-17T13:42:33
| 143,620,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84
|
py
|
# coding:utf-8
from trade.utils import FileSize
MF = FileSize("36723678")
print(MF)
|
[
"1261360292@qq.com"
] |
1261360292@qq.com
|
c6237d72e9716dbbb5e79d81d46c0273ac31e4bd
|
7b38f0efcc222e28ddfacec16239ac57c6915cf8
|
/PythonSelenium/locators.py
|
5b1a266e113d624eabf82d57758fa856b7412914
|
[] |
no_license
|
ChaithanyaRepo/PythonTesting
|
e6bb728137013e3ecbdac69a88bd8a2453137ee4
|
00ed637e96572dc6fa9a8fd826dd3f14809f66e6
|
refs/heads/master
| 2022-04-18T13:54:08.847939
| 2020-04-17T02:30:43
| 2020-04-17T02:30:43
| 256,381,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,934
|
py
|
from selenium import webdriver
from time import sleep
driver = webdriver.Chrome(executable_path='/home/chaitanya/Documents/software/drivers/chromedriver_linux64/chromedriver')
# driver = webdriver.Firefox(executable_path='/home/chaitanya/Documents/software/drivers/geckodriver-v0.26.0-linux64/geckodriver')
driver.get("https://www.flipkart.com/")
sleep(1)
# driver.find_element_by_css_selector("input[class='_2zrpKA _1dBPDZ']").send_keys("chaitanyab94@gmail.com")
driver.find_element_by_xpath("//input[@class='_2zrpKA _1dBPDZ']").send_keys("chaitanyab94@gmail.com")
# driver.find_element_by_name("identifier").send_keys("chaitanyab94@gmail.com")
# driver.find_element_by_css_selector("span[class='CwaK9']").click()
sleep(1)
driver.find_element_by_css_selector("input[class='_2zrpKA _3v41xv _1dBPDZ']").send_keys("December13!")
# driver.find_element_by_name("password").send_keys("December13!")
# driver.find_element_by_css_selector("button[class='_2AkmmA _1LctnI _7UHT_c']").click()
driver.find_element_by_css_selector("[class*='_1LctnI _7UHT_c']").click()
# driver.find_element_by_name("identifier").send_keys("chaitanyab94@gmail.com")
# driver.find_element_by_class_name("CwaK9").click()
# sleep(1)
# driver.find_element_by_name("password").send_keys("Nayak@123")
# driver.find_element_by_class_name("CwaK9").click()
# Copying the text from web page
# print(driver.find_element_by_class_name("oZoRPi").text)
# print(driver.find_element_by_css_selector("a[class='oZoRPi']").text)
# print(driver.find_element_by_xpath("//a[@class='oZoRPi']").text)
sleep(1)
# XPath with regular expression
# print(driver.find_element_by_xpath("//*[contains(@class,'oZoRPi')]").text)
sleep(1)
# CSS Selector with regular expression
# print(driver.find_element_by_css_selector("[class='oZoRPi']").text)
driver.get("https://www.amazon.in/")
sleep(1)
# Generating CSS using ID
driver.find_element_by_css_selector("input#twotabsearchtextbox").click()
|
[
"nayakchanakya88@gmail.com"
] |
nayakchanakya88@gmail.com
|
55b72c60ab6537917927db1260ce8688c7d61b4b
|
fa94acebd82a527654a228186ddc6e8aff067d14
|
/flaskbb/themes/drpalms/blueprints/calendar/views.py
|
1c99fb3e48d94d4a1fc3d5905c43ac329af487b4
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
jtom343/flaskbb
|
fa3645402ed3d3796e5389683b288160a8ceae61
|
5294587f2afc49c228b2b66ddd32c8632ef33ed7
|
refs/heads/master
| 2022-12-13T00:48:14.412261
| 2019-10-01T03:58:25
| 2019-10-01T03:58:25
| 211,943,806
| 0
| 0
|
NOASSERTION
| 2022-11-22T03:58:38
| 2019-09-30T19:54:05
|
C++
|
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
from flask import (
Blueprint,
current_app,
render_template,
url_for,
request,
redirect,
flash,
)
from flask_login import login_required, current_user
from flask_babel import gettext as _
from config import settings
from lib.util_json import render_json
from snakeeyes.blueprints.billing.forms import SubscriptionForm, \
UpdateSubscriptionForm, CancelSubscriptionForm, PaymentForm
from snakeeyes.blueprints.billing.models.coupon import Coupon
from snakeeyes.blueprints.billing.models.subscription import Subscription
from snakeeyes.blueprints.billing.models.invoice import Invoice
from snakeeyes.blueprints.billing.decorators import subscription_required, \
handle_stripe_exceptions
billing = Blueprint('billing', __name__, template_folder='../templates',
url_prefix='/subscription')
@billing.route('/pricing')
def pricing():
if current_user.is_authenticated and current_user.subscription:
return redirect(url_for('billing.update'))
form = UpdateSubscriptionForm()
return render_template('billing/pricing.html', form=form,
plans=settings.STRIPE_PLANS)
@billing.route('/coupon_code', methods=['POST'])
@login_required
def coupon_code():
code = request.form.get('coupon_code')
if code is None:
return render_json(422,
{'error': 'Coupon code cannot be processed.'})
coupon = Coupon.find_by_code(code)
if coupon is None:
return render_json(404, {'error': 'Coupon code not found.'})
return render_json(200, {'data': coupon.to_json()})
|
[
"jtom343@gmail.com"
] |
jtom343@gmail.com
|
0fb9eee149e8e1314707960d82b1858bab694364
|
ccb54253ade49adce593c9d944e080d34a779e59
|
/uploader.py
|
b838fc5b8c17ae477c0df7d4a4cab2aa6f4dd3c4
|
[] |
no_license
|
kirillkuzin/prekol-backend
|
16de1162970386169f74c08b95ceb76b50649911
|
4e9696e151f32b2c68696355fadb07cd1855fe5a
|
refs/heads/master
| 2023-04-13T19:43:37.113090
| 2021-03-28T08:01:27
| 2021-03-28T08:01:27
| 352,021,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,455
|
py
|
import csv
import asyncio
from tqdm import tqdm
from data import db, get_cities, save_place
async def main():
await db.connect()
cities = {}
cities_data = await get_cities()
for city_data in cities_data:
cities.update({city_data['title']: city_data['id']})
print(cities)
with open('data.csv') as file:
data = csv.DictReader(file)
data = list(data)
with tqdm(total=len(data)) as pbar:
for row in data:
try:
title = row['name']
description = row['wikipedia_extracts']
place_type = row['category']
lon = float(row['longitude'])
lat = float(row['latitude'])
image_url = row['clear_link']
city_id = cities[row['capital']]
await save_place(title=title,
description=description,
place_type=place_type,
lon=lon,
lat=lat,
image_url=image_url,
city_id=city_id)
pbar.update(1)
except Exception as e:
print(e)
await db.disconnect()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
[
"offkirillkuzin@gmail.com"
] |
offkirillkuzin@gmail.com
|
a36015f5613612fa2f21854f55e993893eecaad2
|
553c0d835da8807a9abf9ead626f490c017a182b
|
/0x0F-python-object_relational_mapping/model_city.py
|
0aeb8491ca999d10cd138f091bc83450521631ce
|
[] |
no_license
|
robertrowe1013/holbertonschool-higher_level_programming
|
49c7d47b533b579da5582dcf4afb009d8319355b
|
0669d897f8dd156f11e8a26f0850bc78187b07a1
|
refs/heads/master
| 2022-12-19T13:00:07.040357
| 2020-09-25T03:35:39
| 2020-09-25T03:35:39
| 259,423,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
#!/usr/bin/python3
""" city model """
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from model_state import Base
class City(Base):
""" city class """
__tablename__ = 'cities'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(128), nullable=False)
state_id = Column(Integer, ForeignKey('states.id'), nullable=False)
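# A minimal usage sketch (assumes model_state defines Base/State and a
# reachable MySQL server; the credentials and database name are placeholders):
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine('mysql+mysqldb://user:pwd@localhost/hbtn_0e_14_usa')
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   for city in session.query(City).order_by(City.id):
#       print('{}: {}'.format(city.id, city.name))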
|
[
"robertrowe1013@gmail.com"
] |
robertrowe1013@gmail.com
|
d5070d491cc56d77d14d2e53c3b438857d146d2b
|
b3101e2551ca152229619dea05272334a438f7ee
|
/dbcalculatorv2.py
|
f78bc113b761b802bef7ca526745d47a2aaefdb4
|
[] |
no_license
|
db1985/Python-Test-Scripts
|
bc499b8c7b472f3c24b990a4640fbb0492698bfa
|
730c765ad6aeabfaaddc107f4d698b63bbbdb91b
|
refs/heads/master
| 2022-09-22T15:50:22.688566
| 2020-06-05T09:49:00
| 2020-06-05T09:49:00
| 268,920,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# create a basic calculator
num1 = int(input("Enter 1st number:"))
num2 = int(input("Enter 2nd number:"))
choice = input("Enter +, -, *, /:")
result = None
if choice == "+":
    result = num1 + num2
elif choice == "-":
    result = num1 - num2
elif choice == "*":
    result = num1 * num2
elif choice == "/":
    result = num1 / num2
else:
    print("Wrong option!")
if result is not None:
    print(num1, choice, num2, "=", result)
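# Sample session (illustrative):
#   Enter 1st number: 6
#   Enter 2nd number: 7
#   Enter +, -, *, /: *
#   6 * 7 = 42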
|
[
"noreply@github.com"
] |
db1985.noreply@github.com
|
a1f6800ccccb1a4261c386a81ed7d202ada35784
|
d6b031991b9755d52e982a6db3fed8a6569988fa
|
/dio.py
|
f583874173e9328566f3f1a3e54055298523e4e3
|
[
"Python-2.0"
] |
permissive
|
shun60s/Python-WORLD-Win10
|
bf7f940b6ac20106ae8992581ca23848f7b51f6f
|
45630d486512b7ab8c1a92c91ce87b464796192a
|
refs/heads/master
| 2020-04-03T01:31:38.406619
| 2019-02-13T12:55:31
| 2019-02-13T12:55:31
| 154,933,399
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,945
|
py
|
#coding: utf-8
#---------------------------------------------------------------------------------------------
# Description: A method of F0 estimation
# Input
# x : input signal
# fs : sampling frequency
# option : user setting (f0_floor (Hz), f0_ceil (Hz), target_fs (Hz)
# channels_in_octave (ch), allowed_range, and frame_period (ms))
# Caution: minimum frame_period is 1.
# Output
#   f0_parameter : f0 information
#
# Change: comment out numba
# Date: 2018.10
#---------------------------------------------------------------------------------------------
# This is based on
# dio.py
# of Python-WORLD <https://github.com/tuanad121/Python-WORLD>
# Copyright (c) 2017
# Tuan Dinh Anh <dintu@ohsu.edu>
# Alexander Kain <kaina@ohsu.edu>
# Pls see LICENSE_Python-WORLD-master.txt in the docs folder
#
# This WORLD, a high-quality speech analysis/synthesis system based on the vocoder model,
# originates from the work at <http://www.kki.yamanashi.ac.jp/~mmorise/world/>.
# Pls see LICENSE-WORLD.txt in the docs folder
#
#----------------------------------------------------------------------------------------------
# built-in imports
import math
# 3rd-party imports
from scipy.interpolate import interp1d
from scipy import signal
# import numba
import numpy as np
def dio(x, fs, f0_floor=71, f0_ceil=800, channels_in_octave=2, target_fs=4000, frame_period=5, allowed_range=0.1):
'''
    F0 estimation by DIO, consisting of 3 steps:
    + Low-pass filtering
    + Calculate F0 candidates and their reliabilities from the filtered signal
    + Select the most reliable candidates
    f0_parameter = Dio(x, fs, f0_ceil, channels_in_octave, target_fs, frame_period, allowed_range);
    Inputs
    x  : input signal
    fs : sampling frequency
    other settings : f0_floor (Hz), f0_ceil (Hz), target_fs (Hz)
        channels_in_octave (ch), allowed_range, and frame_period (ms)
    Output
    f0 information
    Caution: minimum frame_period is 1.
'''
num_samples = int(1000 * len(x) / fs / frame_period + 1)
temporal_positions = np.arange(0, num_samples) * frame_period / 1000
#temporal_positions = np.arange(0, np.size(x) / fs, frame_period / 1000) #careful!! check later
# log2(f0_ceil / f0_floor) = number of octaves
boundary_f0_list = np.arange(math.ceil(np.log2(f0_ceil / f0_floor) * channels_in_octave)) + 1
boundary_f0_list = boundary_f0_list / channels_in_octave
boundary_f0_list = f0_floor * (2.0 ** boundary_f0_list)
#down sample to target Hz
#y, actual_fs = get_downsampled_signal(x, fs, target_fs)
y = decimate(x, int(fs / target_fs))
actual_fs = target_fs
y_spectrum = get_spectrum(y, actual_fs, f0_floor)
raw_f0_candidate, raw_stability = get_candidate_and_stability(np.size(temporal_positions),
boundary_f0_list, np.size(y),
temporal_positions, actual_fs,
y_spectrum, f0_floor, f0_ceil)
f0_candidates, _ = sort_candidates(raw_f0_candidate, raw_stability)
f0_candidates_tmp = np.copy(f0_candidates)#just want to keep original values of f0_candidates, maybe we don't need this line
f0, vuv = fix_f0_contour(f0_candidates, frame_period, f0_floor, allowed_range)
return {'f0':f0,
'f0_candidates':f0_candidates_tmp,
'raw_f0_candidates':raw_f0_candidate,
'temporal_positions':temporal_positions,
'vuv':vuv
}
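# A minimal usage sketch (hypothetical input file; assumes a mono signal
# scaled to [-1, 1], e.g. loaded with scipy.io.wavfile):
#
#   from scipy.io import wavfile
#   fs, x = wavfile.read('speech.wav')
#   x = x / 32768.0                       # int16 -> float in [-1, 1]
#   f0_info = dio(x, fs, frame_period=5)
#   f0_track = f0_info['f0']              # one F0 value per 5 ms frame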
##########################################################################################################
def get_downsampled_signal(x, fs, target_fs):
decimation_ratio = int(fs / target_fs + 0.5)
if fs < target_fs:
y = np.empty_like(x)
y[:] = x
actual_fs = fs
else:
# decimate can be troublesome
y = decimate_matlab(x, decimation_ratio, n = 3)
actual_fs = fs / decimation_ratio
y -= np.mean(y)
return y, actual_fs
##########################################################################################################
def get_spectrum(x, fs, lowest_f0):
'''
First step: Low-pass filtering with different cut-off frequencies
'''
fft_size = 2 ** math.ceil(math.log(np.size(x) + int(fs / lowest_f0 / 2 + 0.5) * 4,2))
#low-cut filtering
cutoff_in_sample = int(fs / 50 + 0.5)
    low_cut_filter = signal.hanning(2 * cutoff_in_sample + 3)[1:-1]  # drop the zero samples at the start and end
low_cut_filter = -low_cut_filter / np.sum(low_cut_filter)
low_cut_filter[cutoff_in_sample] = low_cut_filter[cutoff_in_sample] + 1
low_cut_filter = np.r_[low_cut_filter, np.zeros(fft_size - len(low_cut_filter))]
low_cut_filter = np.r_[low_cut_filter[cutoff_in_sample:], low_cut_filter[:cutoff_in_sample]]
x_spectrum = np.fft.fft(x, fft_size) * np.fft.fft(low_cut_filter, fft_size)
return x_spectrum
##########################################################################################################
def get_candidate_and_stability(number_of_frames, boundary_f0_list, y_length, temporal_positions,
actual_fs, y_spectrum, f0_floor, f0_ceil):
'''
    Second step: Calculate F0 candidates and their stability
'''
raw_f0_candidate = np.zeros((np.size(boundary_f0_list), number_of_frames))
raw_f0_stability = np.zeros((np.size(boundary_f0_list), number_of_frames))
for i in range(np.size(boundary_f0_list)):
interpolated_f0, f0_deviations = get_raw_event(boundary_f0_list[i],
actual_fs, y_spectrum,
y_length, temporal_positions,
f0_floor, f0_ceil)
raw_f0_stability[i, :] = np.exp(-(f0_deviations / np.maximum(interpolated_f0, 0.0000001)))
raw_f0_candidate[i, :] = interpolated_f0
return raw_f0_candidate, raw_f0_stability
##########################################################################################################
def sort_candidates(f0_candidate_map, stability_map):
'''
Third step: Select the most reliable F0 candidates
'''
number_of_candidates, number_of_frames = f0_candidate_map.shape
sorted_index = np.argsort(-stability_map, axis=0, kind='quicksort')
f0_candidates = np.zeros((number_of_candidates, number_of_frames))
f0_candidates_score = np.zeros((number_of_candidates, number_of_frames))
for i in range(number_of_frames):
f0_candidates[:, i] = f0_candidate_map[sorted_index[:number_of_candidates,i], i]
f0_candidates_score[:,i] = stability_map[sorted_index[:number_of_candidates,i], i]
return f0_candidates, f0_candidates_score
##########################################################################################################
def get_raw_event(boundary_f0, fs, y_spectrum, y_length, temporal_positions, f0_floor, f0_ceil):
half_filter_length = int(fs / boundary_f0 / 2 + 0.5)
low_pass_filter = nuttall(half_filter_length * 4)
index_bias = low_pass_filter.argmax()
spectrum_low_pass_filter = np.fft.fft(low_pass_filter, len(y_spectrum))
filtered_signal = np.real(np.fft.ifft(spectrum_low_pass_filter * y_spectrum))
filtered_signal = filtered_signal[index_bias + np.arange(1, y_length + 1)]
# calculate 4 kinds of event
neg_loc, neg_f0 = ZeroCrossingEngine(filtered_signal, fs)
pos_loc, pos_f0 = ZeroCrossingEngine(-filtered_signal, fs)
peak_loc, peak_f0 = ZeroCrossingEngine(np.diff(filtered_signal), fs)
dip_loc, dip_f0 = ZeroCrossingEngine(-np.diff(filtered_signal), fs)
f0_candidate, f0_deviations = get_f0_candidates(neg_loc, neg_f0, pos_loc, pos_f0,
peak_loc, peak_f0, dip_loc, dip_f0, temporal_positions)
    # remove untrustworthy candidates
f0_candidate[f0_candidate > boundary_f0] = 0
f0_candidate[f0_candidate < (boundary_f0 / 2)] = 0
f0_candidate[f0_candidate > f0_ceil] = 0
f0_candidate[f0_candidate < f0_floor] = 0
    f0_deviations[f0_candidate == 0] = 100000  # rough safeguard
return f0_candidate, f0_deviations
##########################################################################################################
def get_f0_candidates(neg_loc, neg_f0, pos_loc, pos_f0,
peak_loc, peak_f0, dip_loc, dip_f0, temporal_positions):
#test this one
usable_channel = max(0, np.size(neg_loc) - 2) * \
max(0, np.size(pos_loc) - 2) * \
max(0, np.size(peak_loc) - 2) * \
max(0, np.size(dip_f0) - 2)
interpolated_f0_list = np.zeros((4, np.size(temporal_positions)))
if usable_channel > 0:
interpolated_f0_list[0, :] = interp1d(neg_loc,
neg_f0,
fill_value='extrapolate')(temporal_positions)
interpolated_f0_list[1, :] = interp1d(pos_loc,
pos_f0,
fill_value='extrapolate')(temporal_positions)
interpolated_f0_list[2, :] = interp1d(peak_loc,
peak_f0,
fill_value='extrapolate')(temporal_positions)
interpolated_f0_list[3, :] = interp1d(dip_loc,
dip_f0,
fill_value='extrapolate')(temporal_positions)
interpolated_f0 = np.mean(interpolated_f0_list, axis=0)
f0_deviations = np.std(interpolated_f0_list, axis=0, ddof=1)
else:
interpolated_f0 = temporal_positions * 0
f0_deviations = temporal_positions * 0 + 1000
return interpolated_f0, f0_deviations
##########################################################################################################
# negative zero crossing: going from positive to negative
# @numba.jit((numba.float64[:], numba.float64), nopython=True, cache=True)
def ZeroCrossingEngine(x, fs):
y = np.empty_like(x)
y[:-1] = x[1:]
y[-1] = x[-1]
negative_going_points = np.arange(1, len(x) + 1) * \
((y * x < 0) * (y < x))
edge_list = negative_going_points[negative_going_points > 0]
fine_edge_list = (edge_list) - x[edge_list - 1] / (x[edge_list] - x[edge_list - 1])
interval_locations = (fine_edge_list[:len(fine_edge_list) - 1] + fine_edge_list[1:]) / 2 / fs
interval_based_f0 = fs / np.diff(fine_edge_list)
return interval_locations, interval_based_f0
##########################################################################################################
def nuttall(N):
t = np.asmatrix(np.arange(N) * 2 * math.pi / (N-1))
coefs = np.array([0.355768, -0.487396, 0.144232, -0.012604])
window = coefs @ np.cos(np.matrix([0,1,2,3]).T @ t)
return np.squeeze(np.asarray(window))
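# For reference, the matrix product above evaluates the standard 4-term
# Nuttall window:
#   w(n) = 0.355768 - 0.487396*cos(2*pi*n/(N-1))
#          + 0.144232*cos(4*pi*n/(N-1)) - 0.012604*cos(6*pi*n/(N-1))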
##########################################################################################################
def fix_f0_contour(f0_candidates, frame_period, f0_floor, allowed_range):
    # If abs((f0(n) - f0(n+1)) / f0(n)) exceeds allowed_range,
    # f0(n) is considered unreliable.
    # F0 must be continuous for at least voice_range_minimum samples.
    voice_range_minimum = int(1 / (frame_period / 1000) / f0_floor + 0.5) * 2 + 1
f0_step1 = fix_step1(f0_candidates, voice_range_minimum, allowed_range)
f0_step2 = fix_step2(f0_step1, voice_range_minimum)
section_list = count_voiced_sections(f0_step2)
f0_step3 = fix_step3(f0_step2, f0_candidates, section_list, allowed_range)
f0_step4 = fix_step4(f0_step3, f0_candidates, section_list, allowed_range)
f0 = np.copy(f0_step4)
vuv = np.copy(f0)
vuv[vuv != 0] = 1
return f0, vuv
##########################################################################################################
#Step 1: Rapid change of F0 is replaced by zeros
def fix_step1(f0_candidates, voice_range_minimum, allowed_range):
f0_base = f0_candidates[0]
f0_base[ : voice_range_minimum] = 0
f0_base[-voice_range_minimum : ] = 0
f0_step1 = np.copy(f0_base)
rounding_f0_base = np.array([float("{0:.6f}".format(elm)) for elm in f0_base])
for i in np.arange(voice_range_minimum - 1, len(f0_base)):
if abs((rounding_f0_base[i] - rounding_f0_base[i-1]) / (0.000001 + rounding_f0_base[i])) > allowed_range:
f0_step1[i] = 0
return f0_step1
##########################################################################################################
#Step2: short-time voiced period (under voice_range_minimum) is replaced by 0
def fix_step2(f0_step1, voice_range_minimum):
f0_step2 = np.copy(f0_step1)
for i in np.arange((voice_range_minimum - 1) / 2 , len(f0_step1) - (voice_range_minimum - 1) / 2).astype(int):
for j in np.arange( -(voice_range_minimum - 1) / 2 , (voice_range_minimum - 1) / 2 + 1).astype(int):
if f0_step1[i + j] == 0:
f0_step2[i] = 0
break
return f0_step2
##########################################################################################################
# Step 3: voiced sections are extended forward by selecting the best F0 candidate at each frame
def fix_step3(f0_step2, f0_candidates, section_list, allowed_range):
f0_step3 = np.empty_like(f0_step2)
f0_step3[:] = f0_step2
for i in np.arange(section_list.shape[0]):
if i == section_list.shape[0] - 1:
limit = len(f0_step3) - 1
else:
limit = section_list[i + 1, 0] + 1
for j in np.arange(section_list[i, 1], limit).astype(int):
f0_step3[j + 1] = select_best_f0(f0_step3[j], f0_step3[j - 1],
f0_candidates[:, j + 1], allowed_range)
if f0_step3[j + 1] == 0:
break
return f0_step3
##########################################################################################################
# Step 4: voiced sections are extended backward by selecting the best F0 candidate at each frame
def fix_step4(f0_step3, f0_candidates, section_list, allowed_range):
f0_step4 = np.copy(f0_step3)
for i in range(section_list.shape[0] - 1, -1 , -1):
if i == 0:
limit = 1
else:
limit = section_list[i - 1, 1]
for j in np.arange(section_list[i, 0], limit - 1, -1).astype(int):
f0_step4[j - 1] = select_best_f0(f0_step4[j], f0_step4[j + 1], f0_candidates[:, j - 1], allowed_range)
if f0_step4[j - 1] == 0:
break
return f0_step4
##########################################################################################################
def select_best_f0(current_f0, past_f0, candidates, allowed_range):
from sys import float_info
reference_f0 = (current_f0 * 3 - past_f0) / 2
minimum_error = abs(reference_f0 - candidates[0])
best_f0 = candidates[0]
for i in range(1, len(candidates)):
current_error = abs(reference_f0 - candidates[i])
if current_error < minimum_error:
minimum_error = current_error
best_f0 = candidates[i]
if abs(1 - best_f0 / (reference_f0 + float_info.epsilon)) > allowed_range:
best_f0 = 0
return best_f0
##########################################################################################################
def count_voiced_sections(f0):
vuv = np.copy(f0)
vuv[vuv != 0] = 1
diff_vuv = np.diff(vuv)
boundary_list = np.append(np.append([0], np.where(diff_vuv != 0)[0]), [len(vuv) - 2])# take care of len(vuv) - 2 or len(vuv) - 1
first_section = np.ceil(-0.5 * diff_vuv[boundary_list[1]])
number_of_voiced_sections = np.floor((len(boundary_list) - (1 - first_section)) / 2).astype(int)
voiced_section_list = np.zeros((number_of_voiced_sections, 2))
for i in range(number_of_voiced_sections):
voiced_section_list[i, :] = np.array([1 + boundary_list[int((i - 1) * 2 + 1 + (1 - first_section)) + 1],
boundary_list[int((i * 2) + (1 - first_section)) + 1]])
return voiced_section_list
#############################################################################################################
def decimate_matlab(x, q, n=None, axis=-1):
"""
:param x: signal
    :param q: decimation ratio
:param n: order of filter
:param axis:
:return: resampled signal
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is not None and not isinstance(n, int):
raise TypeError("n must be an integer")
system = signal.dlti(*signal.cheby1(n, 0.05, 0.8 / q))
zero_phase = True
y = signal.filtfilt(system.num, system.den, x, axis=axis, padlen=3 * (max(len(system.den), len(system.num)) - 1))
# make it the same as matlab
nd = len(y)
n_out = np.ceil(nd / q)
n_beg = int(q - (q * n_out - nd))
return y[n_beg - 1::q]
########################################
def FilterForDecimate(x,r):
'''
Lowpass filter coefficients
'''
a = np.zeros(3)
b = np.zeros(2)
if r==11:
a[0] = 2.450743295230728
a[1] = -2.06794904601978
a[2] = 0.59574774438332101
b[0] = 0.0026822508007163792
b[1] = 0.0080467524021491377
elif r==12:
a[0] = 2.4981398605924205
a[1] = -2.1368928194784025
a[2] = 0.62187513816221485
b[0] = 0.0021097275904709001
b[1] = 0.0063291827714127002
elif r==10:
a[0] = 2.3936475118069387
a[1] = -1.9873904075111861
a[2] = 0.5658879979027055
b[0] = 0.0034818622251927556
b[1] = 0.010445586675578267
elif r==9:
a[0] = 2.3236003491759578
a[1] = -1.8921545617463598
a[2] = 0.53148928133729068
b[0] = 0.0046331164041389372
b[1] = 0.013899349212416812
elif r==8:
a[0] = 2.2357462340187593
a[1] = -1.7780899984041358
a[2] = 0.49152555365968692
b[0] = 0.0063522763407111993
b[1] = 0.019056829022133598
elif r==7:
a[0] = 2.1225239019534703
a[1] = -1.6395144861046302
a[2] = 0.44469707800587366
b[0] = 0.0090366882681608418
b[1] = 0.027110064804482525
elif r==6:
a[0] = 1.9715352749512141
a[1] = -1.4686795689225347
a[2] = 0.3893908434965701
b[0] = 0.013469181309343825
b[1] = 0.040407543928031475
elif r==5:
a[0] = 1.7610939654280557
a[1] = -1.2554914843859768
a[2] = 0.3237186507788215
b[0] = 0.021334858522387423
b[1] = 0.06400457556716227
elif r==4:
a[0] = 1.4499664446880227
a[1] = -0.98943497080950582
a[2] = 0.24578252340690215
b[0] = 0.036710750339322612
b[1] = 0.11013225101796784
elif r==3:
a[0] = 0.95039378983237421
a[1] = -0.67429146741526791
a[2] = 0.15412211621346475
b[0] = 0.071221945171178636
b[1] = 0.21366583551353591
elif r==2:
a[0] = 0.041156734567757189
a[1] = -0.42599112459189636
a[2] = 0.041037215479961225
b[0] = 0.16797464681802227
b[1] = 0.50392394045406674
else:
a[0] = 0.0
a[1] = 0.0
a[2] = 0.0
b[0] = 0.0
b[1] = 0.0
# Filtering on time domain
w = np.zeros(3)
y_prime = np.zeros_like(x)
for i in range(len(x)):
wt = x[i] + a[0] * w[0] + a[1] * w[1] + a[2] * w[2]
y_prime[i] = b[0] * wt + b[1] * w[0] + b[1] * w[1] + b[0] * w[2]
w[2] = w[1]
w[1] = w[0]
w[0] = wt
return y_prime
###################
def decimate(x,r):
kNFact = 9
x_length = len(x)
y = []
tmp1 = np.zeros(x_length + kNFact * 2)
tmp2 = np.zeros(x_length + kNFact * 2)
for i in range(kNFact):
tmp1[i] = 2 * x[0] - x[kNFact - i]
for i in range(kNFact, kNFact + x_length):
tmp1[i] = x[i - kNFact]
for i in range(kNFact + x_length, 2 * kNFact + x_length):
tmp1[i] = 2 * x[-1] - x[x_length - 2 - (i - (kNFact + x_length))]
tmp2 = FilterForDecimate(tmp1, r)
for i in range(2 * kNFact + x_length):
tmp1[i] = tmp2[2 * kNFact + x_length - i - 1]
tmp2 = FilterForDecimate(tmp1, r)
for i in range(2 * kNFact + x_length):
tmp1[i] = tmp2[2 * kNFact + x_length - i - 1]
nout = np.ceil(x_length / r + 1)
nbeg = int(r - r * nout + x_length)
count = 0
for i in range(nbeg, x_length + kNFact, r):
y.append(tmp1[i + kNFact - 1])
count += 1
return np.array(y)
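# Note on decimate(): it extends the signal at both ends by point reflection
# of kNFact samples, runs the 3rd-order IIR lowpass in FilterForDecimate
# twice (forward, then on the time-reversed signal) to obtain zero-phase
# filtering, and finally keeps every r-th sample -- a hand-rolled
# equivalent of filtfilt followed by downsampling.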
|
[
"shun@example.com"
] |
shun@example.com
|
2e57111a6646a9d8c29ab97a7c98307d19b2d161
|
b5b7f63d33aa445516d77302857550cf8d335427
|
/obstacle.py
|
92ece1d802adf909030c46240bade5acc3c13f0b
|
[] |
no_license
|
swastik24/JetpackJoyride
|
92a714002bc7c4b1d5dcc87c0a840ba4be4e185b
|
c49085c64fd415582eaec36359c9f26a1da1cd8e
|
refs/heads/master
| 2022-04-12T07:09:26.761701
| 2020-01-21T20:00:37
| 2020-01-21T20:00:37
| 235,408,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,726
|
py
|
from background import *
from character import *
from coin import *
import random
class obstacle:
_shape=["|","-","/"]
_powerup_structure=["2","x"]
def removeobstacle(self):
for i in range(board.y):
for j in range(board.pointertomygame(),board.pointertomygame()+board.size):
                if board.matrix[i][j] in self._shape or board.matrix[i][j] in self._powerup_structure:
board.matrix[i][j]=" "
class firebeam(obstacle):
def __init__(self):
self.__size=20
for i in range(int((board.lengthofmygame()-2*board.size)/self.__size)):
x=board.size+i*self.__size
length=random.randint(5,15)
y=random.randint(board.roofhight+16,board.y-board.groundheight-1)
t=random.randint(0,2)
            if t == 0:
for j in range(length):
board.matrix[y][x]=self._shape[t]
y-=1
            elif t == 1:
for j in range(length):
board.matrix[y][x]=self._shape[t]
x+=1
else:
for j in range(length):
board.matrix[y][x]=self._shape[t]
x+=1
y-=1
def get_shape(self):
return self._shape
def set_shape(self,shape):
self._shape=shape
shape=property(get_shape,set_shape)
def get_powerup_structure(self):
return self._powerup_structure
def set_powerup_structure(self,powerup_structure):
self._powerup_structure=powerup_structure
powerup_structure=property(get_powerup_structure,set_powerup_structure)
def get_size(self):
return self.__size
def set_size(self,size):
self.__size=size
size=property(get_size,set_size)
def beamcollison(self):
if din.shield is False:
din.x+=board.pointertomygame()
positions={board.matrix[din.y][din.x],board.matrix[din.y][din.x+1],board.matrix[din.y-1][din.x],board.matrix[din.y-1][din.x+1],board.matrix[din.y-2][din.x],board.matrix[din.y-2][din.x+1]}
collison=False
for z in positions:
if z in self._shape:
collison=True
break
if collison is True:
self.removeobstacle()
din.HP-=1
din.x-=board.pointertomygame()
else:
din.x+=board.pointertomygame()
positions=[[din.y,din.x],[din.y,din.x+1],[din.y-1,din.x],[din.y-1,din.x+1],[din.y-2,din.x],[din.y-2,din.x+1]]
for i in positions:
if board.matrix[i[0]][i[1]] in self._shape:
if board.matrix[i[0]][i[1]]=="|":
j=i[0]
while True:
if board.matrix[j][i[1]]=="|":
board.matrix[j][i[1]]=" "
j-=1
else:
break
j=i[0]+1
while True:
if board.matrix[j][i[1]]=="|":
board.matrix[j][i[1]]=" "
j+=1
else:
break
elif board.matrix[i[0]][i[1]]=="-":
j=i[1]
while True:
if board.matrix[i[0]][j]=="-":
board.matrix[i[0]][j]=" "
j-=1
else:
break
j=i[1]+1
while True:
if board.matrix[i[0]][j]=="-":
board.matrix[i[0]][j]=" "
j+=1
else:
break
else:
x=i[1]
y=i[0]
while True:
if board.matrix[y][x]=="/":
board.matrix[y][x]=" "
x+=1
y-=1
else:
break
x=i[1]-1
y=i[0]+1
while True:
if board.matrix[y][x]=="/":
board.matrix[y][x]=" "
x-=1
y+=1
else:
break
din.x-=board.pointertomygame()
def bulletcollison(self,bullet):
positions=bullet.bulletarray
for i in positions:
if board.matrix[i[0]][i[1]] in self._shape:
if board.matrix[i[0]][i[1]]=="|":
j=i[0]
while True:
if board.matrix[j][i[1]]=="|":
board.matrix[j][i[1]]=" "
j-=1
else:
break
j=i[0]+1
while True:
if board.matrix[j][i[1]]=="|":
board.matrix[j][i[1]]=" "
j+=1
else:
break
elif board.matrix[i[0]][i[1]]=="-":
j=i[1]
while True:
if board.matrix[i[0]][j]=="-":
board.matrix[i[0]][j]=" "
j-=1
else:
break
j=i[1]+1
while True:
if board.matrix[i[0]][j]=="-":
board.matrix[i[0]][j]=" "
j+=1
else:
break
elif board.matrix[i[0]][i[1]]=="/":
x=i[1]
y=i[0]
while True:
if board.matrix[y][x]=="/":
board.matrix[y][x]=" "
x+=1
y-=1
else:
break
x=i[1]-1
y=i[0]+1
while True:
if board.matrix[y][x]=="/":
board.matrix[y][x]=" "
x-=1
y+=1
else:
break
def removeobstacle(self):
for i in range(board.y):
for j in range(board.pointertomygame(),board.pointertomygame()+board.size):
if board.matrix[i][j] in self._shape:
board.matrix[i][j]=" "
class Magnet:
__shape=[["*","*"],["*","*"]]
def __init__(self):
self.__x=105 #random.randint(board.size,board.lengthofmygame()-board.size-1-len(self.shape[0]))
self.__y=random.randint(board.roofhight,board.y-board.groundheight-1-len(self.__shape))
board.matrix[self.__y:self.__y+len(self.__shape),self.__x:self.__x+len(self.__shape[0])]=self.__shape
def get_shape(self):
return self.__shape
def set_shape(self,shape):
self.__shape=shape
shape=property(get_shape,set_shape)
def get_x(self):
return self.__x
def set_x(self,x):
self.__x=x
x=property(get_x,set_x)
def get_y(self):
return self.__y
def set_y(self,y):
self.__y=y
y=property(get_y,set_y)
def draw(self):
board.matrix[self.__y:self.__y+len(self.__shape),self.__x:self.__x+len(self.__shape[0])]=self.__shape
def magneteffect(self):
if self.__x>board.pointertomygame() and self.__x<board.pointertomygame()+board.size:
din.reset()
din.x+=board.pointertomygame()
if din.x<self.__x:
din.x-=board.pointertomygame()
for i in range(2):
din.move_right()
coins.checkcollison()
beam.beamcollison()
speedup.powerup_collison()
din.x+=board.pointertomygame()
elif din.x>self.__x:
din.x-=board.pointertomygame()
for i in range(2):
din.move_left()
coins.checkcollison()
beam.beamcollison()
speedup.powerup_collison()
din.x+=board.pointertomygame()
din.x-=board.pointertomygame()
if din.y<self.__y:
din.y+=1
coins.checkcollison()
beam.beamcollison()
speedup.powerup_collison()
if din.y>board.y-board.groundheight-1:
din.y=board.y-board.groundheight-1
elif din.y>self.__y:
din.y-=1
coins.checkcollison()
beam.beamcollison()
speedup.powerup_collison()
if din.y<din.maxheight:
din.y= din.maxheight
din.jumpcount=0
din.reset()
class powerup(obstacle):
def __init__(self):
self.__maxno=3
for i in range(self.__maxno):
y=random.randint(board.roofhight,board.y-board.groundheight-1)
x=random.randint(board.size,board.lengthofmygame()-2-2*board.size)
board.matrix[y][x]=self._powerup_structure[0]
board.matrix[y][x+1]=self._powerup_structure[1]
def get_maxno(self):
return self.__maxno
def set_maxno(self,maxno):
self.__maxno=maxno
maxno=property(get_maxno,set_maxno)
def get_powerup_structure(self):
return self._powerup_structure
def set_powerup_structure(self,powerup_structure):
self._powerup_structure=powerup_structure
powerup_structure=property(get_powerup_structure,set_powerup_structure)
def powerup_collison(self):
din.x+=board.pointertomygame()
positions=[[din.y,din.x],[din.y,din.x+1],[din.y-1,din.x],[din.y-1,din.x+1],[din.y-2,din.x],[din.y-2,din.x+1]]
for i in positions:
if board.matrix[i[0]][i[1]] in self._powerup_structure:
if board.matrix[i[0]][i[1]]=="2":
# board.matrix[i[0]][i[1]]=" "
# board.matrix[i[0]][i[1]+1]=" "
self.removeobstacle(i[0],i[1])
else:
# board.matrix[i[0]][i[1]]=" "
# board.matrix[i[0]][i[1]-1]=" "
self.removeobstacle(i[0],i[1]-1)
board.gamefaster()
din.x-=board.pointertomygame()
def removeobstacle(self,y,x):
board.matrix[y][x]=" "
board.matrix[y][x+1]=" "
magnet=Magnet()
beam=firebeam()
speedup=powerup()
|
[
"mswastik764@gmail.com"
] |
mswastik764@gmail.com
|
8c2ed71f3f6628ddcb19c8e8a467e02d3839d336
|
34e64e8fc12f0768fe3b48677e6941c2577aec0d
|
/index.py
|
f703098fdd0195af0267e64c455c3a442a992cd8
|
[] |
no_license
|
spettinichi/openlayers-test
|
2c0090b36319e918e1ce5cd7408da88d45c5a951
|
24cef36c1b1d6610ec7d3e17ebfd99341a18af82
|
refs/heads/master
| 2021-06-13T05:44:17.166748
| 2017-03-07T23:30:41
| 2017-03-07T23:30:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
import random
import json
from sys import stdout
from time import sleep
coords = {
'latitude' : 0,
'longitude' : 0
}
while (1):
coords['latitude'] = random.randint(-90,90)
coords['longitude'] = random.randint(-180,180)
json_string = json.dumps(coords)
print(json_string)
stdout.flush()
sleep(1)
|
[
"root@vps80361.vps.ovh.ca"
] |
root@vps80361.vps.ovh.ca
|
be4635db09dd2d056c1526e212b767694daff46f
|
f7fc260a5c894d80b4c3d4e22d1ffe5f504ed480
|
/groups/migrations/0002_initial.py
|
f22f02b73b74e834b240c16d47bf6fcbaedc2760
|
[] |
no_license
|
CodifyLaboratory/bishmeet_backend
|
dc689daec21a98de8239337895cecbd437660650
|
c3033b02b7f1d128e1b822986f2bfa594bf0836a
|
refs/heads/main
| 2023-07-08T19:07:36.538465
| 2021-08-21T07:39:14
| 2021-08-21T07:39:14
| 398,242,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
# Generated by Django 3.2.6 on 2021-08-20 10:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('groups', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='rating',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_ratings', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='group',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='group', to='groups.category'),
),
]
|
[
"desm606@gmail.com"
] |
desm606@gmail.com
|
6f181ca8fbe6e15c253a3aa487f9fe774d3f0a0a
|
49c2daf810d71e05ed5433f9e1171e1cc1f9d52e
|
/dowser/__init__.py
|
4b935f0658446276f40d0dc2369c48c47e1dd0ab
|
[
"MIT"
] |
permissive
|
appknox/dowser-py3
|
4cb6bec94cc23e34f037b3006cd52f286f5d3c18
|
d570fcac5c228d629ace7affdd14401e0d657769
|
refs/heads/develop
| 2021-07-21T14:13:48.182610
| 2015-11-24T09:48:25
| 2015-11-24T09:48:25
| 46,715,372
| 6
| 8
|
NOASSERTION
| 2021-01-28T12:59:50
| 2015-11-23T11:16:36
|
Python
|
UTF-8
|
Python
| false
| false
| 53
|
py
|
from dowser.dowser import Root, ReferrerTree # NOQA
|
[
"dhilipsiva@gmail.com"
] |
dhilipsiva@gmail.com
|
e2a19d39fceb295a71113b961358b852d6fb51ec
|
b03f6ffa2077780bdf5b7a0090d0a57d443c02ba
|
/웹스크레이핑_실습.py
|
12360b857a4860ca9bc24df0fedb2b861482ad05
|
[] |
no_license
|
SUDON-NOH/Python_data
|
69f02df13ee6f7521405c203f2f5561aadaa1755
|
6e2bfb10ce0199fe17d6fa6b24068479441e91cc
|
refs/heads/master
| 2021-07-16T21:27:57.521899
| 2020-08-06T07:17:19
| 2020-08-06T07:17:19
| 198,587,775
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,728
|
py
|
# Web scraping practice exercises (웹스크레이핑실습문제.py)
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import urllib.request as REQ
import urllib
# import json
# import folium
# 1.1
print('1.1', '-' * 50)
baseurl = 'http://www.menupan.com'
murl = '/restaurant/bestrest/bestrest.asp?pt=rt&areacode=dj201'
response = REQ.urlopen(baseurl + murl)
soup = BeautifulSoup(response, 'html.parser')
listData = []
href = []
ulList = soup.select_one('.rankingList .list')
for li in ulList.select('li'):
rank = li.select_one('.numTop,.rankNum').string
href.append(li.select_one('.listName a')['href'])
listName = li.select_one('.listName').string
listType = li.select_one('.listType').string
listArea = li.select_one('.listArea').string
listData.append({'랭킹': rank,
"상호명": listName,
"업종": listType,
"지역": listArea})
df = pd.DataFrame(listData)
print(df)
# 1.2
print('1.2', '-' * 50)
tel = []
addr = []
for h in href:
response = REQ.urlopen(baseurl + h)
sMenu = BeautifulSoup(response, 'html.parser')
stel = sMenu.select_one('.tel1').string
saddr = sMenu.select_one('.add1').string
tel.append(stel)
addr.append(saddr)
# print(stel, saddr)
df['전화번호'] = tel
df['주소'] = addr
print(df)
# 1.3
print('1.3', '-' * 50)
df.set_index('랭킹', inplace=True)
# inplace : bool, default False
# Modify the DataFrame in place (do not create a new object).
print(df)
df.to_excel("menu.xlsx")
# 1.4
print('1.4', '-' * 50)
m = df[df['업종'].str.contains('한식')]
print(m)
# 1.5
print('1.5', '-' * 50)
area = df[df['지역'].str.contains('대흥')]
print(area)
# 1.6
print('1.6', '-' * 50)
import googlemaps
gmaps_key = "AIzaSyCcw89SLkOaJ5r8xoL3aFkUx0kpu7baLRg"  # use your own key here.
gmaps = googlemaps.Client(key=gmaps_key)
def get_lat_lng(addr):
area = gmaps.geocode(addr, language='ko')
latlng = area[0].get("geometry")
lat = latlng['location']['lat']
lng = latlng['location']['lng']
# print(lat,lng)
return lat, lng
import folium
lat, lng = get_lat_lng(df['주소'].values[0])
map = folium.Map(location=[lat, lng], zoom_start=12)
for addr in range(len(df['주소'].values)):
lat, lng = get_lat_lng(df['주소'].values[addr])
# print(lat,lng)
# print(df['주소'].values[addr])
# print(df['상호명'].values[addr])
m = folium.Marker([lat, lng], popup=str(addr) + df['상호명'].values[addr],
icon=folium.Icon(icon='cloud')).add_to(map)
m.add_to(map)
map.save('Daejoen_맛집.html')
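# Packages this script relies on (inferred from the imports above):
# beautifulsoup4, pandas, numpy, googlemaps, folium, and an Excel writer
# engine such as openpyxl for DataFrame.to_excel.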
|
[
"noreply@github.com"
] |
SUDON-NOH.noreply@github.com
|
9711b1491693de81959da0049fe79087eb1ddf0a
|
35645f8111faa8b4d87c1572a125aac36d24499d
|
/linshan_luoty/merge_zip_crime_earnings.py
|
4be0682e47aef94aec66168e553c9cf86e8d88c4
|
[] |
no_license
|
nlouie/course-2016-spr-proj-two
|
af8d268eba417fe15981de0a015be1fe06210b16
|
098d9cfb75bc9d783cf768eb733abbf55901e334
|
refs/heads/master
| 2020-12-26T01:12:06.236993
| 2016-05-16T00:42:52
| 2016-05-16T00:42:52
| 55,449,019
| 0
| 0
| null | 2016-04-04T22:10:08
| 2016-04-04T22:10:07
| null |
UTF-8
|
Python
| false
| false
| 4,411
|
py
|
import json
import datetime
import pymongo
import prov.model
import provenance
import uuid
# Until a library is created, we just use the script directly.
exec(open('../pymongo_dm.py').read())
exec(open('get_repo.py').read())
zip_location_crimes_db = repo[auth['admin']['name']+'.'+'zip_location_crimes']
zip_avg_earnings_db = repo[auth['admin']['name']+'.'+'zip_avg_earnings']
startTime = datetime.datetime.now()
zip_location_crimes = zip_location_crimes_db.find({},{
'_id': False,
'zip': True,
'crimes': True,
'longitude': True,
'latitude': True,
'region': True,
})
zip_location_crimes_earnings = []
for document in zip_location_crimes:
avg_earning = zip_avg_earnings_db.find_one({'zip': document['zip']}, {'_id': False, 'avg_earning': True})
if avg_earning is None:
document['avg_earning'] = 0
else:
document['avg_earning'] = avg_earning['avg_earning']
zip_location_crimes_earnings.append(document)
# export zip_location_crimes_earnings to JSON
f = open('zip_location_crimes_earnings.json','w')
f.write(json.dumps(zip_location_crimes_earnings, indent=4))
f.close()
# save it to a permanent folder
repo.dropPermanent("zip_location_crimes_earnings")
repo.createPermanent("zip_location_crimes_earnings")
repo[auth['admin']['name']+'.'+'zip_location_crimes_earnings'].insert_many(zip_location_crimes_earnings)
zip_location_crimes_earnings_sorted = repo[auth['admin']['name']+'.'+'zip_location_crimes_earnings'].find({},{
'_id': False,
'zip': True,
'crimes': True,
'longitude': True,
'latitude': True,
'region': True,
'avg_earning': True,
}).sort([('avg_earning', pymongo.ASCENDING)])
f = open('zip_location_crimes_earnings_sorted.json','w')
f.write(json.dumps(list(zip_location_crimes_earnings_sorted), indent=4))
f.close()
endTime = datetime.datetime.now()
startTime = None
endTime = None
# Create the provenance document describing everything happening
# in this script. Each run of the script will generate a new
# document describing that invocation event. This information
# can then be used on subsequent runs to determine dependencies
# and "replay" everything. The old documents will also act as a
# log.
doc = provenance.init()
doc.add_namespace('alg', 'https://data-mechanics.s3.amazonaws.com/linshan_luoty/algorithm/') # The scripts in <folder>/<filename> format.
doc.add_namespace('dat', 'https://data-mechanics.s3.amazonaws.com/linshan_luoty/data/') # The data sets in <user>/<collection> format.
doc.add_namespace('ont', 'https://data-mechanics.s3.amazonaws.com/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.
doc.add_namespace('log', 'https://data-mechanics.s3.amazonaws.com/log#') # The event log.
doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')
this_script = doc.agent('alg:merge_zip_crime_earnings', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})
zip_location_crimes = doc.entity('dat:zip_location_crimes', {prov.model.PROV_LABEL:'Zip Location Crimes', prov.model.PROV_TYPE:'ont:DataSet'})
zip_avg_earning = doc.entity('dat:zip_avg_earnings', {prov.model.PROV_LABEL:'Zips Average Earnings', prov.model.PROV_TYPE:'ont:DataSet'})
merge_zip_crime_earnings = doc.activity('log:a'+str(uuid.uuid4()), startTime, endTime, {prov.model.PROV_LABEL: "Merge zips, crimes, locations, and earnings."})
doc.wasAssociatedWith(merge_zip_crime_earnings, this_script)
doc.usage(merge_zip_crime_earnings, zip_location_crimes, startTime, None,
{prov.model.PROV_TYPE:'ont:Computation'
}
)
doc.usage(merge_zip_crime_earnings, zip_avg_earning, startTime, None,
{prov.model.PROV_TYPE:'ont:Computation'
}
)
zip_location_crimes_earnings = doc.entity('dat:zip_location_crimes_earnings', {prov.model.PROV_LABEL:'Zips with Crime and Earnings', prov.model.PROV_TYPE:'ont:DataSet'})
doc.wasAttributedTo(zip_location_crimes_earnings, this_script)
doc.wasGeneratedBy(zip_location_crimes_earnings, merge_zip_crime_earnings, endTime)
doc.wasDerivedFrom(zip_location_crimes_earnings, zip_location_crimes, merge_zip_crime_earnings, merge_zip_crime_earnings, merge_zip_crime_earnings)
doc.wasDerivedFrom(zip_location_crimes_earnings, zip_avg_earning, merge_zip_crime_earnings, merge_zip_crime_earnings, merge_zip_crime_earnings)
repo.record(doc.serialize()) # Record the provenance document.
provenance.update(doc)
print(doc.get_provn())
repo.logout()
|
[
"programmer.lty@gmail.com"
] |
programmer.lty@gmail.com
|
fd237d95c58094e30fc70370ad5e53d529408be5
|
57fc5d54f5df359c7a53020fb903f36479d3a322
|
/controllers/.history/supervisor/supervisor_20201127165503.py
|
870391013e28d07767d47598a75b41d93fd5df4f
|
[] |
no_license
|
shenwuyue-xie/webots_testrobots
|
929369b127258d85e66c5275c9366ce1a0eb17c7
|
56e476356f3cf666edad6449e2da874bb4fb4da3
|
refs/heads/master
| 2023-02-02T11:17:36.017289
| 2020-12-20T08:22:59
| 2020-12-20T08:22:59
| 323,032,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,392
|
py
|
import math
import numpy as np
from numpy import random
from numpy.core.fromnumeric import size
from numpy.lib.function_base import meshgrid
import utilities as utils
from deepbots.supervisor.controllers.supervisor_emitter_receiver import \
SupervisorCSV
# # from deepbots.supervisor.wrappers.tensorboard_wrapper import TensorboardLogger
from tensorboardX import SummaryWriter
from models.networks import TD3
from controller import Keyboard
import os
Max_robotnum = 6
OBSERVATION_SPACE = (Max_robotnum-1) * 4 + 7 + 9 * Max_robotnum
ACTION_SPACE = Max_robotnum * 2 + 3
MAX_DSNUM = (Max_robotnum-1) * 4 + 7
DIST_SENSORS_MM = {'min': 0, 'max': 1000}
XPOSITION = {'min':-2, 'max':2}
YPOSITION = {'min':-1.5 , 'max':1.5}
ZPOSITION = {'min': -1, 'max' : 8}
MAX_DISTANCE = {'min':0, 'max':10}
MAX_ANGLE = {'min':-math.pi, 'max':math.pi}
# import ptvsd
# print("waiting for debugger attach")
# ptvsd.enable_attach(address=("127.0.0.1",7788))
# ptvsd.wait_for_attach()
class TaskDecisionSupervisor(SupervisorCSV):
def __init__(self,robot,observation_space,log_dir,v_action,v_observation,v_reward,windows=[10,100,200]):
super(TaskDecisionSupervisor,self).__init__()
self.timestep = int(self.supervisor.getBasicTimeStep())
self.keyboard = Keyboard()
self.keyboard.enable(self.timestep)
self.emitter = self.supervisor.getEmitter('emitter')
self.receiver = self.supervisor.getReceiver('receiver')
self.robot_list = robot
self.robot_handles = []
self.observation = [0 for i in range(observation_space)]
self.findThreshold = 0.2
self.steps = 0
self.steps_threshold = 6000
self.endbattery = [50000 for i in range(Max_robotnum)]
self.final_distance = [50 for i in range(Max_robotnum)]
self.final_target = self.supervisor.getFromDef('final_target')
self.should_done = False
self.startbattery = 50000
self.setuprobots()
self.step_cntr = 0
self.step_global = 0
self.step_reset = 0
self.score = 0
self.score_history = []
self.v_action = v_action
self.v_observation = v_observation
self.v_reward = v_reward
self.windows = windows
self.file_writer = SummaryWriter(log_dir, flush_secs=30)
def setuprobots(self):
for defname in self.robot_list:
self.robot_handles.append(self.supervisor.getFromDef(defname))
def handle_receiver(self):
message = []
for i in range(self.robot_num):
if self.receiver.getQueueLength() > 0:
string_message = self.receiver.getData().decode("utf-8")
string_message = string_message.split(",")
for ms in string_message:
message.append(ms)
self.receiver.nextPacket()
return message
def get_observations(self):
self.ds_values = []
self.final_distance = [50 for i in range(Max_robotnum)]
self.message = [1000 for i in range(MAX_DSNUM)]
self.angles = []
observation = []
message = self.handle_receiver()
self.angles = [0 for i in range(Max_robotnum)]
if len(message) != 0:
for i in range(len(message)):
self.message[i] = float(message[i])
self.ds_values.append(float(message[i]))
for j in range(MAX_DSNUM):
observation.append(utils.normalize_to_range(float(self.message[j]),DIST_SENSORS_MM['min'],DIST_SENSORS_MM['max'], 0, 1))
for k in range(0,self.robot_num):
robot_position = []
robot_position = self.robot_handles[k].getPosition()
robot_rotation = []
robot_rotation = self.robot_handles[k].getOrientation()
observation.append(utils.normalize_to_range(float(robot_position[0]),XPOSITION['min'],XPOSITION['max'],0,1))
observation.append(utils.normalize_to_range(float(robot_position[1]),YPOSITION['min'],YPOSITION['max'],0,1))
observation.append(utils.normalize_to_range(float(robot_position[2]),ZPOSITION['min'],ZPOSITION['max'],0,1))
observation.append(utils.normalize_to_range(float(robot_rotation[0]),-1,1,0,1))
observation.append(utils.normalize_to_range(float(robot_rotation[1]),-1,1,0,1))
observation.append(utils.normalize_to_range(float(robot_rotation[2]),-1,1,0,1))
observation.append(utils.normalize_to_range(float(robot_rotation[3]),-math.pi,math.pi,0,1))
self.final_distance[k] = utils.get_distance_from_target(self.robot_handles[k],self.final_target)
observation.append(utils.normalize_to_range(float(self.final_distance[k]),MAX_DISTANCE['min'],MAX_DISTANCE['max'],0,1))
self.angles[k] = utils.get_angle_from_target(self.robot_handles[k],self.final_target)
observation.append(utils.normalize_to_range(float(self.angles[k]),MAX_ANGLE['min'],MAX_ANGLE['max'],0,1))
for m in range(self.robot_num,Max_robotnum):
for n in range(9):
observation.append(0.5)
else :
observation = [0 for i in range(OBSERVATION_SPACE)]
self.observation = observation
return self.observation
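    # `utils.normalize_to_range` comes from the local utilities module and is
    # not shown here; a sketch of its presumed behavior (plain linear
    # rescaling from [min_val, max_val] to [new_min, new_max]):
    #
    #   def normalize_to_range(value, min_val, max_val, new_min, new_max):
    #       return ((value - min_val) / (max_val - min_val)
    #               * (new_max - new_min) + new_min)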
# robot_children = self.robot_handles[k].getField('children')
# frontjoint_node = robot_children.getMFNode(3)
# frontjoint = frontjoint_node.getField('jointParameters')
# frontjoint = frontjoint.getSFNode()
# para = frontjoint.getField('position')
# front_hingeposition = para.getSFFloat()
# observation.append(utils.normalize_to_range(float(front_hingeposition),-math.pi/2,math.pi/2,0,1))
# front_ep = frontjoint_node.getField('endPoint')
# front_ep = front_ep.getSFNode()
# frontrotation_field = front_ep.getField('rotation')
# front_rotation = frontrotation_field.getSFRotation()
# for f in range(3):
# observation.append(utils.normalize_to_range(float(front_rotation[f]),-1,1,0,1))
# observation.append(utils.normalize_to_range(float(front_rotation[3]),-math.pi/2,math.pi/2,0,1))
# robot_children = self.robot_handles[k].getField('children')
# rearjoint_node = robot_children.getMFNode(4)
# rearjoint = rearjoint_node.getField('jointParameters')
# rearjoint = rearjoint.getSFNode()
# para = rearjoint.getField('position')
# rear_hingeposition = para.getSFFloat()
# observation.append(utils.normalize_to_range(float(rear_hingeposition),-math.pi/2,math.pi/2,0,1))
# rear_ep = rearjoint_node.getField('endPoint')
# rear_ep = rear_ep.getSFNode()
# rearrotation_field = rear_ep.getField('rotation')
# rear_rotation = rearrotation_field.getSFRotation()
# for r in range(3):
# observation.append(utils.normalize_to_range(float(rear_rotation[r]),-1,1,0,1))
# observation.append(utils.normalize_to_range(float(rear_rotation[3]),-math.pi/2,math.pi/2,0,1))
# final_position = []
# final_position = self.final_target.getPosition()
# observation.append(utils.normalize_to_range(float(final_position[0]),XPOSITION['min'],XPOSITION['max'],0,1))
# observation.append(utils.normalize_to_range(float(final_position[1]),YPOSITION['min'],YPOSITION['max'],0,1))
# observation.append(utils.normalize_to_range(float(final_position[2]),ZPOSITION['min'],ZPOSITION['max'],0,1))
# final_distance = []
# for d in range(self.robot_num):
# final_distance.append(utils.get_distance_from_target(self.robot_handles[d],self.final_target))
# self.final_distance[d] = final_distance[d]
def get_default_observation(self):
self.observation = [0 for i in range(OBSERVATION_SPACE)]
return self.observation
def empty_queue(self):
self.observation = [0 for i in range(OBSERVATION_SPACE)]
# self.shockcount = 0
self.overrangecount = 0
# self.flagadd = False
# self.flagreduce = False
self.dscount = 0
while self.supervisor.step(self.timestep) != -1:
if self.receiver.getQueueLength() > 0:
self.receiver.nextPacket()
else:
break
def get_reward(self,action):
if (self.observation == [0 for i in range(OBSERVATION_SPACE)] or len(self.observation) == 0 ) :
return 0
reward = 0
translations = []
for i in range(len(self.robot_handles)):
translation = self.robot_handles[i].getField('translation').getSFVec3f()
translations.append(translation)
if self.steps >= self.steps_threshold:
return -20
if np.min(self.ds_values) <= 50:
reward = reward -2
self.dscount = self.dscount + 1
if self.dscount > 60:
reward = reward -20
self.should_done = True
if self.dscount > 30:
reward = reward - 5
if np.min(self.ds_values) <= 150:
reward = reward -1
for j in range(len(self.robot_handles)):
if translations[j][2] <= ZPOSITION['min'] or translations[j][2] >= ZPOSITION['max']:
reward = reward - 2
self.overrangecount = self.overrangecount + 1
            if translations[j][0] <= XPOSITION['min'] or translations[j][0] >= XPOSITION['max']:
reward = reward - 2
self.overrangecount = self.overrangecount + 1
if self.overrangecount >40:
reward = reward -20
self.should_done = True
if min(self.final_distance) < self.findThreshold:
reward = reward + 100
for m in range(Max_robotnum):
consumption = self.startbattery - self.endbattery[m]
reward = reward - float(consumption/self.startbattery) * 6
return reward
else :
reward = reward - float(min(self.final_distance))
return reward
# """惩罚不停+-+-的行为 """
# if action[-1] > 0.9 :
# if self.flagreduce == True:
# self.shockcount = self.shockcount + 1
# self.flagadd = True
# self.flagreduce = False
# if action[-1] < 0.1:
# if self.flagadd == True:
# self.shockcount = self.shockcount + 1
# self.flagadd = False
# self.flagreduce =True
# if action[-1] >=0.1 and action[-1] <=0.9:
# self.shockcount = self.shockcount - 1
# self.flagadd = False
# self.flagreduce = False
# if self.shockcount >= 8:
# reward = reward - 4
# if self.shockcount >= 12:
# reward = reward - 8
# self.should_done = True
# """如果ban的动作值有十个值出现在动作区域,不稳定给负的reward,训练到100代左右时,模块几乎不再动自己的前后motor"""
# count = 0
# for k in range(12,24):
# action[k] = utils.normalize_to_range(float(action[k]),-0.2,1.2,0,1)
# if action[k] > 0.95 or action[k] < 0.05:
# count = count + 1
# if count > 9 :
# reward = reward - 2
"""something worse need to be modified"""
"""加机器人时还需要考虑rearmotor的位置,测试后发现是hingejoint的jointParameters域的position参数,需要找到这个参数"""
"""可以只改变相对应的hingejoint参数使两者结合,也可以改变模块位置和角度,但是改变模块位置和角度比较复杂"""
# position = abs(get...)
# 改变hingejoint,只需要改变front hingejoint的position参数
# 改变模块位置和角度
# deltax和deltaz可以根据position来计算,主要是rotation要更改,绕x轴旋转(1,0,0,rad)
# 但是之前寻找模块的位置时已经修改过自己的rotation,所以不好更改,并且更改了rotation,translation也要更改,用这套体姿表征体系更改起来特别复杂
# 另外,因为是往后加模块,所以除非尾巴上翘,否则都不能这样加(陷到地底下了)
# 况且,即便尾巴上翘,可以直接加到后ban上,可能也会因为重力原因把整个构型掀翻
# 综上所述,无论是可行性,还是稳定性原因,都建议只修改front_hingejoint的position值
def robot_step(self,action):
# x = np.random.rand()
# e = 0.8 + ep * 0.2/10000
# if x > e :
# action[-1] = np.random.rand()
if action[-1] > 0 and action[-1] <= 0.05 and self.robot_num < Max_robotnum:
last_translation = self.robot_handles[-1].getField('translation').getSFVec3f()
last_angle = self.robot_handles[-1].getField('rotation').getSFRotation()[3]
last_rotation = self.robot_handles[-1].getField('rotation').getSFRotation()
delta_z = 0.23 * math.cos(last_angle)
delta_x = 0.23 * math.sin(last_angle)
new_translation = []
new_translation.append(last_translation[0] - delta_x)
new_translation.append(last_translation[1])
new_translation.append(last_translation[2] - delta_z)
robot_children = self.robot_handles[-1].getField('children')
rearjoint_node = robot_children.getMFNode(4)
joint = rearjoint_node.getField('jointParameters')
joint = joint.getSFNode()
para = joint.getField('position')
hingeposition = para.getSFFloat()
if hingeposition > 0.8 or hingeposition < -0.8:
delta = 0.03 - 0.03 * math.cos(hingeposition)
delta_z = delta * math.cos(last_angle)
delta_x = delta * math.sin(last_angle)
new_translation[0] = new_translation[0] + delta_x
new_translation[2] = new_translation[2] + delta_z
new_rotation = []
for i in range(4):
new_rotation.append(last_rotation[i])
flag_translation = False
flag_rotation = False
flag_battery = False
battery_remain = float(self.endbattery[self.robot_num])
importname = "robot_" + str(self.robot_num) + '.wbo'
new_file =[]
with open(importname,'r') as f:
lines = f.readlines()
for line in lines:
if "translation" in line:
if flag_translation == False:
replace = "translation " + str(new_translation[0]) + " " + str(new_translation[1]) + " " + str(new_translation[2])
line = "\t" + replace +'\n'
flag_translation = True
if "rotation" in line:
if flag_rotation == False:
replace = "rotation " + str() + " " + str(0) + " " + str(0) + " " \
+str(0)
line = "\t" + replace +'\n'
flag_rotation = True
if "50000" in line :
line = "\t" + "\t" + str(battery_remain) + "," + " " + '\n'
new_file.append(line)
rootNode = self.supervisor.getRoot()
childrenField = rootNode.getField('children')
importname = "robot_" + str(self.robot_num) + '.wbo'
childrenField.importMFNode(-1,importname)
defname = 'robot_' + str(self.robot_num)
self.robot_handles.append(self.supervisor.getFromDef(defname))
self.robot_num = self.robot_num + 1
# new_translation_field = self.robot_handles[-1].getField('translation')
# new_translation_field.setSFVec3f(new_translation)
# new_rotation_field = self.robot_handles[-1].getField('rotation')
# new_rotation_field.setSFRotation(new_rotation)
# robot_children = self.robot_handles[-1].getField('children')
# frontjoint_node = robot_children.getMFNode(3)
# joint = frontjoint_node.getField('jointParameters')
# joint = joint.getSFNode()
# para = joint.getField('position')
# para.setSFFloat(-hingeposition)
battery_remain = float(self.endbattery[self.robot_num - 1])
battery_field = self.robot_handles[-1].getField('battery')
battery_field.setMFFloat(0,battery_remain)
battery_field.setMFFloat(1,self.startbattery)
elif action[-1] >= 0.95 and action[-1] < 1 and self.robot_num >1:
battery_field = self.robot_handles[-1].getField('battery')
battery_remain = battery_field.getMFFloat(0)
self.endbattery[self.robot_num - 1] = battery_remain
removerobot = self.robot_handles[-1]
removerobot.remove()
self.robot_num = self.robot_num - 1
del(self.robot_handles[-1])
def step(self,action):
if self.supervisor.step(self.timestep) == -1:
exit()
self.handle_emitter(action)
key = self.keyboard.getKey()
observation = self.get_observations()
reward = self.get_reward(action)
isdone = self.is_done()
info = self.get_info()
if key == Keyboard.CONTROL + ord("A"):
print()
print("Actions: ", action)
if key == ord("R"):
print()
print("Rewards: ", reward)
if key == Keyboard.CONTROL + ord("Y"):
print()
print("Observations: ", observation)
if key == Keyboard.CONTROL + ord("M"):
print()
print("message", self.message)
if (self.v_action > 1):
self.file_writer.add_histogram(
"Actions/Per Global Step",
action,
global_step=self.step_global)
if (self.v_observation > 1):
self.file_writer.add_histogram(
"Observations/Per Global Step",
observation,
global_step=self.step_global)
if (self.v_reward > 1):
self.file_writer.add_scalar("Rewards/Per Global Step", reward,
self.step_global)
if (isdone):
self.file_writer.add_scalar(
"Is Done/Per Reset step",
self.step_cntr,
global_step=self.step_reset)
self.file_writer.flush()
self.score += reward
self.step_cntr += 1
self.step_global += 1
return observation,reward,isdone,info
def is_done(self):
self.steps = self.steps + 1
self.file_writer.flush()
if min(self.final_distance) <= self.findThreshold:
print("======== + Solved + ========")
return True
if self.steps >= self.steps_threshold or self.should_done:
return True
# rotation_field = self.robot_handles[0].getField('rotation').getSFRotation()
# """需要计算出模块完全侧边倒的rotation是多少,遇到这种情况直接进行下一次迭代"""
# # if rotation_field[0] < -0.4 and rotation_field[1] > 0.4 and rotation_field[2] > 0.4 and rotation_field[3] < -1.5708:
# # return True
return False
def reset(self):
print("Reset simulation")
self.respawnRobot()
self.steps = 0
self.should_done = False
self.robot_num = 2
"""observation 源代码wrapper有问题"""
self.score_history.append(self.score)
if (self.v_reward > 0):
self.file_writer.add_scalar(
"Score/Per Reset", self.score, global_step=self.step_reset)
for window in self.windows:
if self.step_reset > window:
self.file_writer.add_scalar(
"Score/With Window {}".format(window),
np.average(self.score_history[-window:]),
global_step=self.step_reset - window)
self.file_writer.flush()
self.step_reset += 1
self.step_cntr = 0
self.score = 0
return self.get_default_observation()
    def flush(self):
        if self.file_writer is not None:
            self.file_writer.flush()
    def close(self):
        if self.file_writer is not None:
            self.file_writer.close()
def get_info(self):
pass
def respawnRobot(self):
for robot in self.robot_handles:
if robot is not None:
robot.remove()
rootNode = self.supervisor.getRoot()
childrenField = rootNode.getField('children')
childrenField.importMFNode(-1,"robot_0.wbo")
childrenField.importMFNode(-1,"robot_1.wbo")
# childrenField.importMFNode(-1,"robot_2.wbo")
# childrenField.importMFNode(-1,"robot_3.wbo")
# childrenField.importMFNode(-1,"robot_4.wbo")
# childrenField.importMFNode(-1,"robot_5.wbo")
self.robot_handles = []
for defrobotname in self.robot_list:
self.robot_handles.append(self.supervisor.getFromDef(defrobotname))
self.final_target = self.supervisor.getFromDef('final_target')
self.supervisor.simulationResetPhysics()
self._last_message = None
print(os.getcwd())
# robot_defnames = ['robot_0','robot_1']
# supervisor_env = TaskDecisionSupervisor(robot_defnames, observation_space=OBSERVATION_SPACE,log_dir="logs/results/ddpg", v_action=1,v_observation=1,v_reward=1,windows=[10,\
# 10000, 2000])
# agent = TD3(lr_actor=0.00025,
# lr_critic=0.0025,
# input_dims= OBSERVATION_SPACE,
# gamma=0.99,
# tau=0.001,
# env=supervisor_env,
# batch_size=512,
# layer1_size=400,
# layer2_size=300,
# layer3_size=200,
# layer4_size=400,
# layer5_size=300,
# layer6_size=200,
# n_actions=ACTION_SPACE,
# load_models=True,
# save_dir='./models/saved/ddpg/')
# score_history = []
# np.random.seed(0)
# for i in range(1, 20000):
# done = False
# score = 0
# obs = list(map(float, supervisor_env.reset()))
# supervisor_env.empty_queue()
# first_iter = True
# if i % 10000 == 0:
# print("================= TESTING =================")
# while not done:
# act = agent.choose_action_test(obs).tolist()
# supervisor_env.robot_step(act)
# new_state, _, done, _ = supervisor_env.step(act)
# obs = list(map(float, new_state))
# else:
# print("================= TRAINING =================")
# while not done:
# if (not first_iter):
# act = agent.choose_action_train(obs).tolist()
# else:
# first_iter = False
# act = [0,0]
# for k in range(0,13):
# act.append(0.5)
# supervisor_env.robot_step(act)
# new_state, reward, done, info = supervisor_env.step(act)
# agent.remember(obs, act, reward, new_state, int(done))
# agent.learn()
# score += reward
# obs = list(map(float, new_state))
# score_history.append(score)
# print("===== Episode", i, "score %.2f" % score,
# "100 game average %.2f" % np.mean(score_history[-100:]))
# if i % 100 == 0:
# agent.save_models()
|
[
"1092673859@qq.com"
] |
1092673859@qq.com
|
6b453f03561951c446a8b40db6f1d6e5df813429
|
0a9d1aca6ceb6982e05440e29d8dbd6c3015b858
|
/zogglio/main3.py
|
babcedd227c84f57828b9b72ef8e914684624430
|
[
"MIT"
] |
permissive
|
zogglio/Pil_RotateImages
|
2062b5976b8d3d3f0d681b5c6d9a78c36fddd163
|
04c8d2fec8628d79cdd6abebe8288d3500d3efb8
|
refs/heads/main
| 2023-01-28T03:19:54.143215
| 2020-12-11T23:00:50
| 2020-12-11T23:00:50
| 320,702,249
| 1
| 0
|
MIT
| 2020-12-11T22:57:11
| 2020-12-11T22:57:11
| null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
from PIL import Image
# Open the existing image
imageOriginal = Image.open('panda.jpg')
# Mirror it with the FLIP_LEFT_RIGHT transposition
imageEspelhada = imageOriginal.transpose(Image.FLIP_LEFT_RIGHT)
# Show the original image
imageOriginal.show()
# Show the mirrored image
imageEspelhada.show()
# Save the mirrored image
imageEspelhada.save('pandaespelhada.jpg')
|
[
"noreply@github.com"
] |
zogglio.noreply@github.com
|
5437a37ac12a9facdf23c097ec5be008cfcce7b9
|
62f893ecff745802f53835520e871e671c4eb111
|
/plugins/operators/stage_redshift.py
|
6e766e48b70c4353d87ffd4447e88fef864a30df
|
[] |
no_license
|
friendkak/etl_pipeline_with_airflow
|
a40dd7fbcc067a26ab03d73433775a9bac31c901
|
1fc8e7f402d31871dbf055461948a0977e92345e
|
refs/heads/master
| 2022-05-07T21:39:27.000176
| 2019-12-10T16:20:15
| 2019-12-10T16:20:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
from airflow.hooks.postgres_hook import PostgresHook
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class StageToRedshiftOperator(BaseOperator):
ui_color = '#358140'
copy_sql_date = """
COPY {} FROM '{}/{}/{}/'
ACCESS_KEY_ID '{}'
SECRET_ACCESS_KEY '{}'
REGION '{}'
{} 'auto';
"""
copy_sql = """
COPY {} FROM '{}'
ACCESS_KEY_ID '{}'
SECRET_ACCESS_KEY '{}'
REGION '{}'
{} 'auto';
"""
@apply_defaults
def __init__(self,
redshift_conn_id = "",
aws_conn_id="",
table = "",
s3_path = "",
region= "us-west-2",
data_format = "",
*args, **kwargs):
super(StageToRedshiftOperator, self).__init__(*args, **kwargs)
self.redshift_conn_id = redshift_conn_id
self.aws_conn_id = aws_conn_id
self.table = table
self.s3_path = s3_path
self.region = region
self.data_format = data_format
self.execution_date = kwargs.get('execution_date')
def execute(self, context):
aws_hook = AwsHook(self.aws_conn_id)
credentials = aws_hook.get_credentials()
postgres_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
self.log.info("Deleting data from Redshift table")
postgres_hook.run(f"DELETE FROM {self.table}")
self.log.info("Copying data from S3 to Redshift")
# Backfill a specific date
if self.execution_date:
            sql_statement = StageToRedshiftOperator.copy_sql_date.format(
                self.table,
                self.s3_path,
                self.execution_date.strftime("%Y"),
                # partition path assumed to be <s3_path>/<year>/<month>/
                self.execution_date.strftime("%m"),
                credentials.access_key,
                credentials.secret_key,
                self.region,
                self.data_format
            )
else:
            sql_statement = StageToRedshiftOperator.copy_sql.format(
                self.table,
                self.s3_path,
                credentials.access_key,
                credentials.secret_key,
                self.region,
                self.data_format
            )
postgres_hook.run(sql_statement)
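
# --- Hedged usage sketch (editor addition, not part of the original file) ---
# A minimal example of wiring the operator into a DAG; the connection IDs,
# table name, bucket path, and `dag` object below are assumptions:
#
#   stage_events = StageToRedshiftOperator(
#       task_id="stage_events",
#       dag=dag,
#       redshift_conn_id="redshift",
#       aws_conn_id="aws_credentials",
#       table="staging_events",
#       s3_path="s3://udacity-dend/log_data",
#       region="us-west-2",
#       data_format="JSON",
#   )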
|
[
"aabid0193@gmail.com"
] |
aabid0193@gmail.com
|
3f826143961dc6846f3be08a916d426280fe3b9f
|
ac216a2cc36f91625e440247986ead2cd8cce350
|
/appengine/monorail/search/test/backendsearch_test.py
|
dd5ed18ba10efc6e7ee3e923957dd55faaa32484
|
[
"BSD-3-Clause"
] |
permissive
|
xinghun61/infra
|
b77cdc566d9a63c5d97f9e30e8d589982b1678ab
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
refs/heads/master
| 2023-01-12T21:36:49.360274
| 2019-10-01T18:09:22
| 2019-10-01T18:09:22
| 212,168,656
| 2
| 1
|
BSD-3-Clause
| 2023-01-07T10:18:03
| 2019-10-01T18:22:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,815
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unittests for monorail.search.backendsearch."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mox
import settings
from search import backendsearch
from search import backendsearchpipeline
from services import service_manager
from testing import fake
from testing import testing_helpers
class BackendSearchTest(unittest.TestCase):
def setUp(self):
self.services = service_manager.Services(
issue=fake.IssueService(),
)
self.mr = testing_helpers.MakeMonorailRequest(
path='/_backend/besearch?q=Priority:High&shard=2')
self.mr.query_project_names = ['proj']
self.mr.specified_logged_in_user_id = 111
self.mr.specified_me_user_ids = [222]
self.mr.shard_id = 2
self.servlet = backendsearch.BackendSearch(
'req', 'res', services=self.services)
self.mox = mox.Mox()
def tearDown(self):
self.mox.UnsetStubs()
self.mox.ResetAll()
def testHandleRequest_NoResults(self):
"""Handle the case where the search has no results."""
pipeline = testing_helpers.Blank(
SearchForIIDs=lambda: None,
result_iids=[],
search_limit_reached=False,
error=None)
self.mox.StubOutWithMock(backendsearchpipeline, 'BackendSearchPipeline')
backendsearchpipeline.BackendSearchPipeline(
self.mr, self.services, 100, ['proj'], 111, [222]
).AndReturn(pipeline)
self.mox.ReplayAll()
json_data = self.servlet.HandleRequest(self.mr)
self.mox.VerifyAll()
self.assertEqual([], json_data['unfiltered_iids'])
self.assertFalse(json_data['search_limit_reached'])
self.assertEqual(None, json_data['error'])
  def testHandleRequest_ResultsInOnePaginationPage(self):
"""Prefetch all result issues and return them."""
allowed_iids = [1, 2, 3, 4, 5, 6, 7, 8]
pipeline = testing_helpers.Blank(
SearchForIIDs=lambda: None,
result_iids=allowed_iids,
search_limit_reached=False,
error=None)
self.mox.StubOutWithMock(backendsearchpipeline, 'BackendSearchPipeline')
backendsearchpipeline.BackendSearchPipeline(
self.mr, self.services, 100, ['proj'], 111, [222]
).AndReturn(pipeline)
self.mox.StubOutWithMock(self.services.issue, 'GetIssues')
# All issues are prefetched because they fit on the first pagination page.
self.services.issue.GetIssues(self.mr.cnxn, allowed_iids, shard_id=2)
self.mox.ReplayAll()
json_data = self.servlet.HandleRequest(self.mr)
self.mox.VerifyAll()
self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], json_data['unfiltered_iids'])
self.assertFalse(json_data['search_limit_reached'])
self.assertEqual(None, json_data['error'])
  def testHandleRequest_ResultsExceedPaginationPage(self):
"""Return all result issue IDs, but only prefetch the first page."""
self.mr.num = 5
pipeline = testing_helpers.Blank(
SearchForIIDs=lambda: None,
result_iids=[1, 2, 3, 4, 5, 6, 7, 8],
search_limit_reached=False,
error=None)
self.mox.StubOutWithMock(backendsearchpipeline, 'BackendSearchPipeline')
backendsearchpipeline.BackendSearchPipeline(
self.mr, self.services, 100, ['proj'], 111, [222]
).AndReturn(pipeline)
self.mox.StubOutWithMock(self.services.issue, 'GetIssues')
# First 5 issues are prefetched because num=5
self.services.issue.GetIssues(self.mr.cnxn, [1, 2, 3, 4, 5], shard_id=2)
self.mox.ReplayAll()
json_data = self.servlet.HandleRequest(self.mr)
self.mox.VerifyAll()
    # All IDs are returned to the frontend.
self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], json_data['unfiltered_iids'])
self.assertFalse(json_data['search_limit_reached'])
self.assertEqual(None, json_data['error'])
  def testHandleRequest_QueryError(self):
    """Handle the case where the query was malformed."""
error = ValueError('Malformed query')
pipeline = testing_helpers.Blank(
SearchForIIDs=lambda: None,
result_iids=[],
search_limit_reached=False,
error=error)
self.mox.StubOutWithMock(backendsearchpipeline, 'BackendSearchPipeline')
backendsearchpipeline.BackendSearchPipeline(
self.mr, self.services, 100, ['proj'], 111, [222]
).AndReturn(pipeline)
self.mox.ReplayAll()
json_data = self.servlet.HandleRequest(self.mr)
self.mox.VerifyAll()
self.assertEqual([], json_data['unfiltered_iids'])
self.assertFalse(json_data['search_limit_reached'])
self.assertEqual(error.message, json_data['error'])
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
b8106e5924340ad358a10d584a8ca0cdb6474b72
|
3204246c32efaa8341f552c13bf7135b322cb270
|
/scripts/tentabot_drl/tentabot_drl_config.py
|
627ad9de62679ccda11b7f31b2ff16de30ad33b6
|
[] |
no_license
|
RIVeR-Lab/tentabot
|
9f74f480d0fb8a0460c4bee98ac396328229d121
|
9ce2ba857d56f73a1097b449202f670e1074af61
|
refs/heads/master
| 2023-06-09T10:58:08.891666
| 2023-06-02T16:08:04
| 2023-06-02T16:08:04
| 234,122,206
| 38
| 15
| null | 2022-05-17T23:33:43
| 2020-01-15T16:22:03
|
C++
|
UTF-8
|
Python
| false
| false
| 17,822
|
py
|
#!/usr/bin/env python3
'''
LAST UPDATE: 2022.04.03
AUTHOR: Neset Unver Akmandor (NUA)
Eric Dusel (ED)
E-MAIL: akmandor.n@northeastern.edu
dusel.e@northeastern.edu
DESCRIPTION: TODO...
NUA TODO:
'''
import rospy
import rospkg
import csv
import numpy as np
'''
DESCRIPTION: TODO...
'''
def write_data(file, data):
file_status = open(file, 'a')
with file_status:
write = csv.writer(file_status)
write.writerows(data)
print("tentabot_drl_config::write_data -> Data is written in " + str(file))
'''
DESCRIPTION: TODO...
'''
def read_data(file):
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
data = np.array(next(reader))
for row in reader:
data_row = np.array(row)
data = np.vstack((data, data_row))
return data
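# Hedged usage sketch (editor addition): for a CSV whose rows are
# "lateral_speed,angular_speed" pairs, as used for velocity_control_data.csv
# below, read_data returns the rows stacked into a 2-D numpy array of strings:
#
#   data = read_data("velocity_control_data.csv")
#   lateral = data[:, 0].astype(float)
#   angular = data[:, 1].astype(float)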
'''
DESCRIPTION: TODO...
'''
def get_training_param(initial_training_path, param_name) -> str:
log_file = initial_training_path + 'training_log.csv'
with open(log_file, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[0] == param_name:
return row[1]
'''
NUA TODO: This config class seems unnecessary. Include all parameters into the main config file called by the launch file.
DESCRIPTION: TODO...
'''
class Config():
def __init__(self, data_folder_path="", odom={}, goal={}):
rospack = rospkg.RosPack()
tentabot_path = rospack.get_path('tentabot') + "/"
## General
self.mode = rospy.get_param('mode', "")
if self.mode == "training":
self.world_frame_name = rospy.get_param('world_frame_name', "")
self.max_episode_steps = rospy.get_param('max_episode_steps', 0)
self.training_timesteps = rospy.get_param('training_timesteps', 0)
## Sensors
self.laser_size_downsampled = rospy.get_param('laser_size_downsampled', 0)
self.laser_normalize_flag = rospy.get_param('laser_normalize_flag', False)
self.laser_error_threshold = rospy.get_param('laser_error_threshold', 0.0)
## Robots
self.velocity_control_msg = rospy.get_param('robot_velo_control_msg', "")
self.velocity_control_data_path = rospy.get_param('trajectory_data_path', "")
velocity_control_data_str = read_data(tentabot_path + self.velocity_control_data_path + "velocity_control_data.csv")
self.velocity_control_data = np.zeros(velocity_control_data_str.shape)
for i, row in enumerate(velocity_control_data_str):
for j, val in enumerate(row):
self.velocity_control_data[i][j] = float(val)
self.min_lateral_speed = min(self.velocity_control_data[:,0]) # [m/s]
self.max_lateral_speed = max(self.velocity_control_data[:,0]) # [m/s]
self.init_lateral_speed = self.velocity_control_data[0,0] # [m/s]
self.min_angular_speed = min(self.velocity_control_data[:,1]) # [rad/s]
self.max_angular_speed = max(self.velocity_control_data[:,1]) # [rad/s]
self.init_angular_speed = self.velocity_control_data[0,1] # [rad/s]
## Algorithm
self.observation_space_type = rospy.get_param('observation_space_type', "")
self.goal_close_threshold = rospy.get_param("goal_close_threshold", 0.0)
self.obs_min_range = rospy.get_param('obs_min_range', 0.0)
self.n_actions = len(self.velocity_control_data)
self.n_observations = self.n_actions
self.n_obs_stack = rospy.get_param("n_obs_stack", 0.0)
self.n_skip_obs_stack = rospy.get_param("n_skip_obs_stack", 0.0)
self.cnn_obs_shape = (1,-1)
self.fc_obs_shape = (-1, )
self.cit_flag = rospy.get_param("cit_flag", True)
if self.cit_flag == False:
self.cnn_obs_shape = (-1,1)
if self.observation_space_type == "Tentabot_WP_FC" or \
self.observation_space_type == "laser_WP_1DCNN_FC":
self.n_wp = rospy.get_param('n_wp',8)
self.look_ahead = rospy.get_param('look_ahead',1.5)
self.wp_reached_dist = rospy.get_param('wp_reached_dist',0.2)
self.wp_global_dist = rospy.get_param('wp_global_dist',0.025)
self.wp_dynamic = rospy.get_param('wp_dynamic',1)
if self.observation_space_type == "laser_image_2DCNN_FC" or \
self.observation_space_type == "laser_rings_2DCNN_FC":
self.laser_image_width = rospy.get_param('laser_image_width',0)
self.laser_image_height = rospy.get_param('laser_image_height',0)
# Rewards
self.reward_terminal_success = rospy.get_param('reward_terminal_success', 0.0)
self.reward_step_scale = rospy.get_param('reward_step_scale', 0.0)
self.penalty_terminal_fail = rospy.get_param('penalty_terminal_fail', 0.0)
self.penalty_cumulative_step = rospy.get_param('penalty_cumulative_step', 0.0)
#self.reward_terminal_mintime = rospy.get_param('reward_terminal_mintime', 0.0)
if data_folder_path:
## Write all parameters
training_log_file = data_folder_path + "training_log.csv"
training_log_data = []
training_log_data.append(["mode", self.mode])
training_log_data.append(["world_frame_name", self.world_frame_name])
training_log_data.append(["max_episode_steps", self.max_episode_steps])
training_log_data.append(["training_timesteps", self.training_timesteps])
training_log_data.append(["laser_size_downsampled", self.laser_size_downsampled])
training_log_data.append(["laser_normalize_flag", self.laser_normalize_flag])
training_log_data.append(["laser_error_threshold", self.laser_error_threshold])
training_log_data.append(["velocity_control_msg", self.velocity_control_msg])
training_log_data.append(["velocity_control_data_path", self.velocity_control_data_path])
training_log_data.append(["min_lateral_speed", self.min_lateral_speed])
training_log_data.append(["max_lateral_speed", self.max_lateral_speed])
training_log_data.append(["init_lateral_speed", self.init_lateral_speed])
training_log_data.append(["min_angular_speed", self.min_angular_speed])
training_log_data.append(["max_angular_speed", self.min_angular_speed])
training_log_data.append(["init_angular_speed", self.init_angular_speed])
training_log_data.append(["observation_space_type", self.observation_space_type])
training_log_data.append(["goal_close_threshold", self.goal_close_threshold])
training_log_data.append(["obs_min_range", self.obs_min_range])
training_log_data.append(["n_actions", self.n_actions])
training_log_data.append(["n_observations", self.n_observations])
training_log_data.append(["n_obs_stack", self.n_obs_stack])
training_log_data.append(["n_skip_obs_stack", self.n_skip_obs_stack])
training_log_data.append(["cit_flag", self.cit_flag])
training_log_data.append(["cnn_obs_shape", self.cnn_obs_shape])
training_log_data.append(["fc_obs_shape", self.fc_obs_shape])
training_log_data.append(["reward_terminal_success", self.reward_terminal_success])
training_log_data.append(["reward_step_scale", self.reward_step_scale])
training_log_data.append(["penalty_terminal_fail", self.penalty_terminal_fail])
training_log_data.append(["penalty_cumulative_step", self.penalty_cumulative_step])
#training_log_data.append(["reward_terminal_mintime", self.reward_terminal_mintime])
if self.observation_space_type == "Tentabot_WP_FC" or \
self.observation_space_type == "laser_WP_1DCNN_FC":
training_log_data.append(["n_wp", self.n_wp])
training_log_data.append(["look_ahead", self.look_ahead])
training_log_data.append(["wp_reached_dist", self.wp_reached_dist])
training_log_data.append(["wp_global_dist", self.wp_global_dist])
training_log_data.append(["wp_dynamic", self.wp_dynamic])
write_data(training_log_file, training_log_data)
elif self.mode == "testing":
self.initial_training_path = tentabot_path + rospy.get_param('initial_training_path', "")
self.max_testing_episodes = rospy.get_param('max_testing_episodes', "")
self.world_frame_name = get_training_param(self.initial_training_path, "world_frame_name")
self.max_episode_steps = int(get_training_param(self.initial_training_path, "max_episode_steps"))
self.training_timesteps = int(get_training_param(self.initial_training_path, "training_timesteps"))
## Sensors
self.laser_size_downsampled = int(get_training_param(self.initial_training_path, "laser_size_downsampled"))
self.laser_error_threshold = float(get_training_param(self.initial_training_path, "laser_error_threshold"))
if get_training_param(self.initial_training_path, "laser_normalize_flag") == "False":
self.laser_normalize_flag = False
else:
self.laser_normalize_flag = True
## Robots
self.velocity_control_msg = rospy.get_param('robot_velo_control_msg', "")
self.velocity_control_data_path = get_training_param(self.initial_training_path, "velocity_control_data_path")
velocity_control_data_str = read_data(tentabot_path + self.velocity_control_data_path + "velocity_control_data.csv")
self.velocity_control_data = np.zeros(velocity_control_data_str.shape)
for i, row in enumerate(velocity_control_data_str):
for j, val in enumerate(row):
self.velocity_control_data[i][j] = float(val)
self.min_lateral_speed = min(self.velocity_control_data[:,0]) # [m/s]
self.max_lateral_speed = max(self.velocity_control_data[:,0]) # [m/s]
self.init_lateral_speed = self.velocity_control_data[0,0] # [m/s]
self.min_angular_speed = min(self.velocity_control_data[:,1]) # [rad/s]
self.max_angular_speed = max(self.velocity_control_data[:,1]) # [rad/s]
self.init_angular_speed = self.velocity_control_data[0,1] # [rad/s]
## Algorithm
self.observation_space_type = get_training_param(self.initial_training_path, "observation_space_type")
self.goal_close_threshold = float(get_training_param(self.initial_training_path, "goal_close_threshold"))
self.obs_min_range = float(get_training_param(self.initial_training_path, "obs_min_range"))
self.n_actions = len(self.velocity_control_data)
self.n_observations = self.n_actions
self.n_obs_stack = int(get_training_param(self.initial_training_path, "n_obs_stack"))
self.n_skip_obs_stack = int(get_training_param(self.initial_training_path, "n_skip_obs_stack"))
self.cnn_obs_shape = (1,-1)
self.fc_obs_shape = (-1, )
if get_training_param(self.initial_training_path, "cit_flag") == "False":
self.cit_flag = False
else:
self.cit_flag = True
if self.cit_flag == False:
self.cnn_obs_shape = (-1,1)
# Waypoints
if self.observation_space_type == "Tentabot_WP_FC" or \
self.observation_space_type == "laser_WP_1DCNN_FC":
self.n_wp = int(get_training_param(self.initial_training_path, "n_wp"))
self.look_ahead = float(get_training_param(self.initial_training_path, "look_ahead"))
self.wp_reached_dist = float(get_training_param(self.initial_training_path, "wp_reached_dist"))
self.wp_global_dist = float(get_training_param(self.initial_training_path, "wp_global_dist"))
self.wp_dynamic = int(get_training_param(self.initial_training_path, "wp_dynamic"))
# Rewards
self.reward_terminal_success = float(get_training_param(self.initial_training_path, "reward_terminal_success"))
self.reward_step_scale = float(get_training_param(self.initial_training_path, "reward_step_scale"))
self.penalty_terminal_fail = float(get_training_param(self.initial_training_path, "penalty_terminal_fail"))
self.penalty_cumulative_step = float(get_training_param(self.initial_training_path, "penalty_cumulative_step"))
#self.reward_terminal_mintime = float(get_training_param(self.initial_training_path, "reward_terminal_mintime"))
'''
if data_folder_path:
## Write all parameters
testing_log_file = data_folder_path + "testing_input_log.csv"
testing_log_data = []
testing_log_data.append(["mode", self.mode])
testing_log_data.append(["initial_training_path", self.initial_training_path])
write_data(testing_log_file, testing_log_data)
'''
'''
print("tentabot_drl_config::__init__ -> world_frame_name: " + str(self.world_frame_name))
print("tentabot_drl_config::__init__ -> max_episode_steps: " + str(self.max_episode_steps))
print("tentabot_drl_config::__init__ -> training_timesteps: " + str(self.training_timesteps))
print("tentabot_drl_config::__init__ -> laser_size_downsampled: " + str(self.laser_size_downsampled))
print("tentabot_drl_config::__init__ -> laser_normalize_flag: " + str(self.laser_normalize_flag))
print("tentabot_drl_config::__init__ -> laser_error_threshold: " + str(self.laser_error_threshold))
print("tentabot_drl_config::__init__ -> velocity_control_data_path: " + str(self.velocity_control_data_path))
print("tentabot_drl_config::__init__ -> observation_space_type: " + str(self.observation_space_type))
print("tentabot_drl_config::__init__ -> goal_close_threshold: " + str(self.goal_close_threshold))
print("tentabot_drl_config::__init__ -> obs_min_range: " + str(self.obs_min_range))
print("tentabot_drl_config::__init__ -> n_actions: " + str(self.n_actions))
print("tentabot_drl_config::__init__ -> n_observations: " + str(self.n_observations))
print("tentabot_drl_config::__init__ -> n_obs_stack: " + str(self.n_obs_stack))
print("tentabot_drl_config::__init__ -> n_skip_obs_stack: " + str(self.n_skip_obs_stack))
print("tentabot_drl_config::__init__ -> reward_terminal_success: " + str(self.reward_terminal_success))
print("tentabot_drl_config::__init__ -> reward_step_scale: " + str(self.reward_step_scale))
print("tentabot_drl_config::__init__ -> penalty_terminal_fail: " + str(self.penalty_terminal_fail))
print("tentabot_drl_config::__init__ -> penalty_cumulative_step: " + str(self.penalty_cumulative_step))
'''
if odom:
self.x = odom["x"]
self.y = odom["y"]
self.th = odom["theta"]
self.v = odom["u"]
self.omega = odom["omega"]
if goal:
self.goalX = goal["x"]
self.goalY = goal["y"]
'''
print("--------------")
print("Config::__init__ -> x: " + str(odom["x"]))
print("Config::__init__ -> y: " + str(odom["y"]))
print("Config::__init__ -> theta: " + str(odom["theta"]))
print("Config::__init__ -> u: " + str(odom["u"]))
print("Config::__init__ -> omega: " + str(odom["omega"]))
print("--------------")
'''
'''
NUA TODO:
'''
def set_odom(self, odom):
self.x = odom["x"]
self.y = odom["y"]
        self.th = odom["theta"]  # attribute name kept consistent with __init__
self.v = odom["u"]
self.omega = odom["omega"]
'''
NUA TODO:
'''
def set_goal(self, goal):
self.goalX = goal["x"]
self.goalY = goal["y"]
'''
NUA TODO:
'''
def set_laser_data(self, laser_scan):
self.laser_frame_id = laser_scan.header.frame_id
self.laser_angle_min = laser_scan.angle_min # [rad]
self.laser_angle_max = laser_scan.angle_max # [rad]
self.laser_angle_increment = laser_scan.angle_increment # [rad]
self.laser_range_min = laser_scan.range_min # [m]
self.laser_range_max = laser_scan.range_max # [m]
self.laser_time_increment = laser_scan.time_increment
self.laser_scan_time = laser_scan.scan_time
self.laser_n_range = len(laser_scan.ranges)
self.laser_downsample_scale = 1
if 0 < self.laser_size_downsampled < len(laser_scan.ranges):
self.laser_downsample_scale = int(len(laser_scan.ranges) / self.laser_size_downsampled)
self.laser_n_range = self.laser_size_downsampled
self.laser_angle_increment = (self.laser_angle_max - self.laser_angle_min) / self.laser_size_downsampled
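
# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Assuming a ROS node is up and the parameters read above are on the
# parameter server, a Config is typically built and updated like this
# (paths and values are hypothetical):
#
#   cfg = Config(data_folder_path="/tmp/tentabot_run/")
#   cfg.set_odom({"x": 0.0, "y": 0.0, "theta": 0.0, "u": 0.0, "omega": 0.0})
#   cfg.set_goal({"x": 2.0, "y": 1.5})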
|
[
"akmandor.n@northeastern.edu"
] |
akmandor.n@northeastern.edu
|
a10de8be7a530cb8a968b65af316c038668a877b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/470/usersdata/282/112149/submittedfiles/Av2_Parte3.py
|
00981b0e1e62d56207807f1fb8f1f85ec3f1759a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
# -*- coding: utf-8 -*-
m = int(input('Enter the number of lists: '))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
3655ae88d7540d09282c06da474ddd9c6b0e7dad
|
1e92a800a70698eeeaaf7c9c6a990e23707865dc
|
/pwned/__init__.py
|
0c91a6872bc2d8084a6cbb6b9d3ae3de54f6a75a
|
[] |
no_license
|
yuriifreire/pywned
|
21ad85719428ed9d3946d8090720686bc9738533
|
8a6aa21d79131cc779a58c19a65155b340d34ff3
|
refs/heads/master
| 2020-03-18T07:37:16.344041
| 2018-05-22T19:27:43
| 2018-05-22T19:27:43
| 134,460,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31
|
py
|
from .pwned import check, main
|
[
"yuriifreire@gmail.com"
] |
yuriifreire@gmail.com
|
2918ca9901abd71a46c9c87ccb1de609a18f0b0c
|
e7a2f8cdcb6bd6fe4cc80e446ddb0d5a7b497bed
|
/run_async/utils.py
|
d69454d10fa80231f3d02c2e9e80b660f2181eff
|
[
"MIT"
] |
permissive
|
leriomaggio/async-ipython-magic
|
eebbd3a2fd866b70314715647f89a2b7f2c7fd4e
|
595d4aec8ea027eb23da0f0ae765e20adc987dc4
|
refs/heads/master
| 2022-08-27T00:53:14.787536
| 2020-12-04T14:18:29
| 2020-12-04T14:18:29
| 66,557,604
| 21
| 3
|
MIT
| 2022-08-23T17:10:28
| 2016-08-25T12:53:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,673
|
py
|
"""
"""
# Author: Valerio Maggio <valeriomaggio@gmail.com>
# Copyright (c) 2015 Valerio Maggio <valeriomaggio@gmail.com>
# License: BSD 3 clause
from IPython.utils.coloransi import TermColors, color_templates
from .settings import SERVER_ADDR, SERVER_PORT, CONNECTION_ID_SEP
COLORS = [color[1] for color in color_templates]
def strip_ansi_color(text):
"""
Removes ANSI colors from the text
Parameters
----------
text : str
The input text string to process
Returns
-------
str : the plain text with all ANSI colors stripped.
"""
text = text.replace(TermColors.Normal, TermColors.NoColor)
for color in COLORS:
text = text.replace(TermColors._base % (color), TermColors.NoColor)
return text
def connection_string(web_socket=True, extra=''):
if web_socket:
protocol = 'ws'
else:
protocol = 'http'
    return '{proto}://{server}:{port}/{extra}'.format(proto=protocol, server=SERVER_ADDR,
                                                      port=SERVER_PORT, extra=extra)
def format_ws_connection_id(role_name, session_id):
"""
Format and return a (likely) unique string
to be fed to the Websocket server in the
url. This string will be used to uniquely
identify the open connection.
See `run_server.WebSocketConnectionHandler` for further
details.
Parameters
----------
role_name : str
The name of the role of the client trying to connect
to the WebSocket
session_id : str
The uniquely defined `uuid` generated for the
connecting client.
Returns
-------
ws_conn_id : str
String representing the connection ID to be
fed to the WebSocket server (url)
"""
return "{0}{1}{2}".format(role_name, CONNECTION_ID_SEP, session_id)
def parse_ws_connection_id(connection_id):
"""
Get and return the role name and the
session id associated to the input connection id.
Parameters
----------
connection_id : str
The connection ID formatted according to the
`format_ws_connection_id` function.
Returns
-------
_connection_id : str
The name of the role of the connected client associated to the connection ID
(namely, "JS" or "PY")
session_id : str
The session id associated to the client at connection time.
Note
----
This function is the counterpart of the `format_ws_connection_id` function.
This function decompose a connection id, while the former composes it.
"""
role_name, session_id = connection_id.split(CONNECTION_ID_SEP)
return role_name, session_id
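
# --- Hedged usage sketch (editor addition, not part of the original file) ---
# The two helpers are inverses of each other; with a hypothetical role name
# and session id:
#
#   conn_id = format_ws_connection_id("PY", "123e4567")
#   role, session = parse_ws_connection_id(conn_id)
#   assert (role, session) == ("PY", "123e4567")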
|
[
"valerio.maggio@gmail.com"
] |
valerio.maggio@gmail.com
|
4cdce4ddb73b2165948e69cc8a58f22ae467e772
|
0add7953d3e3ce2df9e8265102be39b758579753
|
/built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/mmdet_meta_cfgs/dataset.py
|
24541f70042b7db556335340b2d21c2c1bf9c144
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Huawei-Ascend/modelzoo
|
ae161c0b4e581f8b62c77251e9204d958c4cf6c4
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
refs/heads/master
| 2023-04-08T08:17:40.058206
| 2020-12-07T08:04:57
| 2020-12-07T08:04:57
| 319,219,518
| 1
| 1
|
Apache-2.0
| 2023-03-24T22:22:00
| 2020-12-07T06:01:32
|
Python
|
UTF-8
|
Python
| false
| false
| 6,221
|
py
|
"""Modules for Dataset."""
import random
from ..utils import dict2str, str_warp
from .module import Module
class Dataset(Module):
"""Class of dataset."""
attr_space = {
'img_scale': [
{'train': (512, 512), 'val': (512, 512), 'test': (512, 512)},
{'train': (800, 600), 'val': (800, 600), 'test': (800, 600)},
{'train': (1000, 600), 'val': (1000, 600), 'test': (1000, 600)}]}
id_attrs = ['dataset', 'img_scale', 'multiscale_mode']
def __init__(self,
dataset='CocoDataset',
batch_size=2,
num_workers=2,
multiscale_mode='range',
num_classes=81,
**kwargs):
super(Dataset, self).__init__(**kwargs)
data_root_default = '/cache/data/COCO2017'
img_scale_default = dict(
train=[(512, 512), (512, 300)],
val=(512, 512),
test=(512, 512))
img_norm_cfg_default = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
self.dataset_type = dataset
self.batch_size = batch_size
self.workers = num_workers
self.multiscale_mode = multiscale_mode
self.num_classes = num_classes
self.data_root = kwargs['data_root'] if 'data_root' in kwargs else data_root_default
img_scale = kwargs['img_scale'] if 'img_scale' in kwargs else img_scale_default
self.img_scale = dict()
for k, v in img_scale.items():
self.img_scale.update({k: tuple(v)})
self.img_norm = kwargs['img_norm_cfg'] if 'img_norm_cfg' in kwargs else img_norm_cfg_default
self.flip_ratio = kwargs['flip_ratio'] if 'flip_ratio' in kwargs else 0.5
self.size_divisor = kwargs['size_divisor'] if 'size_divisor' in kwargs else 32
self.data_setting = []
for task in ['train', 'val', 'test']:
img_scale_ = self.img_scale[task]
setting = [img_scale_]
if task == 'train':
setting.append(str_warp(self.multiscale_mode))
self.data_setting.extend(setting)
def __str__(self):
"""Get image size(test) str."""
return '{}({}x{})'.format(self.dataset_type.upper(),
self.test_img_size[0], self.test_img_size[1])
@property
def test_img_size(self):
"""Get test image size."""
return self.img_scale['test']
@property
def config(self):
"""Return dataset config for mmdet."""
return dict(dataset_type=str_warp(self.dataset_type),
img_norm=self.img_norm_cfg,
train_pipeline=self.train_pipeline_cfg,
test_pipeline=self.test_pipeline_cfg,
data_setting=self.data_setting_cfg)
@property
def train_pipeline_cfg(self):
"""Generate train pipeline config."""
img_scale = self.img_scale['train']
data = (
"[\n"
" dict(type='LoadImageFromFile'),\n"
" dict(type='LoadAnnotations', with_bbox=True),\n"
f" dict(type='Resize', img_scale=({img_scale[0]}, {img_scale[1]}), keep_ratio=True),\n"
f" dict(type='RandomFlip', flip_ratio={self.flip_ratio}),\n"
" dict(type='Normalize', **img_norm_cfg),\n"
f" dict(type='Pad', size_divisor={self.size_divisor}),\n"
" dict(type='DefaultFormatBundle'),\n"
" dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n"
"]")
return data
@property
def test_pipeline_cfg(self):
"""Generate test pipeline config."""
img_scale = self.img_scale['test']
data = (
"[\n"
" dict(type='LoadImageFromFile'),\n"
" dict(\n"
" type='MultiScaleFlipAug',\n"
f" img_scale=({img_scale[0]}, {img_scale[1]}),\n"
" flip=False,\n"
" transforms=[\n"
" dict(type='Resize', keep_ratio=True),\n"
" dict(type='RandomFlip'),\n"
" dict(type='Normalize', **img_norm_cfg),\n"
f" dict(type='Pad', size_divisor={self.size_divisor}),\n"
" dict(type='ImageToTensor', keys=['img']),\n"
" dict(type='Collect', keys=['img']),\n"
" ])\n"
"]")
return data
@property
def data_setting_cfg(self):
"""Return data setting."""
data = (
"dict(\n"
f" imgs_per_gpu={self.batch_size},\n"
f" workers_per_gpu={self.workers},\n"
" type=dataset_type,\n"
" train=dict(\n"
" type=dataset_type,\n"
" ann_file=data_root + 'annotations/instances_train2017.json',\n"
" img_prefix=data_root + 'train2017/',\n"
" pipeline=train_pipeline),\n"
" val=dict(\n"
" type=dataset_type,\n"
" ann_file=data_root + 'annotations/instances_val2017.json',\n"
" img_prefix=data_root + 'val2017/',\n"
" pipeline=test_pipeline),\n"
" test=dict(\n"
" type=dataset_type,\n"
" ann_file=data_root + 'annotations/instances_val2017.json',\n"
" img_prefix=data_root + 'val2017/',\n"
" pipeline=test_pipeline))")
return data
@property
def img_norm_cfg(self):
"""Return image normalization config."""
return dict2str(self.img_norm, in_one_line=True)
@classmethod
def sample(cls, data_setting, fore_part=None):
"""Return sampled_params."""
sampled_params = dict()
for key, value in cls.attr_space.items():
attr = random.choice(value)
sampled_params.update({key: attr})
sampled_params.update(data_setting)
sampled_params.update(cls.quest_param(fore_part=fore_part))
return cls(**sampled_params, fore_part=fore_part)
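
# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Constructing a Dataset directly with hypothetical settings; print(ds) uses
# the test image size, and ds.config returns the mmdet-style config fragments
# as strings:
#
#   ds = Dataset(dataset='CocoDataset', batch_size=2,
#                img_scale=dict(train=(800, 600), val=(800, 600),
#                               test=(800, 600)))
#   print(ds)        # -> "COCODATASET(800x600)"
#   cfg = ds.config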
|
[
"1571856591@qq.com"
] |
1571856591@qq.com
|
6ced258c763dc4524d1b9b2e0f77056a5adccb17
|
ffb87018f36ee5f8d0025febd10156ab9d6a0459
|
/05_Countries_Capitals_GUI_v2.py
|
ca1e2b029157f40ec2346cb83f9a4116979e3cba
|
[] |
no_license
|
NathanM3/Quiz-Program
|
9b8ba2058ae071c7edf34cc56841101a2a8a6e4f
|
0dcc288cc34b66320eeb9370215f60e4a0c41a49
|
refs/heads/main
| 2023-07-26T02:31:22.665012
| 2021-09-09T23:09:29
| 2021-09-09T23:09:29
| 389,466,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,844
|
py
|
"""Based on 05_Countries_Capitals_GUI_v1.py
This version adds to the main menu GUI by dealing with duplicate windows.
Taking code from the 'Help' class as it already deals with duplicates
Lines 198-201 and lines 181-184. Added padding to label (line 227)
"""
# importing modules to use within program
# tkinter module used to create GUIs
from tkinter import *
from tkinter.ttk import Separator
# using re to control what characters are allowed within a string
import re
# random will be used later on in the quiz component to randomise the questions
import random
from functools import partial # to eliminate duplicate windows
# Creating my own Errors that apply to my functions conditions
class Error(Exception):
"""Bass Class for other exceptions"""
pass
# Is a custom Error to be raised when conditions are not met in try except
class NamingError(Error):
"""Raised when there is more than one sequential space or a string with
less than 3 characters"""
pass
class Menu:
def __init__(self):
# Formatting variables of the GUI
background_color = "#89B4E5"
# Main Menu GUI
self.main_menu_frame = Frame(width=375, height=300,
bg=background_color)
self.main_menu_frame.grid()
# Main Menu heading (row 0)
self.main_menu_label = Label(self.main_menu_frame,
text="Main Menu", font="Arial 18 bold",
bg=background_color, padx=10, pady=10)
self.main_menu_label.grid(row=0)
# Get name label (row 1)
self.get_name_label = Label(self.main_menu_frame,
text="Enter your name and press one of "
"the buttons below",
font="Arial 10 italic", wrap=250, pady=10,
bg=background_color)
self.get_name_label.grid(row=1)
# Name entry box (row 2)
self.get_name_entry = Entry(self.main_menu_frame, width=17,
font="Arial 12")
self.get_name_entry.grid(row=2)
# Buttons Frame
self.buttons_frame = Frame(self.main_menu_frame, pady=10, padx=10,
width=200, bg=background_color)
self.buttons_frame.grid(row=3)
# Countries button (column 0) shows a button to open countries mode
self.countries_button = Button(self.buttons_frame, text="Countries",
font="Arial 14 bold", width=10,
bg="blue", fg="snow",
command=self.countries)
self.countries_button.grid(row=0, column=0, padx=5, pady=10)
# Capitals button (column 1) shows a button to open capitals mode
self.capitals_button = Button(self.buttons_frame, text="Capitals",
font="Arial 14 bold", width=10,
bg="forest green",
fg="snow", command=self.capitals)
self.capitals_button.grid(row=0, column=1, padx=5, pady=10)
# Separator (row 1) should produce a line inside the buttons frame
# that separates the top 2 buttons from the bottom 2
self.frame_separator = Separator(self.buttons_frame,
orient="horizontal")
self.frame_separator.grid(row=1, sticky="ew", columnspan=2)
# History button (row 2, column 0) shows a button on the
# GUI to go to history
self.history_button = Button(self.buttons_frame,
text="Answer Record History", wrap=150,
font="Arial 12 bold", bg="light grey",
width=12)
self.history_button.grid(row=2, column=0, padx=5, pady=10)
# Help Button (row 2, column 1) shows a button to open help window
self.help_button = Button(self.buttons_frame, text="Help", width=12,
font="Arial 12 bold", bg="light grey",
height=2, command=self.help)
self.help_button.grid(row=2, column=1, padx=5, pady=10)
def name_check(self):
valid_char = "[A-Za-z ]"
        while True:  # runs once: exits via return (valid) or break (invalid)
name = str(self.get_name_entry.get())
try:
for letter in name:
if not re.match(valid_char, letter):
self.get_name_label.configure(
text="no {}'s allowed".format(letter), fg="red")
raise NamingError
                if len(name) < 3 or "  " in name:  # too short or double space
raise NamingError
# If conditions are met, name will be returned
self.get_name_label.configure(
text="Enjoy the quiz {}".format(name), fg="black")
return name
except NamingError:
self.get_name_label.configure(
text="Please enter a name that contains at least 3 "
"letters and only 1 space between names: ", fg="red")
break
# functions that open pop up windows and also start using corresponding
# classes
def capitals(self):
user_name = self.name_check()
if user_name:
get_capitals = Capitals(self) # Opens Capitals GUI
def countries(self):
user_name = self.name_check()
if user_name:
get_countries = Countries(self) # Opens Countries GUI
def help(self):
get_help = Help(self) # Opens Help GUI
class Help:
def __init__(self, partner):
background = "dark orange"
# to disable help button
partner.help_button.config(state=DISABLED)
# sets up child window (ie: help box)
self.help_box = Toplevel()
# If a user presses the red cross at the top, it will use close_help
# This enables the help button.
self.help_box.protocol('WM_DELETE_WINDOW', partial(self.close_help,
partner))
# set up GUI Frame
self.help_frame = Frame(self.help_box, width=450, bg=background)
self.help_frame.grid()
# set up Help heading (row 0)
self.how_heading = Label(self.help_frame, text="Help / Information",
font="Arial 10 bold",
bg=background, pady=10)
self.how_heading.grid(row=0)
# Help text (label, row 1)
self.help_text = Label(self.help_frame,
text="This is a Geographical Quiz that will "
"show you the name of a country or "
"capital and ask you to enter the "
"corresponding capital or country.\nAfter "
"you have completed the quiz or decided "
"to stop playing, a record of answers can "
"be exported by pressing the 'Answer "
"Record History' button and following the "
"instructions given.\nEnjoy", justify=LEFT,
                               width=40, bg=background, wraplength=250)
self.help_text.grid(row=1)
# Dismiss button (row 2)
self.dismiss_btn = Button(self.help_frame, text="Dismiss",
width=10, bg="slate gray",
font="arial 10 bold",
command=partial(self.close_help, partner),
fg="white")
self.dismiss_btn.grid(row=2, pady=5)
def close_help(self, partner):
# Enabling the help button again in the close help function
partner.help_button.config(state=NORMAL)
self.help_box.destroy()
class Capitals:
def __init__(self, partner):
background_color = "chartreuse4"
# to disable countries and capitals buttons when quiz is open.
partner.countries_button.config(state=DISABLED)
partner.capitals_button.config(state=DISABLED)
# Sets up a child window for capitals
self.capitals_box = Toplevel()
# If a user presses the red cross at the top, it will use close_quiz
# This closes the quiz window and enables the capitals and countries
# buttons in the main menu.
self.capitals_box.protocol('WM_DELETE_WINDOW', partial(self.close_quiz,
partner))
# Capitals GUI
self.capitals_frame = Frame(self.capitals_box, width=375, height=300,
bg=background_color)
self.capitals_frame.grid()
# Capitals heading (row 0)
self.capitals_label = Label(self.capitals_frame,
text="Capitals", font="Arial 18 bold",
bg=background_color, padx=10, pady=10)
self.capitals_label.grid(row=0)
# Question label (row 1)
self.question_label = Label(self.capitals_frame,
text="<ask question here>",
font="Arial 12", wrap=250, pady=10,
bg=background_color)
self.question_label.grid(row=1)
# Answer entry box label (row 2)
self.answer_label = Label(self.capitals_frame,
text="Type your Answer and press 'Check' "
"to enter", font="Arial 10 italic",
bg=background_color)
self.answer_label.grid(row=2, padx=30)
# Answer entry box (row 3)
self.get_answer_entry = Entry(self.capitals_frame, width=17,
font="Arial 12")
self.get_answer_entry.grid(row=3)
# Buttons Frame (row 4)
self.buttons_frame = Frame(self.capitals_frame, pady=10, padx=10,
width=200, bg=background_color)
self.buttons_frame.grid(row=4)
# Check button will inform the user if they were correct or incorrect
# (row 0, column 0 of button frame)
self.check_button = Button(self.buttons_frame, text="Check",
font="Arial 14 bold", width=8,
bg="sandy brown", fg="black")
self.check_button.grid(row=0, column=0, padx=5, pady=10)
# 'Next' button will change the GUI to the next question of the quiz
# (row 0, column 1) on the same horizontal line as check button.
self.next_button = Button(self.buttons_frame, text="Next",
font="Arial 14 bold", width=8,
bg="firebrick2", fg="snow")
self.next_button.grid(row=0, column=1, padx=5, pady=10)
def close_quiz(self, partner):
# Enabling the help button again in the close help function
partner.capitals_button.config(state=NORMAL)
partner.countries_button.config(state=NORMAL)
self.capitals_box.destroy()
class Countries:
def __init__(self, partner):
background = "Green"
# to disable countries and capitals buttons when quiz is open.
partner.countries_button.config(state=DISABLED)
partner.capitals_button.config(state=DISABLED)
# main routine
if __name__ == "__main__":
root = Tk()
root.title("Geographical Quiz Game")
something = Menu()
root.mainloop()
|
[
"morrisonn2@middleton.school.nz"
] |
morrisonn2@middleton.school.nz
|
8efa3223acd3a9a6487c682356b2e6cb3f33484a
|
ae08fbd406a3523107176471df149bbb1006637f
|
/qa/rpc-tests/bipdersig.py
|
6c1849312cdc9e36997a2acbb41a84d51d8c8676
|
[
"MIT"
] |
permissive
|
mirzaei-ce/core-popbit
|
9e7676c95f4d7cb9ad73be456fc98c9aaea4aba3
|
e4230fcbfdf8a5bca54fb636e3019c42d40f0a14
|
refs/heads/master
| 2021-07-25T23:25:24.765814
| 2017-11-02T16:00:48
| 2017-11-02T16:00:48
| 109,287,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,128
|
py
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the BIP66 changeover logic
#
from test_framework.test_framework import PopbitTestFramework
from test_framework.util import *
class BIP66Test(PopbitTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, []))
self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some old-version blocks
self.nodes[1].generate(100)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 version=2 blocks")
# Mine 750 new-version blocks
for i in xrange(15):
self.nodes[2].generate(50)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 850):
raise AssertionError("Failed to mine 750 version=3 blocks")
# TODO: check that new DERSIG rules are not enforced
# Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 851):
raise AssertionError("Failed to mine a version=3 blocks")
# TODO: check that new DERSIG rules are enforced
# Mine 198 new-version blocks
for i in xrange(2):
self.nodes[2].generate(99)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1049):
raise AssertionError("Failed to mine 198 version=3 blocks")
# Mine 1 old-version block
self.nodes[1].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1050):
raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks")
# Mine 1 new-version blocks
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Failed to mine a version=3 block")
# Mine 1 old-version blocks
try:
self.nodes[1].generate(1)
raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks")
except JSONRPCException:
pass
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Accepted a version=2 block after 950 version=3 blocks")
# Mine 1 new-version blocks
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1052):
raise AssertionError("Failed to mine a version=3 block")
if __name__ == '__main__':
BIP66Test().main()
|
[
"mirzaei@ce.sharif.edu"
] |
mirzaei@ce.sharif.edu
|
c3f2cf99a8af7fe6d4ea17d7e13306e113ac0c83
|
ad4ec1f4f670de4ec801008f61399d47f5381174
|
/05_Django/05_model_relation/manytomany/migrations/0001_initial.py
|
d5c72dca15c1c46e920eacb5f5a38b6a8021ad2d
|
[] |
no_license
|
minseunghwang/TIL
|
b6c0ce67ac86465b331b7430f22c66795c0eb246
|
12d829e51a611e3c2d1118a0e82becbdc6d0d89b
|
refs/heads/master
| 2022-12-27T06:59:46.853273
| 2019-11-27T07:34:47
| 2019-11-27T07:34:47
| 216,502,881
| 0
| 0
| null | 2022-04-22T22:38:55
| 2019-10-21T07:21:24
|
Python
|
UTF-8
|
Python
| false
| false
| 841
|
py
|
# Generated by Django 2.2.6 on 2019-11-12 07:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Doctor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
],
),
migrations.CreateModel(
name='Patient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('doctor', models.ManyToManyField(related_name='patients', to='manytomany.Doctor')),
],
),
]
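
# Editor note (illustrative): the ManyToManyField above makes Django create an
# implicit join table (manytomany_patient_doctor by default) linking Patient
# and Doctor rows; related_name='patients' enables doctor.patients.all().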
|
[
"49888816+minseunghwang@users.noreply.github.com"
] |
49888816+minseunghwang@users.noreply.github.com
|