| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
import struct

def get_gzip_uncompressed_file_size(file_name):
    """
    Return the uncompressed size of a gzip file,
    similar to `gzip -l file_name`.
    """
    # The gzip trailer stores CRC32 (4 bytes) followed by ISIZE (4 bytes),
    # so read the raw file and unpack the last 4 bytes as a little-endian uint32.
    with open(file_name, 'rb') as file_obj:
        file_obj.seek(-4, 2)
        isize = struct.unpack('<I', file_obj.read(4))[0]
    return isize
| 10,000
|
def Capitalize(v):
"""Capitalise a string.
>>> s = Schema(Capitalize)
>>> s('hello world')
'Hello world'
"""
return str(v).capitalize()
| 10,001
|
def pg_index_exists(conn, schema_name: str, table_name: str, index_name: str) -> bool:
"""
Does a postgres index exist?
Unlike pg_exists(), we don't need heightened permissions on the table.
So, for example, Explorer's limited-permission user can check agdc/ODC tables
that it doesn't own.
"""
return (
conn.execute(
"""
select indexname
from pg_indexes
where schemaname=%(schema_name)s and
tablename=%(table_name)s and
indexname=%(index_name)s
""",
schema_name=schema_name,
table_name=table_name,
index_name=index_name,
).scalar()
is not None
)
| 10,002
|
def create_mock_data(bundle_name: str,
user_params: dict):
"""
create some mock data and push to S3 bucket
:param bundle_name: str, bundle name
:param user_params: dict, what parameters to save
:return:
"""
api.context(context_name)
api.remote(context_name, remote_context=context_name, remote_url=s3_path)
component_signature = {k: str(v) for k, v in user_params.items()}
proc_name = api.Bundle.calc_default_processing_name(
bundle_name, component_signature, dep_proc_ids={})
with api.Bundle(context_name, name=bundle_name, processing_name=proc_name) as b:
b.add_params(component_signature) # local_path will be replaced by S3 by Disdat
api.commit(context_name, bundle_name)
api.push(context_name, bundle_name) # save the bundle to S3
return b.uuid # return the bundle uuid
| 10,003
|
def check_bullet_alien_collisions(settings, screen, stats, sb, ship, aliens, bullets):
""" Respond to any bullet-alien collision."""
# remove any bullets and aliens that have collided,
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
stats.score += settings.alien_points
sb.prep_score()
check_high_score(stats, sb)
if len(aliens) == 0:
# Destroy existing bullets, speed up game, and create new fleet.
bullets.empty()
settings.increase_speed()
create_fleet(settings, screen, ship, aliens)
| 10,004
|
def main():
"""
Run the server.
"""
factory = server.DNSServerFactory(
clients=[DynamicResolver(), client.Resolver(resolv="/etc/resolv.conf")]
)
protocol = dns.DNSDatagramProtocol(controller=factory)
reactor.listenUDP(10053, protocol)
reactor.listenTCP(10053, factory)
reactor.run()
| 10,005
|
def DiscoverNameServers():
"""Don't call, only here for backward compatability. We do discovery for
you automatically.
"""
pass
| 10,006
|
def _vertex_arrays_to_list(x_coords_metres, y_coords_metres):
"""Converts set of vertices from two arrays to one list.
V = number of vertices
:param x_coords_metres: length-V numpy array of x-coordinates.
:param y_coords_metres: length-V numpy array of y-coordinates.
:return: vertex_list_xy_metres: length-V list, where each element is an
(x, y) tuple.
"""
_check_polyline(
x_coords_metres=x_coords_metres, y_coords_metres=y_coords_metres)
num_vertices = len(x_coords_metres)
vertex_list_xy_metres = []
for i in range(num_vertices):
vertex_list_xy_metres.append((x_coords_metres[i], y_coords_metres[i]))
return vertex_list_xy_metres
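# A minimal usage sketch (assumes numpy is imported as np, as elsewhere in this module):
# _vertex_arrays_to_list(np.array([0., 1.]), np.array([2., 3.]))
# would return [(0.0, 2.0), (1.0, 3.0)].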
| 10,007
|
def info_from_apiKeyAuth(token: str, required_scopes) -> Optional[Dict[str, Any]]:
"""
Check and retrieve authentication information from an API key.
Returned value will be passed in 'token_info' parameter of your operation function, if there
is one. 'sub' or 'uid' will be set in 'user' parameter of your operation function, if there
is one. Should return None if auth is invalid or does not allow access to called API.
The real work happens in Auth0._set_user().
"""
return {"token": token, "method": "apikey"}
| 10,008
|
def xgb_test(
context,
models_path: DataItem,
test_set: DataItem,
label_column: str,
plots_dest: str = "plots",
default_model: str = "model.pkl",
) -> None:
"""Test one or more classifier models against held-out dataset
    Using held-out test features, evaluates the performance of the estimated model
Can be part of a kubeflow pipeline as a test step that is run post EDA and
training/validation cycles
:param context: the function context
:param models_path: model artifact to be tested
:param test_set: test features and labels
:param label_column: column name for ground truth labels
:param plots_dest: dir for test plots
:param default_model: 'model.pkl', default model artifact file name
"""
xtest = test_set.as_df()
ytest = xtest.pop(label_column)
try:
model_file, model_obj, _ = get_model(models_path.url, suffix=".pkl")
model_obj = load(open(model_file, "rb"))
    except Exception as exc:
        raise Exception("model location likely misspecified") from exc
eval_metrics = eval_model_v2(context, xtest, ytest.values, model_obj)
| 10,009
|
def convert_unix2dt(series):
"""
Parameters
----------
series : column from pandas dataframe in UNIX microsecond formatting
Returns
-------
timestamp_dt : series in date-time format
"""
if (len(series) == 1):
unix_s = series/1000
else:
unix_s = series.squeeze()/1000
timestamp_dt = np.zeros(len(unix_s), dtype='datetime64[ms]')
for i in range(len(timestamp_dt)):
timestamp_dt[i] = datetime.fromtimestamp(unix_s.iloc[i])
return timestamp_dt
| 10,010
|
def test_add_invalid_path(qtbot, pathmanager):
"""Checks for unicode on python 2."""
pathmanager.show()
count = pathmanager.count()
def interact_message_box():
qtbot.wait(500)
messagebox = pathmanager.findChild(QMessageBox)
button = messagebox.findChild(QPushButton)
qtbot.mouseClick(button, Qt.LeftButton)
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(interact_message_box)
timer.start(500)
pathmanager.add_path('/foo/bar/測試')
qtbot.wait(500)
# Back to main thread
assert len(pathmanager.get_path_dict()) == 2
assert len(pathmanager.get_path_dict(True)) == 3
| 10,011
|
def main():
"""Program, kok"""
args = parse_args()
if args.bind:
override_socket(args.bind)
try:
check_update()
except Exception as ex:
print("Failed to check for new version:", ex,
file=sys.stderr, flush=True)
from volapi import Room
stat = Statistics()
total_current = 0
try:
print("Starting DoS... ", end="", flush=True)
with Room(args.room, args.user, subscribe=False) as room:
print("done")
if args.passwd:
print("Greenfagging in as {}... ".format(args.user),
end="", flush=True)
room.user.login(args.passwd)
print("done")
files = args.files
if any(f.name == "Thumbs.db" for f in files):
class NotGonnaDoIt(Exception):
"""roboCop, pls"""
pass
raise NotGonnaDoIt("No Thumbs.db for you!")
total_length = sum(f.size for f in files)
print("Pushing attack bytes to mainframe... {:.2f}MB in total".
format(total_length / FAC),
flush=True)
upload_file = partial(upload,
room=room,
block_size=args.block_size,
force_server=args.force_server,
prefix=args.prefix)
for i, file in enumerate(files):
for attempt in range(args.attempts):
try:
nums = dict(item=i + 1, files=len(files),
cur=total_current, total=total_length)
upload_file(file=file, nums=nums)
total_current += file.size
stat.record(total_current)
if args.delete:
try_unlink(file)
# Exit attempt loop
break
except Exception as ex:
print("\nFailed to upload {}: {} (attempt: {})".
format(file, ex, attempt),
file=sys.stderr, flush=True)
time.sleep(attempt * 0.1)
except Exception as ex:
print("\nFailure to fly: {} ({})".format(ex, type(ex)), file=sys.stderr, flush=True)
return 1
except KeyboardInterrupt:
print("\nUser canceled", file=sys.stderr, flush=True)
return 3
finally:
print("All done in {:.2f}secs ({:.2f}MB/s)".
format(stat.runtime, stat.rate))
return 0
| 10,012
|
def staging(new_settings={}):
"""Work on the staging environment"""
load_environ('staging', new_settings)
| 10,013
|
def sentence_segment(text, delimiters=('?', '？', '!', '！', '。', '；', '……', '…'), include_symbols=True):
"""
Sentence segmentation
:param text: query
:param delimiters: set
:param include_symbols: bool
:return: list(word, idx)
"""
result = []
delimiters = set([item for item in delimiters])
    delimiters_str = '|'.join(map(re.escape, delimiters))
blocks = re.split(delimiters_str, text)
start_idx = 0
for blk in blocks:
if not blk:
continue
result.append((blk, start_idx))
start_idx += len(blk)
if include_symbols and start_idx < len(text):
result.append((text[start_idx], start_idx))
start_idx += 1
return result
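# A minimal usage sketch (assumes `import re` at module level): with include_symbols=True,
# every trailing delimiter is kept as its own (symbol, index) tuple, e.g.
# sentence_segment('Hello! How are you?')
# -> [('Hello', 0), ('!', 5), (' How are you', 6), ('?', 18)]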
| 10,014
|
def create_users_table(conn):
"""Create table for users"""
try:
sql = '''CREATE TABLE users (
numCorrect Int,
defaultPath String,
timerDuration Int,
charTimerValue Int,
charBasedTimer Bool,
noTyping Bool,
autoStart Bool,
showCorrectAnswer Bool,
darkMode Bool
)'''
conn.execute(sql)
except sqlite3.Error as err:
print(err)
| 10,015
|
def time_rep_song_to_16th_note_grid(time_rep_song):
"""
Transform the time_rep_song into an array of 16th note with pitches in the onsets
[[60,4],[62,2],[60,2]] -> [60,0,0,0,62,0,60,0]
"""
grid_16th = []
for pair_p_t in time_rep_song:
grid_16th.extend([pair_p_t[0]] + [0 for _ in range(pair_p_t[1]-1)])
return grid_16th
| 10,016
|
def _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True):
"""_double_threshold
Computes a double threshold over the input array
:param x: input array, needs to be 1d
:param high_thres: High threshold over the array
:param low_thres: Low threshold over the array
:param n_connect: Postprocessing, maximal distance between clusters to connect
    :param return_arr: If return_arr is True (the default), returns an array of the same size as x filled with ones and zeros; otherwise returns the filtered index ranges.
"""
assert x.ndim == 1, "Input needs to be 1d"
high_locations = np.where(x > high_thres)[0]
locations = x > low_thres
encoded_pairs = find_contiguous_regions(locations)
filtered_list = list(
filter(
lambda pair:
((pair[0] <= high_locations) & (high_locations <= pair[1])).any(),
encoded_pairs))
filtered_list = connect_(filtered_list, n_connect)
if return_arr:
zero_one_arr = np.zeros_like(x, dtype=int)
for sl in filtered_list:
zero_one_arr[sl[0]:sl[1]] = 1
return zero_one_arr
return filtered_list
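# A hypothetical usage sketch (assumes numpy as np and this module's
# find_contiguous_regions/connect_ helpers): keep only regions that exceed the
# high threshold, extended down to where the signal stays above the low one.
# x = np.array([0.1, 0.4, 0.9, 0.5, 0.2, 0.35, 0.1])
# _double_threshold(x, high_thres=0.8, low_thres=0.3)
# -> roughly array([0, 1, 1, 1, 0, 0, 0]) (exact extent depends on the helpers)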
| 10,017
|
def _convert_dataset(split_name, files, labels, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'test'.
      files: A list of absolute paths to png or jpg images.
      labels: A list of integer class labels, one per image in `files`.
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'test']
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for file_indx in range(len(files)):
output_filename = _get_dataset_filename(
dataset_dir, files[file_indx], split_name)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
sys.stdout.write('\r>> Converting image %d/%d' % (
file_indx, len(files)))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.FastGFile(files[file_indx], 'rb').read()
#image_raw = image_reader.decode_jpeg(sess, image_data)
#height, width = image_raw.shape[0], image_raw.shape[1]
height, width = image_reader.read_image_dims(sess, image_data)
example = dataset_utils.image_to_tfexample(image_data, b'jpeg', height, width, labels[file_indx])
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
| 10,018
|
def upload():
"""
Implements the upload page form
"""
return render_template('upload.html')
| 10,019
|
def detect_changepoints(points, min_time, data_processor=acc_difference):
""" Detects changepoints on points that have at least a specific duration
Args:
points (:obj:`Point`)
        min_time (float): Min time that a sub-segment, bounded by two changepoints, must have
        data_processor (function): Function to extract data to feed to the changepoint algorithm.
            Defaults to `acc_difference`
Returns:
:obj:`list` of int: Indexes of changepoints
"""
data = data_processor(points)
changepoints = pelt(normal_mean(data, np.std(data)), len(data))
changepoints.append(len(points) - 1)
result = []
for start, end in pairwise(changepoints):
time_diff = points[end].time_difference(points[start])
if time_diff > min_time:
result.append(start)
# adds the first point
result.append(0)
# adds the last changepoint detected
result.append(len(points) - 1)
return sorted(list(set(result)))
| 10,020
|
def Sum(idx, *args, **kwargs):
"""Instantiator for an arbitrary indexed sum.
This returns a function that instantiates the appropriate
:class:`QuantumIndexedSum` subclass for a given term expression. It is the
preferred way to "manually" create indexed sum expressions, closely
resembling the normal mathematical notation for sums.
Args:
idx (IdxSym): The index symbol over which the sum runs
args: arguments that describe the values over which `idx` runs,
kwargs: keyword-arguments, used in addition to `args`
Returns:
        callable: an instantiator function that takes an
arbitrary `term` that should generally contain the `idx` symbol, and
returns an indexed sum over that `term` with the index range specified
by the original `args` and `kwargs`.
There is considerable flexibility to specify concise `args` for a variety
of index ranges.
Assume the following setup::
>>> i = IdxSym('i'); j = IdxSym('j')
>>> ket_i = BasisKet(FockIndex(i), hs=0)
>>> ket_j = BasisKet(FockIndex(j), hs=0)
>>> hs0 = LocalSpace('0')
Giving `i` as the only argument will sum over the indices of the basis
states of the Hilbert space of `term`::
>>> s = Sum(i)(ket_i)
>>> unicode(s)
'∑_{i ∈ ℌ₀} |i⟩⁽⁰⁾'
You may also specify a Hilbert space manually::
>>> Sum(i, hs0)(ket_i) == Sum(i, hs=hs0)(ket_i) == s
True
Note that using :func:`Sum` is vastly more readable than the equivalent
"manual" instantiation::
>>> s == KetIndexedSum.create(
... ket_i, ranges=(IndexOverFockSpace(i, hs=hs0),))
True
By nesting calls to `Sum`, you can instantiate sums running over multiple
indices::
>>> unicode( Sum(i)(Sum(j)(ket_i * ket_j.dag())) )
'∑_{i,j ∈ ℌ₀} |i⟩⟨j|⁽⁰⁾'
Giving two integers in addition to the index `i` in `args`, the index will
run between the two values::
>>> unicode( Sum(i, 1, 10)(ket_i) )
'∑_{i=1}^{10} |i⟩⁽⁰⁾'
>>> Sum(i, 1, 10)(ket_i) == Sum(i, 1, to=10)(ket_i)
True
You may also include an optional step width, either as a third integer or
using the `step` keyword argument.
>>> #unicode( Sum(i, 1, 10, step=2)(ket_i) ) # TODO
Lastly, by passing a tuple or list of values, the index will run over all
the elements in that tuple or list::
>>> unicode( Sum(i, (1, 2, 3))(ket_i) )
'∑_{i ∈ {1,2,3}} |i⟩⁽⁰⁾'
"""
from qalgebra.core.hilbert_space_algebra import LocalSpace
from qalgebra.core.scalar_algebra import ScalarValue
from qalgebra.library.spin_algebra import SpinSpace
dispatch_table = {
tuple(): _sum_over_fockspace,
(LocalSpace,): _sum_over_fockspace,
(SpinSpace,): _sum_over_fockspace,
(list,): _sum_over_list,
(tuple,): _sum_over_list,
(int,): _sum_over_range,
(int, int): _sum_over_range,
(int, int, int): _sum_over_range,
}
key = tuple((type(arg) for arg in args))
try:
idx_range_func = dispatch_table[key]
except KeyError:
raise TypeError("No implementation for args of type %s" % str(key))
def sum(term):
if isinstance(term, ScalarValue._val_types):
term = ScalarValue.create(term)
idx_range = idx_range_func(term, idx, *args, **kwargs)
return term._indexed_sum_cls.create(term, ranges=(idx_range,))
return sum
| 10,021
|
def quad1(P):
"""[summary]
Arguments:
P (type): [description]
Returns:
[type]: [description]
"""
x1, z1, x2, z2 = P
return (Fraction(x1, z1) - Fraction(x2, z2))**2
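# A minimal usage sketch (assumes `from fractions import Fraction` at module level):
# quad1((1, 2, 1, 4)) evaluates (1/2 - 1/4)**2, i.e. Fraction(1, 16).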
| 10,022
|
def test_construct_form_fail():
"""Form objects must be constructed from form html elements."""
soup = bs4.BeautifulSoup('<notform>This is not a form</notform>', 'lxml')
tag = soup.find('notform')
assert isinstance(tag, bs4.element.Tag)
with pytest.warns(FutureWarning, match="from a 'notform'"):
mechanicalsoup.Form(tag)
| 10,023
|
async def test_set_hold_mode_eco(opp):
"""Test setting the hold mode eco."""
await common.async_set_preset_mode(opp, PRESET_ECO, ENTITY_ECOBEE)
await opp.async_block_till_done()
state = opp.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_ECO
| 10,024
|
def update_position(position, velocity):
"""
    :param position: position (previous/running) of a particle
:param velocity: the newest velocity that has been calculated during the specific iteration- new velocity is calculated
before the new position
:return: list - new position
"""
pos = []
length = len(position)
for i in range(length):
pos.append(position[i] + velocity[i])
return pos
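# A minimal usage sketch: element-wise addition of velocity to position.
# update_position([1.0, 2.0], [0.5, -0.25]) -> [1.5, 1.75]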
| 10,025
|
def respond_batch():
"""
    Respond with [{"batch": [{blacklist_1_name: true}, ]}]
"""
result = get_result(request)
return jsonify([{"batch": result}])
| 10,026
|
def get_iterable_itemtype(obj):
"""Attempts to get an iterable's itemtype without iterating over it,
not even partly. Note that iterating over an iterable might modify
its inner state, e.g. if it is an iterator.
Note that obj is expected to be an iterable, not a typing.Iterable.
This function leverages various alternative ways to obtain that
info, e.g. by looking for type annotations of '__iter__' or '__getitem__'.
It is intended for (unknown) iterables, where the type cannot be obtained
via sampling without the risk of modifying inner state.
"""
# support further specific iterables on demand
if isinstance(obj, _typechecked_Iterable):
return obj.itemtype
try:
if isinstance(obj, range):
            tpl = (deep_type(obj.start), deep_type(obj.stop), deep_type(obj.step))
return Union[tpl]
except TypeError:
# We're running Python 2
pass
if type(obj) is tuple:
tpl = tuple(deep_type(t) for t in obj)
return Union[tpl]
elif type(obj) is types.GeneratorType:
return get_generator_yield_type(obj)
else:
tp = deep_type(obj)
if is_Generic(tp):
if issubclass(tp.__origin__, Iterable):
if len(tp.__args__) == 1:
return tp.__args__[0]
return _select_Generic_superclass_parameters(tp, Iterable)[0]
if is_iterable(obj):
if type(obj) is str:
return str
if hasattr(obj, '__iter__'):
if has_type_hints(obj.__iter__):
itrator = _funcsigtypes(obj.__iter__, True, obj.__class__)[1]
if is_Generic(itrator) and itrator.__origin__ is _orig_Iterator:
return itrator.__args__[0]
if hasattr(obj, '__getitem__'):
if has_type_hints(obj.__getitem__):
itrator = _funcsigtypes(obj.__getitem__, True, obj.__class__)[1]
if is_Generic(itrator) and itrator.__origin__ is _orig_Iterator:
return itrator.__args__[0]
return None # means that type is unknown
else:
raise TypeError('Not an iterable: '+str(type(obj)))
| 10,027
|
def set_subscription_policy(project, subscription_id):
"""Sets the IAM policy for a topic."""
# [START pubsub_set_subscription_policy]
from google.cloud import pubsub_v1
# TODO(developer)
# project_id = "your-project-id"
# subscription_id = "your-subscription-id"
client = pubsub_v1.SubscriberClient()
subscription_path = client.subscription_path(project, subscription_id)
policy = client.get_iam_policy(subscription_path)
# Add all users as viewers.
policy.bindings.add(role="roles/pubsub.viewer", members=["allUsers"])
# Add a group as an editor.
policy.bindings.add(role="roles/editor", members=["group:cloud-logs@google.com"])
# Set the policy
policy = client.set_iam_policy(subscription_path, policy)
print("IAM policy for subscription {} set: {}".format(subscription_id, policy))
client.close()
# [END pubsub_set_subscription_policy]
| 10,028
|
def getTimerIPs():
"""
    Return a list of IP addresses of the running timer containers.
"""
client = docker.from_env()
container_list = client.containers.list()
timer_ip_list = []
for container in container_list:
if re.search("^timer[1-9][0-9]*", container.name):
out = container.exec_run("awk 'END{print $1}' /etc/hosts", stdout=True)
timer_ip_list.append(out.output.decode().split("\n")[0])
client.close()
return timer_ip_list
| 10,029
|
def analyze_elb_logs(bucket, key):
"""
This is the main function of the script. This function will trigger all the helper functions
In order to:
    1. Get the log file, unzip it, and load the requests.
2. Analyze the requests.
3. Save all bad requests in a database
"""
# Check that the storage type is valid
    if os.environ.get('STORAGE') not in (None, "print", "mysql", "mongodb"):
raise Exception('Please enter a valid storage type: [print, mysql, mongodb]')
# Initialize the AWS S3 Client
s3Client = awsModule.initialize_s3_client()
# Retrieve the log file from S3, and load the requests into a list
requests = awsModule.get_requests_from_file(s3Client, bucket, key)
# Analyze and retrieve the bad requests
badRequests = helpers.analyze_requests(requests)
# Store the requests in a database
helpers.store_bad_requests(badRequests)
| 10,030
|
def country_buttons():
"""Generates the country buttons for the layout
TODO(@andreasfo@gmail.com)
Fix to use this one instead of the dropdown menu
Returns:
dbcButtonGroup -- A button group of all countries
"""
countries = [{'label': '🇸🇪 Sweden',
'value': 'Sweden'
},
{
'label': '🇫🇮 Finland',
'value': 'Finland'
},
{
'label': '🇳🇴 Norway',
'value': 'Norway'
},
{
'label': '🇩🇰 Denmark',
'value': 'Denmark'
},
{
'label': '🇮🇸 Iceland',
'value': 'Iceland'
}]
button_style = {
'padding': '.25rem .5rem',
'font-size': '10px',
'line-height': '1',
'border-radius': '10px',
'height': '25px',
'align-items': 'center',
'justify-content': 'center',
}
buttons = []
for country in countries:
buttons.append(dbc.Button(
country['label'], id=country['value'], style=button_style))
return dbc.ButtonGroup(buttons, id="country_buttons")
| 10,031
|
def BRepBlend_BlendTool_NbSamplesV(*args):
"""
:param S:
:type S: Handle_Adaptor3d_HSurface &
:param v1:
:type v1: float
:param v2:
:type v2: float
:rtype: int
"""
return _BRepBlend.BRepBlend_BlendTool_NbSamplesV(*args)
| 10,032
|
def patch_urllib(monkeypatch, requests_monitor):
"""
Patch urllib to provide the following features:
- Retry failed requests. Makes test runs more stable.
- Track statistics with RequestsMonitor.
Retries could have been implemented differently:
- In test.geocoders.util.GeocoderTestBase._make_request. The issue
is that proxy tests use raw urlopen on the proxy server side,
which will not be covered by _make_request.
- With pytest plugins, such as pytest-rerunfailures. This
might be a good alternative, however, they don't distinguish
between network and test logic failures (the latter shouldn't
be re-run).
"""
def mock_factory(do_open):
def wrapped_do_open(self, conn, req, *args, **kwargs):
requests_monitor.record_request(req)
retries = max_retries
netloc = netloc_from_req(req)
is_proxied = req.host != netloc
if is_proxied or netloc in no_retries_for_hosts:
# XXX If there's a system proxy enabled, the failed requests
# won't be retried at all because of this check.
# We need to disable retries for proxies in order to
# not retry requests to the local proxy server set up in
# tests/proxy_server.py, which breaks request counters
# in tests/test_proxy.py.
# Perhaps we could also check that `req.host` points
# to localhost?
retries = 0
for i in range(retries + 1):
try:
start = default_timer()
resp = do_open(self, conn, req, *args, **kwargs)
end = default_timer()
if i == retries or resp.getcode() not in retry_status_codes:
# Note: we shouldn't blindly retry on any >=400 code,
# because some of them are actually expected in tests
# (like input validation verification).
# TODO Retry failures with the 200 code?
# Some geocoders return failures with 200 code
# (like GoogleV3 for Quota Exceeded).
# Should we detect this somehow to restart such requests?
requests_monitor.record_response(req, resp, end - start)
return resp
except: # noqa
if i == retries:
raise
requests_monitor.record_retry(req)
sleep(error_wait_seconds)
raise RuntimeError("Should not have been reached")
return wrapped_do_open
original_http_do_open = HTTPHandler.do_open
original_https_do_open = HTTPSHandler.do_open
    monkeypatch.setattr(HTTPHandler, "do_open", mock_factory(original_http_do_open))
    monkeypatch.setattr(HTTPSHandler, "do_open", mock_factory(original_https_do_open))
| 10,033
|
def get_response(session, viewstate, event_validation, event_target, outro=None, stream=False, hdfExport=''):
"""
Handles all the responses received from every request made to the website.
"""
url = "http://www.ssp.sp.gov.br/transparenciassp/"
data = [
('__EVENTTARGET', event_target),
('__EVENTARGUMENT', ''),
('__VIEWSTATE', viewstate),
('__EVENTVALIDATION', event_validation),
('ctl00$cphBody$hdfExport', hdfExport),
]
if outro:
data.append(('ctl00$cphBody$filtroDepartamento', '0'))
data.append(('__LASTFOCUS', ''))
response = session.post(url, headers=headers, data=data, stream=stream)
return response
| 10,034
|
def test_permission_dependency_returns_requested_resource(mocker):
""" If a user has a permission, the resource should be returned """
mocker.patch("fastapi_permissions.has_permission", return_value=True)
mocker.patch("fastapi_permissions.Depends")
from fastapi_permissions import Depends, permission_dependency_factory
# since the resulting permission function is wrapped in Depends()
# we need to extract it from the mock
permission_dependency_factory(
"view",
dummy_resource_callable,
"active_principals_func",
"permisssion_exception",
)
assert Depends.call_count == 2
args, kwargs = Depends.call_args_list[1]
permission_func = args[0]
result = permission_func()
assert result == Depends(dummy_resource_callable)
| 10,035
|
def validate_method(method, version):
"""Confirm that the specified method / version combination is available and exit accordingly"""
try:
get_predictor(method, version)
except PredictorCreationException:
sys.exit(1)
sys.exit(0)
| 10,036
|
def write_summary_to_aiplatform(metric_tag, destdir, metric):
"""Write a metric as a TF Summary.
AI Platform will use this metric for hyperparameters tuning.
Args:
metric_tag: A string with the tag used for hypertuning.
destdir: Destination path for the metric to be written.
metric: Value of the metric to be written.
"""
summary = Summary(value=[Summary.Value(tag=metric_tag,
simple_value=metric)])
eval_path = os.path.join(destdir, metric_tag)
LOGGER.info("Writing metrics to %s" % eval_path)
summary_writer = tf.summary.FileWriter(eval_path)
summary_writer.add_summary(summary)
summary_writer.flush()
| 10,037
|
def matchVuln(vuln, element, criteria):
"""
================================================================================
Name:
matchVuln
Description:
Sets the finding details of a given VULN.
Parameter(s):
vuln: The VULN element to be searched.
element: The element to find.
criteria: The search criteria against which to match.
Returns:
True: If a match is found.
False: If a match is not found.
Notes:
N/A
================================================================================
"""
if (getVulnElementValue(vuln, element) == criteria): return True
return False
| 10,038
|
def require(*modules):
"""Check if the given modules are already available; if not add them to
the dependency list."""
deplist = []
for module in modules:
try:
__import__(module)
except ImportError:
deplist.append(module)
return deplist
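# A hypothetical usage sketch: build a setup()-style install_requires list from
# whichever modules are missing in the current environment.
# missing = require('numpy', 'definitely_not_installed_pkg')
# -> ['definitely_not_installed_pkg'] when numpy is importable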
| 10,039
|
def download_weekly_deaths_numbers_rki(data_path):
"""!Downloads excel file from RKI webpage
@param data_path Path where to store the file.
"""
name_file = "RKI_deaths_weekly.xlsx"
url = "https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Projekte_RKI/" \
"COVID-19_Todesfaelle.xlsx?__blob=publicationFile"
    # data_path: path where to save the Excel file
r = requests.get(url)
filename = os.path.join(data_path, name_file)
with open(filename, 'wb') as output_file:
output_file.write(r.content)
| 10,040
|
def test_degree_centrality():
"""
The following test checks the degree centrality before any perturbation.
"""
g = GeneralGraph()
g.load("tests/TOY_graph.csv")
g.check_before()
g.degree_centrality()
degree_centrality = {
'1': 0.1111111111111111,
'2': 0.1111111111111111,
'3': 0.1111111111111111,
'4': 0.1111111111111111,
'5': 0.1111111111111111,
'6': 0.2777777777777778,
'7': 0.1111111111111111,
'8': 0.16666666666666666,
'9': 0.16666666666666666,
'10': 0.1111111111111111,
'11': 0.16666666666666666,
'12': 0.2222222222222222,
'13': 0.2222222222222222,
'14': 0.2777777777777778,
'15': 0.05555555555555555,
'16': 0.16666666666666666,
'17': 0.16666666666666666,
'18': 0.05555555555555555,
'19': 0.2777777777777778
}
g_degree_centrality = nx.get_node_attributes(g, 'degree_centrality')
np.testing.assert_array_almost_equal(
np.asarray(sorted(degree_centrality.values())),
np.asarray(sorted(g_degree_centrality.values())),
err_msg="DEGREE CENTRALITY failure")
| 10,041
|
def fixture_base_context(
env_name: str,
) -> dict:
"""Return a basic context"""
ctx = dict(
current_user="a_user",
current_host="a_host",
)
return ctx
| 10,042
|
def parse_pharmvar(fn):
"""
Parse PharmVar gene data.
Parameters
----------
fn : str
Gene data directory.
"""
gene = os.path.basename(fn).split('-')[0]
rs_dict = {}
vfs = {'GRCh37': [], 'GRCh38': []}
alleles = {}
for i, assembly in enumerate(['GRCh37', 'GRCh38']):
for r, d, f in os.walk(f'{fn}/{assembly}'):
for file in f:
if file.endswith('.tsv'):
df = pd.read_table(f'{r}/{file}', comment='#')
if file.endswith('.vcf'):
vf = pyvcf.VcfFrame.from_file(f'{r}/{file}')
vfs[assembly].append(vf)
chrom = vfs['GRCh37'][0].contigs[0]
for j, r in df.iterrows():
name = r['Haplotype Name'].replace(gene, '')
if name not in alleles:
alleles[name] = [[], []]
if pd.isna(r['Variant Allele']):
continue
variant = f"{chrom}-{r['Variant Start']}-{r['Reference Allele']}-{r['Variant Allele']}"
rs_dict[variant] = r['rsID']
alleles[name][i].append(variant)
variants = {'GRCh37': {}, 'GRCh38': {}}
for name in alleles:
for i, assembly in enumerate(['GRCh37', 'GRCh38']):
for variant in alleles[name][i]:
if variant not in variants[assembly]:
variants[assembly][variant] = []
if name not in variants[assembly][variant]:
variants[assembly][variant].append(name)
for name in alleles:
alleles[name] = [','.join(alleles[name][0]), ','.join(alleles[name][1])]
df1 = pd.DataFrame(alleles).T
df1.columns = ['GRCh37', 'GRCh38']
df1 = df1.replace('', 'N/A')
df1.to_csv(f'{gene}-allele-table.csv')
def func(r):
if len(r.REF) == len(r.ALT) == 1:
return f'{r.CHROM}-{r.POS}-{r.REF}-{r.ALT}'
elif len(r.REF) == 1 and len(r.ALT) > 1:
return f'{r.CHROM}-{r.POS}---{r.ALT[1:]}'
elif len(r.REF) > 1 and len(r.ALT) == 1:
return f'{r.CHROM}-{r.POS+1}-{r.REF[1:]}--'
else:
raise ValueError('Something went wrong')
for assembly in ['GRCh37', 'GRCh38']:
df2 = pyvcf.merge(vfs[assembly]).update_chr_prefix(mode='remove').df
df2['Name'] = df2.apply(func, axis=1)
df2['Alleles'] = df2.apply(lambda r: ','.join(variants[assembly][r.Name]), axis=1)
df2['rsID'] = df2.apply(lambda r: rs_dict[r.Name], axis=1)
df2.to_csv(f'{gene}-{assembly}.csv')
| 10,043
|
def test_index():
"""Test if you can mask out periodogram
"""
lc = LightCurve(time=np.arange(1000), flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000)+0.1)
p = lc.to_periodogram()
mask = (p.frequency > 0.1*(1/u.day)) & (p.frequency < 0.2*(1/u.day))
assert len(p[mask].frequency) == mask.sum()
| 10,044
|
def definition(server: KedroLanguageServer, params: TextDocumentPositionParams) -> Optional[List[Location]]:
"""Support Goto Definition for a dataset or parameter.
Currently only support catalog defined in `conf/base`
"""
if not server.is_kedro_project():
return None
document = server.workspace.get_document(params.text_document.uri)
word = _word_at_position(params.position, document)
if word.startswith("params:"):
param_location = _get_param_location(server.project_metadata, word)
if param_location:
return [param_location]
catalog_paths = get_conf_paths(server.project_metadata)
for catalog_path in catalog_paths:
catalog_conf = yaml.load(catalog_path.read_text(), Loader=SafeLineLoader)
if word in catalog_conf:
line = catalog_conf[word]["__line__"]
location = Location(
uri=f"file://{catalog_path}",
range=Range(
start=Position(line=line - 1, character=0),
end=Position(
line=line,
character=0,
),
),
)
return [location]
return None
| 10,045
|
def augmented_neighbors_list(q_id,
neighbors,
is_training,
processor,
train_eval=False):
"""Retrieve and convert the neighbors to a list.
Args:
q_id: a question id
neighbors: a table mapping q_id to a list of top candidates
is_training: True for training set examples
processor: Helper object
train_eval: If this is on, we have a sub-set of the training set for which
we don't add the gold answer if it is not in the neighbors list
Returns:
lists of passage ids, list of corresponding labels, list of scores,
and the index of the first random negative
"""
n_pb = neighbors[q_id]
n_list = []
n_labels = []
n_scores = [] # the higher, the better
n_positive = 0
answers = processor.get_answers(q_id)
for n in range(len(n_pb)):
if n >= FLAGS.max_neighbors:
break # ignore any later neighbors
next_n = n_pb[n]
if processor.answer_match(q_id, next_n[0], answers):
n_list.append(next_n[0])
n_labels.append(1)
n_scores.append(-next_n[1])
n_positive += 1
else:
# see if we keep it
n_list.append(next_n[0])
n_labels.append(0)
n_scores.append(-next_n[1])
if not n_positive:
if (is_training or FLAGS.add_gold_to_eval):
gold_p_id = processor.get_gold_passage_id(q_id)
if gold_p_id is None and is_training:
print("Did not find answer matches.")
return [], [], [], 0
if gold_p_id is not None:
n_list.append(gold_p_id)
n_labels.append(1)
prior_gold = 0
n_scores.append(prior_gold)
n_positive += 1
else:
if is_training:
print("Did not find answer matches.")
return [], [], [], 0
# add the same number of random examples as we have neighbors
# we should add about
# (FLAGS.num_candidates -1) * FLAGS. train_records_per_query/2 random
index_rand_start = len(n_list)
num_random = index_rand_start
if is_training and not train_eval: # getting fewer random for speed
num_random = (int)(
(FLAGS.num_candidates - 1) * FLAGS.train_records_per_query / 2)
if FLAGS.add_random:
random_passages = processor.get_random(num_random)
random_labels = []
random_scores = [0] * num_random
for r in range(len(random_passages)):
n_scores.append(random_scores[r])
if processor.answer_match(q_id, random_passages[r], answers):
random_labels.append(1)
else:
random_labels.append(0)
n_list.extend(random_passages)
n_labels.extend(random_labels)
return n_list, n_labels, n_scores, index_rand_start
| 10,046
|
def estimate_tau_exp(chains, **kwargs):
"""
Estimate the exponential auto-correlation time for all parameters in a chain.
"""
# Calculate the normalised autocorrelation function in each parameter.
rho = np.nan * np.ones(chains.shape[1:])
for i in range(chains.shape[2]):
try:
rho[:, i] = autocorr.function(np.mean(chains[:, :, i], axis=0),
**kwargs)
except:
continue
# Take the max rho at any step.
rho_max = np.max(rho, axis=1)
# Now fit the max rho with an exponential profile.
x = np.arange(rho_max.size)
func = lambda tau_exp: np.exp(-x/tau_exp)
chi = lambda tau_exp: func(tau_exp[0]) - rho_max # tau_exp is a list
# Start with 50% of the chain length. probably OK.
tau_exp, ier = leastsq(chi, [chains.shape[1]/2.])
return (tau_exp, rho, func(tau_exp))
| 10,047
|
def setobjattr(obj, key, value, set_obj=None):
"""Sets an object attribute with the correct data type."""
key = inflection.underscore(key)
if set_obj:
setattr(obj, key, set_obj(value))
else:
if isinstance(value, bool):
setattr(obj, key, bool(value))
else:
try:
setattr(obj, key, int(value))
except ValueError:
try:
setattr(obj, key, float(value))
except ValueError:
try:
if 'ordinal' not in key:
setattr(obj, key, parser.parse(value))
else:
try:
setattr(obj, key, str(value))
except UnicodeEncodeError:
setattr(obj, key, value)
except (TypeError, ValueError):
try:
setattr(obj, key, str(value))
except UnicodeEncodeError:
setattr(obj, key, value)
| 10,048
|
def lda_model_onepass(dictionary, corpus, topics):
"""Create a single pass LDA model"""
start_time = time.time()
model = LdaMulticore(corpus, id2word = dictionary, num_topics = topics)
model.save(""./data/lda/all_topics_single.lda"")
print(model.print_topics(-1))
print("\nDone in {}".format(time.time() - start_time))
return model
| 10,049
|
def install(ctx: Context, requirement: Optional[str]):
"""Install the dependencies."""
_try_to_load_agent_config(ctx)
if requirement:
logger.debug("Installing the dependencies in '{}'...".format(requirement))
dependencies = list(map(lambda x: x.strip(), open(requirement).readlines()))
else:
logger.debug("Installing all the dependencies...")
dependencies = ctx.get_dependencies()
for d in dependencies:
logger.info("Installing {}...".format(d))
try:
subp = subprocess.Popen([sys.executable, "-m", "pip", "install", d])
subp.wait(30.0)
assert subp.returncode == 0
except Exception:
logger.error("An error occurred while installing {}. Stopping...".format(d))
sys.exit(1)
| 10,050
|
def test_magic_len():
"""Test the magic function __len__."""
signal = Signal([1, 2, 3], 44100)
assert len(signal) == 3
| 10,051
|
def paramclass(cls: type) -> type:
""" Parameter-Class Creation Decorator
Transforms a class-definition full of Params into a type-validated dataclass,
with methods for default value and description-dictionary retrieval.
Hdl21's `paramclass`es are immutable, strongly-typed data-storage structures.
They are defined through a syntax similar to `@dataclass`, but using the `Param`
constructor, and assignment rather than type annotation.
@paramclass
class C:
reqd = Param(dtype=int, desc="A Required Parameter")
optn = Param(dtype=int, desc="An Optional Parameter", default=11)
`Param`s each have required datatype (`dtype`) and description (`desc`) fields,
and optional default values.
Each `paramclass` constructor can be called with ordered arguments,
in the order defined in the `paramclass`, or with named arguments.
Named arguments are highly recommended for more than a single parameter.
Note Python's function-argument ordering requirements also dictate
that all `paramclass` required-arguments be declared *before* any optional arguments.
This also reinforces good practice for communicating which parameters are required.
Each `paramclass` comes with class-methods `descriptions` and `defaults`,
which return dictionaries of the parameter names to descriptions and
names to default values (for those with defaults), respectively.
Requirements of the input `cls`:
* *All* non-Python-internal fields must be of type `Param`
* Inheritance is not supported
"""
if cls.__bases__ != (object,):
raise RuntimeError(f"Invalid @hdl21.paramclass inheriting from {cls.__bases__}")
protected_names = ["descriptions", "defaults"]
dunders = dict()
params = dict()
# Take a lap through the class dictionary, type-check everything and grab Params
for key, val in cls.__dict__.items():
if key in protected_names:
raise RuntimeError(f"Invalid field name {key} in paramclass {cls}")
elif key.startswith("__"):
dunders[key] = val
elif isinstance(val, Param):
params[key] = val
else:
raise RuntimeError(
f"Invalid class-attribute {key} in paramclass {cls}. All attributes should be `hdl21.Param`s."
)
# Translate the Params into dataclass.field-compatible tuples
fields = list()
for name, par in params.items():
field = [name, par.dtype]
if par.default is not _default:
field.append(dataclasses.field(default=par.default))
# Default factories: not supported, yet. See `Param` below.
# elif par.default_factory is not _default:
# field.append(dataclasses.field(default_factory=par.default_factory))
fields.append(tuple(field))
# Add a few helpers to the class namespace
ns = dict(
__params__=params,
__paramclass__=True,
descriptions=classmethod(
lambda cls: {k: v.desc for k, v in cls.__params__.items()}
),
defaults=classmethod(
lambda cls: {
k: v.default
for k, v in cls.__params__.items()
if v.default is not _default
}
),
)
# Create ourselves a (std-lib) dataclass
cls = dataclasses.make_dataclass(cls.__name__, fields, namespace=ns, frozen=True)
# Pass this through the pydantic dataclass-decorator-function
cls = pydantic.dataclasses.dataclass(cls, frozen=True)
# Pydantic seems to want to add this one *after* class-creation
def _brick_subclassing_(cls, *_, **__):
msg = f"Error: attempt to sub-class `hdl21.paramclass` {cls} is not supported"
raise RuntimeError(msg)
cls.__init_subclass__ = classmethod(_brick_subclassing_)
# And don't forget to return it!
return cls
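# A hypothetical usage sketch, following the docstring's example class `C`:
# c = C(reqd=42)              # `optn` falls back to its default of 11
# C.defaults()                # -> {'optn': 11}
# C.descriptions()['reqd']    # -> 'A Required Parameter'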
| 10,052
|
def text_process(mess):
"""
Takes in a string of text, then performs the following:
1. Remove all punctuation
2. Remove all stopwords
3. Returns a list of the cleaned text
"""
# Check characters to see if they are in punctuation
nopunc = [char for char in mess if char not in string.punctuation]
# Join the characters again to form the string.
nopunc = ''.join(nopunc)
# Now just remove any stopwords
words = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
words = [word for word in words if word.lower() not in pills['BrandName'].values]
# words = [word for word in words if word.lower() not in pills['ChemName'].values]
words = [word.lower() for word in words if word.isalpha()]
words = [word.lower() for word in words if len(word) > 2]
return words
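# A hypothetical usage sketch (assumes the NLTK stopword corpus is downloaded and
# the module-level `pills` DataFrame with a 'BrandName' column exists):
# text_process("Take two tablets, twice daily!")
# -> a lowercased token list with punctuation, stopwords and brand names removed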
| 10,053
|
def IntCurveSurface_ThePolyhedronToolOfHInter_IsOnBound(*args):
"""
:param thePolyh:
:type thePolyh: IntCurveSurface_ThePolyhedronOfHInter &
:param Index1:
:type Index1: int
:param Index2:
:type Index2: int
:rtype: bool
"""
return _IntCurveSurface.IntCurveSurface_ThePolyhedronToolOfHInter_IsOnBound(*args)
| 10,054
|
def read_mrc_like_matlab(mrc_file):
""" Read MRC stack and make sure stack is 'Fortran indexed' before returning it. """
mrc_stack = mrcfile.open(mrc_file).data
fortran_indexed_stack = c_to_fortran(mrc_stack)
return fortran_indexed_stack
| 10,055
|
def freeze_session(session: tf.Session,
keep_var_names: List[str] = None,
output_names: List[str] = None,
clear_devices: bool = True) -> tf.GraphDef:
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
:param session: The TensorFlow session to be frozen.
:param keep_var_names: A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
:param output_names: Names of the relevant graph outputs.
:param clear_devices: Remove the device directives from the graph for better
portability.
:return The frozen graph definition.
"""
graph = session.graph
with graph.as_default():
freeze_var_names = list(
set(v.op.name for v in tf.global_variables()).difference(
keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = tf.graph_util.convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
return frozen_graph
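# A hypothetical usage sketch (TF1-style API, matching the function body; the
# output name "logits" is only illustrative):
# with tf.Session() as sess:
#     ...  # build and train the graph
#     graph_def = freeze_session(sess, output_names=["logits"])
#     tf.train.write_graph(graph_def, "/tmp", "frozen.pb", as_text=False)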
| 10,056
|
async def test_hassio_dont_update_instance(hass):
"""Test we can update an existing config entry."""
entry = MockConfigEntry(
domain=config_flow.DOMAIN,
data={config_flow.CONF_BRIDGEID: "id", config_flow.CONF_HOST: "1.2.3.4"},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
data={config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_SERIAL: "id"},
context={"source": "hassio"},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
| 10,057
|
def preprocess(songs_path = SONGS_PATH):
"""
1. Loads the songs
2. Encode each song with a music time series representation
3. Save songs to text file
Arguments:
songs_path (str): path of the songs to be loaded
Returns:
None
"""
# 1. Loads the songs
print("Loading songs...")
file_name_and_midi_files = load_songs(songs_path)
quantity_songs = len(file_name_and_midi_files)
max_digits = len(str(quantity_songs))
# Enumerate song one by one, indexing by i
saved_songs = 0
for i, songName_midiFile in enumerate(file_name_and_midi_files):
# retrieve the info from the tuple
song_name, midi_file = songName_midiFile
# 2. Encode each song with music time series representation
list_indices = midiToIdxs(midi_file)
string_indices = ",".join(map(str, list_indices))
# 3. Save encoded song to a text file
encoded_file_name = "encoded song " + str(i).zfill(max_digits) + " " + song_name
create_plain_file(ENCODED_SONGS_FOLDER_PATH, encoded_file_name, string_indices, "txt")
# Update counter
saved_songs += 1
print(f"\nNumber of encoded songs: {saved_songs}")
| 10,058
|
def linear_regression_noreg(X, y):
"""
Compute the weight parameter given X and y.
Inputs:
- X: A numpy array of shape (num_samples, D) containing feature.
- y: A numpy array of shape (num_samples, ) containing label
Returns:
- w: a numpy array of shape (D, )
"""
#####################################################
# TODO 2: Fill in your code here #
#####################################################
tmp_mult_x = np.matmul(np.linalg.inv(np.matmul(X.transpose(),X)),X.transpose())
if len(tmp_mult_x.shape) == 1:
xr = 1
xc = tmp_mult_x.shape[0]
else:
xr,xc = tmp_mult_x.shape
if len(y.shape) == 1:
yr = 1
yc = y.shape[0]
else:
yr,yc = y.shape
# if X, y both right
if xc == yr:
return np.matmul(tmp_mult_x,y)
    # if X is not right and y right
    elif xr == yr:
        tmp_mult_x = tmp_mult_x.transpose()
    # if X is right and y is wrong
    elif xc == yc:
        y = y.transpose()
    # if X and y is both wrong
    elif xr == yc:
        tmp_mult_x = tmp_mult_x.transpose()
        y = y.transpose()
return np.matmul(tmp_mult_x,y)
# w = None
# return w
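# A minimal usage sketch (assumes numpy as np): solve w for y ≈ X @ w.
# X = np.array([[1., 0.], [0., 1.], [1., 1.]])
# y = np.array([1., 2., 3.])
# linear_regression_noreg(X, y)  # -> array([1., 2.])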
| 10,059
|
def _update_core_rrds(data, core_metrics_dir, rrdclient, step, sys_maj_min):
"""Update core rrds"""
interval = int(step) * 2
total = 0
for cgrp in data:
rrd_basename = CORE_RRDS[cgrp]
rrdfile = os.path.join(core_metrics_dir, rrd_basename)
rrd.prepare(rrdclient, rrdfile, step, interval)
if rrd.update(rrdclient, rrdfile, data[cgrp], sys_maj_min):
total += 1
return total
| 10,060
|
def evaluation(evaluators, dataset, runners, execution_results, result_data):
"""Evaluate the model outputs.
Args:
evaluators: List of tuples of series and evaluation functions.
dataset: Dataset against which the evaluation is done.
runners: List of runners (contains series ids and loss names).
execution_results: Execution results that include the loss values.
result_data: Dictionary from series names to list of outputs.
Returns:
Dictionary of evaluation names and their values which includes the
metrics applied on respective series loss and loss values from the run.
"""
eval_result = {}
# losses
for runner, result in zip(runners, execution_results):
for name, value in zip(runner.loss_names, result.losses):
eval_result["{}/{}".format(runner.output_series, name)] = value
# evaluation metrics
for generated_id, dataset_id, function in evaluators:
if (not dataset.has_series(dataset_id)
or generated_id not in result_data):
continue
desired_output = dataset.get_series(dataset_id)
model_output = result_data[generated_id]
eval_result["{}/{}".format(generated_id, function.name)] = function(
model_output, desired_output)
return eval_result
| 10,061
|
def bestof(reps, func, *args, **kwargs):
"""Quickest func() among reps runs.
Returns (best time, last result)
"""
best = 2 ** 32
for i in range(reps):
start = timer()
ret = func(*args, **kwargs)
elapsed = timer() - start
if elapsed < best: best = elapsed
return (best, ret)
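# A minimal usage sketch (assumes `timer` is something like
# timeit.default_timer, as the function body implies):
# best_time, result = bestof(5, sorted, range(10000, 0, -1))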
| 10,062
|
def gdal_aspect_analysis(dem, output=None, flat_values_are_zero=False):
"""Return the aspect of the terrain from the DEM.
The aspect is the compass direction of the steepest slope (0: North, 90: East, 180: South, 270: West).
Parameters
----------
dem : str
Path to file storing DEM.
output : str
Path to output file.
flat_values_are_zero: bool
      If True, designate flat areas with value zero; otherwise they are assigned the nodata value -9999. Default: False.
Returns
-------
ndarray
Aspect array.
Notes
-----
Ensure that the DEM is in a *projected coordinate*, not a geographic coordinate system, so that the
horizontal scale is the same as the vertical scale (m).
"""
if output is None:
output = tempfile.NamedTemporaryFile().name
DEMProcessing(destName=output, srcDS=dem, processing='aspect', zeroForFlat=flat_values_are_zero,
format='GTiff', band=1, creationOptions=[GDAL_TIFF_COMPRESSION_OPTION, ])
with rasterio.open(output) as src:
return np.ma.masked_values(src.read(1), value=-9999)
| 10,063
|
def _handle_braze_response(response: requests.Response) -> int:
"""Handles server response from Braze API.
The amount of requests made is well
below the limits for the given API endpoint therefore Too Many Requests
API errors are not expected. In case they do, however, occur - the API
calls will be re-tried, up to `MAX_API_RETRIES`, using exponential delay.
In case of a server error, the same strategy will be applied. After max
retries have been reached, the execution will terminate.
In case users were posted but there were minor mistakes, the errors will be
logged. In case the API received data in an unexpected format, the data
that caused the issue will be logged.
In any unexpected client API error (other than 400), the function execution
will terminate.
:param response: Response from the API
:return: Number of users that resulted in an error
:raise APIRetryError: On a 429 or 500 server error
:raise FatalAPIError: After `MAX_API_RETRIES` unsuccessful retries, or on
any non-400 client error
"""
res_text = json.loads(response.text)
if response.status_code == 201 and 'errors' in res_text:
print(
f"Encountered errors processing some users: {res_text['errors']}")
return len(res_text['errors'])
if response.status_code == 400:
print(f"Encountered error for user chunk. {response.text}")
return 0
server_error = response.status_code == 429 or response.status_code >= 500
if server_error:
raise APIRetryError("Server error. Retrying..")
if response.status_code > 400:
raise FatalAPIError(res_text.get('message', response.text))
return 0
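# A hypothetical caller sketch showing how the retry contract above might be
# consumed (MAX_API_RETRIES and the exception classes are assumed to live
# elsewhere in this module):
# for attempt in range(MAX_API_RETRIES):
#     try:
#         error_count = _handle_braze_response(response)
#         break
#     except APIRetryError:
#         time.sleep(2 ** attempt)  # exponential delay between retries
# else:
#     raise FatalAPIError("Max API retries reached")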
| 10,064
|
def unpack_file(filepath, tmpdir):
"""
Attempt to unpack file.
filepath is the path to the file that should be attempted unpacked.
tmpdir is a path to a temporary directory unique to this thread where
the thread will attempt to unpack files to.
Returns a list of unpacked files or an empty list.
"""
# Other unpacking tools have been removed due to
# lacking reliability and usefulness of the tools.
# If multiple unpacking tools are to be used here,
# subdirectories below tmpdir should be created for each
# tool to avoid tools overwriting output of each other.
# Attempt static unpacking with ClamAV. Return unpacked files.
return clam_unpack(filepath, tmpdir)
| 10,065
|
def test_refunds_create():
"""Should refund a charge"""
amount = 1000
email = "test-pymango@example.org"
charge = mango.Charges.create(amount=amount, email=email, token=_create_token())
ok_(charge)
charge_uid = charge.get("uid")
ok_(charge_uid)
refund = mango.Refunds.create(charge=charge_uid)
ok_(refund)
eq_(refund.get("charge"), charge_uid)
| 10,066
|
def xarray_image_as_png(img_data, loop_over=None, animate=False, frame_duration=1000):
"""
Render an Xarray image as a PNG.
:param img_data: An xarray dataset, containing 3 or 4 uint8 variables: red, greed, blue, and optionally alpha.
:param loop_over: Optional name of a dimension on img_data. If set, xarray_image_as_png is called in a loop
over all coordinate values for the named dimension.
:param animate: Optional generate animated PNG
:return: A list of bytes representing a PNG image file. (Or a list of lists of bytes, if loop_over was set.)
"""
if loop_over and not animate:
return [
xarray_image_as_png(img_data.sel(**{loop_over: coord}))
for coord in img_data.coords[loop_over].values
]
xcoord = None
ycoord = None
for cc in ("x", "longitude", "Longitude", "long", "lon"):
if cc in img_data.coords:
xcoord = cc
break
for cc in ("y", "latitude", "Latitude", "lat"):
if cc in img_data.coords:
ycoord = cc
break
if not xcoord or not ycoord:
raise Exception("Could not identify spatial coordinates")
width = len(img_data.coords[xcoord])
height = len(img_data.coords[ycoord])
img_io = BytesIO()
# Render XArray to APNG via Pillow
# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#apng-sequences
if loop_over and animate:
time_slices_array = [
xarray_image_as_png(img_data.sel(**{loop_over: coord}), animate=True)
for coord in img_data.coords[loop_over].values
]
images = []
for t_slice in time_slices_array:
im = Image.fromarray(t_slice, "RGBA")
images.append(im)
images[0].save(img_io, "PNG", save_all=True, default_image=True, loop=0, duration=frame_duration, append_images=images)
img_io.seek(0)
return img_io.read()
if "time" in img_data.dims:
img_data = img_data.squeeze(dim="time", drop=True)
pillow_data = render_frame(img_data.transpose(xcoord, ycoord), width, height)
if not loop_over and animate:
return pillow_data
# Change PNG rendering to Pillow
im_final = Image.fromarray(pillow_data, "RGBA")
im_final.save(img_io, "PNG")
img_io.seek(0)
return img_io.read()
| 10,067
|
def x_section_from_latlon(elevation_file,
x_section_lat0,
x_section_lon0,
x_section_lat1,
x_section_lon1,
as_polygon=False,
auto_clean=False):
"""
This workflow extracts a cross section from a DEM
based on the input latitude and longitude point pairs.
Parameters:
-----------
elevation_file: str
Path to the elevation DEM.
    x_section_lat0: float
        The first coordinate latitude.
    x_section_lon0: float
        The first coordinate longitude.
    x_section_lat1: float
        The second coordinate latitude.
    x_section_lon1: float
        The second coordinate longitude.
as_polygon: bool, optional
If True, will return cross section as a
:obj:`shapely.geometry.Polygon`. Default is False.
auto_clean: bool, optional
If True, will attempt to clean any issues from the polygon.
Default is False.
Returns:
--------
list or :obj:`shapely.geometry.Polygon`
Cross section information.
The list will be xy coordinate pairs.
Example::
from shapely.geometry import Polygon
from xman.xsect import x_section_from_latlon
elevation_file = '/path/to/elevation.tif'
lat1 = 34.105265417341442
lon1 = 38.993958690587505
lat2 = 34.107264451129197
        lon2 = 38.99355588515526
x_sect_list = x_section_from_latlon(elevation_file,
lat1,
lon1,
lat2,
lon2)
"""
utm_proj = utm_proj_from_latlon(x_section_lat0, x_section_lon0,
as_osr=True)
sp_ref = osr.SpatialReference()
sp_ref.ImportFromEPSG(4326)
geo_to_utm_trans = osr.CoordinateTransformation(sp_ref, utm_proj)
x_line_m = LineString((
geo_to_utm_trans.TransformPoint(x_section_lon0, x_section_lat0)[:2],
geo_to_utm_trans.TransformPoint(x_section_lon1, x_section_lat1)[:2]
))
elevation_utm_ggrid = GDALGrid(elevation_file).to_projection(utm_proj)
x_sect_list = []
for x_step in np.linspace(0, x_line_m.length, num=20):
x_point = x_line_m.interpolate(x_step)
x_sect_list.append((
x_step, elevation_utm_ggrid.get_val_coord(x_point.x, x_point.y)
))
if as_polygon or auto_clean:
x_sect_poly = Polygon(x_sect_list)
if not x_sect_poly.is_valid and auto_clean:
x_sect_poly = x_sect_poly.buffer(0)
print("WARNING: Cross section cleaned up.")
if hasattr(x_sect_poly, 'geoms'):
if len(x_sect_poly.geoms) > 1:
largest_poly = x_sect_poly.geoms[0]
for geom_poly in x_sect_poly.geoms[1:]:
if geom_poly.area > largest_poly.area:
largest_poly = geom_poly
x_sect_poly = largest_poly
if as_polygon:
return x_sect_poly
x_coords, y_coords = x_sect_poly.exterior.coords.xy
return list(zip(x_coords, y_coords))
return x_sect_list
| 10,068
|
def convert_not_inline(line):
""" Convert the rest of part which are not inline code but might impact inline code
This part will dealing with following markdown syntax
- strong
- scratch
- italics
- image
- link
- checkbox
- highlight
:param line: str, the not inline code part of markdown
:return: str, the html format
"""
# deal with strong
line = strong(line)
# Scratch
line = scratch(line)
# italics
line = italics(line)
# highlight
line = highlight(line)
# image
while len(re.match(r'((?P<pre_text>.*)!\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line).group()) \
!= 0:
match = re.match(r'((?P<pre_text>.*)!\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line)
pre_text = match.group('pre_text')
alt_text = match.group('alt_text')
link = match.group('link')
after_text = match.group('after_text')
# scale image
if len(re.match(r'((?P<pre_link>.*)#scale=(?P<scale>[0-9]*))*', link).group()) != 0:
match_scale = re.match(r'((?P<pre_link>.*)#scale=(?P<scale>[0-9]*))*', link)
scale = match_scale.group('scale')
img_html = '<img style="display: block; margin-left: auto; margin-right: auto; height:' + str(scale) + '%" src="' + link + '" alt="' + alt_text + '">'
else:
img_html = '<img style="display: block; margin-left: auto; margin-right: auto;" src="' + link + '" alt="' + alt_text + '">'
line = pre_text + img_html + after_text
# link
while len(re.match(r'((?P<pre_text>.*)\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line).group()) \
!= 0:
match = re.match(r'((?P<pre_text>.*)\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line)
pre_text = match.group('pre_text')
alt_text = match.group('alt_text')
link = match.group('link')
if len(link) != 0 and link[0] == '#':
link = link.replace(' ', '-')
after_text = match.group('after_text')
img_html = '<a href="' + link + '">' + alt_text + '</a>'
line = pre_text + img_html + after_text
return line
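# Hedged usage sketch: convert a single markdown line to HTML. Assumes the helper
# functions strong/scratch/italics/highlight used above are defined in this module;
# the input line is made up.
# example = 'A **bold** word and a [docs link](https://example.com).'
# html = convert_not_inline(example)
# The link part becomes '<a href="https://example.com">docs link</a>' in the output.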
| 10,069
|
def uniform_square_aperture(side, skypos, frequency, skyunits='altaz',
east2ax1=None, pointing_center=None,
power=False):
"""
-----------------------------------------------------------------------------
Compute the electric field or power pattern pattern at the specified sky
positions due to a uniformly illuminated square aperture
Inputs:
side [scalar] Sides of the square (in m)
skypos [list or numpy vector] Sky positions at which the power pattern
is to be estimated. Size is M x N where M is the number of
locations, N = 2 (if skyunits = altaz denoting Alt-Az
coordinates), or N = 3 (if skyunits = dircos denoting direction
cosine coordinates). If skyunits = altaz, then altitude and
azimuth must be in degrees
frequency [list or numpy vector] frequencies (in GHz) at which the power
pattern is to be estimated. Frequencies differing by too much
and extending over the usual bands cannot be given.
Keyword Inputs:
skyunits [string] string specifying the coordinate system of the sky
positions. Accepted values are 'altaz', and 'dircos'.
Default = 'altaz'. If 'dircos', the direction cosines are
aligned with the local East, North, and Up. If 'altaz', then
altitude and azimuth must be in degrees.
east2ax1 [scalar] Angle (in degrees) the primary axis of the array makes
with the local East (positive anti-clockwise).
pointing_center
[list or numpy array] coordinates of pointing center (in the same
coordinate system as that of sky coordinates specified by
skyunits). 2-element vector if skyunits='altaz'. 2- or
3-element vector if skyunits='dircos'.
power [boolean] If set to True, compute power pattern, otherwise
compute field pattern (default=False).
Output:
Electric field pattern or power pattern, number of rows equal to the number
of sky positions (which is equal to the number of rows in skypos), and number
of columns equal to the number of wavelengths.
-----------------------------------------------------------------------------
"""
try:
side, skypos, frequency
except NameError:
raise NameError('Square antenna side, skypos, frequency must be specified')
    if not isinstance(side, (int, float)):
        raise TypeError('Antenna side must be a scalar')
sides = NP.asarray([side]*2, dtype=NP.float)
ab = uniform_rectangular_aperture(sides, skypos, frequency,
skyunits=skyunits,
east2ax1=east2ax1,
pointing_center=pointing_center,
power=power)
return ab
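# Hedged usage sketch (values are made up): field pattern of a 14 m square aperture
# at two Alt-Az sky positions and a single 150 MHz frequency.
# skypos = NP.asarray([[90.0, 0.0], [60.0, 45.0]])   # altitude, azimuth in degrees
# frequency = NP.asarray([0.15])                     # GHz
# pattern = uniform_square_aperture(14.0, skypos, frequency, skyunits='altaz',
#                                   east2ax1=0.0)
# Per the docstring, pattern has one row per sky position and one column per frequency.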
| 10,070
|
def set_gps_location(file_name, lat, lng):
"""Adds GPS position as EXIF metadata
Keyword arguments:
file_name -- image file
lat -- latitude (as float)
lng -- longitude (as float)
"""
global attribute
lat_deg = to_deg(lat, ["S", "N"])
lng_deg = to_deg(lng, ["W", "E"])
#print lat_deg
#print lng_deg
    # convert decimal coordinates into degrees, minutes and seconds
if attribute == False:
exiv_lat = (pyexiv2.Rational(lat_deg[0]*60+lat_deg[1],60),pyexiv2.Rational(lat_deg[2]*100,6000), pyexiv2.Rational(0, 1))
exiv_lng = (pyexiv2.Rational(lng_deg[0]*60+lng_deg[1],60),pyexiv2.Rational(lng_deg[2]*100,6000), pyexiv2.Rational(0, 1))
else:
exiv_lat = (pyexiv2.Rational(lat_deg[0]*60,60),pyexiv2.Rational(lat_deg[1]*100,100), pyexiv2.Rational(lat_deg[2]*10000, 10000))
exiv_lng = (pyexiv2.Rational(lng_deg[0]*60,60),pyexiv2.Rational(lng_deg[1]*100,100), pyexiv2.Rational(lng_deg[2]*10000, 10000))
exiv_image = pyexiv2.ImageMetadata(file_name)
exiv_image.read()
exif_keys = exiv_image.exif_keys
exiv_image["Exif.GPSInfo.GPSLatitude"] = exiv_lat
exiv_image["Exif.GPSInfo.GPSLatitudeRef"] = lat_deg[3]
exiv_image["Exif.GPSInfo.GPSLongitude"] = exiv_lng
exiv_image["Exif.GPSInfo.GPSLongitudeRef"] = lng_deg[3]
exiv_image["Exif.Image.GPSTag"] = 654
exiv_image["Exif.GPSInfo.GPSMapDatum"] = "WGS-84"
exiv_image["Exif.GPSInfo.GPSVersionID"] = '2 0 0 0'
    exiv_image.write()
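# Hedged usage sketch (path and coordinates are made up; requires pyexiv2 and the
# to_deg helper referenced above):
# set_gps_location("/path/to/photo.jpg", 48.8584, 2.2945)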
| 10,071
|
def sources_table(citator):
"""
Return the content for an HTML table listing every template that the
citator can link to.
"""
rows = []
for template in citator.templates.values():
# skip templates that can't make URLs
if not template.__dict__.get('URL_builder'):
continue
URL = urlsplit(''.join(template.URL_builder.parts))
domain_URL = f'{URL.scheme}://{URL.netloc}'
domain_name = URL.hostname
regex = unify_regex(template, simplify_for_regexper=True)
rows.append(SOURCES_TABLE_ROW.format(
name=template.name,
domain_URL=domain_URL,
domain_name=domain_name,
escaped_regex=quote_plus(regex).replace('+', '%20')
))
return SOURCES_TABLE.format(rows=''.join(rows))
| 10,072
|
def parse_bot_commands(data, starterbot_id):
"""
Parses a list of events coming from the Slack RTM API to find bot commands.
If a bot command is found, this function returns a tuple of command and channel.
If its not found, then this function returns None, None.
"""
user_id, message = parse_direct_mention(data["text"])
print(f'user_id: {user_id}')
print(f'starterbot_id: {starterbot_id}')
if user_id == starterbot_id:
return message, data["channel"]
return None, None
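# Hedged usage sketch with a fake Slack RTM event (assumes parse_direct_mention,
# used above, returns the mentioned user id and the remaining message text):
# event = {"text": "<@U012ABCDEF> deploy staging", "channel": "C0123456"}
# command, channel = parse_bot_commands(event, "U012ABCDEF")
# command == "deploy staging" and channel == "C0123456" when the mention matches the bot id.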
| 10,073
|
def construct_obj_in_dict(d: dict, cls: Callable) -> dict:
"""
Args
d (dict):
d[name][charge][annotation]
"""
if not isinstance(d, dict):
return d
else:
new_d = deepcopy(d)
for key, value in d.items():
            if isinstance(value, dict) and value.get("@class", "") == cls.__name__:
new_d[key] = cls.from_dict(value)
else:
new_d[key] = construct_obj_in_dict(value, cls)
return new_d
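# Hedged usage sketch with a made-up Dummy class; any class exposing a from_dict
# classmethod works the same way.
# class Dummy:
#     @classmethod
#     def from_dict(cls, d):
#         obj = cls()
#         obj.charge = d["charge"]
#         return obj
#
# nested = {"Va_O1": {"0": {"@class": "Dummy", "charge": 0}}}
# result = construct_obj_in_dict(nested, Dummy)
# result["Va_O1"]["0"] is now a Dummy instance; other values are returned unchanged.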
| 10,074
|
def signup_user(request):
"""
    Function to sign up users that are not admins
:param request: This param contain all the information associated to the request
:param type request: Request
:return: The URL to render
:rtype: str
"""
try:
log = LoggerManager('info', 'singup_manager-info', session=request.session)
if request.method == 'POST':
form = ClientRegistrationForm(request.POST)
if form.is_valid():
form.save()
max_id = Account.objects.all().aggregate(Max('id'))['id__max']
user = Account.objects.filter(id=max_id)
web_group, created = Group.objects.get_or_create(name=request.user.email)
web_group.user_set.add(request.user.id)
web_group.user_set.add(user.get().id)
log.write_info(form.data)
return redirect('client_list')
else:
form = ClientRegistrationForm()
return render(request, 'registration/signup.html', {
'form': form
})
except Exception as ex:
log = LoggerManager('exception', 'singup_manager-exception', session=request.session)
log.write_exception(ex)
| 10,075
|
def haar_rand_state(dim: int) -> np.ndarray:
"""
Given a Hilbert space dimension dim this function returns a vector
representing a random pure state operator drawn from the Haar measure.
:param dim: Hilbert space dimension.
:return: Returns a dim by 1 vector drawn from the Haar measure.
"""
unitary = haar_rand_unitary(dim)
fiducial_vec = np.zeros((dim, 1))
fiducial_vec[0] = 1
return np.matmul(unitary, fiducial_vec)
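# Hedged usage sketch (relies on haar_rand_unitary from the same module): a
# Haar-random pure state is a dim x 1 column vector with unit norm.
# psi = haar_rand_state(4)
# assert psi.shape == (4, 1)
# assert np.isclose(np.linalg.norm(psi), 1.0)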
| 10,076
|
def is_src_package(path: Path) -> bool:
"""Checks whether a package is of the form:
├─ src
│ └─ packagename
│ ├─ __init__.py
│ └─ ...
├─ tests
│ └─ ...
└─ setup.py
    The check for the path will be if it's a directory with only one subdirectory
containing an __init__.py file.
Parameters
----------
path : Path
Full path pointing to a dir.
Returns
-------
check : bool
If the package is an src package, returns True, False otherwise.
See Also
--------
is_package
"""
check: bool = False
if path.is_dir():
maybe_subdirs = list(path.iterdir())
if len(maybe_subdirs) == 1:
check = is_package(path / maybe_subdirs[0])
return check
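# Hedged usage sketch (path is made up): returns True when the directory passed in
# contains exactly one entry and that entry is a package folder with an __init__.py.
# is_src_package(Path("/home/user/project/src"))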
| 10,077
|
def plot_timeseries(
data=None,
cmap=None,
plot_kwargs=dict(marker=".", markersize=3),
figsize=(10, 5),
verbose=True,
**params,
):
"""
Plot timeseries from multiple stations on a single plot for each variable.
Parameters
----------
data : output from stations_timeseries or None.
The returned data from ``stations_timeseries``.
If None, then the user must supply param keywords to make
the API request for stations_timeseries here.
cmap : str
A matplotlib named colormap to cycle colors (e.g. 'Spectral', 'Blues').
If None, use the default color cycle.
plot_kwargs : dict
kwargs for the plotted lines
params : keyword arguments
Same as for `stations_timeseries`
"""
# User must supply the data as returned from stations_timeseries
# or the param keywords used to make the API request.
if data is None:
a = ss.stations_timeseries(verbose=verbose, **params)
else:
a = data
if not isinstance(a, list):
a = [a]
# Get unique columns names for all stations
variables = list({item for sublist in a for item in sublist})
variables.sort()
if cmap is None:
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
else:
# Cycle colors based on a matplotlib colormap
cmap = plt.get_cmap(cmap)
colors = [cmap(i) for i in np.linspace(0, 1, len(a))]
#################################
# Make the Plots
#################################
for i, var in enumerate(variables):
if var in ["metar", "wind_cardinal_direction"]:
continue
fig, ax = plt.subplots(1, 1, figsize=figsize)
var_str = var.replace("_", " ").title()
ax.set_title(f"{var_str}", loc="left")
ax.set_xlabel("")
for c, stn in zip(colors, a):
if var in stn:
if "_set_" in var:
var_units = stn.attrs["UNITS"]["_".join(var.split("_")[:-2])]
else:
var_units = stn.attrs["UNITS"][var]
stn[var].plot(ax=ax, label=stn.attrs["STID"], color=c, **plot_kwargs)
ax.set_ylabel(f"{var_str} ({var_units})")
plt.grid(linestyle="--", alpha=0.5)
plt.xlabel("")
plt.legend()
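# Hedged usage sketch (station id and query keywords are made-up examples of
# stations_timeseries parameters, not verified against the API):
# plot_timeseries(stid="WBB", vars="air_temp,wind_speed", recent=360, cmap="Spectral")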
| 10,078
|
def value_element(units=(OneOrMore(T('NN')) | OneOrMore(T('NNP')) | OneOrMore(T('NNPS')) | OneOrMore(T('NNS')))('raw_units').add_action(merge)):
"""
Returns an Element for values with given units. By default, uses tags to guess that a unit exists.
:param BaseParserElement units: (Optional) A parser element for the units that are to be looked for. Default option looks for nouns.
:returns: An Element to look for values and units.
:rtype: BaseParserElement
"""
    number = R(r'^[\+\-–−]?\d+(\.\d+)?$')
    joined_range = R(r'^[\+\-–−]?\d+(\.\d+)?[\-–−~∼˜]\d+(\.\d+)?$')('raw_value').add_action(merge)
    spaced_range = (number + Optional(units).hide() + (R(r'^[\-–−~∼˜]$') + number | number))('raw_value').add_action(merge)
to_range = (number + Optional(units).hide() + I('to') + number)('raw_value').add_action(join)
plusminus_range = (number + R('±') + number)('value').add_action(join)
between_range = (I('between').hide() + number + I('and') + number).add_action(join)
    value_range = (Optional(R(r'^[\-–−]$')) + (plusminus_range | joined_range | spaced_range | to_range | between_range))('raw_value').add_action(merge)
    value_single = (Optional(R(r'^[~∼˜\<\>]$')) + Optional(R(r'^[\-–−]$')) + number)('raw_value').add_action(merge)
value = Optional(lbrct).hide() + (value_range | value_single)('raw_value') + Optional(rbrct).hide()
return value + units
| 10,079
|
def get_pattern(model_id, release_id) -> list:
"""
content demo:
[
'...',
{
0.1: [
['if', 'checker.check'],
3903,
['if', 'checker.check', '*', Variable(name="ip", value='10.0.0.1')],
['if checker.check():', 'if checker.check()'],
[282. 1877],
27886975249790003104399390262688492018705644758766193963474214767849400520551
]
},
'...',
'...'
]
    sensitive_pattern [List]:
    - representative tokens: one token that matches the pattern
    - numbers: number of log entries belonging to this pattern
    - pattern: the clustering pattern
    - raw_log: list of all raw log entries
    - log_index: indexes of all raw log entries
    - log_signature: signature of the clustering model
"""
content = AiopsModelHandler.pickle_decode(
content=AiopsModelHandler().aiops_release_model_release_id_model_file(
model_id=model_id, model_release_id=release_id
)["file_content"]
)
patterns = []
for _, sensitive_patterns in content[CONTENT_PATTERN_INDEX].items():
for sensitive_pattern in sensitive_patterns:
signature = sensitive_pattern[PATTERN_SIGNATURE_INDEX]
pattern_list = []
for pattern in sensitive_pattern[PATTERN_INDEX]:
if hasattr(pattern, "name"):
pattern_list.append("#{}#".format(pattern.name))
continue
pattern_list.append(str(pattern))
patterns.append({"signature": str(signature), "pattern": " ".join(pattern_list)})
return patterns
| 10,080
|
def bit_remove(bin_name, byte_offset, byte_size, policy=None):
"""Creates a bit_remove_operation to be used with operate or operate_ordered.
Remove bytes from bitmap at byte_offset for byte_size.
Args:
bin_name (str): The name of the bin containing the map.
byte_offset (int): Position of bytes to be removed.
byte_size (int): How many bytes to remove.
policy (dict, optional): The bit_policy policy dictionary. See: See :ref:`aerospike_bit_policies`. default: None
Returns:
A dictionary usable in operate or operate_ordered. The format of the dictionary
should be considered an internal detail, and subject to change.
"""
return {
OP_KEY: aerospike.OP_BIT_REMOVE,
BIN_KEY: bin_name,
POLICY_KEY: policy,
BYTE_OFFSET_KEY: byte_offset,
BYTE_SIZE_KEY: byte_size
}
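# Hedged usage sketch (client, key and bin name are hypothetical): strip the first
# two bytes from a bitmap bin inside an operate() call.
# ops = [bit_remove("bitmap_bin", byte_offset=0, byte_size=2)]
# client.operate(key, ops)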
| 10,081
|
def _log_request(request):
"""Log a client request. Copied from msrest
https://github.com/Azure/msrest-for-python/blob/3653d29fc44da408898b07c710290a83d196b777/msrest/http_logger.py#L39
"""
if not logger.isEnabledFor(logging.DEBUG):
return
try:
logger.info("Request URL: %r", request.url)
logger.info("Request method: %r", request.method)
logger.info("Request headers:")
for header, value in request.headers.items():
if header.lower() == 'authorization':
# Trim at least half of the token but keep at most 20 characters
preserve_length = min(int(len(value) * 0.5), 20)
value = value[:preserve_length] + '...'
logger.info(" %r: %r", header, value)
logger.info("Request body:")
# We don't want to log the binary data of a file upload.
import types
if isinstance(request.body, types.GeneratorType):
logger.info("File upload")
else:
logger.info(str(request.body))
except Exception as err: # pylint: disable=broad-except
logger.info("Failed to log request: %r", err)
| 10,082
|
def visualize(temporal_networks,
frame_dt,
time_normalization_factor=1,
time_unit=None,
titles=None,
config=None,
port=8226,
export_path=None,
):
"""
Visualize a temporal network or a list of temporal networks interactively.
This routine starts up an HTTP server, bins the networks according to the
time step ``frame_dt`` and copies them to ``~/.tacoma/web``. Subsequently,
a the interaction is started in the standard browser.
The visualization is stopped with KeyboardInterrupt. The temporary
temporal network files will subsequently be deleted.
Parameters
----------
temporal_networks : an instance of :class:`_tacoma.edge_changes`, :class:`_tacoma.edge_lists`, :class:`_tacoma.edge_lists_with_histograms`, :class:`_tacoma.edge_changes_with_histograms` or a list containing those.
The temporal networks to visualize. If a list is provided, all networks need to have the
same `t0` and `tmax`.
frame_dt : float
The duration of a frame in the visualization.
.. note::
This has to be given in the original time units
of the temporal network, disregarding any
value of ``time_normalization_factor``.
time_normalization_factor : float, default : 1.0
Rescale time with this factor.
time_unit : string, default : None,
Unit of time of the visualization.
titles : string or list of strings, default : None
Titles to put on the figures of the corresponding temporal networks.
config : dict or str
Configuration values for the JavaScript visualization. If this
is a string, it can be either ``hs13``, ``dtu``, or ``ht09`` and
the appropriate configuration is loaded.
port : int, default : 8226
Port of the started HTTP server.
export_path : string, default : None
path to a directory to which the whole visualization is copied.
        Use ``os.getcwd()+'/export_dir/'`` for the current working directory (after
``import os``).
.. warning::
No subdirectory will be made for the export. All visualization files
will be exported to ``export_path`` directly.
Notes
-----
The configuration dictionary is filled with values to control
the appearance of the visualizations. The standard configuration is
.. code:: python
config = {
"plot_width" : 320 ,
"network_plot_height" : 250,
"edges_plot_height" : 100,
"padding" : 10,
"start_it" : 0,
"node_radius" : 2.5,
"link_distance" : 10,
"node_charge": -8,
"edge_line_width" : 1,
"font_size_in_px" : 14,
"link_width" : 1,
"d3_format_string": ".3f",
}
"""
if not hasattr(temporal_networks, '__len__'):
temporal_networks = [temporal_networks]
if titles is None:
titles = ["" for _ in temporal_networks]
elif type(titles) == str or not hasattr(titles, '__len__'):
titles = [titles]
this_config = copy.deepcopy(standard_config)
if isinstance(config, str):
if config == 'dtu':
config = dict(dtu_config)
elif config == 'hs13':
config = dict(hs13_config)
elif config == 'ht09':
config = dict(ht09_config)
else:
raise ValueError("config", config, "is unknown.")
if config is not None:
this_config.update(config)
# print(titles)
# define the server address
# server_address = ('127.0.0.1', port)
path = '~/.tacoma/web/'
web_dir = os.path.abspath(os.path.expanduser(path))
# download d3 if that did not happen yet
download_d3()
# copy the html and js files for the visualizations
prepare_visualization_directory()
# create a subfolder based on the current time
subdir = "tmp_{:x}".format(int(time.time()*1000))
mkdirp_customdir(directory=web_dir)
subdir_path = os.path.join(web_dir, subdir)
mkdirp_customdir(directory=subdir_path)
# in case an export is demanded, prepare the export directory
if export_path is not None:
export_path = os.path.abspath(os.path.expanduser(export_path))
prepare_export_directory(export_path, subdir)
# change directory to this directory
print("changing directory to", web_dir)
print("starting server here ...", web_dir)
cwd = os.getcwd()
os.chdir(web_dir)
server = StoppableHTTPServer(("127.0.0.1", port),
http.server.SimpleHTTPRequestHandler,
subdir_path,
)
for itn, tn in enumerate(temporal_networks):
print("preparing network", titles[itn])
tn_b = _get_prepared_network(
tn, frame_dt, time_unit, time_normalization_factor)
taco_fname = os.path.join(subdir, subdir+'_'+str(itn)+'.taco')
edge_fname = os.path.join(subdir, subdir+'_'+str(itn)+'.json')
tc.write_edge_trajectory_coordinates(tn_b,
os.path.join(web_dir, edge_fname),
filter_for_duration=frame_dt * time_normalization_factor)
tc.write_json_taco(tn_b, os.path.join(web_dir, taco_fname))
this_config['temporal_network_files'].append(taco_fname)
this_config['edges_coordinate_files'].append(edge_fname)
this_config['titles'].append(titles[itn])
with open(os.path.join(web_dir, subdir+'_config.json'), 'w') as f:
json.dump(this_config, f)
if export_path is not None:
copy_tree(subdir_path, os.path.join(export_path, subdir))
with open(os.path.join(export_path, 'default_config.json'), 'w') as f:
json.dump(this_config, f)
# ========= start server ============
thread = threading.Thread(None, server.run)
thread.start()
webbrowser.open("http://localhost:"+str(port)+"/?data=" + subdir)
try:
while True:
time.sleep(2)
except KeyboardInterrupt:
# thread.join()
print('stopping server ...')
server.stop_this()
thread.join()
# time.sleep(1)
print('changing directory back to', cwd)
os.chdir(cwd)
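# Hedged usage sketch: tn is assumed to be an existing temporal network instance
# (e.g. _tacoma.edge_lists); frame_dt is given in the network's own time units.
# visualize(tn, frame_dt=20.0, time_unit="s", config="hs13")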
| 10,083
|
def has_session_keys_checksums(session_key_checksums):
"""Check if this session key is (likely) already used."""
assert session_key_checksums, 'Eh? No checksum for the session keys?'
LOG.debug('Check if session keys (hash) are already used: %s', session_key_checksums)
with connection.cursor() as cur:
LOG.debug('SELECT * FROM local_ega.has_session_keys_checksums_sha256(%s);', session_key_checksums)
cur.execute('SELECT * FROM local_ega.has_session_keys_checksums_sha256(%(sk_checksums)s);',
{'sk_checksums': list(session_key_checksums)})
found = cur.fetchone()
LOG.debug("Check session keys: %s", found)
return (found and found[0])
| 10,084
|
def four2five(data, format_, dst_dtype='float16', need_custom_tiling=True):
"""
Convert 4-dims "data" to 5-dims,the format of "data" is defined in "format_"
Args:
data (tvm.tensor.Tensor): 4-dims tensor of type float16, float32
format_ (str): a str defined the format of "data"
dst_dtype (str): a str defined the type of output, could be float16 or float32
Returns:
5-dims tvm.tensor.Tensor,type is defined by dst_dtype,
which shape is [N, ceil(C / 16), H, W, 16] and attr about tiling args
Raises:
ValueError: If the type of format_ is invalid.
"""
# Check dtype
vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
# Check shape
shape = get_shape(data)
vc_util.davinci_format_check(shape, format_, dim=4)
# Check format
if format_ not in ['NCHW', 'NHWC']:
raise ValueError("{} format is not support, four2five only support NCHW and NHWC format input"
.format(format_))
last_channel = 16
if format_ == "NCHW":
bs, c, h, w = get_shape(data)
else:
bs, h, w, c = get_shape(data)
pad_c = c
if c % last_channel != 0:
pad_c = (c + 15) // last_channel * last_channel
c1 = pad_c // last_channel
c0 = last_channel
is_dynamic = ds.shape_is_dynamic(data)
if not is_dynamic:
attrs = get_attrs()
else:
attrs = get_dynamic_attrs()
# Check size c when casting happens
if data.dtype != dst_dtype and c0 * c1 >= C_LIMIT_FOR_CAST:
raise ValueError("When input and output data type is not matched, shape of 'c' axis should not exceed {}, "
"while currently set is {}".format(C_LIMIT_FOR_CAST, c0 * c1))
@script(capture=locals())
def nchw_to_nc1hwc0_step(inputs, bs, c1, h, w, c0):
output = allocate((bs, c1, h, c0, w), inputs.dtype, "local")
for n_i in range(bs):
for c_i in range(c1):
for h_i in range(h):
for w_i in range(w):
for c_i0 in range(c0):
output[n_i, c_i, h_i, c_i0, w_i] = inputs[n_i, c_i * last_channel + c_i0, h_i, w_i]
output1 = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
for n_i in range(bs):
for c_i in range(c1):
for h_i in range(h):
for w_i in range(w):
for c_i0 in range(c0):
output1[n_i, c_i, h_i, w_i, c_i0] = output[n_i, c_i, h_i, c_i0, w_i]
return output1
@script(capture=locals())
def nchw_to_nc1hwc0(inputs, bs, c1, h, w, c0):
output = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
for n_i in range(bs):
for c_i in range(c1):
for h_i in range(h):
for w_i in range(w):
for c_i0 in range(c0):
output[n_i, c_i, h_i, w_i, c_i0] = inputs[n_i, c_i * last_channel + c_i0, h_i, w_i]
return output
@script(capture=locals())
def nhwc_to_nc1hwc0(inputs, zero, bs, c1, h, w, c0):
output = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
for n_i in range(bs):
for c_i in range(c1):
for h_i in range(h):
for w_i in range(w):
for c_i0 in range(c0):
if c_i * last_channel + c_i0 < c:
output[n_i, c_i, h_i, w_i, c_i0] = inputs[n_i, h_i, w_i, c_i * last_channel + c_i0]
else:
output[n_i, c_i, h_i, w_i, c_i0] = zero
return output
cast_data = data
need_cast = data.dtype == 'float32' and dst_dtype == 'float16'
if c % last_channel != 0 or need_cast:
expansion = int(ct_util.BLOCK_SIZE / get_bytes(data.dtype))
else:
expansion = None
# float32 -> float16, need to cast before transform
if need_cast:
cast_data = akg.lang.cce.cast_to(data, dst_dtype)
zero_ = akg.tvm.const(0.0, cast_data.dtype)
if format_ == "NCHW":
if c % last_channel != 0:
pad_shape = [bs, pad_c, h, w]
if h == 1 and w == 1:
# if h and w both are 1, it is pad last dim case
output_shape = [bs, pad_c // last_channel, h, w, last_channel]
output = akg.tvm.compute(output_shape,
lambda i, c1, k, l, c0: akg.tvm.expr.Select(
c0 < c - c1 * last_channel, cast_data[i, c1 * last_channel + c0, k, l],
akg.tvm.const(0, cast_data.dtype)),
name="output")
else:
# if need to pad c dim, separate transpose to two steps
# first is nchw -> nc1hc0w, second is nc1hc0w -> nc1hwc0
pad_data = akg.tvm.compute(pad_shape,
lambda i, j, k, l: akg.tvm.expr.Select(j < c, cast_data[i, j, k, l], zero_),
name="pad_data")
output = nchw_to_nc1hwc0_step(
pad_data,
to_tvm_const(bs),
to_tvm_const(c1),
to_tvm_const(h),
to_tvm_const(w),
to_tvm_const(c0))
else:
if not is_dynamic and data.dtype == "float16" and h * w % last_channel == 0 and h * w < 3600:
output_shape = [bs, c1, h, w, c0]
output = akg.tvm.compute(output_shape, lambda n, c1, h, w, c0:
akg.lang.cce.four2five_nchw(cast_data[n, c1 * last_channel + c0, h, w]),
name="output")
else:
output = nchw_to_nc1hwc0(
cast_data,
to_tvm_const(bs),
to_tvm_const(c1),
to_tvm_const(h),
to_tvm_const(w),
to_tvm_const(c0))
else:
if not is_dynamic and c < last_channel:
rank = 5 # (n, c1, h, w, c0)
pad_before = []
pad_after = []
for _ in range(rank):
pad_before.append(0)
pad_after.append(0)
pad_after[-1] = last_channel - c
# As c < last_channel, c1 is 1
output = akg.tvm.compute((bs, c1, h, w, c), lambda bs_i, _, h_i, w_i, c_i: cast_data[
bs_i, h_i, w_i, c_i], name="output")
output = tvm_pad(output, pad_before, pad_after=pad_after, name='pad_output')
else:
output = nhwc_to_nc1hwc0(
cast_data,
zero_,
to_tvm_const(bs),
to_tvm_const(c1),
to_tvm_const(h),
to_tvm_const(w),
to_tvm_const(c0))
# float16 -> float32, need to cast after transform
if data.dtype == 'float16' and dst_dtype == 'float32':
output = akg.lang.cce.cast_to(output, dst_dtype)
vc_util.davinci_format_check(output.shape, "NC1HWC0", dim=5)
if not is_dynamic:
dim_info, _ = four2five_set_dim_func(data, format_, dst_dtype)
if dim_info != "":
attrs["dim"] = dim_info
if need_custom_tiling:
attrs["custom_tiling"] = four2five_tiling_strategy(output, format_, expansion)
elif need_custom_tiling:
attrs["custom_tiling"] = four2five_tiling_strategy_dynamic(output, format_)
if is_dynamic:
attrs["enable_feature_library_pre_poly"] = True
return output, attrs
| 10,085
|
def MT33_SDR(MT33):
"""Converts 3x3 matrix to strike dip and rake values (in radians)
Converts the 3x3 Moment Tensor to the strike, dip and rake.
Args
MT33: 3x3 numpy matrix
Returns
(float, float, float): tuple of strike, dip, rake angles in radians
(Note: Function from MTFIT.MTconvert)
"""
T,N,P,E=MT33_TNPE(MT33)
N1,N2=TP_FP(T,P)
return FP_SDR(N1,N2)
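# Hedged usage sketch (relies on MT33_TNPE, TP_FP and FP_SDR from the same module;
# the tensor below is an arbitrary example):
# import numpy as np
# mt = np.array([[0., 1., 0.],
#                [1., 0., 0.],
#                [0., 0., 0.]])
# strike, dip, rake = MT33_SDR(mt)   # angles returned in radians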
| 10,086
|
def get_commits_by_tags(repository: Repo, tag_filter_pattern: str, starting_tag: Optional[str] = None) -> List[dict]:
"""
Group commits by the tags they belong to.
Args:
repository: The git repository object
tag_filter_pattern: A regular expression pattern that matches valid tags as versions
starting_tag: Only include tags after this one
Returns:
A list of dictionaries with tag information with most recent first
"""
from generate_changelog.utilities import pairs
tags = [tag for tag in get_tags(repository) if re.match(tag_filter_pattern, tag.name)]
head_commit = repository.commit("HEAD")
head_tagger = head_commit.committer.name
if head_commit.committer.email:
head_tagger += f" <{head_commit.committer.email}>"
head = TagInfo(
name="HEAD",
commit=head_commit.hexsha,
tagger=head_tagger,
tagged_datetime=head_commit.committed_datetime,
)
tags.insert(0, head)
groups = []
for end_tag, start_tag in pairs(tags):
start_tag_name = getattr(start_tag, "name", None)
groups.append(
{
"tag_name": end_tag.name,
"tag_info": end_tag,
"commits": parse_commits(repository, start_tag_name, end_tag.name),
}
)
if starting_tag and start_tag_name == starting_tag:
break
return groups
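# Hedged usage sketch (uses GitPython; the tag pattern is an example):
# from git import Repo
# repo = Repo(".")
# groups = get_commits_by_tags(repo, r"\d+\.\d+\.\d+")
# groups[0]["tag_name"] is "HEAD" when at least one tag matches; later entries
# follow the matched tags, most recent first.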
| 10,087
|
def test_get_missing_recipe():
"""
Return 404 for a recipe that is not there.
"""
http = httplib2.Http()
response, content = http.request('http://our_test_domain:8001/recipes/not_there',
method='GET')
assert response['status'] == '404'
| 10,088
|
def GetObject():
"""
Required module function.
@returns class object of the implemented adapter.
"""
return SpacePacketAdapter
| 10,089
|
def prepare_environment(base_path):
"""
Creates ASSETS_PATH folder if not created and removes existing folder
"""
assets = Path(base_path)
if assets.exists():
shutil.rmtree(assets)
assets.mkdir(parents=True)
| 10,090
|
def filter_paths(ctx, raw_paths, path_type="repo", **kwds):
"""Filter ``paths``.
``path_type`` is ``repo`` or ``file``.
"""
cwd = os.getcwd()
filter_kwds = copy.deepcopy(kwds)
changed_in_commit_range = kwds.get("changed_in_commit_range", None)
diff_paths = None
if changed_in_commit_range is not None:
diff_files = git.diff(ctx, cwd, changed_in_commit_range)
if path_type == "repo":
diff_dirs = set(os.path.dirname(p) for p in diff_files)
diff_paths = set()
for diff_dir in diff_dirs:
while diff_dir:
if os.path.isfile(os.path.join(diff_dir, SHED_CONFIG_NAME)):
diff_paths.add(diff_dir)
break
diff_dir = os.path.dirname(diff_dir)
else:
diff_paths = diff_files
unique_paths = set(os.path.relpath(p, cwd) for p in raw_paths)
if diff_paths is not None:
new_unique_paths = []
for path in unique_paths:
if path in diff_paths:
new_unique_paths.append(path)
unique_paths = new_unique_paths
filtered_paths = sorted(io.filter_paths(unique_paths, cwd=cwd, **filter_kwds))
excluded_paths = sorted(set(unique_paths) - set(filtered_paths))
if excluded_paths:
ctx.log("List of excluded paths: %s" % excluded_paths)
path_count = len(filtered_paths)
chunk_size = ((1.0 * path_count) / kwds["chunk_count"])
chunk = kwds["chunk"]
chunked_paths = []
for i, path in enumerate(filtered_paths):
if int(math.floor(i / chunk_size)) == chunk:
chunked_paths.append(path)
return chunked_paths
| 10,091
|
def hic2cool_convert(infile, outfile, resolution=0, nproc=1, show_warnings=False, silent=False):
"""
Main function that coordinates the reading of header and footer from infile
and uses that information to parse the hic matrix.
Opens outfile and writes in form of .cool file
Params:
<infile> str .hic filename
<outfile> str .cool output filename
<resolution> int bp bin size. If 0, use all. Defaults to 0.
Final .cool structure will change depending on this param (see README)
<show_warnings> bool. If True, print out WARNING messages
<silent> bool. If true, hide standard output
<nproc> number of processes to use
"""
unit = 'BP' # only using base pair unit for now
resolution = int(resolution)
# Global hic normalization types used
global NORMS
NORMS = []
global WARN
WARN = False
req = open(infile, 'rb')
global reqarr
reqarr = []
for i in range(0, nproc):
reqarr.append(open(infile, 'rb'))
global mmap_buf
mmap_buf = mmap.mmap(req.fileno(), 0, access=mmap.ACCESS_READ)
used_chrs, resolutions, masteridx, genome, metadata = read_header(req)
pair_footer_info, expected, factors, norm_info = read_footer(req, mmap_buf, masteridx)
# expected/factors unused for now
del expected
del factors
# used to hold chr_chr key intersections missing from the hic file
warn_chr_keys = []
if not silent: # print hic header info for command line usage
chr_names = [used_chrs[key][1] for key in used_chrs.keys()]
print('##########################')
print('### hic2cool / convert ###')
print('##########################')
print('### Header info from hic')
print('... Chromosomes: ', chr_names)
print('... Resolutions: ', resolutions)
print('... Normalizations: ', NORMS)
print('... Genome: ', genome)
# ensure user input binsize is a resolution supported by the hic file
if resolution != 0 and resolution not in resolutions:
error_str = (
'!!! ERROR. Given binsize (in bp) is not a supported resolution in '
'this file.\nPlease use 0 (all resolutions) or use one of: ' +
str(resolutions))
force_exit(error_str, req)
use_resolutions = resolutions if resolution == 0 else [resolution]
multi_res = len(use_resolutions) > 1
# do some formatting on outfile filename
# .mcool is the 4DN supported multi-res format, but allow .multi.cool too
if outfile[-11:] == '.multi.cool':
if not multi_res:
outfile = ''.join([outfile[:-11] + '.cool'])
elif outfile[-6:] == '.mcool':
if not multi_res:
outfile = ''.join([outfile[:-6] + '.cool'])
elif outfile[-5:] == '.cool':
if multi_res:
outfile = ''.join([outfile[:-5] + '.mcool'])
else:
        # unexpected file ending. just append .mcool or .cool
if multi_res:
outfile = ''.join([outfile + '.mcool'])
else:
outfile = ''.join([outfile + '.cool'])
# check if the desired path exists. try to remove, if so
if os.path.exists(outfile):
try:
os.remove(outfile)
except OSError:
error_string = ("!!! ERROR. Output file path %s already exists. This"
" can cause issues with the hdf5 structure. Please remove that"
" file or choose a different output name." % (outfile))
force_exit(error_string, req)
if WARN:
print_stderr('!!! WARNING: removed pre-existing file: %s' % (outfile))
print('### Converting')
pool = Pool(processes=nproc)
for binsize in use_resolutions:
t_start = time.time()
# initialize cooler file. return per resolution bin offset maps
chr_offset_map, chr_bins = initialize_res(outfile, req, mmap_buf, unit, used_chrs,
genome, metadata, binsize, norm_info, multi_res, show_warnings)
covered_chr_pairs = []
for chr_a in used_chrs:
total_chunk = np.zeros(shape=0, dtype=CHUNK_DTYPE)
if used_chrs[chr_a][1].lower() == 'all':
continue
for chr_b in used_chrs:
if used_chrs[chr_b][1].lower() == 'all':
continue
c1 = min(chr_a, chr_b)
c2 = max(chr_a, chr_b)
chr_key = str(c1) + "_" + str(c2)
# since matrices are upper triangular, no need to cover c1-c2
# and c2-c1 reciprocally
if chr_key in covered_chr_pairs:
continue
tmp_chunk = parse_hic(req, pool, nproc, chr_key, unit, binsize,
pair_footer_info, chr_offset_map, chr_bins,
used_chrs, show_warnings)
total_chunk = np.concatenate((total_chunk, tmp_chunk), axis=0)
del tmp_chunk
covered_chr_pairs.append(chr_key)
# write at the end of every chr_a
write_pixels_chunk(outfile, binsize, total_chunk, multi_res)
del total_chunk
# finalize to remove chunks and write a bit of metadata
finalize_resolution_cool(outfile, binsize, multi_res)
t_parse = time.time()
elapsed_parse = t_parse - t_start
if not silent:
print('... Resolution %s took: %s seconds.' % (binsize, elapsed_parse))
req.close()
for i in range(0, nproc):
reqarr[i].close()
pool.close()
pool.join()
if not silent:
if WARN and not show_warnings:
print('... Warnings were found in this run. Run with -v to display them.')
print('### Finished! Output written to: %s' % outfile)
if multi_res:
print('... This file is higlass compatible.')
else:
print('... This file is single resolution and NOT higlass compatible. Run with `-r 0` for multi-resolution.')
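# Hedged usage sketch (file names are made up):
# hic2cool_convert("sample.hic", "sample.mcool", resolution=0, nproc=4)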
| 10,092
|
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <fasta> ")
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f fasta file" )
stdout( " " )
sys.exit(1)
| 10,093
|
def parse(args):
"""Parse the command-line arguments of the `inpaint` command.
Parameters
----------
args : list of str
List of arguments, without the command name.
Returns
-------
InPaint
Filled structure
"""
struct = InPaint()
struct.files = []
while cli.next_isvalue(args):
val, *args = args
struct.files.append(val)
while args:
if cli.next_isvalue(args):
raise ParseError(f'Value {args[0]} does not seem to belong '
f'to a tag.')
tag, *args = args
if tag in ('-m', '--missing'):
struct.missing = []
while cli.next_isvalue(args):
val, *args = args
struct.missing.append(float(val))
elif tag in ('-nrls', '--max-rls'):
cli.check_next_isvalue(args, tag)
struct.max_rls, *args = args
struct.max_rls = int(struct.max_rls)
elif tag in ('-trls', '--tol-rls'):
cli.check_next_isvalue(args, tag)
struct.tol_rls, *args = args
struct.tol_rls = float(struct.tol_rls)
elif tag in ('-ncg', '--max-cg'):
cli.check_next_isvalue(args, tag)
struct.max_cg, *args = args
struct.max_cg = int(struct.max_cg)
elif tag in ('-tcg', '--tol-cg'):
cli.check_next_isvalue(args, tag)
struct.tol_cg, *args = args
struct.tol_cg = float(struct.tol_cg)
elif tag in ('-cpu', '--cpu'):
struct.device = 'cpu'
elif tag in ('-gpu', '--gpu'):
struct.device = 'cuda'
if cli.next_isvalue(args):
gpu, *args = args
struct.device = 'cuda:{:d}'.format(int(gpu))
elif tag in ('-o', '--output'):
struct.output = []
while cli.next_isvalue(args):
val, *args = args
struct.output.append(val)
elif tag in ('-v', '--verbose'):
struct.verbose = 1
if cli.next_isvalue(args):
struct.verbose, *args = args
struct.verbose = int(struct.verbose)
elif tag in ('-h', '--help'):
print(help)
return None
else:
raise ParseError(f'Unknown tag {tag}')
return struct
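# Hedged usage sketch (file names are made up; assumes cli.next_isvalue treats
# non-tag strings as values):
# options = parse(["t1_lesioned.nii", "--missing", "0", "--cpu", "-o", "t1_inpainted.nii"])
# options.files == ["t1_lesioned.nii"], options.missing == [0.0], options.device == "cpu"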
| 10,094
|
def test_taxi_trips_benchmark(
benchmark: BenchmarkFixture,
tmpdir: py.path.local,
pytestconfig: _pytest.config.Config,
number_of_tables: int,
write_data_docs: bool,
backend_api: str,
):
"""Benchmark performance with a variety of expectations using NYC Taxi data (yellow_trip_data_sample_2019-01.csv)
found in the tests/test_sets/taxi_yellow_trip_data_samples directory, and used extensively in unittest and
integration tests for Great Expectations.
To simulate a more realistic usage of Great Expectations with several tables, this benchmark is run with 1 or more
copies of the table, and each table has multiple expectations run on them. For simplicity, the expectations run on
each table are identical. The specific expectations are somewhat arbitrary but were chosen to be representative of
a (non-public) real use case of Great Expectations.
Note: This data being tested in this benchmark generally shouldn't be changed over time, because consistent
benchmarks are more useful to compare trends over time. Please do not change the tables being tested with nor change
the expectations being used by this benchmark. Instead of changing this benchmark's data/expectations, please
consider adding a new benchmark (or at least rename this benchmark to provide clarity that results are not directly
comparable because of the data change).
"""
_skip_if_bigquery_performance_tests_not_enabled(pytestconfig)
html_dir = (
os.environ.get("GE_BENCHMARK_HTML_DIRECTORY", tmpdir.strpath)
if write_data_docs
else None
)
checkpoint = taxi_benchmark_util.create_checkpoint(
number_of_tables=number_of_tables,
html_dir=html_dir,
backend_api=backend_api,
)
if os.environ.get("GE_PROFILE_FILE_PATH"):
cProfile.runctx(
"checkpoint.run()",
None,
locals(),
filename=os.environ["GE_PROFILE_FILE_PATH"],
)
return
else:
result: CheckpointResult = benchmark.pedantic(
checkpoint.run,
iterations=1,
rounds=1,
)
# Do some basic sanity checks.
assert result.success, result
assert len(result.run_results) == number_of_tables
if write_data_docs:
html_file_paths = list(Path(html_dir).glob("validations/**/*.html"))
assert len(html_file_paths) == number_of_tables
# Check that run results contain the right number of suites, assets, and table names.
assert (
len(
{
run_result["validation_result"]["meta"]["expectation_suite_name"]
for run_result in result.run_results.values()
}
)
== number_of_tables
)
batch_key = "batch_spec" if backend_api == "V3" else "batch_kwargs"
for field in ["data_asset_name", "table_name" if backend_api == "V3" else "table"]:
assert (
len(
{
run_result["validation_result"]["meta"][batch_key][field]
for run_result in result.run_results.values()
}
)
== number_of_tables
)
# Check that every expectation result was correct.
expected_results = taxi_benchmark_util.expected_validation_results()
for run_result in result.run_results.values():
actual_results = [
result.to_json_dict()
for result in run_result["validation_result"]["results"]
]
assert len(expected_results) == len(actual_results)
for expected_result, actual_result in zip(expected_results, actual_results):
description_for_error_reporting = (
f'{expected_result["expectation_config"]["expectation_type"]} result'
)
_recursively_assert_actual_result_matches_expected_result_keys(
expected_result, actual_result, description_for_error_reporting
)
| 10,095
|
def log_context(servicer_context: Mock) -> LogContext:
"""Mock LogContext."""
context = LogContext(
servicer_context,
"/abc.test/GetTest",
Mock(name="Request"),
Mock(name="Response", ByteSize=Mock(return_value=10)),
datetime(2021, 4, 3, 0, 0, 0, 0, timezone.utc),
datetime(2021, 4, 3, 0, 1, 0, 0, timezone.utc),
)
return context
| 10,096
|
def filename(name):
""" Get filename without extension"""
return os.path.splitext(name)[0]
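# Usage sketch: only the last extension is stripped.
# filename("archive.tar.gz")  # -> "archive.tar"
# filename("notes.txt")       # -> "notes"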
| 10,097
|
def test_get_processed_string_single_expression_keeps_type():
"""Process string with interpolation honors type."""
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': [0, 1, 3],
'ctx4': 'ctxvalue4'})
input_string = '{ctx3}'
output = context.get_formatted_value(input_string)
assert output == [0, 1, 3]
assert isinstance(output, list)
| 10,098
|
def base64_decode(string):
"""
Decodes data encoded with MIME base64
"""
return base64.b64decode(string)
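# Usage sketch: accepts bytes or an ASCII str and returns bytes.
# base64_decode("aGVsbG8=")  # -> b'hello'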
| 10,099
|