| content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
from collections import OrderedDict
def get_explicit_kwargs_OD(f_params, bound_args, kwargs) -> OrderedDict:
    """For some call to a function f with args *args and **kwargs,
    :param f_params: inspect.signature(f).parameters
    :param bound_args: inspect.signature(f).bind(*args, **kwargs)
    :param kwargs: the **kwargs dict actually passed in the call
    :return: OrderedDict of the (kwd, kwargs[kwd]) pairs
             for keyword parameters kwd of f that ARE explicitly passed.
    Another ad-hoc little function, needed in 2 different places.
    TODO (doc)tests?
    """
arguments = bound_args.arguments
return OrderedDict(
((k, kwargs[k])
for k in f_params
if k in arguments and k in kwargs)
)
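# Hedged usage sketch (not from the original module): `demo`, `call_args` and
# `call_kwargs` below are hypothetical, added only to show how f_params and
# bound_args are expected to be produced with `inspect`.
import inspect

def demo(a, b=1, c=2):
    pass

call_args, call_kwargs = (10,), {'c': 30}
sig = inspect.signature(demo)
print(get_explicit_kwargs_OD(sig.parameters,
                             sig.bind(*call_args, **call_kwargs),
                             call_kwargs))   # OrderedDict([('c', 30)])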
|
abf069f5a15f159535825d77015af5205ff72c5c
| 3,642,300
|
import os
def generate_variable_formants_point_function(corpus_context, min_formants, max_formants):
"""Generates a function used to call Praat to measure formants and bandwidths with variable num_formants.
Parameters
----------
corpus_context : :class:`~polyglot.corpus.context.CorpusContext`
The CorpusContext object of the corpus.
min_formants : int
The minimum number of formants to measure with on subsequent passes (default is 4).
max_formants : int
The maximum number of formants to measure with on subsequent passes (default is 7).
Returns
-------
formant_function : Partial function object
The function used to call Praat.
"""
max_freq = 5500
script_dir = os.path.dirname(os.path.abspath(__file__))
script = os.path.join(script_dir, 'multiple_num_formants.praat')
formant_function = PraatAnalysisFunction(script, praat_path=corpus_context.config.praat_path,
arguments=[0.01, 0.025, min_formants, max_formants, max_freq])
formant_function._function._output_parse_function = parse_multiple_formant_output
return formant_function
|
62ba3be914caaff2dbf71b3e43ae27142c4ff6af
| 3,642,301
|
def handle_player_dead_keys(key):
"""
The set of keys for a dead player.
Can only see the inventory and toggle fullscreen.
"""
key_char = chr(key.c) if key.vk == libtcod.KEY_CHAR else ""
if key_char == 'i':
return {'show_inventory': True}
if key.vk == libtcod.KEY_ENTER and key.lalt:
# Alt+Enter: toggle full screen
return {'fullscreen': True}
elif key.vk == libtcod.KEY_ESCAPE:
# Exit the menu
return {'exit': True}
return {}
|
35f6288e7e830c36b163cd762a3a53531bbee394
| 3,642,302
|
import numpy as np
def safe_std(values):
    """Replace zero std values with ones."""
    return np.array([val if val != 0.0 else 1.0 for val in values])
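# Quick illustrative check (not part of the original module): zero entries come
# back as 1.0, so dividing by the returned "std" values is always safe.
print(safe_std([2.0, 0.0, 0.5]))   # -> [2.  1.  0.5]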
|
ab09a435393ea8025af966f4b464e088dce7a00b
| 3,642,303
|
def _munge_source_data(data_source=settings.NETDEVICES_SOURCE):
"""
Read the source data in the specified format, parse it, and return a
:param data_source:
Absolute path to source data file
"""
log.msg('LOADING FROM: ', data_source)
kwargs = parse_url(data_source)
path = kwargs.pop('path')
return loader.load_metadata(path, **kwargs)
|
bda1261e3cf914402aa4e7b4e49f523088da1fe9
| 3,642,304
|
def create_app(settings_override=None):
"""
Create a flask application using the app factory pattern
:return: Flask app
"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('config.settings')
app.config.from_pyfile('settings.py', silent=True)
if settings_override:
app.config.update(settings_override)
app.register_blueprint(user)
extensions(app)
return app
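# Hedged usage sketch, assuming a pytest-style test; the TESTING override below
# is illustrative only and not taken from the original project.
app = create_app(settings_override={'TESTING': True})
with app.test_client() as client:
    response = client.get('/')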
|
de3dbd009b67cfabd37e71063dc3d2b925b08cb6
| 3,642,305
|
def convex_hull(ps: Polygon) -> Polygon:
"""Andrew's algorithm"""
def construct(limit, start, stop, step=1):
for i in range(start, stop, step):
while len(res) > limit and cross(res[-1] - res[-2], s_ps[i] - res[-1]) < 0:
res.pop()
res.append(s_ps[i])
assert len(ps) >= 3
s_ps = sorted(ps)
N = len(s_ps)
res: Polygon = []
construct(1, 0, N)
construct(len(res), N - 2, -1, -1)
return res[:-1]
|
4cdf71cdb65e838f6ef2b4617237d483257364df
| 3,642,306
|
def get_api_file_url(file_id):
"""Get BaseSpace API file URL."""
api_url = get_api_url()
return f'{api_url}/files/{file_id}'
|
eff39b6dc2470f4217b8190104b3efb7c532f995
| 3,642,307
|
def Tr(*content, **attrs):
"""
Wrapper for tr tag
>>> Tr().render()
'<tr></tr>'
"""
return KWElement('tr', *content, **attrs)
|
30b3cd48fb96a0d7d04f5deebe85f4de77d82126
| 3,642,308
|
import numpy
def _read_storm_locations_one_time(
top_tracking_dir_name, valid_time_unix_sec, desired_full_id_strings):
"""Reads storm locations at one time.
K = number of storm objects desired
:param top_tracking_dir_name: See documentation at top of file.
:param valid_time_unix_sec: Valid time.
:param desired_full_id_strings: length-K list of full storm IDs. Locations
will be read for these storms only.
:return: desired_latitudes_deg: length-K numpy array of latitudes (deg N).
:return: desired_longitudes_deg: length-K numpy array of longitudes (deg E).
"""
spc_date_string = time_conversion.time_to_spc_date_string(
valid_time_unix_sec)
desired_times_unix_sec = numpy.full(
len(desired_full_id_strings), valid_time_unix_sec, dtype=int
)
tracking_file_name = tracking_io.find_file(
top_tracking_dir_name=top_tracking_dir_name,
tracking_scale_metres2=DUMMY_TRACKING_SCALE_METRES2,
source_name=tracking_utils.SEGMOTION_NAME,
valid_time_unix_sec=valid_time_unix_sec,
spc_date_string=spc_date_string, raise_error_if_missing=True)
print('Reading storm locations from: "{0:s}"...'.format(tracking_file_name))
storm_object_table = tracking_io.read_file(tracking_file_name)
desired_indices = tracking_utils.find_storm_objects(
all_id_strings=storm_object_table[
tracking_utils.FULL_ID_COLUMN].values.tolist(),
all_times_unix_sec=storm_object_table[
tracking_utils.VALID_TIME_COLUMN].values,
id_strings_to_keep=desired_full_id_strings,
times_to_keep_unix_sec=desired_times_unix_sec, allow_missing=False)
desired_latitudes_deg = storm_object_table[
tracking_utils.CENTROID_LATITUDE_COLUMN].values[desired_indices]
desired_longitudes_deg = storm_object_table[
tracking_utils.CENTROID_LONGITUDE_COLUMN].values[desired_indices]
return desired_latitudes_deg, desired_longitudes_deg
|
7867d2a36b71d0ab0c4d694e617cabe2aa960e92
| 3,642,309
|
def ja_il(il, instr):
"""
Returns llil expression to goto target of instruction
:param il: llil function to generate expression with
:param instr: instruction to pull jump target from
:return: llil expression to goto target of instr
"""
label = valid_label(il, instr.ja_target)
return il.goto(label)
|
9eb18f9b709c960467d9c6a9d9cbed0d7619c5fe
| 3,642,310
|
def create_plot_durations_v_nrows(source, x_axis_type='log', x_range=(1, 10**5),
y_axis_type='log', y_range=(0.001, 10**3)):
"""
Create a Bokeh plot (Figure) of do_query_dur and stream_to_file_dur versus num_rows.
num_rows is the number of result rows from the query.
Parameters
----------
source : ColumnDataSource
Bokeh data source containing the navostats data
x_axis_type : str
auto, linear, log, datetime, or mercator
x_range : tuple (min, max)
The range of values to display on the x axis. When x_axis_type is 'log',
it helps if the endpoints are exact powers of 10.
y_axis_type : str
auto, linear, log, datetime, or mercator
y_range : tuple (min, max)
The range of values to display on the y axis. When y_axis_type is 'log',
it helps if the endpoints are exact powers of 10.
Returns
-------
plotting.figure
A Bokeh plot that can be shown.
"""
    # create a new plot with the requested axis types (log scale by default)
p = plotting.figure(plot_width=500, plot_height=500,
x_axis_type=x_axis_type, x_range=x_range,
y_axis_type=y_axis_type, y_range=y_range)
hover = create_hover()
p.add_tools(hover)
# add renderers
qt_rend = p.circle(x="num_rows", y="do_query_dur", source=source, size=4, color='red', alpha=0.2)
dt_rend = p.circle(x="num_rows", y="stream_to_file_dur", source=source, size=4, color='green', alpha=0.2)
legend = Legend(items=[
("Query Duration", [qt_rend]),
("Download Duration", [dt_rend])
], location=(0, 40), click_policy='hide')
p.add_layout(legend, 'below')
p.title.text = 'Query and Download Durations v. # of Rows'
p.xaxis.axis_label = '# of Rows'
p.yaxis.axis_label = 'Durations (s)'
return p
|
2c018a68d521ba7116086f4d9f2fb015c4f584d8
| 3,642,311
|
from auroraapi.text import Text
import functools
def listen_and_transcribe(length=0, silence_len=0.5):
"""
Listen with the given parameters, but simulaneously stream the audio to the
Aurora API, transcribe, and return a Text object. This reduces latency if
you already know you want to convert the speech to text.
:param length the length of time (seconds) to record for. If 0, it will record indefinitely, until the specified amount of silence
:type length float
:param silence_len the amount of silence (seconds) to allow before stoping (ignored if length != 0)
:type silence_len float
"""
return Text(get_stt(functools.partial(stream, length, silence_len), stream=True)["transcript"])
|
2807b00fc760d4767c211ba168ac38dc793743aa
| 3,642,312
|
import pandas as pd
def load_test_dataframes(feature_folder, **kwargs):
"""
Convenience function for loading unlabeled test dataframes. Does not add a 'Preictal' column.
:param feature_folder: The folder to load the feature data from.
:param kwargs: keyword arguments to use for loading the features.
:return: A DataFrame of unlabeled test data without a 'Preictal' column.
"""
test = load_feature_files(feature_folder,
class_name="test",
# Never use sliding frames for the test-data
sliding_frames=False,
**kwargs)
test.sortlevel('segment', inplace=True)
if isinstance(test.columns, pd.MultiIndex):
test.sortlevel(axis=1, inplace=True)
return test
|
69d503c61ec752ed640c9416f7d2292788389aee
| 3,642,313
|
import random
def _match_grid(grid):
"""
given a grid, create the other side to obey:
one p1 black must be a p2 green, tan, and black; and vice versa
"""
l = [""] * 25
color_dict = {
"b": [i for i in range(25) if grid[i] == "b"],
"t": [i for i in range(25) if grid[i] == "t"],
"g": [i for i in range(25) if grid[i] == "g"]
}
random.shuffle(color_dict["b"])
l[color_dict["b"][0]] = "b"
l[color_dict["b"][1]] = "g"
l[color_dict["b"][2]] = "t"
x = random.choice(color_dict["g"])
color_dict["g"].remove(x)
l[x] = "b"
l[random.choice(color_dict["t"])] = "b"
# one green is the other player's black
# three are the other players greens
remain = {i for i in range(25) if l[i] == ""}
s = random.sample(remain.difference(set(color_dict["g"])), _NUM_GREENS - 4)
s += random.sample(color_dict["g"], 3)
for i in remain:
if i in s: l[i] = "g"
else: l[i] = "t"
return _Player_Grid(l)
|
83b2ee7ac92c5f5d085cd5da11fff7132449736e
| 3,642,314
|
def add_vary_callback_if_cookie(*varies):
"""Add vary: cookie header to all session responses.
Prevent downstream web serves to accidentally cache session set-cookie reponses,
potentially resulting to session leakage.
"""
def inner(request, response):
vary = set(response.vary if response.vary is not None else [])
vary |= set(varies)
response.vary = vary
return inner
|
ee7949f8c6ba1c11784b2c460e3c9dd962473412
| 3,642,315
|
import re
def fix_span(text_context, offsets, span):
"""
find start-end indices of the span in the text_context nearest to the existing token start-end indices
:param text_context: (str) text to search for span in
:param offsets: (List(Tuple[int, int]) list of begins and ends for each token in the text
:param span: (str) the answer span to find in the text_context
:return: span indices, distance to the nearest token indices
"""
span = span.strip()
assert span in text_context, f'answer span:{span} is not in the context: {text_context}'
begins, ends = map(list, zip(*[x for x in offsets]))
best_dist = 1e200
best_indices = None
if span == text_context:
return text_context, (0, len(text_context)), 0
# re.escape(pattern) escapes (adds '\' to special characters in pattern)
# re.finditer(pattern, string) returns match objects for all matches of pattern in the string
for m in re.finditer(re.escape(span), text_context):
begin_offset, end_offset = m.span()
fixed_begin, d1 = find_nearest(begins, begin_offset, lambda x: x < end_offset)
fixed_end, d2 = find_nearest(ends, end_offset, lambda x: x > begin_offset)
if d1 + d2 < best_dist:
best_dist = d1 + d2
best_indices = (fixed_begin, fixed_end)
if best_dist == 0:
break
assert best_indices is not None
return best_indices, best_dist
|
0c4802c20fa138a6e011f550a58328eb54a487a5
| 3,642,316
|
def update_moira_lists(
strategy, backend, user=None, **kwargs
): # pylint: disable=unused-argument
"""
Update a user's moira lists
Args:
strategy (social_django.strategy.DjangoStrategy): the strategy used to authenticate
backend (social_core.backends.base.BaseAuth): the backend being used to authenticate
user (User): the current user
"""
if features.is_enabled(features.MOIRA) and user and user.is_active:
update_user_moira_lists.delay(user.id, update_memberships=True)
return {}
|
f21f481bbf0b12e76149a93fee3c5c4923407c29
| 3,642,317
|
def certificate_get_all_by_project(context, project_id):
"""Get all certificates for a project."""
return IMPL.certificate_get_all_by_project(context, project_id)
|
6e4c83ff75c229034ae843607d20ae211e094df9
| 3,642,318
|
def simple_list(li):
"""
takes in a list li
returns a sorted list without doubles
"""
return sorted(set(li))
|
1e36f15cea4be4b403f0a9795a2924c08b2cb262
| 3,642,319
|
from collections import OrderedDict
from torch import nn
def create_model(gpu, arch = 'vgg16', input_size = 25088, hidden_layer_size = 512, output_size = 102):
    """Creates a neural network model.
    """
    if arch in archs_dict:
        model = archs_dict[arch]
    else:
        print("You haven't inserted a valid architecture. Check the available architectures at https://pytorch.org/docs/stable/torchvision/models.html.")
        return False
for parameter in model.parameters():
parameter.requires_grad = False
model.classifier = nn.Sequential(OrderedDict([
('Input', nn.Linear(input_size, hidden_layer_size)),
('hidden1', nn.ReLU()),
('DropOut1', nn.Dropout(p=0.2)),
('layer1', nn.Linear(hidden_layer_size, int(hidden_layer_size/2))),
('hidden2', nn.ReLU()),
('layer2', nn.Linear(int(hidden_layer_size/2), output_size)),
('output', nn.LogSoftmax(dim=1))
]))
device = 'cuda' if gpu else 'cpu'
model.to(device)
return model
|
39b8b58cfe97b5872a20f67fbeadff8b5d8e1b16
| 3,642,320
|
import json
def write_json(object_list, metadata,num_frames, out_file = None):
"""
"""
classes = ["person","bicycle","car","motorbike","NA","bus","train","truck"]
# metadata = {
# "camera_id": camera_id,
# "start_time":start_time,
# "num_frames":num_frames,
# "frame_rate":frame_rate
# }
data = {}
for frame_num in range(0,num_frames):
frame_data = []
# for each object
for i in range(0,len(object_list)):
obj = object_list[i]
# see if coordinate will be in range
if obj.first_frame <= frame_num:
if obj.first_frame + len(obj.all) > frame_num:
veh_data = {}
idx = frame_num - obj.first_frame
veh_data["id_num"] = i
veh_data["class"] = classes[int(obj.cls)]
veh_data["detected"] = obj.tags[idx]
veh_data["image_position"] = (obj.all[idx]).tolist()
veh_data["world_position"] = (obj.all_world[idx]).tolist()
veh_data["gps_position"] = (obj.all_gps[idx]).tolist()
frame_data.append(veh_data)
data[frame_num] = frame_data
all_data = {
"metadata":metadata,
"data":data
}
if out_file is not None:
with open(out_file, 'w') as fp:
json.dump(all_data, fp)
return all_data
|
8b224af4edbd31570a432a8c551e95cd7a002818
| 3,642,321
|
import tensorflow as tf
def get_infer_iterator(src_dataset,
src_vocab_table,
batch_size,
eos,
sos,
src_max_len=None):
"""Get dataset for inference."""
  # Total number of examples in src_dataset
  # (3003 examples + 69 padding examples).
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_sos_id = tf.cast(src_vocab_table.lookup(tf.constant(sos)), tf.int32)
src_dataset = src_dataset.map(lambda src: tf.string_split([src]).values)
# Convert the word strings to ids
src_dataset = src_dataset.map(
lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))
# Add in the word counts.
src_dataset = src_dataset.map(lambda src: (tf.concat(
([src_sos_id], src, [src_eos_id]), 0), 2 + tf.size(src)))
def batching_func(x):
return x.padded_batch(
batch_size,
# The entry is the source line rows;
# this has unknown-length vectors. The last entry is
# the source row size; this is a scalar.
padded_shapes=(
tf.TensorShape([src_max_len]), # src
tf.TensorShape([])), # src_len
# Pad the source sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence.
padding_values=(
src_eos_id, # src
0),
drop_remainder=True) # src_len -- unused
batched_dataset = batching_func(src_dataset)
batched_dataset = batched_dataset.map(
lambda src_ids, src_seq_len: (
{"source": src_ids,
"source_sequence_length": src_seq_len}))
return batched_dataset
|
81ebd9f9f7d17bf1046f3fd0fafcbc3cdbb53ef4
| 3,642,322
|
import cv2
import imutils
import numpy as np
def get_num_hearts(image):
    """Returns the number of full and total hearts.
    Keyword arguments:
    image - image of hearts region
    """
# definitions:
lower_full = np.array([0, 15, 70])
upper_full = np.array([30, 35, 250])
lower_empty = np.array([150, 160, 220])
upper_empty = np.array([255, 255, 255])
full_heart_area_lower = 200
full_heart_area_upper = 300
half_heart_area_lower = 60
half_heart_area_upper = 100
# define heart image:
hearts_image = image[98:161,967:1200] # this the heart region
# initialize hearts
full_hearts = 0
empty_hearts = 0
# calculate shapes in hearts image
shapeMask_full = cv2.inRange(hearts_image, lower_full, upper_full)
shapeMask_empty = cv2.inRange(hearts_image, lower_empty, upper_empty)
# count full hearts
cnts_full_hearts = cv2.findContours(shapeMask_full.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts_full_hearts = cnts_full_hearts[0] if imutils.is_cv2() else cnts_full_hearts[1]
for c in cnts_full_hearts:
if cv2.contourArea(c) >= full_heart_area_lower and cv2.contourArea(c) <= full_heart_area_upper:
full_hearts = full_hearts +1
if cv2.contourArea(c) >= half_heart_area_lower and cv2.contourArea(c) <= half_heart_area_upper:
full_hearts = full_hearts + 0.5
# count empty hearts
cnts_empty_hearts = cv2.findContours(shapeMask_empty.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts_empty_hearts = cnts_empty_hearts[0] if imutils.is_cv2() else cnts_empty_hearts[1]
for c in cnts_empty_hearts:
if cv2.contourArea(c) >= full_heart_area_lower and cv2.contourArea(c) <= full_heart_area_upper:
empty_hearts = empty_hearts +1
if cv2.contourArea(c) >= half_heart_area_lower and cv2.contourArea(c) <= half_heart_area_upper:
empty_hearts = empty_hearts + 0.5
return full_hearts, empty_hearts+full_hearts
|
614d11546c5d458b5e9d485024da4bdb34688e24
| 3,642,323
|
import copy
def _clean_root(tool_xml):
"""XSD assumes macros have been expanded, so remove them."""
clean_tool_xml = copy.deepcopy(tool_xml)
to_remove = []
for macros_el in clean_tool_xml.getroot().findall("macros"):
to_remove.append(macros_el)
for macros_el in to_remove:
clean_tool_xml.getroot().remove(macros_el)
return clean_tool_xml
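# Hedged usage sketch: the original project presumably parses real tool XML, but
# for illustration the standard library's ElementTree is enough, since the
# function only needs getroot(), findall() and remove().
import xml.etree.ElementTree as ET
tree = ET.ElementTree(ET.fromstring("<tool><macros/><inputs/></tool>"))
cleaned = _clean_root(tree)
print([child.tag for child in cleaned.getroot()])   # ['inputs']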
|
9df0980265b26a2de1c88d2999f10cd5d1421e0b
| 3,642,324
|
import logging
import traceback
from flask import render_template
def page_not_found(e):
"""Return a custom 404 error."""
logging.error(':: A 404 was thrown a bad URL was requested ::')
logging.error(traceback.format_exc())
return render_template('404.html'), 404
|
11a553ab9ce52b4b1ba12916887f636d02aaef8f
| 3,642,325
|
def find_prime_factors(num):
"""Return prime factors of num."""
validate_integers(num)
zero_divisors_error(num)
potential_factor = 2
prime_factors = set()
while potential_factor <= num:
if num % potential_factor == 0:
prime_factors.add(potential_factor)
            num = num // potential_factor
else:
potential_factor += 1
return prime_factors
|
fe15c75d19081ec5fa5cfdefa0c95e4bcd20c26d
| 3,642,326
|
from datetime import datetime, date
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
elif isinstance(obj, (dict)):
return obj
raise TypeError("Type %s not serializable" % type(obj))
|
7da3adb9dde3315741b5dcad2fa276e9f0b582af
| 3,642,327
|
import os
import yaml
def read_yaml_files(directories):
"""Read the contents of all yaml files in a directory.
Args:
directories: List of directory names with configuration files
Returns:
config_dict: Dict of yaml read
"""
# Initialize key variables
yaml_found = False
yaml_from_file = ''
all_yaml_read = ''
# Check each directory in sequence
for config_directory in directories:
# Check if config_directory exists
if os.path.isdir(config_directory) is False:
log_message = (
'Configuration directory "{}" '
'doesn\'t exist!'.format(config_directory))
log.log2die_safe(1009, log_message)
# Cycle through list of files in directory
for filename in os.listdir(config_directory):
# Examine all the '.yaml' files in directory
if filename.endswith('.yaml'):
# Read YAML data
filepath = '{}/{}'.format(config_directory, filename)
yaml_from_file = read_yaml_file(filepath, as_string=True)
yaml_found = True
# Append yaml from file to all yaml previously read
all_yaml_read = '{}\n{}'.format(all_yaml_read, yaml_from_file)
# Verify YAML files found in directory
if yaml_found is False:
log_message = (
'No files found in directory "{}" with ".yaml" '
'extension.'.format(config_directory))
log.log2die_safe(1010, log_message)
# Return
config_dict = yaml.safe_load(all_yaml_read)
return config_dict
|
c849280c1b267984d779adfbd5a18d3316254117
| 3,642,328
|
import sys
import logging
def _check_for_crash(project_name, fuzz_target, testcase_path):
"""Check for crash."""
def docker_run(args):
command = ['docker', 'run', '--rm', '--privileged']
if sys.stdin.isatty():
command.append('-i')
return utils.execute(command + args)
logging.info('Checking for crash')
out, err, return_code = helper.reproduce_impl(
project=helper.Project(project_name),
fuzzer_name=fuzz_target,
valgrind=False,
env_to_add=[],
fuzzer_args=[],
testcase_path=testcase_path,
run_function=docker_run,
err_result=(None, None, None))
if return_code is None:
return None
logging.info('stdout =\n%s', out)
logging.info('stderr =\n%s', err)
# pylint: disable=unsupported-membership-test
has_start_marker = any(
marker in out or marker in err for marker in START_MARKERS)
has_end_marker = any(marker in out or marker in err for marker in END_MARKERS)
if not has_start_marker or not has_end_marker:
return None
return _get_dedup_token(out + err)
|
00531cff78355d1b7e84bcffdcf2c1cb943803e2
| 3,642,329
|
import os
import sys
def parse_from_compdb(compdb, file_to_parse):
"""Extracts the absolute file path of the file to parse and its arguments from compdb"""
absolute_filepath = None
file_arguments = []
compdb = compdb_parser.load_compdb(compdb)
if compdb:
commands = compdb.getAllCompileCommands()
for command in commands:
if file_to_parse in command.filename:
absolute_filepath = os.path.join(command.directory, command.filename)
file_arguments = list(command.arguments)
file_arguments = tu_parser.clean_args(file_arguments)
file_arguments = tu_parser.absolute_path_include(file_arguments, command.directory)
else:
sys.exit("ERROR: Failed to load compdb")
return absolute_filepath, file_arguments
|
d1f9b3098335571f5ef2c96727d9d8be5eecaacd
| 3,642,330
|
import subprocess
def _pyenv_version():
    """Determine which pyenv version is active.
    Returns:
        str: pyenv version
    """
    return subprocess.check_output(
        ['pyenv', 'version'], universal_newlines=True).split(' ')[0]
|
83c9f3a7ba15996392d8510f34e555e39c813a1a
| 3,642,331
|
import subprocess
def reset_config_on_routers(tgen, routerName=None):
"""
Resets configuration on routers to the snapshot created using input JSON
file. It replaces existing router configuration with FRRCFG_BKUP_FILE
Parameters
----------
* `tgen` : Topogen object
* `routerName` : router config is to be reset
"""
logger.debug("Entering API: reset_config_on_routers")
tgen.cfg_gen += 1
gen = tgen.cfg_gen
# Trim the router list if needed
router_list = tgen.routers()
if routerName:
if routerName not in router_list:
logger.warning(
"Exiting API: reset_config_on_routers: no router %s",
routerName,
exc_info=True,
)
return True
router_list = {routerName: router_list[routerName]}
delta_fmt = tgen.logdir + "/{}/delta-{}.conf"
# FRRCFG_BKUP_FILE
target_cfg_fmt = tgen.logdir + "/{}/frr_json_initial.conf"
run_cfg_fmt = tgen.logdir + "/{}/frr-{}.sav"
#
# Get all running configs in parallel
#
procs = {}
for rname in router_list:
logger.info("Fetching running config for router %s", rname)
procs[rname] = router_list[rname].popen(
["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
stdin=None,
stdout=open(run_cfg_fmt.format(rname, gen), "w"),
stderr=subprocess.PIPE,
)
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
logger.error(
"Get running config for %s failed %d: %s", rname, p.returncode, error
)
raise InvalidCLIError(
"vtysh show running error on {}: {}".format(rname, error)
)
#
# Get all delta's in parallel
#
procs = {}
for rname in router_list:
logger.info(
"Generating delta for router %s to new configuration (gen %d)", rname, gen
)
procs[rname] = tgen.net.popen(
[
"/usr/lib/frr/frr-reload.py",
"--test-reset",
"--input",
run_cfg_fmt.format(rname, gen),
"--test",
target_cfg_fmt.format(rname),
],
stdin=None,
stdout=open(delta_fmt.format(rname, gen), "w"),
stderr=subprocess.PIPE,
)
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
logger.error(
"Delta file creation for %s failed %d: %s", rname, p.returncode, error
)
raise InvalidCLIError("frr-reload error for {}: {}".format(rname, error))
#
# Apply all the deltas in parallel
#
procs = {}
for rname in router_list:
logger.info("Applying delta config on router %s", rname)
procs[rname] = router_list[rname].popen(
["/usr/bin/env", "vtysh", "-f", delta_fmt.format(rname, gen)],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
for rname, p in procs.items():
output, _ = p.communicate()
vtysh_command = "vtysh -f {}".format(delta_fmt.format(rname, gen))
if not p.returncode:
router_list[rname].logger.info(
'\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(
vtysh_command, output
)
)
else:
router_list[rname].logger.warning(
'\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(
vtysh_command, output
)
)
logger.error(
"Delta file apply for %s failed %d: %s", rname, p.returncode, output
)
            # We really need to enable this failure; however, currently frr-reload.py
            # produces invalid "no" commands as it just prepends "no", but some of the
# command forms lack matching values (e.g., final values). Until frr-reload
# is fixed to handle this (or all the CLI no forms are adjusted) we can't
# fail tests.
# raise InvalidCLIError("frr-reload error for {}: {}".format(rname, output))
#
# Optionally log all new running config if "show_router_config" is defined in
# "pytest.ini"
#
if show_router_config:
procs = {}
for rname in router_list:
logger.info("Fetching running config for router %s", rname)
procs[rname] = router_list[rname].popen(
["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
for rname, p in procs.items():
output, _ = p.communicate()
if p.returncode:
logger.warning(
"Get running config for %s failed %d: %s",
rname,
p.returncode,
output,
)
else:
logger.info(
"Configuration on router %s after reset:\n%s", rname, output
)
logger.debug("Exiting API: reset_config_on_routers")
return True
|
3f90d31d2ac1d85bf52a341433ccb96cb044c938
| 3,642,332
|
def ignore_exception(exception):
"""Check whether we can safely ignore this exception."""
if isinstance(exception, BadRequest):
if 'Query is too old' in exception.message or \
exception.message.startswith('Have no rights to send a message') or \
exception.message.startswith('Message_id_invalid') or \
exception.message.startswith('Message identifier not specified') or \
exception.message.startswith('Schedule_date_invalid') or \
exception.message.startswith('Message is not modified: specified new message content'):
return True
if isinstance(exception, Unauthorized):
if exception.message.lower() == 'forbidden: bot was blocked by the user':
return True
if exception.message.lower() == 'forbidden: message_author_required':
return True
if exception.message.lower() == 'forbidden: bot is not a member of the supergroup chat':
return True
if exception.message.lower() == 'forbidden: user is deactivated':
return True
if exception.message.lower() == 'forbidden: bot was kicked from the group chat':
return True
if exception.message.lower() == 'forbidden: bot was kicked from the supergroup chat':
return True
if exception.message.lower() == 'forbidden: chat_write_forbidden':
return True
if isinstance(exception, TimedOut):
return True
return False
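# Hedged usage sketch, assuming python-telegram-bot's pre-v20 error classes
# (BadRequest, Unauthorized, TimedOut) and a standard error handler; the handler
# name and wiring are illustrative, not taken from the original bot.
def error_handler(update, context):
    if ignore_exception(context.error):
        return
    raise context.error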
|
3f0182fbe7cec2e5978c61c110d10ed039869fd7
| 3,642,333
|
import tensorflow as tf
def rmse_loss(prediction, ground_truth, weight_map=None):
    """
    :param prediction: the current prediction of the ground truth.
    :param ground_truth: the measurement you are approximating with regression.
    :param weight_map: a weight map for the cost function.
    :return: sqrt(mean(differences squared))
    """
if weight_map is not None:
residuals = tf.subtract(prediction, ground_truth)
residuals = tf.multiply(residuals, residuals)
residuals = tf.multiply(residuals, weight_map)
return tf.sqrt(tf.reduce_mean(residuals) / tf.reduce_mean(weight_map))
else:
return tf.sqrt(tf.losses.mean_squared_error(prediction, ground_truth))
|
aef45b5549151a3d6c026dd34ccd9b6aa2b6d695
| 3,642,334
|
def get_corrupted_simulation_docs():
"""Returns iterable of simdocs without samples (when num_paticles >0)
When num_particle<=0, no samples are created and the simulation is considered Finished anyway.
These ignored simulations
"""
return db[DBCOLLECTIONS.SIMULATION].find({
'procstatus.status': PROCSTATUS.FINISHED,
'samples': {'$exists': True, "$eq": []},
'num_particles': {'$gt': 0},
})
|
f23620d7ea09fd37790ceb682f15dc0183b17c9c
| 3,642,335
|
def residual_block(x: Tensor, downsample: bool, filters: int, kernel_size: int = 3) -> Tensor:
"""
Parameters
----------
x : Tensor
DESCRIPTION.
downsample : bool
DESCRIPTION.
filters : int
DESCRIPTION.
kernel_size : int, optional
DESCRIPTION. The default is 3.
Returns
-------
Tensor
DESCRIPTION.
"""
y = Conv2D(kernel_size=kernel_size,
strides= (1 if not downsample else 2),
filters=filters,
padding="same")(x)
y = relu_bn(y)
y = Conv2D(kernel_size=kernel_size,
strides=1,
filters=filters,
padding="same")(y)
if downsample:
x = Conv2D(kernel_size=1,
strides=2,
filters=filters,
padding="same")(x)
out = Add()([x, y])
out = relu_bn(out)
return out
|
092c41b6508ccb52d545219e0d2e254c3430362a
| 3,642,336
|
import skimage.filters
def dehaze(img, level):
"""use Otsu to threshold https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_multiotsu.html
n.b. threshold used to mask image: dark values are zeroed, but result is NOT binary
level: value 1..5 with larger values preserving more bright voxels
level: dark_classes/total_classes
1: 3/4
2: 2/3
3: 1/2
4: 1/3
5: 1/4
"""
level = bound(1, 5, level)
n_classes = abs(3 - level) + 2
dark_classes = 4 - level
dark_classes = bound(1, 3, dark_classes)
thresholds = skimage.filters.threshold_multiotsu(img, n_classes)
thresh = thresholds[dark_classes - 1]
print("Zeroing voxels darker than ", thresh)
img[img < thresh] = 0
return img
|
bdb6f55fd09986a2abac92728644777fae77f6ca
| 3,642,337
|
import numpy as np
def matthews_correlation_coefficient(tp, tn, fp, fn):
"""Return Matthews correlation coefficient for values from a confusion matrix.
Implementation is based on the definition from wikipedia:
https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
"""
numerator = (tp * tn) - (fp * fn)
denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
if denominator == 0:
denominator = 1
return float(numerator) / denominator
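# Small worked example (the counts are made up): a perfect classifier gives an
# MCC of 1.0, while a completely uninformative one gives 0.0.
print(matthews_correlation_coefficient(tp=50, tn=50, fp=0, fn=0))    # 1.0
print(matthews_correlation_coefficient(tp=25, tn=25, fp=25, fn=25))  # 0.0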
|
2048fb05664b3fcab99e08c51eb11a222676df84
| 3,642,338
|
import os
import yaml
def find_graph(hostnames):
"""
Find a graph file contains all devices in testbed.
duts are spcified by hostnames
Parameters:
hostnames: list of duts in the target testbed.
"""
filename = os.path.join(LAB_GRAPHFILE_PATH, LAB_CONNECTION_GRAPH_FILE)
with open(filename) as fd:
file_list = yaml.safe_load(fd)
    # Find the graph file that contains all duts from hostnames.
for fn in file_list:
print_debug_msg(debug_fname, "Looking at conn graph file: %s for hosts %s" % (fn, hostnames))
filename = os.path.join(LAB_GRAPHFILE_PATH, fn)
lab_graph = Parse_Lab_Graph(filename)
lab_graph.parse_graph()
print_debug_msg(debug_fname, "For file %s, got hostnames %s" % (fn, lab_graph.devices))
if lab_graph.contains_hosts(hostnames):
print_debug_msg(debug_fname, ("Returning lab graph from conn graph file: %s for hosts %s" % (fn, hostnames)))
return lab_graph
    # Fall back to returning an empty connection graph. This is needed for the
    # KVM test, which requires a graph file (previously some hardcoded file was
    # used). Here we provide an empty one for that purpose.
lab_graph = Parse_Lab_Graph(os.path.join(LAB_GRAPHFILE_PATH, EMPTY_GRAPH_FILE))
lab_graph.parse_graph()
return lab_graph
|
307c939bdac3bf106ba12d12fbf0a41b06b3d525
| 3,642,339
|
import re
def run_analysis(apk_dir, md5_hash, package):
"""Run Dynamic File Analysis."""
analysis_result = {}
logger.info('Dynamic File Analysis')
domains = {}
clipboard = []
# Collect Log data
data = get_log_data(apk_dir, package)
clip_tag = 'I/CLIPDUMP-INFO-LOG'
clip_tag2 = 'I CLIPDUMP-INFO-LOG'
# Collect Clipboard
for log_line in data['logcat']:
if clip_tag in log_line:
clipboard.append(log_line.replace(clip_tag, 'Process ID '))
if clip_tag2 in log_line:
log_line = log_line.split(clip_tag2)[1]
clipboard.append(log_line)
    # Extract URLs using a custom regex
url_pattern = re.compile(
r'((?:https?://|s?ftps?://|file://|'
r'javascript:|data:|www\d{0,3}'
r'[.])[\w().=/;,#:@?&~*+!$%\'{}-]+)', re.UNICODE)
urls = re.findall(url_pattern, data['traffic'].lower())
if urls:
urls = list(set(urls))
else:
urls = []
# Domain Extraction and Malware Check
logger.info('Performing Malware Check on extracted Domains')
domains = MalwareDomainCheck().scan(urls)
    # Email Extraction Regex
emails = []
regex = re.compile(r'[\w.-]{1,20}@[\w-]{1,20}\.[\w]{2,10}')
for email in regex.findall(data['traffic'].lower()):
if (email not in emails) and (not email.startswith('//')):
emails.append(email)
# Tar dump and fetch files
all_files = get_app_files(apk_dir, md5_hash, package)
analysis_result['urls'] = urls
analysis_result['domains'] = domains
analysis_result['emails'] = emails
analysis_result['clipboard'] = clipboard
analysis_result['xml'] = all_files['xml']
analysis_result['sqlite'] = all_files['sqlite']
analysis_result['other_files'] = all_files['others']
analysis_result['tls_tests'] = get_tls_logs(apk_dir, md5_hash)
return analysis_result
|
f5108c8e3a09799e451bc8182018acf1481615e4
| 3,642,340
|
import datetime
import fastapi
async def quotes(
ticker: str, date: datetime.date,
uow: UoW = fastapi.Depends(dependendies.get_uow),
) -> ListResponse[Ticker]:
"""Return the list of available tickers."""
with uow:
results = uow.quotes.iterator({'ticker': ticker, 'date': date})
return ListResponse(results=results)
|
4267815af0ec13bc617870ee16b7b452a66d7891
| 3,642,341
|
from typing import Tuple
from typing import Union
def delete_snapshot(client, data_args) -> Tuple[str, dict, Union[list, dict]]:
""" Delete exsisting snapshot from the system.
:type client: ``Client``
:param client: client which connects to api.
:type data_args: ``dict``
:param data_args: request arguments.
:return: human readable format, context output and the original raw response.
:rtype: ``tuple``
"""
snapshot_ids = argToList(data_args.get('snapshot_ids'))
body = {'ids': snapshot_ids}
client.do_request('DELETE', '/plugin/products/threat-response/api/v1/snapshot', data=body)
return f'Snapshot {",".join(snapshot_ids)} deleted successfully.', {}, {}
|
509298677120b61dd9aa050dbe40aacffff6d97c
| 3,642,342
|
from backend.models.prices import PriceDB
def api_user_submissions(user_id):
"""Gets the price submissions for a user matching the user_id.
Example Request:
HTTP GET /api/v1/users/56cf848722e7c01d0466e533/submissions
Example Response:
{
"success": "OK",
"user_submissions": [
{
"submitted_timestamp": "2016-02-25 22:52:32+00:00",
"image": null,
"business_details": {
"google_places": {
... truncated for ease of reading ...
},
"open_time": null,
"business_id": "56cf859195bfb3ccb12582e5",
"address": "6200 N Broad St, Philadelphia, PA 19141, United States",
"phone_number": "(215) 549-5089",
"name": "Shell",
"close_time": null
},
"product_id": "56bbda2dd8d9a114db76ca5c",
"price": 153,
"user_id": "56cf848722e7c01d040ae533",
"price_id": "56cf85b022e7c0197cf2a02b"
},
...
]
}
"""
if not user_is_authenticated(user_id=user_id):
return json_error("Unauthorized", status_code=403)
price_db = PriceDB()
submissions = price_db.get_submissions(user_id=user_id)
return json_success("OK", user_submissions=submissions)
|
498c91451896dde6d347492acad9573839c45e13
| 3,642,343
|
def get_pdf_info(pdf_path: str) -> PdfInfo:
"""Get meta information of a PDF file."""
info: PdfInfo = PdfInfo(path=pdf_path)
keys = get_flat_cfg_file(path="~/.edapy/pdf_keys.csv")
ignore_keys = get_flat_cfg_file(path="~/.edapy/pdf_ignore_keys.csv")
for key in keys:
info.user_attributes[key] = None
info.is_errornous = False
info.is_encrypted = False
info.nb_pages = -1
info.nb_toc_top_level = -1
info.nb_characters = 0
with open(pdf_path, "rb") as fp:
try:
pdf_toread = PdfFileReader(fp, strict=False)
except PyPDF2.utils.PdfReadError:
info.is_errornous = True
return info
except KeyError as e:
logger.warning(
"https://github.com/mstamy2/PyPDF2/issues/388 for "
f" PDF '{pdf_path}': {e}"
)
return info
except OSError as e:
logger.warning(f"OSError for PDF '{pdf_path}': {e}")
return info
except AssertionError as e:
logger.warning(f"AssertionError for PDF '{pdf_path}': {e}")
return info
except TypeError as e:
logger.warning(f"TypeError for PDF '{pdf_path}': {e}")
return info
try:
tl_toc = [el for el in pdf_toread.outlines if not isinstance(el, list)]
info.nb_toc_top_level = len(tl_toc)
except PyPDF2.utils.PdfReadError as e:
logger.error(f"{pdf_path}: PyPDF2.utils.PdfReadError {e}")
except ValueError as e:
logger.error(f"{pdf_path}: ValueError {e}")
except TypeError as e:
logger.error(f"{pdf_path}: TypeError {e}")
info_t = enhance_pdf_info(info, pdf_toread, pdf_path, keys, ignore_keys)
return info_t
|
3f80d9d50261e6b7a9e1c90b504abcbeed2b214d
| 3,642,344
|
import json
import zlib
import base64
def convert_gz_json_type(value):
"""Provide an ArgumentParser type function to unmarshal a b64 gz JSON string.
"""
return json.loads(zlib.decompress(base64.b64decode(value)))
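# Hedged sketch of how this type function is typically registered with argparse;
# the --payload flag and the encode_gz_json helper are hypothetical, added only
# to demonstrate the round trip.
import argparse

def encode_gz_json(obj):
    return base64.b64encode(zlib.compress(json.dumps(obj).encode())).decode()

parser = argparse.ArgumentParser()
parser.add_argument('--payload', type=convert_gz_json_type)
args = parser.parse_args(['--payload', encode_gz_json({'k': 1})])
print(args.payload)   # {'k': 1}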
|
1cf0300f40c8367b9129f230a7fef0c9b89ba012
| 3,642,345
|
def get_tag(tag):
"""
Returns a tag object for the string passed to it
If it does not appear in the database then return a new tag object
If it does exisit in the data then return the database object
"""
tag = tag.lower()
try:
return Session.query(Tag).filter_by(name=unicode(tag)).one()
except NoResultFound as nrf:
t = Tag(unicode(tag))
Session.add(t)
return t
|
e11ae349fa4d436a6b7057bf0b5d8b74a7e0f4e4
| 3,642,346
|
import requests
import json
def makeApiCall( url, endpointParams, debug = 'no' ) :
""" Request data from endpoint with params
Args:
url: string of the url endpoint to make request from
endpointParams: dictionary keyed by the names of the url parameters
Returns:
object: data from the endpoint
"""
data = requests.get( url, endpointParams ) # make get request
response = dict() # hold response info
response['url'] = url # url we are hitting
response['endpoint_params'] = endpointParams #parameters for the endpoint
response['endpoint_params_pretty'] = json.dumps( endpointParams, indent = 4 ) # pretty print for cli
response['json_data'] = json.loads( data.content ) # response data from the api
response['json_data_pretty'] = json.dumps( response['json_data'], indent = 4 ) # pretty print for cli
if ( 'yes' == debug ) : # display out response info
displayApiCallData( response ) # display response
return response
|
5f9362d6b34ee4809714ae7380bfaa5500763177
| 3,642,347
|
def get_closest_area(
lat: float, lng: float, locations: t.List[config.Area]
) -> t.Optional[config.Area]:
"""Return area if image taken within 50 km from center of area"""
distances = [
(great_circle((area.lat, area.lng), (lat, lng)).km, area) for area in locations
]
distance, closest_area = min(distances)
return closest_area if distance < 50 else None
|
3deb96bc1863ed1d02699cff0a48edd9471bbade
| 3,642,348
|
def disable(request):
"""
Disable Pool Member Running Script
"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
if id_server_pool and ids:
client.create_pool().disable(split_to_array(ids))
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_disable'))
else:
messages.add_message(
request, messages.ERROR, error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
|
9c56d891a86db3e9f999cb8f329630172ea9703e
| 3,642,349
|
def fastapi_native_middleware_factory():
"""Create a FastAPI app that uses native-style middleware."""
app = FastAPI()
# Exception handler for `/client_error_from_handled_exception`
app.add_exception_handler(IndexError, client_induced_exception_handler)
app.add_middleware(BaseHTTPMiddleware, dispatch=xray_middleware)
app.add_api_route("/", handle_request)
app.add_api_route("/client_error_as_http_exception", handle_with_http_exception)
app.add_api_route(
"/client_error_as_response",
handle_request,
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
)
app.add_api_route("/client_error_from_handled_exception", handle_with_indexerror)
app.add_api_route("/delay", handle_with_delay)
app.add_api_route("/exception", handle_with_keyerror)
app.add_api_route(
"/unauthorized", handle_request, status_code=HTTP_401_UNAUTHORIZED
)
return app
|
5c85df93f126443b19b10b58fc7cb57661fd3388
| 3,642,350
|
def _partial_dependence(
pipeline,
X,
features,
percentiles=(0.05, 0.95),
grid_resolution=100,
kind="average",
custom_range=None,
):
"""Compute the partial dependence for features of X.
Args:
pipeline (PipelineBase): pipeline.
X (pd.DataFrame): Holdout data
features (list(str)): Column names of X to compute the partial dependence for.
percentiles (tuple float): Percentiles to use in range calculation for a given
feature.
grid_resolution: Number of points in range of values used for each feature in
partial dependence calculation.
kind (str): The type of predictions to return.
custom_range (dict[str, np.ndarray]): Mapping from column name in X to
range of values to use in partial dependence. If custom_range is specified,
the percentile + interpolation procedure is skipped and the values in custom_range
are used.
Returns:
dict with 'average', 'individual', 'values' keys. 'values' is a list of
the values used in the partial dependence for each feature.
'average' and 'individual' are averaged and individual predictions for
each point in the grid.
"""
if grid_resolution <= 1:
raise ValueError("'grid_resolution' must be strictly greater than 1.")
custom_range = custom_range or {}
custom_range = {
feature: custom_range.get(feature)
for feature in features
if feature in custom_range
}
grid, values = _grid_from_X(
X.loc[:, features],
percentiles,
grid_resolution,
custom_range,
)
averaged_predictions, predictions = _partial_dependence_calculation(
pipeline,
grid,
features,
X,
)
# reshape predictions to
# (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...)
predictions = predictions.reshape(-1, X.shape[0], *[val.shape[0] for val in values])
# reshape averaged_predictions to
# (n_outputs, n_values_feature_0, n_values_feature_1, ...)
averaged_predictions = averaged_predictions.reshape(
-1, *[val.shape[0] for val in values]
)
if kind == "average":
return {"average": averaged_predictions, "values": values}
elif kind == "individual":
return {"individual": predictions, "values": values}
else: # kind='both'
return {
"average": averaged_predictions,
"individual": predictions,
"values": values,
}
|
1b8ae5811b7e815805e95d1c4ccc2fb1127187a9
| 3,642,351
|
from typing import Union
from typing import Tuple
import numpy as np
def nplog(
a: np.ndarray, deriv: bool = False, eps: float = 1e-30, verbose: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""$C^2$ extension of $\ln(a)$ below `eps`
Args:
a: a Numpy array
deriv: if `True`, the first derivative is also returned
eps: a lower bound
verbose: whether diagnoses are printed
Returns:
$\ln(a)$ $C^2$-extended below `eps`,
with its derivative if `deriv` is `True`
"""
if np.min(a) > eps:
loga = np.log(a)
return [loga, 1.0 / a] if deriv else loga
else:
logarreps = np.log(np.maximum(a, eps))
        logarr_smaller = np.log(eps) - (eps - a) * (3.0 * eps - a) / (2.0 * eps * eps)
if verbose:
n_small_args = np.sum(a < eps)
if n_small_args > 0:
finals = "s" if n_small_args > 1 else ""
print(
f"nplog: {n_small_args} argument{finals} smaller than {eps}: mini = {np.min(a)}"
)
loga = np.where(a > eps, logarreps, logarr_smaller)
if deriv:
der_logarreps = 1.0 / np.maximum(a, eps)
der_logarr_smaller = (2.0 * eps - a) / (eps * eps)
der_loga = np.where(a > eps, der_logarreps, der_logarr_smaller)
return loga, der_loga
else:
return loga
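# Tiny illustration (the values are arbitrary): below eps the C^2 extension
# stays finite instead of returning -inf, and the derivative comes back when asked.
a = np.array([1.0, 1e-40, 0.0])
val, der = nplog(a, deriv=True, eps=1e-30)
print(val[0], der[0])   # 0.0 1.0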
|
cc2258b78aca58c71d61aa058b9de0d408165268
| 3,642,352
|
from typing import List
import os
import json
def translate_names(recipe_names: List[str], locale: str) -> List[str]:
"""Translates a list of recipe names to the given locale."""
if locale in ['auto', 'en-us']:
return recipe_names
translation_path = os.path.join('recipes', 'translations.json')
with open(translation_path, encoding='utf-8') as fp:
translations = json.load(fp)
return [translations[name][locale] for name in recipe_names]
|
2294f1ee2556521ae080eee7d9050873d9a723cb
| 3,642,353
|
def _section_to_text(config_section: ConfigSection) -> str:
"""Convert a single config section to text"""
return (f'[{config_section.name}]{LINE_SEP}'
f'{LINE_SEP.join(_option_to_text(option) for option in config_section.options)}{LINE_SEP}')
|
ee5feed2f9453c9f3338997d5294bada22961f07
| 3,642,354
|
import tempfile
def TempFileDecorator(func):
"""Populates self.tempfile with path to a temporary writeable file"""
def f(self, *args, **kwargs):
with tempfile.NamedTemporaryFile(dir=self.tempdir, delete=False) as f:
self.tempfile = f.name
return func(self, *args, **kwargs)
f.__name__ = func.__name__
f.__doc__ = func.__doc__
f.__module__ = func.__module__
return TempDirDecorator(f)
|
d696a16cade0eee36fee9cba4c0d8b96fbcc79e4
| 3,642,355
|
import string
import random
def get_random_string(length: int) -> str:
"""
Returns a random string starting with a lower-case letter.
Later parts can contain numbers, lower- and uppercase letters.
Note: Random Seed should be set somewhere in the program!
:param length: How long the required string must be. length > 0 required.
:return: a randomly created string
:raises: ValueError for zero and negative length
"""
if length < 1:
raise ValueError("Random Strings must have length 1 minimum.")
    # characters after the first may be letters (any case) or digits
    letters = string.ascii_letters + string.digits
first_letter = random.choice(string.ascii_lowercase)
result_str = ''.join(random.choice(letters) for i in range(length - 1))
return first_letter + result_str
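# Illustrative use only (the seed value is arbitrary): seeding makes the output
# reproducible, as the docstring's note about the random seed suggests.
random.seed(42)
print(get_random_string(8))   # an 8-character string starting with a lower-case letter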
|
6cf20ce7d158ac158ffa49cac427c396cfd840db
| 3,642,356
|
import numpy
def subset_by_month(prediction_dict, desired_month):
"""Subsets examples by month.
:param prediction_dict: See doc for `write_file`.
:param desired_month: Desired month (integer from 1...12).
:return: prediction_dict: Same as input but with fewer examples.
"""
error_checking.assert_is_integer(desired_month)
error_checking.assert_is_geq(desired_month, 1)
error_checking.assert_is_leq(desired_month, 12)
all_months = numpy.array([
int(time_conversion.unix_sec_to_string(t, '%m'))
for t in prediction_dict[INIT_TIMES_KEY]
], dtype=int)
desired_indices = numpy.where(all_months == desired_month)[0]
return subset_by_index(
prediction_dict=prediction_dict, desired_indices=desired_indices
)
|
4df4eca74bcb433e85a06a99b32877890909ba86
| 3,642,357
|
import time
def build_dataset_mce(platform, dataset_name, columns):
"""
Creates MetadataChangeEvent for the dataset.
"""
actor, sys_time = "urn:li:corpuser:etl", int(time.time())
fields = []
for column in columns:
fields.append({
"fieldPath": column["name"],
"nativeDataType": repr(column["type"]),
"type": { "type":get_column_type(column["type"]) },
"description": column.get("comment", None)
})
schema_metadata = {
"schemaName": dataset_name,
"platform": f"urn:li:dataPlatform:{platform}",
"version": 0,
"created": { "time": sys_time, "actor": actor },
"lastModified": { "time":sys_time, "actor": actor },
"hash": "",
"platformSchema": { "tableSchema": "" },
"fields": fields
}
return {
"auditHeader": None,
"proposedSnapshot":("com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot", {
"urn": f"urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},PROD)",
"aspects": [("com.linkedin.pegasus2avro.schema.SchemaMetadata", schema_metadata)]
}),
"proposedDelta": None
}
|
22fab14fe4a6e9f01b24ca67dce40b5860aebbe2
| 3,642,358
|
from copy import copy
import re
import logging
def tag_source_file(path):
"""Returns a list of tuples: (line_text, list_of_tags)"""
file = open(path, "r")
# The list of tagged lines
tagged_lines = []
# The list of tags that currently apply
current_tags = []
# Use this to store snapshots of current_tags
# Regexes for detecting when tags start and end
    begin_re = re.compile(r".*?// BEGIN (.*).*")
    end_re = re.compile(r".*?// END (.*).*")
line_num = 0
for line in file:
# If this line contains "//-", "/*-" or "-*/", it's a comment
# that should not be rendered.
if "/*-" in line or "-*/" in line or "//-" in line:
pass
# If we entered a tag, add it to the list
elif begin_re.search(line):
tag = begin_re.search(line).group(1)
if tag in current_tags:
logging.warn("{0}:{1}: \"{2}\" was entered twice without exiting it".format(path, line_num, tag))
current_tags.append(tag)
# If we left a tag, remove it
elif end_re.search(line):
tag = end_re.search(line).group(1)
if tag not in current_tags:
logging.warn("{0}:{1}: \"{2}\" was exited, but had not yet been entered".format(path, line_num, tag))
current_tags.remove(tag)
# If it's neither, add it to the list of tagged lines
else:
tagged_lines.append((line, copy(current_tags), (path, line_num)))
line_num += 1
# TODO: Error if we left a file with an unclosed tag
return tagged_lines
|
0d46748a2145bcd70115bf5047fce21184a4f535
| 3,642,359
|
import numpy as np
def FindMissingReconstruction(X, track_i):
"""
Find the points that will be newly added
Parameters
----------
X : ndarray of shape (F, 3)
3D points
track_i : ndarray of shape (F, 2)
2D points of the newly registered image
Returns
-------
new_point : ndarray of shape (F,)
The indicator of new points that are valid for the new image and are
not reconstructed yet
"""
new_point = np.logical_and(X[:, 0] == -1, track_i[:, 0] != -1)
return new_point
|
36e0f0f2dbc6a68f20a349a92fc1c882d5d0b73a
| 3,642,360
|
def deserialize(iodata):
"""
Turn IOData back into a Python object of the appropriate kind.
An object is deemed deserializable if
1) it is recorded in SERIALIZABLE_REGISTRY and has a `.deserialize` method
2) there exists a function `file_io_serializers.<typename>_deserialize`
Parameters
----------
iodata: IOData
Returns
-------
class instance
"""
typename = iodata.typename
if typename in io_serializers.SERIALIZABLE_REGISTRY:
cls = io_serializers.SERIALIZABLE_REGISTRY[typename]
return cls.deserialize(iodata)
if hasattr(io_serializers, typename + '_deserialize'):
deserialize_method = getattr(io_serializers, typename + '_deserialize')
return deserialize_method(iodata)
raise NotImplementedError("No implementation for converting {} data to Python object.".format(typename))
|
3eb21f34b1571626a40fc52bd2eae2a42ff7dbb4
| 3,642,361
|
from typing import Union
from typing import Iterable
from typing import Tuple
from typing import Optional
import datetime as dt
import numpy as np
def business_day_offset(dates: DateOrDates, offsets: Union[int, Iterable[int]], roll: str= 'raise', calendars: Union[str, Tuple[str, ...]]=(), week_mask: Optional[str]=None) -> DateOrDates:
"""
Apply offsets to the dates and move to the nearest business date
:param dates: The input date or dates
:param offsets: The number of days by which to adjust the dates
:param roll: Which direction to roll, in order to get to the nearest business date
:param calendars: Calendars to use for holidays
:param week_mask: Which days are considered weekends (defaults to Saturday and Sunday)
:return: A date (if dates is a single date) or tuple of dates, adjusted by the offsets
**Examples**
>>> import datetime as dt
>>> prev_bus_date = business_day_offset(dt.date.today(), -1, roll='preceding')
"""
calendar = GsCalendar.get(calendars)
res = np.busday_offset(dates, offsets, roll, busdaycal=calendar.business_day_calendar(week_mask)).astype(dt.date)
return tuple(res) if isinstance(res, np.ndarray) else res
|
32769b604b8e671b8318b0a6e00cdb8331774870
| 3,642,362
|
def factorial(n):
"""
Return n! - the factorial of n.
>>> factorial(1)
1
>>> factorial(0)
1
>>> factorial(3)
6
"""
    if n < 0:
        return 0
    elif n <= 1:
        return 1
    else:
        return n*factorial(n-1)
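# Optional, illustrative check: the doctests in the docstring above can be run
# directly with the standard doctest module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()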
|
da5bc6f68375c7db03b7b2bdac1fec2b476ba563
| 3,642,363
|
import os.path as op
import numpy as np
from scipy.io import loadmat
from scipy.interpolate import interp1d
def _load_absorption(freqs):
"""Load molar extinction coefficients."""
# Data from https://omlc.org/spectra/hemoglobin/summary.html
# The text was copied to a text file. The text before and
# after the table was deleted. The the following was run in
# matlab
# extinct_coef=importdata('extinction_coef.txt')
# save('extinction_coef.mat', 'extinct_coef')
#
# Returns data as [[HbO2(freq1), Hb(freq1)],
# [HbO2(freq2), Hb(freq2)]]
extinction_fname = op.join(op.dirname(__file__), '..', '..', 'data',
'extinction_coef.mat')
a = loadmat(extinction_fname)['extinct_coef']
interp_hbo = interp1d(a[:, 0], a[:, 1], kind='linear')
interp_hb = interp1d(a[:, 0], a[:, 2], kind='linear')
ext_coef = np.array([[interp_hbo(freqs[0]), interp_hb(freqs[0])],
[interp_hbo(freqs[1]), interp_hb(freqs[1])]])
abs_coef = ext_coef * 0.2303
return abs_coef
|
e31c5d46c5e2de81627834f51ddb79f6f1265e5a
| 3,642,364
|
import math
def pw_sin_relaxation(b, x, w, x_pts, relaxation_side=RelaxationSide.BOTH, pw_repn='INC', safety_tol=1e-10):
"""
This function creates piecewise relaxations to relax "w=sin(x)" for -pi/2 <= x <= pi/2.
Parameters
----------
b: pyo.Block
x: pyomo.core.base.var.SimpleVar or pyomo.core.base.var._GeneralVarData
The "x" variable in sin(x). The lower bound on x must greater than or equal to
-pi/2 and the upper bound on x must be less than or equal to pi/2.
w: pyomo.core.base.var.SimpleVar or pyomo.core.base.var._GeneralVarData
        The auxiliary variable replacing sin(x)
x_pts: list of float
A list of floating point numbers to define the points over which the piecewise
representation will be generated. This list must be ordered, and it is expected
that the first point (x_pts[0]) is equal to x.lb and the last point (x_pts[-1])
is equal to x.ub
relaxation_side: minlp.RelaxationSide
Provide the desired side for the relaxation (OVER, UNDER, or BOTH)
pw_repn: str
        This must be one of the valid strings for the piecewise representation to use (directly from the Piecewise
component). Use help(Piecewise) to learn more.
safety_tol: float
amount to lift the overestimator or drop the underestimator. This is used to ensure none of the feasible
region is cut off by error in computing the over and under estimators.
"""
check_var_pts(x, x_pts)
expr = pyo.sin(x)
xlb = x_pts[0]
xub = x_pts[-1]
if x.is_fixed() or xlb == xub:
b.x_fixed_con = pyo.Constraint(expr=w == (pyo.value(expr)))
return
if xlb < -np.pi / 2.0:
return
if xub > np.pi / 2.0:
return
if x_pts[0] >= 0:
pw_univariate_relaxation(b=b, x=x, w=w, x_pts=x_pts, f_x_expr=expr,
shape=FunctionShape.CONCAVE, relaxation_side=relaxation_side, pw_repn=pw_repn)
return
if x_pts[-1] <= 0:
pw_univariate_relaxation(b=b, x=x, w=w, x_pts=x_pts, f_x_expr=expr,
shape=FunctionShape.CONVEX, relaxation_side=relaxation_side, pw_repn=pw_repn)
return
OE_tangent_x, OE_tangent_slope, OE_tangent_intercept = _compute_sine_overestimator_tangent_point(xlb)
UE_tangent_x, UE_tangent_slope, UE_tangent_intercept = _compute_sine_underestimator_tangent_point(xub)
non_piecewise_overestimators_pts = []
non_piecewise_underestimator_pts = []
if relaxation_side == RelaxationSide.OVER:
if OE_tangent_x < xub:
new_x_pts = [i for i in x_pts if i < OE_tangent_x]
new_x_pts.append(xub)
non_piecewise_overestimators_pts = [OE_tangent_x]
non_piecewise_overestimators_pts.extend(i for i in x_pts if i > OE_tangent_x)
x_pts = new_x_pts
elif relaxation_side == RelaxationSide.UNDER:
if UE_tangent_x > xlb:
new_x_pts = [xlb]
new_x_pts.extend(i for i in x_pts if i > UE_tangent_x)
non_piecewise_underestimator_pts = [i for i in x_pts if i < UE_tangent_x]
non_piecewise_underestimator_pts.append(UE_tangent_x)
x_pts = new_x_pts
b.non_piecewise_overestimators = pyo.ConstraintList()
b.non_piecewise_underestimators = pyo.ConstraintList()
for pt in non_piecewise_overestimators_pts:
b.non_piecewise_overestimators.add(w <= math.sin(pt) + safety_tol + (x - pt) * math.cos(pt))
for pt in non_piecewise_underestimator_pts:
b.non_piecewise_underestimators.add(w >= math.sin(pt) - safety_tol + (x - pt) * math.cos(pt))
intervals = []
for i in range(len(x_pts)-1):
intervals.append((x_pts[i], x_pts[i+1]))
b.interval_set = pyo.Set(initialize=range(len(intervals)), ordered=True)
b.x = pyo.Var(b.interval_set)
b.w = pyo.Var(b.interval_set)
if len(intervals) == 1:
b.lam = pyo.Param(b.interval_set, mutable=True)
b.lam[0].value = 1.0
else:
b.lam = pyo.Var(b.interval_set, within=pyo.Binary)
b.x_lb = pyo.ConstraintList()
b.x_ub = pyo.ConstraintList()
b.x_sum = pyo.Constraint(expr=x == sum(b.x[i] for i in b.interval_set))
b.w_sum = pyo.Constraint(expr=w == sum(b.w[i] for i in b.interval_set))
b.lam_sum = pyo.Constraint(expr=sum(b.lam[i] for i in b.interval_set) == 1)
b.overestimators = pyo.ConstraintList()
b.underestimators = pyo.ConstraintList()
for i, tup in enumerate(intervals):
x0 = tup[0]
x1 = tup[1]
b.x_lb.add(x0 * b.lam[i] <= b.x[i])
b.x_ub.add(b.x[i] <= x1 * b.lam[i])
# Overestimators
if relaxation_side in {RelaxationSide.OVER, RelaxationSide.BOTH}:
if x0 < 0 and x1 <= 0:
slope = (math.sin(x1) - math.sin(x0)) / (x1 - x0)
intercept = math.sin(x0) - slope * x0
b.overestimators.add(b.w[i] <= slope * b.x[i] + (intercept + safety_tol) * b.lam[i])
elif (x0 < 0) and (x1 > 0):
tangent_x, tangent_slope, tangent_intercept = _compute_sine_overestimator_tangent_point(x0)
if tangent_x <= x1:
b.overestimators.add(b.w[i] <= tangent_slope * b.x[i] + (tangent_intercept + safety_tol) * b.lam[i])
b.overestimators.add(b.w[i] <= math.cos(x1) * b.x[i] + (math.sin(x1) - x1 * math.cos(x1) + safety_tol) * b.lam[i])
else:
slope = (math.sin(x1) - math.sin(x0)) / (x1 - x0)
intercept = math.sin(x0) - slope * x0
b.overestimators.add(b.w[i] <= slope * b.x[i] + (intercept + safety_tol) * b.lam[i])
else:
b.overestimators.add(b.w[i] <= math.cos(x0) * b.x[i] + (math.sin(x0) - x0 * math.cos(x0) + safety_tol) * b.lam[i])
b.overestimators.add(b.w[i] <= math.cos(x1) * b.x[i] + (math.sin(x1) - x1 * math.cos(x1) + safety_tol) * b.lam[i])
# Underestimators
if relaxation_side in {RelaxationSide.UNDER, RelaxationSide.BOTH}:
if x0 >= 0 and x1 > 0:
slope = (math.sin(x1) - math.sin(x0)) / (x1 - x0)
intercept = math.sin(x0) - slope * x0
b.underestimators.add(b.w[i] >= slope * b.x[i] + (intercept - safety_tol) * b.lam[i])
elif (x1 > 0) and (x0 < 0):
tangent_x, tangent_slope, tangent_intercept = _compute_sine_underestimator_tangent_point(x1)
if tangent_x >= x0:
b.underestimators.add(b.w[i] >= tangent_slope * b.x[i] + (tangent_intercept - safety_tol) * b.lam[i])
b.underestimators.add(b.w[i] >= math.cos(x0) * b.x[i] + (math.sin(x0) - x0 * math.cos(x0) - safety_tol) * b.lam[i])
else:
slope = (math.sin(x1) - math.sin(x0)) / (x1 - x0)
intercept = math.sin(x0) - slope * x0
b.underestimators.add(b.w[i] >= slope * b.x[i] + (intercept - safety_tol) * b.lam[i])
else:
b.underestimators.add(b.w[i] >= math.cos(x0) * b.x[i] + (math.sin(x0) - x0 * math.cos(x0) - safety_tol) * b.lam[i])
b.underestimators.add(b.w[i] >= math.cos(x1) * b.x[i] + (math.sin(x1) - x1 * math.cos(x1) - safety_tol) * b.lam[i])
return x_pts
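A hedged usage sketch, not part of the original source: it assumes pw_sin_relaxation is imported from the module that also defines check_var_pts, RelaxationSide, pw_univariate_relaxation and the tangent-point helpers it calls, so it only runs in that context. The pyomo model construction itself is standard.

import math
import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(-math.pi / 2, math.pi / 2))
m.w = pyo.Var()
m.sin_rel = pyo.Block()
# Build both over- and under-estimators of w = sin(x) over five breakpoints.
pw_sin_relaxation(b=m.sin_rel, x=m.x, w=m.w,
                  x_pts=[-math.pi / 2, -math.pi / 4, 0.0, math.pi / 4, math.pi / 2])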
|
38c5d8a459f8c90aaa17a95f4000e4388e491f69
| 3,642,365
|
import sys,time
from datetime import datetime
from datetime import timedelta
def getAsDateTimeStr(value, offset=0,fmt=_formatTimeStr()):
""" return time as 2004-01-10T00:13:50.000Z """
    if isinstance(value, datetime):
        # offset is treated as seconds, consistent with the numeric branches below
        if offset:
            value += timedelta(seconds=offset)
        return time.strftime(fmt, value.timetuple())
    if isinstance(value, (tuple, time.struct_time,)):
        return time.strftime(fmt, value)
    if isinstance(value, (int, float,)):
        secs = time.gmtime(value + offset)
        return time.strftime(fmt, secs)
    if isinstance(value, str):
        try:
            value = time.strptime(value, fmt)
            return time.strftime(fmt, value)
        except ValueError:
            secs = time.gmtime(time.time() + offset)
            return time.strftime(fmt, secs)
    # Unknown type: fall back to the current time.
    secs = time.gmtime(time.time() + offset)
    return time.strftime(fmt, secs)
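A hedged usage sketch, assuming the module-level _formatTimeStr helper referenced in the default argument is defined (it must exist for the def statement itself to evaluate); passing fmt explicitly sidesteps it at call time.

fmt = "%Y-%m-%dT%H:%M:%S.000Z"
print(getAsDateTimeStr(0, fmt=fmt))                                  # 1970-01-01T00:00:00.000Z
print(getAsDateTimeStr(datetime(2004, 1, 10, 0, 13, 50), fmt=fmt))   # 2004-01-10T00:13:50.000Z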
|
ddf47f2e8f9be7d1387baaf7c1dc0fd83ddf3022
| 3,642,366
|
def doc_to_tokenlist_no_sents(doc):
""" serializes a spacy DOC object into a python list with tokens grouped by sents
:param doc: spacy DOC element
:return: a list of of token objects/dicts
"""
result = []
for x in doc:
token = {}
        if x.has_extension('tokenId'):
            token['tokenId'] = x._.tokenId
        else:
            token['tokenId'] = x.i
token['value'] = x.text
token['lemma'] = x.lemma_
token['pos'] = x.pos_
token['type'] = x.tag_
token['dep'] = x.dep_
token['shape'] = x.shape_
token['is_alpha'] = x.is_alpha
token['ent_iob'] = x.ent_iob_
token['iob'] = format_iob_tag(x)
token['ent_type'] = x.ent_type_
result.append(token)
return result
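A hedged usage sketch, assuming the same module provides the format_iob_tag helper called above and that a spaCy English model (here en_core_web_sm) is installed.

import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Berlin is the capital of Germany.")
tokens = doc_to_tokenlist_no_sents(doc)
print(tokens[0]["value"], tokens[0]["pos"], tokens[0]["ent_type"])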
|
0879e49836910d1f3a9be93281158c1b64978d53
| 3,642,367
|
def _applychange(raw_text: Text, content_change: t.TextDocumentContentChangeEvent):
"""Apply changes in-place"""
# Remove chars
start = content_change.range.start
range_length = content_change.range_length
index = _find_position(raw_text, start)
for _ in range(range_length):
raw_text.pop(index)
# Add chars
new_text = content_change.text
for char in reversed(new_text):
raw_text.insert(index, char)
return raw_text
|
9dddeb69a5830980721d747f743e5e516f2eba51
| 3,642,368
|
def get_engine():
"""Return a SQLAlchemy engine."""
connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection)
engine_args = {
"pool_recycle": FLAGS.sql_idle_timeout,
"echo": False,
}
if "sqlite" in connection_dict.drivername:
engine_args["poolclass"] = sqlalchemy.pool.NullPool
return sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args)
|
f93bfe29b7cf96d32f250e4385d3d2a794a02ee0
| 3,642,369
|
def ccw(p0, p1, p2):
"""
    Judge whether the vector p0p2 is counter-clockwise (ccw) relative to the vector p0p1.
    Return value map: \n
    1: p0p2 is ccw to p0p1 (larger angle to the x axis) \n
    0: p0p2 and p0p1 lie on the same line \n
    -1: p0p2 is cw to p0p1 (smaller angle to the x axis) \n
    Args:
        p0: base point; indices 0 and 1 are the x and y values. [x, y, ...]
        p1: first point; indices 0 and 1 are the x and y values. [x, y, ...]
        p2: second point; indices 0 and 1 are the x and y values. [x, y, ...]
Returns:
int: judgement value -1 or 0 or 1
"""
comp = Comp(p0)
return comp.compare_angle(p2, p1)
|
46599e0df6c39346e4b61f6daeb140a8db6225a3
| 3,642,370
|
def get_ffmpeg_folder():
# type: () -> str
"""
Returns the path to the folder containing the ffmpeg executable
:return:
"""
return 'C:/ffmpeg/bin'
|
4708eec64ff56b72f7b1b9cc7f5ee7916f6310bd
| 3,642,371
|
import subprocess
def decode_typeinfo(typeinfo):
"""Invoke c++filt to decode a typeinfo"""
try:
type_string = subprocess.check_output(["c++filt", typeinfo], stdin=subprocess.DEVNULL)
except FileNotFoundError:
# This happens when c++filt (from package binutils) is not found,
# and with "wine python" on Linux systems
raise CxxFiltNotFoundException
if not type_string.startswith(b"typeinfo name for "):
raise ValueError(f"Unexpected c++filt output for {typeinfo!r}: {type_string!r}")
return type_string[18:].decode("ascii").strip()
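A hedged usage sketch, assuming binutils' c++filt is on PATH and that CxxFiltNotFoundException is defined in the same module; the mangled name below is an illustrative typeinfo-name symbol (_ZTS prefix plus a length-prefixed class name).

print(decode_typeinfo("_ZTS8MyWidget"))   # -> "MyWidget"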
|
95211c7175a572ba2b5a627e0e18d95e68405f86
| 3,642,372
|
def get_nums(image):
"""get the words from an image using pytesseract.
the extracted words are cleaned and all spaces, newlines and non uppercase
characters are removed.
    :param image: input image
:type image: cv2 image
:return: extracted words
:rtype: list
"""
# pytesseract config
config = ('--psm 6 --oem 3 -c tessedit_char_whitelist=0123456789/')
# extract text and preprocess
text = pytesseract.image_to_string(image, config=config)
text = ''.join([c for c in text if c.isdigit() or c in ['\n', ' ', '.']])
    # return as a list
return text.split()
|
0ff23d8363a14a46c7f6ffa2be130c6eb61409c8
| 3,642,373
|
def create_bag_of_vocabulary_words():
"""
Form the array of words which can be conceived during the game.
This words are stored in hangman/vocabulary.txt
"""
    words_array = []
    with open("./hangman/vocabulary.txt") as file_object:
        for line in file_object:
            words_array.extend(line.split())
    return words_array
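A minimal, self-contained usage sketch (it creates the expected file first, since the path is hard-coded):

import os

os.makedirs("./hangman", exist_ok=True)
with open("./hangman/vocabulary.txt", "w") as fh:
    fh.write("python hangman gallows\n")
print(create_bag_of_vocabulary_words())   # ['python', 'hangman', 'gallows']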
|
e3aadad2575e28b19b83158eb2127437c8aada89
| 3,642,374
|
import math
def kato_ranking_candidates(identifier: Identifier, params=None):
"""rank candidates based on the method proposed by Kato, S. and Kano, M..
Candidates are the noun phrases in the sentence where the identifier was appeared first.
Args:
identifier (Identifier)
params (dict)
Returns:
Definition_list (List[Definition])
"""
if params is None:
params = {'sigma_d': math.sqrt(12 / math.log(2)),
'sigma_s': 2 / math.sqrt(math.log(2)),
'alpha': 1,
'beta': 1,
'gamma': 0.1,
'eta': 1}
ranked_definition_list = []
for candidate_ in identifier.candidates:
n_sentence = candidate_.included_sentence.id - identifier.sentences[0].id
delta = candidate_.word_count_btwn_var_cand + 1 # minimum is 1.
tf_candidate = candidate_.candidate_count_in_sentence / len(candidate_.included_sentence.replaced.strip())
score_match_initial_char = candidate_.score_match_character
r_sigma_d = math.exp(- 1 / 2 * (delta ** 2 - 1) /
params['sigma_d'] ** 2)
r_sigma_s = math.exp(- 1 / 2 * (n_sentence ** 2 -
1) / params['sigma_s'] ** 2)
score = (params['alpha'] * r_sigma_d
+ params['beta'] * r_sigma_s
+ params['gamma'] * tf_candidate
+ params['eta'] * score_match_initial_char)
score /= (params['alpha'] + params['beta'] +
params['gamma'] + params['eta'])
ranked_definition_list.append(
Definition(
definition=candidate_.text,
score=score,
params=params))
ranked_definition_list = sorted(
ranked_definition_list,
key=lambda x: x.score,
reverse=True)
if not ranked_definition_list:
return [Definition(definition='')]
return ranked_definition_list
|
c8a413118b599eb3cb9c9db877d7d489871d65a2
| 3,642,375
|
def _get_bag_of_pos_with_dependency(words, index):
"""Return pos list surrounding index
Args:
words (list): stanfordnlp word list object having pos attributes.
index (int): target index
Return:
pos_list (List[str]): xpos format string list
"""
pos_list = []
def _get_governor(_index, name):
governor_list = []
if int(words[_index].governor) == 0:
            # case _index word has no governor
return -1, governor_list
governor_index = _index + (int(words[_index].governor) - int(words[_index].index))
if governor_index < len(words):
governor = words[governor_index]
governor_list.append(_get_word_feature(governor) + '_' + name)
else:
governor_list.append(NONE_DEPENDENCY + '_' + name)
return governor_index, governor_list
def _get_children(_index, name):
children = []
child_list = []
roots = [(i, w) for i, w in enumerate(words) if int(w.index) == 1]
start_index = 0
end_index = len(words) - 1
for i, w in roots:
if i <= _index:
start_index = i
else:
end_index = i - 1
break
for i, w in enumerate(words[start_index:end_index + 1]):
if int(w.governor) == int(words[_index].index):
children.append(start_index + i)
child_list.append(_get_word_feature(w) + '_' + name)
return children, child_list
# add governor
governor_index, governor_list = _get_governor(index, 'governor')
if 0 <= governor_index < len(words):
        # case index word has a governor
pos_list.extend(governor_list)
if int(words[governor_index].governor) != 0:
            # case _index word has a governor
# add ancestor
_, ancestor_list = _get_governor(governor_index, 'ancestor')
pos_list.extend(ancestor_list)
# add sibling
siblings, sibling_list = _get_children(governor_index, 'sibling')
i_index = siblings.index(index)
del sibling_list[i_index]
del siblings[i_index]
pos_list.extend(sibling_list)
# add sibling list
for i in siblings:
sibling_children, sibling_child_list = _get_children(i, 'sibling_child')
pos_list.extend(sibling_child_list)
# add child
children, child_list = _get_children(index, 'child')
pos_list.extend(child_list)
for i in children:
grandchildren, grandchild_list = _get_children(i, 'grandchild')
pos_list.extend(grandchild_list)
return pos_list
|
02fc508583d79464161927080c1c55d308926274
| 3,642,376
|
def fix_time_individual(df):
"""
    1. pandas.apply a jit-compiled helper that zero-pads the packed time integer to hhmmssff
    2. concatenate date + time
    3. convert to np.datetime64
"""
@jit
def _fix_time(x):
aux = "0" * (8 - len(str(x))) + str(x)
return aux[:2] + ":" + aux[2:4] + ":" + aux[4:6] + "." + aux[6:]
return (df["date"] + " " + df["time"].apply(_fix_time)).astype(np.datetime64)
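A hedged, self-contained sketch of the same padding logic using pd.to_datetime; note that unit-less .astype(np.datetime64) may raise on recent pandas versions, so the vectorised equivalent below is the safer call. The sample values are illustrative.

import pandas as pd

df = pd.DataFrame({"date": ["2021-01-03"], "time": [9301512]})   # hhmmssff packed as an int
padded = df["time"].astype(str).str.zfill(8)
stamps = pd.to_datetime(df["date"] + " " + padded.str[:2] + ":" + padded.str[2:4]
                        + ":" + padded.str[4:6] + "." + padded.str[6:])
print(stamps.iloc[0])   # 2021-01-03 09:30:15.120000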
|
8d0c99d3f485d852130f9f4fe7ab05bbcdd99557
| 3,642,377
|
def convolve_fft(data, kernel, kernel_fft=False, return_fft=False):
"""
Convolve data with a kernel.
This is inspired by astropy.convolution.convolve_fft, but
stripped down to what's needed for the expected application. That
has the benefit of cutting down on the execution time, but limits
its use.
Beware:
- ``data`` and ``kernel`` must have the same shape.
- For the sum of all pixels in the convolved image to be the
same as the input data, the kernel must sum to unity.
- Padding is never added by default.
Args:
data (`numpy.ndarray`_):
Data to convolve.
kernel (`numpy.ndarray`_):
The convolution kernel, which must have the same shape as
``data``. If ``kernel_fft`` is True, this is the FFT of
the kernel image; otherwise, this is the direct kernel
image with the center of the kernel at the center of the
array.
kernel_fft (:obj:`bool`, optional):
Flag that the provided ``kernel`` array is actually the
FFT of the kernel, not its direct image.
return_fft (:obj:`bool`, optional):
Flag to return the FFT of the convolved image, instead of
the direct image.
Returns:
`numpy.ndarray`_: The convolved image, or its FFT, with the
same shape as the provided ``data`` array.
Raises:
ValueError:
Raised if ``data`` and ``kernel`` do not have the same
shape or if any of their values are not finite.
"""
if data.shape != kernel.shape:
raise ValueError('Data and kernel must have the same shape.')
if not np.all(np.isfinite(data)) or not np.all(np.isfinite(kernel)):
print('**********************************')
print(f'nans in data: {(~np.isfinite(data)).sum()}, nans in kernel: {(~np.isfinite(kernel)).sum()}')
raise ValueError('Data and kernel must both have valid values.')
datafft = np.fft.fftn(data)
kernfft = kernel if kernel_fft else np.fft.fftn(np.fft.ifftshift(kernel))
fftmult = datafft * kernfft
return fftmult if return_fft else np.fft.ifftn(fftmult).real
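A minimal self-contained check of the convention above: convolving a single point source with a unit-sum Gaussian kernel reproduces the kernel and preserves the total flux.

import numpy as np

n = 64
y, x = np.mgrid[:n, :n]
kernel = np.exp(-((x - n // 2) ** 2 + (y - n // 2) ** 2) / (2 * 3.0 ** 2))
kernel /= kernel.sum()                       # kernel sums to unity
data = np.zeros((n, n))
data[n // 2, n // 2] = 1.0                   # a single point source

smoothed = convolve_fft(data, kernel)
print(np.isclose(smoothed.sum(), 1.0))                       # True: flux preserved
print(np.unravel_index(smoothed.argmax(), smoothed.shape))   # (32, 32): centred on the source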
|
64fc4c02f72c419f6c315f524597a32391ea7b8c
| 3,642,378
|
import os
def segment_nifti(fname_image, folder_model, fname_prior=None, param=None):
"""
Segment a nifti file.
:param fname_image: str: Filename of the image to segment.
:param folder_model: str: Folder that encloses the deep learning model.
:param fname_prior: str: Filename of a previous segmentation that is used here as a prior.
    :param param: dict: Dictionary of user parameters
:return: fname_out: str: Output filename. If directory does not exist, it will be created.
"""
if param is None:
param = {}
nii_seg = imed.utils.segment_volume(folder_model, fname_image, fname_prior)
# Postprocessing
metadata = sct.deepseg.models.get_metadata(folder_model)
options = {**DEFAULTS, **metadata, **param}
nii_seg = postprocess(nii_seg, options)
# Save output seg
if 'o' in options:
fname_out = options['o']
else:
fname_out = ''.join([sct.utils.splitext(fname_image)[0], '_seg.nii.gz'])
# If output folder does not exist, create it
path_out = os.path.dirname(fname_out)
if not (path_out == '' or os.path.exists(path_out)):
os.makedirs(path_out)
nib.save(nii_seg, fname_out)
return fname_out
|
74ea5aa03f1f36c717e6f700c91d26fda3afb78d
| 3,642,379
|
def friable_sand(Ks, Gs, phi, phic, P_eff, n=-1, f=1.0):
"""
Friable sand rock physics model.
Reference: Avseth et al., Quantitative Seismic Interpretation, p.54
Inputs:
Ks = Bulk modulus of mineral matrix
Gs = Shear modulus of mineral matrix
phi = porosity
phic = critical porosity
P_eff = effective pressure
n = coordination number
f = shear reduction factor
Outputs:
K_dry = dry rock bulk modulus of friable rock
G_dry = dry rock shear modulus of friable rock
"""
K_hm, G_hm = hertz_mindlin(Ks, Gs, phic, P_eff, n, f)
z = G_hm/6 * (9*K_hm + 8*G_hm)/(K_hm + 2*G_hm)
A = (phi/phic)/(K_hm + 4/3*G_hm)
B = (1 - phi/phic)/(Ks + 4.0/3.0*G_hm)
K_dry = (A+B)**-1 - 4.0/3.0*G_hm
C = (phi/phic)/(G_hm+z)
D = (1.0-phi/phic)/(Gs + z)
G_dry = (C+D)**-1 - z
return K_dry, G_dry
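A hedged usage sketch with typical quartz values; it assumes hertz_mindlin is defined in the same module and that the moduli and effective pressure use consistent units (GPa here). The numbers are illustrative, not from the original source.

Ks, Gs = 36.6, 45.0                 # quartz bulk/shear moduli, GPa
K_dry, G_dry = friable_sand(Ks, Gs, phi=0.25, phic=0.4,
                            P_eff=0.02,      # 20 MPa expressed in GPa
                            n=8.6, f=1.0)
print(K_dry, G_dry)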
|
ace533ee727cd4749ad210b13eec5193b74416b8
| 3,642,380
|
def get_available_currencies():
"""
    This function retrieves a listing of all the currencies that have indexed currency crosses, so you can see
    which currencies are available. The currencies listed here can then be used to search for currency crosses
    and to retrieve historical data for those crosses, in order to determine the value of one base currency
    expressed in the second currency.
Returns:
:obj:`list` - available_currencies:
The resulting :obj:`list` contains all the available currencies with currency crosses being either the base
or the second value of the cross, as listed in Investing.com.
In case the listing was successfully retrieved, the :obj:`list` will look like::
available_currencies = [
'AED', 'AFN', 'ALL', 'AMD', 'ANG', ...
]
Raises:
FileNotFoundError: raised if currency crosses file was not found.
IOError: raised if currency crosses retrieval failed, both for missing file or empty file.
"""
return available_currencies_as_list()
|
139f775943bc251149444c702cb4290d78a58a03
| 3,642,381
|
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
This function is unsafe and should not be used. The file name
refers to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, prefix + name + suffix)
        if not _exists(file):
            return file
    raise IOError(_errno.EEXIST, "No usable temporary filename found")
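This looks like the implementation behind the standard library's tempfile.mktemp, so a usage sketch via the public API is the simplest illustration; note the race condition the docstring warns about, which is why mkstemp is preferred in new code.

import os
import tempfile

path = tempfile.mktemp(suffix=".log", prefix="run_")   # only names a file, never creates it
print(path, os.path.exists(path))                      # unique path, False

fd, safe_path = tempfile.mkstemp(suffix=".log")        # safer: creates the file atomically
os.close(fd)
os.remove(safe_path)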
|
0785609c3284b0052fa31767d0df11476b28c786
| 3,642,382
|
def getTaskIdentifier( task_id ) :
"""Get tuple of Type and Instance identifiers."""
_inst = Instance.objects.get( id = task_id )
return ( _inst.type.identifier , _inst.identifier )
|
fb18be814330bd02205d355b3ebfb68f777ee9c2
| 3,642,383
|
def hessian_vector_product(loss, weights, v):
"""Compute the tensor of the product H.v, where H is the loss Hessian with
respect to the weights. v is a vector (a rank 1 Tensor) of the same size as
the loss gradient. The ordering of elements in v is the same obtained from
flatten_tensor_list() acting on the gradient. Derivatives of dv/dweights
should vanish.
"""
grad = flatten_tensor_list(tf.gradients(loss, weights))
grad_v = tf.reduce_sum(grad * tf.stop_gradient(v))
H_v = flatten_tensor_list(tf.gradients(grad_v, weights))
return H_v
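A minimal self-contained sketch of the same Hessian-vector-product trick in TF1 graph mode; the flatten helper below is a hypothetical stand-in for the module's flatten_tensor_list, and the technique is restated inline rather than calling the function above.

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

def flatten(tensors):
    # stand-in for flatten_tensor_list: concatenate flattened tensors into one vector
    return tf.concat([tf.reshape(t, [-1]) for t in tensors], axis=0)

w = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(w ** 3)          # Hessian is diag(6*w) = diag([6, 12])
v = tf.constant([1.0, 0.0])

grad = flatten(tf.gradients(loss, [w]))
grad_v = tf.reduce_sum(grad * tf.stop_gradient(v))
h_v = flatten(tf.gradients(grad_v, [w]))

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    print(sess.run(h_v))              # ~[6., 0.]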
|
35ef7772367f56fcded2e4173fe194cb28da3bc7
| 3,642,384
|
def clean_cells(nb_node):
"""Delete any outputs and resets cell count."""
for cell in nb_node['cells']:
if 'code' == cell['cell_type']:
if 'outputs' in cell:
cell['outputs'] = []
if 'execution_count' in cell:
cell['execution_count'] = None
return nb_node
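A minimal self-contained usage sketch with a hand-built notebook node (plain dicts, so no nbformat dependency is needed):

nb = {"cells": [
    {"cell_type": "code", "source": "print(1)",
     "outputs": [{"output_type": "stream", "text": "1\n"}], "execution_count": 3},
    {"cell_type": "markdown", "source": "# Title"},
]}
clean_cells(nb)
print(nb["cells"][0]["outputs"], nb["cells"][0]["execution_count"])   # [] None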
|
67dce7ecc3590143730f943d3eb07ae7df9d8145
| 3,642,385
|
from typing import Callable
from typing import Any
import asyncio
import inspect
def _spanned(scond: _SpanConductor) -> Callable[..., Any]:
"""Handle decorating a function with either a new span or a reused span."""
def inner_function(func: Callable[..., Any]) -> Callable[..., Any]:
def setup(args: Args, kwargs: Kwargs) -> Span:
if not isinstance(scond, (_NewSpanConductor, _ReuseSpanConductor)):
raise Exception(f"Undefined SpanConductor type: {scond}.")
else:
return scond.get_span(FunctionInspector(func, args, kwargs))
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
LOGGER.debug("Spanned Function")
span = setup(args, kwargs)
is_iterator_class_next_method = span.name.endswith(".__next__")
reraise_stopiteration_outside_contextmanager = False
# CASE 1 ----------------------------------------------------------
if scond.behavior == SpanBehavior.ONLY_END_ON_EXCEPTION:
try:
with use_span(span, end_on_exit=False):
try:
return func(*args, **kwargs)
except StopIteration:
# intercept and temporarily suppress StopIteration
if not is_iterator_class_next_method:
raise
reraise_stopiteration_outside_contextmanager = True
except: # noqa: E722 # pylint: disable=bare-except
span.end()
raise
if reraise_stopiteration_outside_contextmanager:
raise StopIteration
raise RuntimeError("Malformed SpanBehavior Handling")
# CASES 2 & 3 -----------------------------------------------------
elif scond.behavior in (SpanBehavior.END_ON_EXIT, SpanBehavior.DONT_END):
end_on_exit = bool(scond.behavior == SpanBehavior.END_ON_EXIT)
with use_span(span, end_on_exit=end_on_exit):
try:
return func(*args, **kwargs)
except StopIteration:
# intercept and temporarily suppress StopIteration
if not is_iterator_class_next_method:
raise
reraise_stopiteration_outside_contextmanager = True
if reraise_stopiteration_outside_contextmanager:
raise StopIteration
raise RuntimeError("Malformed SpanBehavior Handling")
# ELSE ------------------------------------------------------------
else:
raise InvalidSpanBehavior(scond.behavior)
@wraps(func)
def gen_wrapper(*args: Any, **kwargs: Any) -> Any:
LOGGER.debug("Spanned Generator Function")
span = setup(args, kwargs)
# CASE 1 ----------------------------------------------------------
if scond.behavior == SpanBehavior.ONLY_END_ON_EXCEPTION:
try:
with use_span(span, end_on_exit=False):
for val in func(*args, **kwargs):
yield val
except: # noqa: E722 # pylint: disable=bare-except
span.end()
raise
# CASES 2 & 3 -----------------------------------------------------
elif scond.behavior in (SpanBehavior.END_ON_EXIT, SpanBehavior.DONT_END):
end_on_exit = bool(scond.behavior == SpanBehavior.END_ON_EXIT)
with use_span(span, end_on_exit=end_on_exit):
for val in func(*args, **kwargs):
yield val
# ELSE ------------------------------------------------------------
else:
raise InvalidSpanBehavior(scond.behavior)
@wraps(func)
async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
LOGGER.debug("Spanned Async Function")
span = setup(args, kwargs)
is_iterator_class_anext_method = span.name.endswith(".__anext__")
reraise_stopasynciteration_outside_contextmanager = False
# CASE 1 ----------------------------------------------------------
if scond.behavior == SpanBehavior.ONLY_END_ON_EXCEPTION:
try:
with use_span(span, end_on_exit=False):
try:
return await func(*args, **kwargs)
except StopAsyncIteration:
# intercept and temporarily suppress StopAsyncIteration
if not is_iterator_class_anext_method:
raise
reraise_stopasynciteration_outside_contextmanager = True
except: # noqa: E722 # pylint: disable=bare-except
span.end()
raise
if reraise_stopasynciteration_outside_contextmanager:
raise StopAsyncIteration
raise RuntimeError("Malformed SpanBehavior Handling")
# CASES 2 & 3 -----------------------------------------------------
elif scond.behavior in (SpanBehavior.END_ON_EXIT, SpanBehavior.DONT_END):
end_on_exit = bool(scond.behavior == SpanBehavior.END_ON_EXIT)
with use_span(span, end_on_exit=end_on_exit):
try:
return await func(*args, **kwargs)
except StopAsyncIteration:
# intercept and temporarily suppress StopAsyncIteration
if not is_iterator_class_anext_method:
raise
reraise_stopasynciteration_outside_contextmanager = True
if reraise_stopasynciteration_outside_contextmanager:
raise StopAsyncIteration
raise RuntimeError("Malformed SpanBehavior Handling")
# ELSE ------------------------------------------------------------
else:
raise InvalidSpanBehavior(scond.behavior)
if asyncio.iscoroutinefunction(func):
return async_wrapper
else:
if inspect.isgeneratorfunction(func):
return gen_wrapper
else:
return wrapper
return inner_function
|
4d6b9bc4a56ae50c781da2078e2d919bae19bfdf
| 3,642,386
|
def getProjectProperties():
"""
    :return: the project properties from the metadata loader
    :rtype: list of ProjectProperty
"""
return getMetDataLoader().projectProperties
|
7f517a20d83002c41867bbc7911f775d64b21b88
| 3,642,387
|
def svn_client_cleanup(*args):
"""svn_client_cleanup(char dir, svn_client_ctx_t ctx, apr_pool_t scratch_pool) -> svn_error_t"""
return _client.svn_client_cleanup(*args)
|
2a9921e8521e927e124633bb932b158a1f9abdf3
| 3,642,388
|
def model_chromatic(psrs, psd='powerlaw', noisedict=None, components=30,
gamma_common=None, upper_limit=False, bayesephem=False,
wideband=False,
idx=4, chromatic_psd='powerlaw', c_psrs=['J1713+0747']):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
    instantiated with model 2A from the analysis paper + additional
    chromatic noise for given pulsars
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
6. Chromatic noise for given pulsar list
global:
    1. Common red noise modeled with a user-defined PSD with
    30 sampling frequencies. Available PSDs are
    ['powerlaw', 'turnover', 'spectrum']
2. Optional physical ephemeris modeling.
:param psd:
PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param wideband:
Use wideband par and tim files. Ignore ECORR. Set to False by default.
:param idx:
        Index of chromatic process (i.e. DM is 2, scattering would be 4). If
set to `vary` then will vary from 0 - 6 (This will be VERY slow!)
:param chromatic_psd:
PSD to use for chromatic noise. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param c_psrs:
List of pulsars to use chromatic noise. 'all' will use all pulsars
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
Tspan = model_utils.get_tspan(psrs)
# white noise
s = white_noise_block(vary=False, wideband=wideband)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
name='gw')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# timing model
s += gp_signals.TimingModel()
# chromatic noise
sc = chromatic_noise_block(psd=chromatic_psd, idx=idx)
if c_psrs == 'all':
s += sc
models = [s(psr) for psr in psrs]
elif len(c_psrs) > 0:
models = []
for psr in psrs:
if psr.name in c_psrs:
print('Adding chromatic model to PSR {}'.format(psr.name))
snew = s + sc
models.append(snew(psr))
else:
models.append(s(psr))
# set up PTA
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
|
568f4951930fe6f8175417785c4503895f76bc88
| 3,642,389
|
import os
import shutil
def restore(backup_path: str, storage_name: str, target: str or None = None, token: str or None = None) -> str:
"""
Downloads the information from the backup
:returns path to the file
"""
if not token:
token = _restore_token(storage_name)
print(f'[{__name__}] Getting storage...')
storage_class = get_storage_by_name(storage_name)
storage: Storage = storage_class(token=token)
# Handle files that were saved on a normal basis
remote_path_resource_id = backup_path.split('/')[-2]
_, original_name = _decode_resource_id(remote_path_resource_id)
# Handle files saved under /custom folder
# pass
if target is None:
print(f'[{__name__}] Calculating local file path...')
dl_target = f"{BASE_BACKUPS_DIRECTORY}/" + original_name + ".zip"
target = f"{BASE_BACKUPS_DIRECTORY}/" + original_name
if os.path.exists(target):
            raise ValueError(f"Path {target} already exists. Please remove or rename it, then try the restore again")
else:
raise NotImplementedError()
print(f'[{__name__}] Downloading file...')
storage.download_resource(backup_path, dl_target)
try:
print(f'[{__name__}] Unpacking file...')
shutil.unpack_archive(dl_target, target, 'zip')
return target
finally:
os.unlink(dl_target)
|
380a654c98b406c892f50deba80c00ec3d37fa50
| 3,642,390
|
def test_f32(heavydb):
"""If UDF name ends with an underscore, expect strange behaviour. For
instance, defining
@heavydb('f32(f32)', 'f32(f64)')
def f32_(x): return x+4.5
    the query `select f32_(0.0E0)` fails but not when defining
@heavydb('f32(f64)', 'f32(f32)')
def f32_(x): return x+4.5
(notice the order of signatures in heavydb decorator argument).
"""
@heavydb('f32(f32)', 'f32(f64)') # noqa: F811
def f_32(x): return x+4.5
descr, result = heavydb.sql_execute(
'select f_32(0.0E0) from {heavydb.table_name} limit 1'
.format(**locals()))
assert list(result)[0] == (4.5,)
|
157560cc90e3f869d84198eeb26896a76157eb39
| 3,642,391
|
from typing import Union
from pathlib import Path
def get_message_bytes(
file_path: Union[str, Path],
count: int,
) -> bytes:
"""
    Read the count-th field from a GRIB2 file, crop it to a region (the northeast region),
    and return the bytes of the new field.
    Parameters
    ----------
    file_path
    count
        index of the field, starting from 1 (the ecCodes GRIB key ``count``)
    Returns
    -------
    bytes
        bytes of the re-encoded GRIB2 message
"""
message = load_message_from_file(file_path, count=count)
message = extract_region(
message,
0, 180, 89.875, 0.125
)
message_bytes = eccodes.codes_get_message(message)
eccodes.codes_release(message)
return message_bytes
|
6a2ad3a20e02283c2bffe31eb78cacf84d92ff6f
| 3,642,392
|
from typing import Union
from typing import List
import json
def discover_climate_observations(
time_resolution: Union[
None, str, TimeResolution, List[Union[str, TimeResolution]]
] = None,
parameter: Union[None, str, Parameter, List[Union[str, Parameter]]] = None,
period_type: Union[None, str, PeriodType, List[Union[str, PeriodType]]] = None,
) -> str:
"""
Function to print/discover available time_resolution/parameter/period_type
combinations.
:param parameter: Observation measure
:param time_resolution: Frequency/granularity of measurement interval
:param period_type: Recent or historical files
:return: Result of available combinations in JSON.
"""
if not time_resolution:
time_resolution = [*TimeResolution]
if not parameter:
parameter = [*Parameter]
if not period_type:
period_type = [*PeriodType]
time_resolution = parse_enumeration(TimeResolution, time_resolution)
parameter = parse_enumeration(Parameter, parameter)
period_type = parse_enumeration(PeriodType, period_type)
trp_mapping_filtered = {
ts: {
par: [p for p in pt if p in period_type]
for par, pt in parameters_and_period_types.items()
if par in parameter
}
for ts, parameters_and_period_types in TIME_RESOLUTION_PARAMETER_MAPPING.items()
if ts in time_resolution
}
time_resolution_parameter_mapping = {
str(time_resolution): {
str(parameter): [str(period) for period in periods]
for parameter, periods in parameters_and_periods.items()
if periods
}
for time_resolution, parameters_and_periods in trp_mapping_filtered.items()
if parameters_and_periods
}
return json.dumps(time_resolution_parameter_mapping, indent=4)
|
b96fd2a0a9bcb9a7b50018a1b7e3ae7add3e3c63
| 3,642,393
|
def set_template(template_name, file_name, p_name):
"""
Insert template into the E-mail.
"""
corp = template(template_name, file_name, p_name)
msg = MIMEMultipart()
msg['from'] = p_name
msg['subject'] = f'{file_name}'
msg.attach(MIMEText(corp, 'html'))
return msg
|
8745d9729ddbe159e0bca90dee198ce4e3efb489
| 3,642,394
|
import gettext
def lazy_gettext(string):
"""A lazy version of `gettext`."""
if isinstance(string, _TranslationProxy):
return string
return _TranslationProxy(gettext, string)
|
9229c987d6b2f300f7225ea4b58f964c70e882fc
| 3,642,395
|
def toggleautowithdrawalstatus(status, fid, alternate_token=False):
"""
Sets auto-withdrawal status of the account associated
with the current OAuth token under the specified
funding ID.
:param status: Boolean for toggle.
:param fid: String with funding ID for target account
:return: String (Either "Enabled" or "Disabled")
"""
    if status is None:
        raise Exception('toggleautowithdrawalstatus() requires status parameter')
    if not fid:
        raise Exception('toggleautowithdrawalstatus() requires fid parameter')
return r._post('/accounts/features/auto_withdrawl',
{
'oauth_token': alternate_token if alternate_token else c.access_token,
'enabled': status,
'fundingId': fid
})
|
4df2be7801a23978c58b7ce8aec7e5fd30fb1e76
| 3,642,396
|
def load_avenger_models():
"""
Load each instance of data from the repository into its associated model at this point in the schema lifecycle
"""
avengers = []
for item in fetch_avenger_data():
# Explicitly assign each attribute of the model, so various attributes can be ignored
avenger = Avenger(url=item.url,
name=item.name,
appearances=item.appearances,
current=item.current == "YES",
gender=item.gender,
probationary=parse_date(item.probationary),
full_reserve=parse_date(item.full_reserve, item.year),
year=item.year,
honorary=item.honorary,
notes=item.notes)
for occurrence in range(1, 6): # Iterate over the known indices of deaths (max in data range is 5)
# If the death attribute exists and has a value, create a new Death instance and load the associated
            # instance data before adding it to the list of deaths on the current avenger
if getattr(item, f"death{occurrence}", None):
avenger.deaths.append(
Death(death=getattr(item, f"death{occurrence}") == "YES", # Convert string to boolean
returned=getattr(item, f"return{occurrence}") == "YES", # Convert string to boolean
sequence=occurrence) # Add the sequence of this death, order is important!
)
else:
break # If this is the last death, there is no reason to check subsequent iterations
avengers.append(avenger) # Add this avenger to the list of avengers
return avengers
|
70740495be63a198cf5ec1308608955f52be46f0
| 3,642,397
|
import json
import _json
import _datetime
def aggregate_points(point_layer,
bin_type=None,
bin_size=None,
bin_size_unit=None,
polygon_layer=None,
time_step_interval=None,
time_step_interval_unit=None,
time_step_repeat_interval=None,
time_step_repeat_interval_unit=None,
time_step_reference=None,
summary_fields=None,
output_name=None,
gis=None,
future=False):
"""
.. image:: _static/images/aggregate_points/aggregate_points.png
This ``aggregate_points`` tool works with a layer of point features and a layer of areas.
The layer of areas can be an input polygon layer or it can be square or hexagonal bins calculated
when the task is run. The tool first determines which points fall within each specified area.
After determining this point-in-area spatial relationship, statistics about all points in the
area are calculated and assigned to the area. The most basic statistic is the count of the
number of points within the area, but you can get other statistics as well.
For example, suppose you have point features of coffee shop locations and area features of counties,
and you want to summarize coffee sales by county. Assuming the coffee shops have a TOTAL_SALES attribute,
you can get the sum of all TOTAL_SALES within each county, the minimum or maximum TOTAL_SALES within each
county, or other statistics like the count, range, standard deviation, and variance.
This tool can also work on data that is time-enabled. If time is enabled on the input points, then
the time slicing options are available. Time slicing allows you to calculate the point-in area relationship
while looking at a specific slice in time. For example, you could look at hourly intervals, which would
result in outputs for each hour.
For an example with time, suppose you had point features of every transaction made at a coffee shop location and no area layer.
The data has been recorded over a year, and each transaction has a location and a time stamp. Assuming each transaction has a
TOTAL_SALES attribute, you can get the sum of all TOTAL SALES within the space and time of interest. If these transactions are
for a single city, we could generate areas that are one kilometer grids, and look at weekly time slices to summarize the
transactions in both time and space.
================================================= ========================================================================
**Argument** **Description**
------------------------------------------------- ------------------------------------------------------------------------
point_layer Required point feature layer. The point features that will be aggregated
into the polygons in the ``polygon_layer`` or bins of the specified ``bin_size``.
See :ref:`Feature Input<FeatureInput>`.
------------------------------------------------- ------------------------------------------------------------------------
bin_type Optional string. If ``polygon_layer`` is not defined, it is required.
The type of bin that will be generated and into which points will be aggregated.
Choice list:['Square', 'Hexagon'].
The default value is "Square".
When generating bins for Square, the number and units specified determine the height
and length of the square. For Hexagon, the number and units specified determine the
distance between parallel sides. Either ``bin_type`` or ``polygon_layer`` must be specified.
If ``bin_type`` is chosen, ``bin_size`` and ``bin_size_unit`` specifying the size of the bins must be included.
------------------------------------------------- ------------------------------------------------------------------------
bin_size (Required if ``bin_type`` is used) Optional float. The distance for the bins of type binType that
the ``point_layer`` will be aggregated into. When generating bins, for Square,
the number and units specified determine the height and length of the square.
For Hexagon, the number and units specified determine the distance between parallel sides.
------------------------------------------------- ------------------------------------------------------------------------
bin_size_unit (Required if ``bin_size`` is used) Optional string. The distance unit for the bins that the ``point_layer`` will be aggregated into.
Choice list:['Feet', 'Yards', 'Miles', 'Meters', 'Kilometers', 'NauticalMiles']
When generating bins for Square, the number and units specified determine the height and
length of the square. For Hexagon, the number and units specified determine the distance
between parallel sides. Either ``bin_type`` or ``polygon_layer`` must be specified.
If ``bin_type`` is chosen, ``bin_size`` and ``bin_size_unit`` specifying the size of the bins must be included.
------------------------------------------------- ------------------------------------------------------------------------
polygon_layer Optional polygon feature layer. The polygon features (areas) into which the input points will be aggregated.
See :ref:`Feature Input<FeatureInput>`.
One of ``polygon_layer`` or bins ``bin_size`` and ``bin_size_unit`` is required.
------------------------------------------------- ------------------------------------------------------------------------
time_step_interval Optional integer. A numeric value that specifies duration of the time step interval. This option is only
available if the input points are time-enabled and represent an instant in time.
The default value is 'None'.
------------------------------------------------- ------------------------------------------------------------------------
time_step_interval_unit Optional string. A string that specifies units of the time step interval. This option is only available if the
input points are time-enabled and represent an instant in time.
Choice list:['Years', 'Months', 'Weeks', 'Days', 'Hours', 'Minutes', 'Seconds', 'Milliseconds']
The default value is 'None'.
------------------------------------------------- ------------------------------------------------------------------------
time_step_repeat_interval Optional integer. A numeric value that specifies how often the time step repeat occurs.
This option is only available if the input points are time-enabled and of time type instant.
------------------------------------------------- ------------------------------------------------------------------------
time_step_repeat_interval_unit Optional string. A string that specifies the temporal unit of the step repeat.
This option is only available if the input points are time-enabled and of time type instant.
Choice list:['Years', 'Months', 'Weeks', 'Days', 'Hours', 'Minutes', 'Seconds', 'Milliseconds']
The default value is 'None'.
------------------------------------------------- ------------------------------------------------------------------------
time_step_reference Optional datetime. A date that specifies the reference time to align the time slices to, represented in milliseconds from epoch.
The default is January 1, 1970, at 12:00 a.m. (epoch time stamp 0). This option is only available if the
input points are time-enabled and of time type instant.
------------------------------------------------- ------------------------------------------------------------------------
summary_fields Optional list of dicts. A list of field names and statistical summary types that you want to calculate
for all points within each polygon or bin. Note that the count of points within each polygon is always
returned. By default, all statistics are returned.
Example: [{"statisticType": "Count", "onStatisticField": "fieldName1"}, {"statisticType": "Any", "onStatisticField": "fieldName2"}]
fieldName is the name of the fields in the input point layer.
statisticType is one of the following for numeric fields:
* ``Count`` -Totals the number of values of all the points in each polygon.
* ``Sum`` -Adds the total value of all the points in each polygon.
* ``Mean`` -Calculates the average of all the points in each polygon.
* ``Min`` -Finds the smallest value of all the points in each polygon.
* ``Max`` -Finds the largest value of all the points in each polygon.
* ``Range`` -Finds the difference between the Min and Max values.
* ``Stddev`` -Finds the standard deviation of all the points in each polygon.
* ``Var`` -Finds the variance of all the points in each polygon.
statisticType is one of the following for string fields:
* ``Count`` -Totals the number of strings for all the points in each polygon.
                                                      * ``Any`` -Returns a sample string of a point in each polygon.
------------------------------------------------- ------------------------------------------------------------------------
output_name Optional string. The method will create a feature service of the results. You define the name of the service.
------------------------------------------------- ------------------------------------------------------------------------
gis Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
------------------------------------------------- ------------------------------------------------------------------------
context Optional dict. The context parameter contains additional settings that affect task execution. For this task, there are four settings:
* Extent (``extent``) - a bounding box that defines the analysis area. Only those features that intersect the bounding box will be analyzed.
* Processing spatial reference (``processSR``) The features will be projected into this coordinate system for analysis.
* Output Spatial Reference (``outSR``) - the features will be projected into this coordinate system after the analysis to be saved. The output spatial reference for the spatiotemporal big data store is always WGS84.
* Data store (``dataStore``) Results will be saved to the specified data store. The default is the spatiotemporal big data store.
------------------------------------------------- ------------------------------------------------------------------------
future optional Boolean. If True, a GPJob is returned instead of
results. The GPJob can be queried on the status of the execution.
================================================= ========================================================================
:returns: result_layer : Output Features as feature layer item.
.. code-block:: python
# Usage Example: To aggregate number of 911 calls within 1 km summarized by Day count.
agg_result = aggregate_points(calls,
bin_size=1,
bin_size_unit='Kilometers',
time_step_interval=1,
time_step_interval_unit="Years",
summary_fields=[{"statisticType": "Count", "onStatisticField": "Day"}],
output_name='testaggregatepoints01')
"""
kwargs = locals()
gis = _arcgis.env.active_gis if gis is None else gis
url = gis.properties.helperServices.geoanalytics.url
params = {}
for key, value in kwargs.items():
if value is not None:
params[key] = value
if output_name is None:
output_service_name = 'Aggregate Points Analysis_' + _id_generator()
output_name = output_service_name.replace(' ', '_')
else:
output_service_name = output_name.replace(' ', '_')
output_service = _create_output_service(gis, output_name, output_service_name, 'Aggregate Points')
params['output_name'] = _json.dumps({
"serviceProperties": {"name" : output_name, "serviceUrl" : output_service.url},
"itemProperties": {"itemId" : output_service.itemid}})
if isinstance(summary_fields, list):
summary_fields = json.dumps(summary_fields)
_set_context(params)
param_db = {
"point_layer": (_FeatureSet, "pointLayer"),
"bin_type": (str, "binType"),
"bin_size": (float, "binSize"),
"bin_size_unit": (str, "binSizeUnit"),
"polygon_layer": (_FeatureSet, "polygonLayer"),
"time_step_interval": (int, "timeStepInterval"),
"time_step_interval_unit": (str, "timeStepIntervalUnit"),
"time_step_repeat_interval": (int, "timeStepRepeatInterval"),
"time_step_repeat_interval_unit": (str, "timeStepRepeatIntervalUnit"),
"time_step_reference": (_datetime, "timeStepReference"),
"summary_fields": (str, "summaryFields"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"output": (_FeatureSet, "Output Features"),
}
return_values = [
{"name": "output", "display_name": "Output Features", "type": _FeatureSet},
]
try:
_execute_gp_tool(gis, "AggregatePoints", params, param_db, return_values, _use_async, url, True, future=future)
return output_service
except:
output_service.delete()
raise
|
fe946d4273ed1ce4e4cd3e46d9f9a3e0ff5c6725
| 3,642,398
|
def scattered_embedding_lookup(params,
values,
dimension,
name=None,
hash_key=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows for us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance 10 parameters can be
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if dimension is None:
raise ValueError("You must specify dimension.")
return _sampled_scattered_embedding_lookup(
params, values, dimension=dimension, sampled_candidates=None,
hash_key=hash_key, name=name)
|
a317d7d494bd9b9918f6f2354d854c2fbffc1c6c
| 3,642,399
|