content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _get_requirements_for_lambda(node):
    """Yield the requirements of a lambda's body that its own arguments do not satisfy.

    .. code:: python

        Lambda(arguments args, expr body)
    """
    local_names = _get_scope_from_arguments(node.args)
    yield from (
        req for req in get_requirements(node.body) if req.name not in local_names
    )
def fix_span(text_context, offsets, span):
    """
    Find start-end indices of the span in the text_context nearest to the
    existing token start-end indices.

    :param text_context: (str) text to search for span in
    :param offsets: (List[Tuple[int, int]]) begin/end offsets of each token in the text
    :param span: (str) the answer span to find in the text_context
    :return: tuple of (span indices, distance to the nearest token indices)
    """
    span = span.strip()
    assert span in text_context, f'answer span:{span} is not in the context: {text_context}'
    if span == text_context:
        # Trivial case: span covers the whole context. Return the same
        # (indices, distance) two-tuple shape as the general case below —
        # previously this path returned an inconsistent 3-tuple.
        return (0, len(text_context)), 0
    begins, ends = map(list, zip(*offsets))
    best_dist = float('inf')
    best_indices = None
    # re.escape(pattern) escapes special characters so the span matches literally.
    # re.finditer(pattern, string) yields match objects for every occurrence.
    for m in re.finditer(re.escape(span), text_context):
        begin_offset, end_offset = m.span()
        fixed_begin, d1 = find_nearest(begins, begin_offset, lambda x: x < end_offset)
        fixed_end, d2 = find_nearest(ends, end_offset, lambda x: x > begin_offset)
        if d1 + d2 < best_dist:
            best_dist = d1 + d2
            best_indices = (fixed_begin, fixed_end)
            if best_dist == 0:
                break  # exact token alignment; no better match possible
    assert best_indices is not None
    return best_indices, best_dist
def update_moira_lists(
    strategy, backend, user=None, **kwargs
):  # pylint: disable=unused-argument
    """
    Update a user's moira lists.

    Args:
        strategy (social_django.strategy.DjangoStrategy): the strategy used to authenticate
        backend (social_core.backends.base.BaseAuth): the backend being used to authenticate
        user (User): the current user
    """
    # Guard clause: skip entirely when the MOIRA feature flag is off.
    if not features.is_enabled(features.MOIRA):
        return {}
    if user and user.is_active:
        # Kick off the moira-list refresh asynchronously (celery task).
        update_user_moira_lists.delay(user.id, update_memberships=True)
    return {}
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Give both notification flag columns an explicit default of 0.
    for column in ("force_notifications", "repeating"):
        op.execute(
            "ALTER TABLE notifications ALTER {} SET DEFAULT 0".format(column))
def certificate_get_all_by_project(context, project_id):
    """Get all certificates for a project.

    Thin delegate to the configured backend implementation (IMPL).

    :param context: request context passed through to the backend
    :param project_id: ID of the project whose certificates are fetched
    :return: whatever the backend returns for the project's certificates
    """
    return IMPL.certificate_get_all_by_project(context, project_id)
def simple_list(li):
    """Return a sorted copy of *li* with duplicate values removed."""
    unique_values = set(li)
    return sorted(unique_values)
def test_draw_cursor(cursor: Cursor, offset: IntegerPosition2D, canvas: Canvas,
                     expected_canvas_after: Canvas) -> None:
    """Test illud.terminal.Terminal.draw_cursor.

    Drawing the cursor at `offset` should mutate `canvas` in place so that it
    equals `expected_canvas_after` (fixtures are presumably parametrized —
    confirm against the fixture definitions).
    """
    cursor.draw(offset, canvas)
    assert canvas == expected_canvas_after
async def test_device_trackers_in_zone(opp):
    """Test for trackers in zone.

    Two trackers placed at the same coordinates inside the 'home' zone should
    yield distance 0, both names reported as nearest, and an 'arrived'
    direction of travel.
    """
    config = {
        "proximity": {
            "home": {
                "ignored_zones": ["work"],
                "devices": ["device_tracker.test1", "device_tracker.test2"],
                "tolerance": "1",
            }
        }
    }
    assert await async_setup_component(opp, DOMAIN, config)
    # Place both trackers at identical coordinates inside the zone.
    opp.states.async_set(
        "device_tracker.test1",
        "home",
        {"friendly_name": "test1", "latitude": 2.1, "longitude": 1.1},
    )
    await opp.async_block_till_done()
    opp.states.async_set(
        "device_tracker.test2",
        "home",
        {"friendly_name": "test2", "latitude": 2.1, "longitude": 1.1},
    )
    await opp.async_block_till_done()
    state = opp.states.get("proximity.home")
    assert state.state == "0"
    # Both are equally near, so either ordering of the names is acceptable.
    assert (state.attributes.get("nearest") == "test1, test2") or (
        state.attributes.get("nearest") == "test2, test1"
    )
    assert state.attributes.get("dir_of_travel") == "arrived"
def create_model(gpu, arch = 'vgg16', input_size = 25088, hidden_layer_size = 512, output_size = 102):
    """Create a neural network model with a custom classifier head.

    The pretrained feature extractor selected by `arch` is frozen and its
    classifier is replaced with a two-layer log-softmax head.
    Returns False (after printing a message) for an unknown architecture.
    """
    if arch not in archs_dict:
        print("You haven`t inserted a valid architecture. Check the available architectures at https://pytorch.org/docs/stable/torchvision/models.html.")
        return False
    model = archs_dict[arch]
    # Freeze the pretrained weights; only the new classifier will train.
    for parameter in model.parameters():
        parameter.requires_grad = False
    half_hidden = int(hidden_layer_size / 2)
    model.classifier = nn.Sequential(OrderedDict([
        ('Input', nn.Linear(input_size, hidden_layer_size)),
        ('hidden1', nn.ReLU()),
        ('DropOut1', nn.Dropout(p=0.2)),
        ('layer1', nn.Linear(hidden_layer_size, half_hidden)),
        ('hidden2', nn.ReLU()),
        ('layer2', nn.Linear(half_hidden, output_size)),
        ('output', nn.LogSoftmax(dim=1))
    ]))
    model.to('cuda' if gpu else 'cpu')
    return model
def write_json(object_list, metadata, num_frames, out_file=None):
    """Serialize per-frame object tracks into a JSON-friendly dict.

    :param object_list: list of track objects; each must expose ``first_frame``,
        ``cls``, ``tags``, ``all``, ``all_world`` and ``all_gps`` (the last three
        hold one array per covered frame, convertible via ``.tolist()``)
    :param metadata: dict of capture metadata stored alongside the data
    :param num_frames: number of frames to serialize (frames 0 .. num_frames-1)
    :param out_file: optional path; when given, the result is also written as JSON
    :return: dict with keys ``metadata`` and per-frame ``data``
    """
    classes = ["person","bicycle","car","motorbike","NA","bus","train","truck"]
    data = {}
    for frame_num in range(num_frames):
        frame_data = []
        for i, obj in enumerate(object_list):
            # Emit the object only when its trajectory covers this frame.
            if obj.first_frame <= frame_num < obj.first_frame + len(obj.all):
                idx = frame_num - obj.first_frame
                veh_data = {
                    "id_num": i,
                    "class": classes[int(obj.cls)],
                    "detected": obj.tags[idx],
                    "image_position": obj.all[idx].tolist(),
                    "world_position": obj.all_world[idx].tolist(),
                    "gps_position": obj.all_gps[idx].tolist(),
                }
                frame_data.append(veh_data)
        data[frame_num] = frame_data
    all_data = {
        "metadata": metadata,
        "data": data,
    }
    if out_file is not None:
        with open(out_file, 'w') as fp:
            json.dump(all_data, fp)
    return all_data
def get_infer_iterator(src_dataset,
                       src_vocab_table,
                       batch_size,
                       eos,
                       sos,
                       src_max_len=None):
    """Get dataset for inference."""
    # Total number of examples in src_dataset
    # (3003 examples + 69 padding examples).
    src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
    src_sos_id = tf.cast(src_vocab_table.lookup(tf.constant(sos)), tf.int32)
    # Tokenize each line, convert word strings to vocabulary ids, then wrap
    # every sentence as <sos> ... <eos> and record its length (+2 markers).
    dataset = src_dataset.map(lambda src: tf.string_split([src]).values)
    dataset = dataset.map(
        lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))
    dataset = dataset.map(lambda src: (tf.concat(
        ([src_sos_id], src, [src_eos_id]), 0), 2 + tf.size(src)))

    def batch_examples(ds):
        # Pad the variable-length source vectors with eos up to src_max_len;
        # the scalar lengths need no padding. Padding past the true length is
        # masked out later, so the pad value itself does not matter.
        return ds.padded_batch(
            batch_size,
            padded_shapes=(tf.TensorShape([src_max_len]),  # src
                           tf.TensorShape([])),            # src_len
            padding_values=(src_eos_id, 0),
            drop_remainder=True)

    batched_dataset = batch_examples(dataset)
    return batched_dataset.map(
        lambda src_ids, src_seq_len: {
            "source": src_ids,
            "source_sequence_length": src_seq_len})
def test_minus_from_step():
    """Test from_step < 0"""
    out = CheckOutput(target_log_history_length=6)
    plotter = PlotLosses(outputs=[out], from_step=-5)
    for step in range(10):
        metrics = {
            'acc': 0.1 * step,
            'loss': 0.69 / (step + 1),
        }
        plotter.update(metrics)
    plotter.send()
def get_num_hearts(image):
    """Return (full hearts, total hearts) detected in the hearts region.

    Keyword arguments:
    image - image of hearts region
    """
    # Colour ranges for filled and empty heart pixels.
    lower_full = np.array([0, 15, 70])
    upper_full = np.array([30, 35, 250])
    lower_empty = np.array([150, 160, 220])
    upper_empty = np.array([255, 255, 255])
    # Contour-area windows for whole and half hearts.
    full_heart_area = (200, 300)
    half_heart_area = (60, 100)
    # Crop to the heart region.
    hearts_image = image[98:161, 967:1200]

    def count_hearts(mask):
        # Whole hearts count as 1, half hearts as 0.5.
        contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if imutils.is_cv2() else contours[1]
        total = 0
        for contour in contours:
            area = cv2.contourArea(contour)
            if full_heart_area[0] <= area <= full_heart_area[1]:
                total = total + 1
            if half_heart_area[0] <= area <= half_heart_area[1]:
                total = total + 0.5
        return total

    full_hearts = count_hearts(cv2.inRange(hearts_image, lower_full, upper_full))
    empty_hearts = count_hearts(cv2.inRange(hearts_image, lower_empty, upper_empty))
    return full_hearts, empty_hearts + full_hearts
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Fully Kiosk Browser number entities."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    available_settings = coordinator.data["settings"]
    entities = []
    for description in ENTITY_TYPES:
        # Only expose numbers the device actually reports in its settings.
        if description.key in available_settings:
            entities.append(FullyNumberEntity(coordinator, description))
    async_add_entities(entities, False)
def _clean_root(tool_xml):
"""XSD assumes macros have been expanded, so remove them."""
clean_tool_xml = copy.deepcopy(tool_xml)
to_remove = []
for macros_el in clean_tool_xml.getroot().findall("macros"):
to_remove.append(macros_el)
for macros_el in to_remove:
clean_tool_xml.getroot().remove(macros_el)
return clean_tool_xml | 5,333,814 |
def page_not_found(e):
    """Return a custom 404 error page.

    Intended as a Flask-style 404 error handler; *e* is the error passed in
    by the framework.
    """
    logging.error(':: A 404 was thrown a bad URL was requested ::')
    # NOTE(review): traceback.format_exc() is only meaningful while an
    # exception is being handled; outside that context it logs
    # "NoneType: None" — confirm this handler always runs during handling.
    logging.error(traceback.format_exc())
    return render_template('404.html'), 404
def find_prime_factors(num):
    """Return the set of prime factors of ``num``.

    Trial division: each found factor is divided out repeatedly, so only
    primes are ever recorded (composites are exhausted by their smaller
    prime parts first).

    :param num: integer to factor (validated by the helpers below)
    :return: set of prime factors
    """
    validate_integers(num)
    zero_divisors_error(num)
    potential_factor = 2
    prime_factors = set()
    while potential_factor <= num:
        if num % potential_factor == 0:
            prime_factors.add(potential_factor)
            # Floor division keeps the arithmetic in exact integers; true
            # division ('/') would produce floats and lose precision for
            # large inputs.
            num //= potential_factor
        else:
            potential_factor += 1
    return prime_factors
def json_serial(obj):
    """JSON serializer for objects not serializable by default json code."""
    if isinstance(obj, (datetime, date)):
        # ISO-8601 string, e.g. '2020-01-02T03:04:05'.
        return obj.isoformat()
    if isinstance(obj, dict):
        return obj
    raise TypeError("Type %s not serializable" % type(obj))
def read_yaml_files(directories):
    """Read the contents of all yaml files in a directory.

    Args:
        directories: List of directory names with configuration files

    Returns:
        config_dict: Dict of yaml read (all files concatenated, then parsed)
    """
    # Initialize key variables
    yaml_found = False
    yaml_from_file = ''
    all_yaml_read = ''
    # Check each directory in sequence
    for config_directory in directories:
        # Check if config_directory exists
        if os.path.isdir(config_directory) is False:
            log_message = (
                'Configuration directory "{}" '
                'doesn\'t exist!'.format(config_directory))
            log.log2die_safe(1009, log_message)
        # Cycle through list of files in directory
        for filename in os.listdir(config_directory):
            # Examine all the '.yaml' files in directory
            if filename.endswith('.yaml'):
                # Read YAML data; os.path.join is portable, unlike a
                # hand-built '/' separator.
                filepath = os.path.join(config_directory, filename)
                yaml_from_file = read_yaml_file(filepath, as_string=True)
                yaml_found = True
                # Append yaml from file to all yaml previously read
                all_yaml_read = '{}\n{}'.format(all_yaml_read, yaml_from_file)
        # Verify YAML files found in directory
        if yaml_found is False:
            log_message = (
                'No files found in directory "{}" with ".yaml" '
                'extension.'.format(config_directory))
            log.log2die_safe(1010, log_message)
    # Return
    config_dict = yaml.safe_load(all_yaml_read)
    return config_dict
def run_command(command, settings):
    """Runs command from rule for passed command."""
    side_effect = command.side_effect
    if side_effect:
        # Give the rule a chance to run its side effect first.
        side_effect(command, settings)
    shells.put_to_history(command.script)
    print(command.script)
def test_v1_0_workflow_top_level_sf_expr() -> None:
    """Test for the correct error when converting a secondaryFiles expression in a workflow level input."""
    doc_path = str(HERE / "../testdata/workflow_input_sf_expr.cwl")
    with raises(WorkflowException, match=r".*secondaryFiles.*"):
        result, modified = traverse0(
            parser.load_document(doc_path),
            False,
            False,
            False,
            False,
        )
def test_measurement():
    """Tests the measurement property of the HMIMap object."""
    # `hmi` is a module-level HMIMap instance defined elsewhere in this file.
    assert hmi.measurement == "continuum"
def _check_for_crash(project_name, fuzz_target, testcase_path):
    """Reproduce a testcase in docker and return its dedup token, or None."""
    def docker_run(args):
        base_command = ['docker', 'run', '--rm', '--privileged']
        if sys.stdin.isatty():
            base_command.append('-i')
        return utils.execute(base_command + args)

    logging.info('Checking for crash')
    stdout, stderr, return_code = helper.reproduce_impl(
        project=helper.Project(project_name),
        fuzzer_name=fuzz_target,
        valgrind=False,
        env_to_add=[],
        fuzzer_args=[],
        testcase_path=testcase_path,
        run_function=docker_run,
        err_result=(None, None, None))
    if return_code is None:
        return None
    logging.info('stdout =\n%s', stdout)
    logging.info('stderr =\n%s', stderr)

    def seen(markers):
        # pylint: disable=unsupported-membership-test
        return any(marker in stdout or marker in stderr for marker in markers)

    # A real crash report carries both a start and an end marker.
    if not (seen(START_MARKERS) and seen(END_MARKERS)):
        return None
    return _get_dedup_token(stdout + stderr)
def main(translator, source, target, text, api_key, languages):
    """
    Use TRANSLATOR to translate source material into another language.
    Available translators include: Google, MyMemory, QCRI, Linguee, Pons, Yandex, Microsoft (Bing), and Papago.\n
    \f
    function responsible for parsing terminal arguments and provide them for
    further use in the translation process
    """
    # These backends cannot be used without an API key.
    api_key_required = {"deepl", "qcri", "yandex", "microsoft", "papago"}
    if translator in api_key_required and not api_key:
        click.echo(
            "This translator requires an api key provided through --api-key"
        )
        return
    if languages:
        print_supported_languages(translator, api_key)
        return
    translate(translator, source, target, text, api_key)
def test_post_global_singleuse_coupons(admin_drf_client, single_use_coupon_json):
    """ Test that the correct model objects are created for a batch of single-use coupons (global coupon) """
    data = single_use_coupon_json
    data["is_global"] = True
    resp = admin_drf_client.post(reverse("coupon_api"), type="json", data=data)
    assert resp.status_code == status.HTTP_200_OK
    model_version = CouponPaymentVersion.objects.get(id=resp.json().get("id"))
    assert str(model_version) == "CouponPaymentVersion for {} of type {}".format(
        model_version.num_coupon_codes, model_version.coupon_type
    )
    # The fixture presumably requests 5 codes over 3 products — confirm
    # against the single_use_coupon_json fixture definition.
    assert model_version.couponversion_set.count() == 5
    assert model_version.payment.coupon_set.count() == 5
    assert model_version.amount == data.get("amount")
    assert model_version.coupon_type == "single-use"
    assert model_version.payment_transaction == data.get("payment_transaction")
    assert Company.objects.filter(id=data.get("company")).first() is not None
    # is_global was set above, so the created coupons must carry it.
    assert (
        Coupon.objects.filter(payment=model_version.payment).first().is_global is True
    )
    assert (
        CouponEligibility.objects.filter(product__in=data.get("product_ids")).count()
        == 15
    )
def parse_from_compdb(compdb, file_to_parse):
    """Extracts the absolute file path of the file to parse and its arguments from compdb"""
    absolute_filepath = None
    file_arguments = []
    database = compdb_parser.load_compdb(compdb)
    if not database:
        sys.exit("ERROR: Failed to load compdb")
    # Scan every compile command; the last matching entry wins.
    for command in database.getAllCompileCommands():
        if file_to_parse in command.filename:
            absolute_filepath = os.path.join(command.directory, command.filename)
            args = list(command.arguments)
            args = tu_parser.clean_args(args)
            file_arguments = tu_parser.absolute_path_include(args, command.directory)
    return absolute_filepath, file_arguments
def _pyenv_version():
    """Determine the currently active pyenv version.

    Returns:
        str: pyenv version name (e.g. "3.9.1")
    """
    import subprocess
    # check_output returns bytes on Python 3; decode before splitting,
    # otherwise bytes.split(' ') raises TypeError.
    output = subprocess.check_output(['pyenv', 'version']).decode('utf-8')
    # Output looks like "3.9.1 (set by /path/.python-version)".
    return output.split(' ')[0]
def reset_config_on_routers(tgen, routerName=None):
    """
    Resets configuration on routers to the snapshot created using input JSON
    file. It replaces existing router configuration with FRRCFG_BKUP_FILE.

    Works in three parallel phases per router: (1) fetch the running config,
    (2) compute a delta against the initial snapshot with frr-reload.py,
    (3) apply the delta through vtysh.

    Parameters
    ----------
    * `tgen` : Topogen object
    * `routerName` : router whose config is to be reset; all routers when None

    Returns
    -------
    True on completion (apply failures are logged but do not raise — see the
    note near the bottom about frr-reload.py limitations).
    """
    logger.debug("Entering API: reset_config_on_routers")
    # Bump the config generation counter so per-run temp files are unique.
    tgen.cfg_gen += 1
    gen = tgen.cfg_gen
    # Trim the router list if needed
    router_list = tgen.routers()
    if routerName:
        if routerName not in router_list:
            logger.warning(
                "Exiting API: reset_config_on_routers: no router %s",
                routerName,
                exc_info=True,
            )
            return True
        router_list = {routerName: router_list[routerName]}
    delta_fmt = tgen.logdir + "/{}/delta-{}.conf"
    # FRRCFG_BKUP_FILE
    target_cfg_fmt = tgen.logdir + "/{}/frr_json_initial.conf"
    run_cfg_fmt = tgen.logdir + "/{}/frr-{}.sav"
    #
    # Get all running configs in parallel
    #
    procs = {}
    for rname in router_list:
        logger.info("Fetching running config for router %s", rname)
        procs[rname] = router_list[rname].popen(
            ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
            stdin=None,
            stdout=open(run_cfg_fmt.format(rname, gen), "w"),
            stderr=subprocess.PIPE,
        )
    for rname, p in procs.items():
        _, error = p.communicate()
        if p.returncode:
            logger.error(
                "Get running config for %s failed %d: %s", rname, p.returncode, error
            )
            raise InvalidCLIError(
                "vtysh show running error on {}: {}".format(rname, error)
            )
    #
    # Get all delta's in parallel
    #
    procs = {}
    for rname in router_list:
        logger.info(
            "Generating delta for router %s to new configuration (gen %d)", rname, gen
        )
        procs[rname] = tgen.net.popen(
            [
                "/usr/lib/frr/frr-reload.py",
                "--test-reset",
                "--input",
                run_cfg_fmt.format(rname, gen),
                "--test",
                target_cfg_fmt.format(rname),
            ],
            stdin=None,
            stdout=open(delta_fmt.format(rname, gen), "w"),
            stderr=subprocess.PIPE,
        )
    for rname, p in procs.items():
        _, error = p.communicate()
        if p.returncode:
            logger.error(
                "Delta file creation for %s failed %d: %s", rname, p.returncode, error
            )
            raise InvalidCLIError("frr-reload error for {}: {}".format(rname, error))
    #
    # Apply all the deltas in parallel
    #
    procs = {}
    for rname in router_list:
        logger.info("Applying delta config on router %s", rname)
        procs[rname] = router_list[rname].popen(
            ["/usr/bin/env", "vtysh", "-f", delta_fmt.format(rname, gen)],
            stdin=None,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
    for rname, p in procs.items():
        output, _ = p.communicate()
        vtysh_command = "vtysh -f {}".format(delta_fmt.format(rname, gen))
        if not p.returncode:
            router_list[rname].logger.info(
                '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(
                    vtysh_command, output
                )
            )
        else:
            router_list[rname].logger.warning(
                '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(
                    vtysh_command, output
                )
            )
            logger.error(
                "Delta file apply for %s failed %d: %s", rname, p.returncode, output
            )
            # We really need to enable this failure; however, currently frr-reload.py
            # producing invalid "no" commands as it just preprends "no", but some of the
            # command forms lack matching values (e.g., final values). Until frr-reload
            # is fixed to handle this (or all the CLI no forms are adjusted) we can't
            # fail tests.
            # raise InvalidCLIError("frr-reload error for {}: {}".format(rname, output))
    #
    # Optionally log all new running config if "show_router_config" is defined in
    # "pytest.ini"
    #
    if show_router_config:
        procs = {}
        for rname in router_list:
            logger.info("Fetching running config for router %s", rname)
            procs[rname] = router_list[rname].popen(
                ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
                stdin=None,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
        for rname, p in procs.items():
            output, _ = p.communicate()
            if p.returncode:
                logger.warning(
                    "Get running config for %s failed %d: %s",
                    rname,
                    p.returncode,
                    output,
                )
            else:
                logger.info(
                    "Configuration on router %s after reset:\n%s", rname, output
                )
    logger.debug("Exiting API: reset_config_on_routers")
    return True
def ignore_exception(exception):
    """Check whether we can safely ignore this exception."""
    if isinstance(exception, BadRequest):
        ignorable_prefixes = (
            'Have no rights to send a message',
            'Message_id_invalid',
            'Message identifier not specified',
            'Schedule_date_invalid',
            'Message is not modified: specified new message content',
        )
        if 'Query is too old' in exception.message or \
                exception.message.startswith(ignorable_prefixes):
            return True
    if isinstance(exception, Unauthorized):
        ignorable_messages = {
            'forbidden: bot was blocked by the user',
            'forbidden: message_author_required',
            'forbidden: bot is not a member of the supergroup chat',
            'forbidden: user is deactivated',
            'forbidden: bot was kicked from the group chat',
            'forbidden: bot was kicked from the supergroup chat',
            'forbidden: chat_write_forbidden',
        }
        if exception.message.lower() in ignorable_messages:
            return True
    # Transient network timeouts are always safe to ignore.
    return isinstance(exception, TimedOut)
def rmse_loss(prediction, ground_truth, weight_map=None):
    """
    :param prediction: the current prediction of the ground truth.
    :param ground_truth: the measurement you are approximating with regression.
    :param weight_map: a weight map for the cost function.
    :return: sqrt(mean(differences squared)); weighted mean when a weight
        map is given.
    """
    if weight_map is None:
        return tf.sqrt(tf.losses.mean_squared_error(prediction, ground_truth))
    # Weighted RMSE: mean of weighted squared residuals, normalised by the
    # mean weight, then square-rooted.
    residuals = tf.subtract(prediction, ground_truth)
    squared = tf.multiply(residuals, residuals)
    weighted = tf.multiply(squared, weight_map)
    return tf.sqrt(tf.reduce_mean(weighted) / tf.reduce_mean(weight_map))
def test_network_config():
    """Test that the `canu generate network config` command runs and generates config."""
    # Use an isolated filesystem so the SLS fixture and generated folder
    # don't leak between tests.
    with runner.isolated_filesystem():
        with open(sls_file, "w") as f:
            json.dump(sls_input, f)
        result = runner.invoke(
            cli,
            [
                "--cache",
                cache_minutes,
                "generate",
                "network",
                "config",
                "--csm",
                csm,
                "--architecture",
                architecture,
                "--shcd",
                test_file,
                "--tabs",
                tabs,
                "--corners",
                corners,
                "--sls-file",
                sls_file,
                "--folder",
                folder_name,
            ],
        )
        assert result.exit_code == 0
        # Every switch in the test SHCD should report a generated config.
        assert "sw-spine-001 Config Generated" in str(result.output)
        assert "sw-spine-002 Config Generated" in str(result.output)
        assert "sw-leaf-001 Config Generated" in str(result.output)
        assert "sw-leaf-002 Config Generated" in str(result.output)
        assert "sw-leaf-003 Config Generated" in str(result.output)
        assert "sw-leaf-004 Config Generated" in str(result.output)
        assert "sw-cdu-001 Config Generated" in str(result.output)
        assert "sw-cdu-002 Config Generated" in str(result.output)
        assert "sw-edge-001 Config Generated" in str(result.output)
        assert "sw-edge-002 Config Generated" in str(result.output)
        assert "sw-leaf-bmc-001 Config Generated" in str(result.output)
def start_workflow(base_url, digest_login, workflow_definition, media_package):
    """
    Start a workflow on a media package.

    :param base_url: The URL for the request
    :type base_url: str
    :param digest_login: The login credentials for digest authentication
    :type digest_login: DigestLogin
    :param workflow_definition: The workflow definition
    :type workflow_definition: str
    :param media_package: The media package
    :type media_package: str
    :return: The workflow instance
    :rtype: str
    :raise RequestError:
    """
    url = '{}/workflow/start'.format(base_url)
    data = {'definition': workflow_definition, 'mediapackage': media_package}
    # The docstring promises the created workflow instance; propagate the
    # post_request result instead of silently discarding it.
    return post_request(url, digest_login, element_description="/workflow/start", data=data)
def get_corrupted_simulation_docs():
    """Return an iterable (cursor) of simulation docs without samples
    (when num_particles > 0).

    When num_particles <= 0, no samples are created and the simulation is
    considered Finished anyway, so those simulations are ignored here.
    """
    # Finished simulations whose 'samples' field exists but is empty are
    # corrupted — a finished run with particles should have produced samples.
    return db[DBCOLLECTIONS.SIMULATION].find({
        'procstatus.status': PROCSTATUS.FINISHED,
        'samples': {'$exists': True, "$eq": []},
        'num_particles': {'$gt': 0},
    })
def wait_for_powerpoint(videofile: str):
    """
    While Powerpoint exports a video file, the file exists but has zero size.
    Only once the export is complete will the contents be copied to the target.
    """
    if os.path.getsize(videofile) > 0:  # will be zero during export
        return  # assume the file is ready and no waiting is needed
    # risky if we get started near the end of the exporting

    def poll_size(message, until_stable):
        # Poll the file size every 5 seconds. With until_stable=False we wait
        # for the size to start changing (export finishing and copying in);
        # with until_stable=True we wait for it to stop changing (done).
        print(message % videofile)
        previous = os.path.getsize(videofile)
        while True:
            # if the file comes from a different drive, it may take a while to arrive
            time.sleep(5.0)
            current = os.path.getsize(videofile)
            if (current == previous) == until_stable:
                return
            previous = current

    poll_size("waiting for filesize of '%s' to change", until_stable=False)
    poll_size("waiting for filesize of '%s' to stop changing", until_stable=True)
def scheduler():
    """
    A function intended to be called by a systemd timer.

    Non-destructive operation, no data will be deleted currently.
    Whenever it's called, it will invoke an examination on host and device.

    Note:
        The operation is in days, which means that if the scheduler was
        already invoked today, a second call in the same day will not
        perform anything on snapshots.
        For stability and safety, the delete process is not invoked
        currently; instead, check the log file for further information.
    """
    for backup in BACKUP_LIST:
        # TODO: add error type
        # Only report a failure when a snapshot was actually due — the old
        # code logged 'snapshot take failed' whenever nothing was scheduled.
        if backup.should_take_snapshot():
            if backup.take_snapshot():
                # backup.remove_snapshots_host()  # destructive; disabled
                pass
            else:
                logger.error('snapshot take failed')
        if backup.should_send_snapshot():
            if backup.send_snapshots_to_device():
                # backup.remove_snapshots_device()  # destructive; disabled
                pass
            else:
                logger.error('snapshot send failed')
        # Report (but do not delete) outdated snapshots on both sides.
        out_host = backup.outdate_snapshots_host()
        out_dev = backup.outdate_snapshots_device()
        if len(out_host) >= 1:
            logger.info('should delete on host: %r', out_host)
        if len(out_dev) >= 1:
            logger.info('should delete on dev: %r', out_dev)
def main(args):
    """Main entry point (currently a stub).

    :param args: parsed command-line arguments (unused so far)
    """
def example_calculate_slippage_with_bid_mid_spreads():
    """Calculate the slippage for trades given market data as a benchmark."""
    from tcapy.analysis.algos.metric import MetricSlippage

    market_df, trade_df = get_sample_data()
    slippage_metric = MetricSlippage()
    # calculate_metric annotates the trades with slippage columns.
    trade_df, _ = slippage_metric.calculate_metric(trade_df, market_df)
    print(trade_df)
def residual_block(x: Tensor, downsample: bool, filters: int, kernel_size: int = 3) -> Tensor:
    """
    Build a two-convolution residual block with a skip connection.

    Parameters
    ----------
    x : Tensor
        Input feature map.
    downsample : bool
        When True, the first convolution uses stride 2 and the skip path is
        projected with a 1x1 stride-2 convolution so the shapes match for
        the residual addition.
    filters : int
        Number of filters used by all convolutions in the block.
    kernel_size : int, optional
        Spatial size of the two main convolutions. The default is 3.

    Returns
    -------
    Tensor
        Output feature map after the residual Add and relu_bn.
    """
    y = Conv2D(kernel_size=kernel_size,
               strides= (1 if not downsample else 2),
               filters=filters,
               padding="same")(x)
    y = relu_bn(y)
    y = Conv2D(kernel_size=kernel_size,
               strides=1,
               filters=filters,
               padding="same")(y)
    if downsample:
        # Project the identity path so it matches y's spatial size/channels.
        x = Conv2D(kernel_size=1,
                   strides=2,
                   filters=filters,
                   padding="same")(x)
    out = Add()([x, y])
    out = relu_bn(out)
    return out
def test_score_hist_splits(spark, df):
    """
    test that a dataframe gets processed with non-default splits list
    test binwidths and normalised probability densities sum up to 1.0
    """
    # Non-default split points, ascending order.
    res = pd.DataFrame(_calc_probability_density(df, spark=spark, buckets=[0.3, 0.6]))
    assert res.count_rows.count() == 3
    total = res.count_rows.cumsum()[2]
    assert res.count_rows.sum() == total
    assert res.binwidth.sum() == pytest.approx(1.0)
    assert res.normalised.sum() == pytest.approx(1.0)
    # The same splits given in descending order must produce an equivalent histogram.
    res2 = pd.DataFrame(_calc_probability_density(df, spark=spark, buckets=[0.6, 0.3]))
    assert res2.count_rows.count() == 3
    assert res2.count_rows.sum() == total
    assert res2.binwidth.sum() == pytest.approx(1.0)
    assert res2.normalised.sum() == pytest.approx(1.0)
def test_measurement_statistics_povm(ops, state, final_states, probabilities):
    """ measurement_statistics_povm: projectors applied to basis states. """
    collapsed_states, probs = measurement_statistics_povm(state, ops)
    for collapsed_state, expected_state in zip(collapsed_states, final_states):
        if expected_state:
            assert isequal(collapsed_state, expected_state)
        else:
            # A zero-probability outcome yields no collapsed state.
            assert collapsed_state is None
    np.testing.assert_almost_equal(probs, probabilities)
def dehaze(img, level):
    """use Otsu to threshold https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_multiotsu.html
    n.b. threshold used to mask image: dark values are zeroed, but result is NOT binary
    level: value 1..5 with larger values preserving more bright voxels
    level: dark_classes/total_classes
    1: 3/4
    2: 2/3
    3: 1/2
    4: 1/3
    5: 1/4
    """
    level = bound(1, 5, level)
    n_classes = abs(3 - level) + 2
    dark_classes = bound(1, 3, 4 - level)
    thresholds = skimage.filters.threshold_multiotsu(img, n_classes)
    cutoff = thresholds[dark_classes - 1]
    print("Zeroing voxels darker than ", cutoff)
    # Zero everything below the cutoff; bright voxels keep their values.
    img[img < cutoff] = 0
    return img
def matthews_correlation_coefficient(tp, tn, fp, fn):
    """Return Matthews correlation coefficient for values from a confusion matrix.

    Implementation is based on the definition from wikipedia:
    https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
    """
    marginal_product = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
    denominator = np.sqrt(marginal_product)
    if denominator == 0:
        # Convention: a zero marginal makes the coefficient 0, not undefined.
        denominator = 1
    return float(tp * tn - fp * fn) / denominator
def find_graph(hostnames):
    """
    Find a graph file that contains all devices in the testbed.
    duts are specified by hostnames

    Parameters:
    hostnames: list of duts in the target testbed.
    """
    index_path = os.path.join(LAB_GRAPHFILE_PATH, LAB_CONNECTION_GRAPH_FILE)
    with open(index_path) as fd:
        candidate_files = yaml.safe_load(fd)
    # Return the first graph file that contains every requested host.
    for fn in candidate_files:
        print_debug_msg(debug_fname, "Looking at conn graph file: %s for hosts %s" % (fn, hostnames))
        lab_graph = Parse_Lab_Graph(os.path.join(LAB_GRAPHFILE_PATH, fn))
        lab_graph.parse_graph()
        print_debug_msg(debug_fname, "For file %s, got hostnames %s" % (fn, lab_graph.devices))
        if lab_graph.contains_hosts(hostnames):
            print_debug_msg(debug_fname, ("Returning lab graph from conn graph file: %s for hosts %s" % (fn, hostnames)))
            return lab_graph
    # Fallback to return an empty connection graph, this is
    # needed to bridge the kvm test needs. The KVM test needs
    # A graph file, which used to be whatever hardcoded file.
    # Here we provide one empty file for the purpose.
    lab_graph = Parse_Lab_Graph(os.path.join(LAB_GRAPHFILE_PATH, EMPTY_GRAPH_FILE))
    lab_graph.parse_graph()
    return lab_graph
def run_analysis(apk_dir, md5_hash, package):
    """Run Dynamic File Analysis.

    Collects logcat/traffic data for the analyzed package and extracts
    clipboard dumps, URLs, domains (with a malware check), e-mail addresses
    and dumped app files into a single result dict.

    :param apk_dir: directory holding the dynamic-analysis artifacts
    :param md5_hash: hash identifying the analyzed APK
    :param package: Android package name being analyzed
    :return: dict of extracted analysis artifacts
    """
    analysis_result = {}
    logger.info('Dynamic File Analysis')
    domains = {}
    clipboard = []
    # Collect Log data
    data = get_log_data(apk_dir, package)
    # Two logcat tag formats are seen for clipboard dumps.
    clip_tag = 'I/CLIPDUMP-INFO-LOG'
    clip_tag2 = 'I CLIPDUMP-INFO-LOG'
    # Collect Clipboard
    for log_line in data['logcat']:
        if clip_tag in log_line:
            clipboard.append(log_line.replace(clip_tag, 'Process ID '))
        if clip_tag2 in log_line:
            log_line = log_line.split(clip_tag2)[1]
            clipboard.append(log_line)
    # URLs My Custom regex
    url_pattern = re.compile(
        r'((?:https?://|s?ftps?://|file://|'
        r'javascript:|data:|www\d{0,3}'
        r'[.])[\w().=/;,#:@?&~*+!$%\'{}-]+)', re.UNICODE)
    urls = re.findall(url_pattern, data['traffic'].lower())
    # De-duplicate while tolerating an empty result.
    if urls:
        urls = list(set(urls))
    else:
        urls = []
    # Domain Extraction and Malware Check
    logger.info('Performing Malware Check on extracted Domains')
    domains = MalwareDomainCheck().scan(urls)
    # Email Etraction Regex
    emails = []
    regex = re.compile(r'[\w.-]{1,20}@[\w-]{1,20}\.[\w]{2,10}')
    for email in regex.findall(data['traffic'].lower()):
        # Skip duplicates and protocol-relative false positives ('//...').
        if (email not in emails) and (not email.startswith('//')):
            emails.append(email)
    # Tar dump and fetch files
    all_files = get_app_files(apk_dir, md5_hash, package)
    analysis_result['urls'] = urls
    analysis_result['domains'] = domains
    analysis_result['emails'] = emails
    analysis_result['clipboard'] = clipboard
    analysis_result['xml'] = all_files['xml']
    analysis_result['sqlite'] = all_files['sqlite']
    analysis_result['other_files'] = all_files['others']
    analysis_result['tls_tests'] = get_tls_logs(apk_dir, md5_hash)
    return analysis_result
async def quotes(
    ticker: str, date: datetime.date,
    uow: UoW = fastapi.Depends(dependendies.get_uow),
) -> ListResponse[Ticker]:
    """Return the quotes stored for the given ticker and date.

    NOTE(review): the return annotation says ``ListResponse[Ticker]``
    while the repository queried is ``uow.quotes`` -- confirm which of
    the two is the intended element type.
    """
    # Unit-of-work scope wraps the repository access.
    with uow:
        results = uow.quotes.iterator({'ticker': ticker, 'date': date})
        return ListResponse(results=results)
def delete_snapshot(client, data_args) -> Tuple[str, dict, Union[list, dict]]:
    """ Delete existing snapshots from the system.
    :type client: ``Client``
    :param client: client which connects to api.
    :type data_args: ``dict``
    :param data_args: request arguments; 'snapshot_ids' holds the ids to delete.
    :return: human readable format, context output and the original raw response.
    :rtype: ``tuple``
    """
    ids_to_delete = argToList(data_args.get('snapshot_ids'))
    client.do_request(
        'DELETE',
        '/plugin/products/threat-response/api/v1/snapshot',
        data={'ids': ids_to_delete},
    )
    joined = ",".join(ids_to_delete)
    return f'Snapshot {joined} deleted successfully.', {}, {}
def faster_tikz_time_1c(dependencies: List[pathlib.Path],
                        targets: List[pathlib.Path]) -> None:
    """FASTER Tikz time plot 1c.

    Thin wrapper delegating to ``_faster_tikz_time_1`` with variant
    index 2.

    :param dependencies: input files the plot is built from.
    :param targets: output files the plot is written to.
    """
    _faster_tikz_time_1(dependencies, targets, 2)
def api_user_submissions(user_id):
    """Return the price submissions for the user matching ``user_id``.

    Example request::

        HTTP GET /api/v1/users/<user_id>/submissions

    The JSON response carries ``user_submissions``: a list of submission
    objects (submitted_timestamp, image, business_details, product_id,
    price, user_id, price_id).

    The requester must be authenticated as ``user_id``; otherwise a
    403 JSON error is returned.
    """
    from backend.models.prices import PriceDB

    if not user_is_authenticated(user_id=user_id):
        return json_error("Unauthorized", status_code=403)
    submissions = PriceDB().get_submissions(user_id=user_id)
    return json_success("OK", user_submissions=submissions)
def get_pdf_info(pdf_path: str) -> PdfInfo:
    """Get meta information of a PDF file.

    Parses the PDF with PyPDF2 and fills a ``PdfInfo`` record with
    error/encryption status, counters and the user attributes configured
    in ``~/.edapy/pdf_keys.csv``.

    :param pdf_path: path to the PDF file to inspect
    :return: populated ``PdfInfo``; ``is_errornous`` is True when the
        file could not be parsed at all
    """
    info: PdfInfo = PdfInfo(path=pdf_path)
    keys = get_flat_cfg_file(path="~/.edapy/pdf_keys.csv")
    ignore_keys = get_flat_cfg_file(path="~/.edapy/pdf_ignore_keys.csv")
    for key in keys:
        info.user_attributes[key] = None
    info.is_errornous = False
    info.is_encrypted = False
    info.nb_pages = -1
    info.nb_toc_top_level = -1
    info.nb_characters = 0
    with open(pdf_path, "rb") as fp:
        try:
            pdf_toread = PdfFileReader(fp, strict=False)
        except PyPDF2.utils.PdfReadError:
            info.is_errornous = True
            return info
        except KeyError as e:
            # Known PyPDF2 parsing issue; keep the issue link in the log.
            logger.warning(
                "https://github.com/mstamy2/PyPDF2/issues/388 for "
                f" PDF '{pdf_path}': {e}"
            )
            return info
        except (OSError, AssertionError, TypeError) as e:
            # Consolidated from three identical per-type clauses; the
            # logged type name preserves the old message for these types.
            logger.warning(f"{type(e).__name__} for PDF '{pdf_path}': {e}")
            return info
        try:
            # Count only top-level outline entries (nested ones are lists).
            tl_toc = [el for el in pdf_toread.outlines if not isinstance(el, list)]
            info.nb_toc_top_level = len(tl_toc)
        except PyPDF2.utils.PdfReadError as e:
            logger.error(f"{pdf_path}: PyPDF2.utils.PdfReadError {e}")
        except (ValueError, TypeError) as e:
            logger.error(f"{pdf_path}: {type(e).__name__} {e}")
        info_t = enhance_pdf_info(info, pdf_toread, pdf_path, keys, ignore_keys)
    return info_t
def convert_gz_json_type(value):
    """ArgumentParser type function: unmarshal a base64-wrapped,
    zlib-compressed JSON string into a Python object.
    """
    decoded = base64.b64decode(value)
    inflated = zlib.decompress(decoded)
    return json.loads(inflated)
def get_tag(tag):
    """
    Return the Tag object for the string passed to it.

    The lookup is case-insensitive (the name is lower-cased first).
    If a tag with that name already exists in the database it is
    returned; otherwise a new Tag object is created, added to the
    session and returned (committing is left to the caller).
    """
    tag = tag.lower()
    try:
        return Session.query(Tag).filter_by(name=unicode(tag)).one()
    except NoResultFound:
        # Removed the unused `as nrf` binding from the original.
        t = Tag(unicode(tag))
        Session.add(t)
        return t
def makeApiCall( url, endpointParams, debug = 'no' ) :
    """Issue a GET request and package the response for inspection.

    Args:
        url: endpoint URL to request.
        endpointParams: dict of query-string parameters.
        debug: 'yes' to print the response info to the console.

    Returns:
        dict with the url, the params, the decoded JSON payload and
        pretty-printed variants of both.
    """
    raw = requests.get(url, endpointParams)
    response = {
        'url': url,
        'endpoint_params': endpointParams,
        'endpoint_params_pretty': json.dumps(endpointParams, indent=4),
        'json_data': json.loads(raw.content),
    }
    response['json_data_pretty'] = json.dumps(response['json_data'], indent=4)
    if debug == 'yes':
        displayApiCallData(response)
    return response
def get_closest_area(
    lat: float, lng: float, locations: t.List[config.Area]
) -> t.Optional[config.Area]:
    """Return the area whose center lies within 50 km of the point.

    :param lat: latitude of the query point (e.g. where an image was taken)
    :param lng: longitude of the query point
    :param locations: candidate areas with lat/lng centers
    :return: the closest area if it is less than 50 km away, else None
        (also None when ``locations`` is empty)
    """
    distances = [
        (great_circle((area.lat, area.lng), (lat, lng)).km, area) for area in locations
    ]
    if not distances:
        # min() would raise ValueError on an empty candidate list.
        return None
    # Compare on distance only: bare tuple comparison would fall through
    # to comparing Area objects when two distances tie, raising TypeError.
    distance, closest_area = min(distances, key=lambda pair: pair[0])
    return closest_area if distance < 50 else None
def saveLocations(filename, model_locations, model_ids):
    """
    Save an allocation specified by the parameters.

    One line is written per model, formatted as:
        model_id location fullname

    :param filename: filename to save the allocation to
    :param model_locations: allocation (model_id -> location) to save to file
    :param model_ids: all model_id -> model mappings
    """
    # Context manager guarantees the file is closed even if a write
    # fails (the original used open()/close() with no error handling).
    with open(filename, 'w') as f:
        for model_id in model_locations:
            f.write("%s %s %s\n" % (model_id,
                                    model_locations[model_id],
                                    model_ids[model_id].getModelFullName()))
def disable(request):
    """
    Disable pool members by running the disable script.

    Expects POST parameters ``id_server_pool`` and ``ids`` (comma
    separated member ids). Flashes a success or error message and
    redirects back to the pool management tab.
    """
    try:
        auth = AuthSession(request.session)
        client = auth.get_clientFactory()
        id_server_pool = request.POST.get('id_server_pool')
        ids = request.POST.get('ids')
        if id_server_pool and ids:
            client.create_pool().disable(split_to_array(ids))
            messages.add_message(
                request, messages.SUCCESS, pool_messages.get('success_disable'))
        else:
            messages.add_message(
                request, messages.ERROR, error_messages.get('select_one'))
    # Fixed Python-2-only 'except X, e' syntax; 'as e' works on 2.6+ and 3.
    except NetworkAPIClientError as e:
        logger.error(e)
        messages.add_message(request, messages.ERROR, e)
    # NOTE(review): if the exception fires before id_server_pool is
    # assigned, this redirect raises NameError -- pre-existing behaviour,
    # left unchanged; confirm whether a fallback redirect is wanted.
    return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
def fastapi_native_middleware_factory():
    """Build a FastAPI app wired with native-style (BaseHTTP) middleware."""
    app = FastAPI()
    # IndexError raised by /client_error_from_handled_exception is
    # converted by this handler.
    app.add_exception_handler(IndexError, client_induced_exception_handler)
    app.add_middleware(BaseHTTPMiddleware, dispatch=xray_middleware)
    # (path, endpoint, explicit status code or None for the default)
    route_table = [
        ("/", handle_request, None),
        ("/client_error_as_http_exception", handle_with_http_exception, None),
        ("/client_error_as_response", handle_request, HTTP_422_UNPROCESSABLE_ENTITY),
        ("/client_error_from_handled_exception", handle_with_indexerror, None),
        ("/delay", handle_with_delay, None),
        ("/exception", handle_with_keyerror, None),
        ("/unauthorized", handle_request, HTTP_401_UNAUTHORIZED),
    ]
    for path, endpoint, status in route_table:
        if status is None:
            app.add_api_route(path, endpoint)
        else:
            app.add_api_route(path, endpoint, status_code=status)
    return app
def _partial_dependence(
    pipeline,
    X,
    features,
    percentiles=(0.05, 0.95),
    grid_resolution=100,
    kind="average",
    custom_range=None,
):
    """Compute the partial dependence for features of X.

    Args:
        pipeline (PipelineBase): pipeline.
        X (pd.DataFrame): Holdout data
        features (list(str)): Column names of X to compute the partial dependence for.
        percentiles (tuple float): Percentiles to use in range calculation for a given
            feature.
        grid_resolution: Number of points in range of values used for each feature in
            partial dependence calculation.
        kind (str): The type of predictions to return ("average",
            "individual" or "both").
        custom_range (dict[str, np.ndarray]): Mapping from column name in X to
            range of values to use in partial dependence. If custom_range is specified,
            the percentile + interpolation procedure is skipped and the values in custom_range
            are used.

    Returns:
        dict with 'average', 'individual', 'values' keys. 'values' is a list of
        the values used in the partial dependence for each feature.
        'average' and 'individual' are averaged and individual predictions for
        each point in the grid.

    Raises:
        ValueError: If grid_resolution is not strictly greater than 1.
    """
    if grid_resolution <= 1:
        raise ValueError("'grid_resolution' must be strictly greater than 1.")
    custom_range = custom_range or {}
    # Keep only the custom ranges belonging to the requested features.
    custom_range = {
        feature: custom_range.get(feature)
        for feature in features
        if feature in custom_range
    }
    grid, values = _grid_from_X(
        X.loc[:, features],
        percentiles,
        grid_resolution,
        custom_range,
    )
    averaged_predictions, predictions = _partial_dependence_calculation(
        pipeline,
        grid,
        features,
        X,
    )
    # reshape predictions to
    # (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...)
    predictions = predictions.reshape(-1, X.shape[0], *[val.shape[0] for val in values])
    # reshape averaged_predictions to
    # (n_outputs, n_values_feature_0, n_values_feature_1, ...)
    averaged_predictions = averaged_predictions.reshape(
        -1, *[val.shape[0] for val in values]
    )
    if kind == "average":
        return {"average": averaged_predictions, "values": values}
    elif kind == "individual":
        return {"individual": predictions, "values": values}
    else:  # kind='both'
        return {
            "average": averaged_predictions,
            "individual": predictions,
            "values": values,
        }
def nplog(
    a: np.ndarray, deriv: bool = False, eps: float = 1e-30, verbose: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    r"""$C^2$ extension of $\ln(a)$ below `eps`.

    Entries above `eps` use the ordinary log; entries below are replaced
    by a quadratic extension that matches value and derivatives at `eps`.

    Args:
        a: a Numpy array
        deriv: if `True`, the first derivative is also returned
        eps: a lower bound
        verbose: whether diagnoses are printed

    Returns:
        $\ln(a)$ $C^2$-extended below `eps`,
        with its derivative if `deriv` is `True`
    """
    if np.min(a) > eps:
        loga = np.log(a)
        # Return a tuple (not a list) to match the annotated return type.
        return (loga, 1.0 / a) if deriv else loga
    logarreps = np.log(np.maximum(a, eps))
    # Quadratic extension below eps.  The original called a bare `log`
    # name here; use np.log explicitly so the function does not depend
    # on an unrelated `log` being present in the module namespace.
    logarr_smaller = np.log(eps) - (eps - a) * (3.0 * eps - a) / (2.0 * eps * eps)
    if verbose:
        n_small_args = np.sum(a < eps)
        if n_small_args > 0:
            finals = "s" if n_small_args > 1 else ""
            print(
                f"nplog: {n_small_args} argument{finals} smaller than {eps}: mini = {np.min(a)}"
            )
    loga = np.where(a > eps, logarreps, logarr_smaller)
    if not deriv:
        return loga
    der_logarreps = 1.0 / np.maximum(a, eps)
    der_logarr_smaller = (2.0 * eps - a) / (eps * eps)
    der_loga = np.where(a > eps, der_logarreps, der_logarr_smaller)
    return loga, der_loga
def translate_names(recipe_names: List[str], locale: str) -> List[str]:
    """Translate recipe names into the requested locale.

    English locales ('auto', 'en-us') are returned unchanged; any other
    locale is resolved through recipes/translations.json.
    """
    if locale in ('auto', 'en-us'):
        return recipe_names
    translation_path = os.path.join('recipes', 'translations.json')
    with open(translation_path, encoding='utf-8') as fp:
        table = json.load(fp)
    return [table[name][locale] for name in recipe_names]
def _section_to_text(config_section: ConfigSection) -> str:
    """Render one config section (header line plus its options) as text."""
    header = f'[{config_section.name}]{LINE_SEP}'
    body = LINE_SEP.join(_option_to_text(option) for option in config_section.options)
    return header + body + LINE_SEP
def TempFileDecorator(func):
    """Populates self.tempfile with path to a temporary writeable file.

    The file is created inside self.tempdir with delete=False so that it
    survives the `with` block and the wrapped method can reopen it by
    path.  The result is additionally wrapped in TempDirDecorator.
    """
    import functools

    @functools.wraps(func)  # replaces the manual __name__/__doc__/__module__ copying
    def wrapper(self, *args, **kwargs):
        with tempfile.NamedTemporaryFile(dir=self.tempdir, delete=False) as f:
            self.tempfile = f.name
            return func(self, *args, **kwargs)

    return TempDirDecorator(wrapper)
def get_random_string(length: int) -> str:
    """
    Create a random string whose first character is a lower-case letter.

    The remaining characters are drawn from letters (both cases) and
    digits.  Note: seed the random module elsewhere in the program for
    reproducible output.

    :param length: required string length; must be at least 1
    :return: the generated string
    :raises ValueError: for zero and negative length
    """
    if length < 1:
        raise ValueError("Random Strings must have length 1 minimum.")
    alphabet = string.ascii_letters + string.digits
    head = random.choice(string.ascii_lowercase)
    tail = ''.join(random.choice(alphabet) for _ in range(length - 1))
    return head + tail
def subset_by_month(prediction_dict, desired_month):
    """Subsets examples by month.

    :param prediction_dict: See doc for `write_file`.
    :param desired_month: Desired month (integer from 1...12).
    :return: prediction_dict: Same as input but with fewer examples.
    """
    error_checking.assert_is_integer(desired_month)
    error_checking.assert_is_geq(desired_month, 1)
    error_checking.assert_is_leq(desired_month, 12)

    # Month number for each forecast-initialization time.
    init_months = numpy.array([
        int(time_conversion.unix_sec_to_string(t, '%m'))
        for t in prediction_dict[INIT_TIMES_KEY]
    ], dtype=int)

    keep_indices = numpy.where(init_months == desired_month)[0]
    return subset_by_index(
        prediction_dict=prediction_dict, desired_indices=keep_indices
    )
def build_dataset_mce(platform, dataset_name, columns):
    """
    Build the MetadataChangeEvent dict describing a dataset's schema.
    """
    actor = "urn:li:corpuser:etl"
    sys_time = int(time.time())
    fields = [
        {
            "fieldPath": column["name"],
            "nativeDataType": repr(column["type"]),
            "type": {"type": get_column_type(column["type"])},
            "description": column.get("comment", None),
        }
        for column in columns
    ]
    schema_metadata = {
        "schemaName": dataset_name,
        "platform": f"urn:li:dataPlatform:{platform}",
        "version": 0,
        "created": {"time": sys_time, "actor": actor},
        "lastModified": {"time": sys_time, "actor": actor},
        "hash": "",
        "platformSchema": {"tableSchema": ""},
        "fields": fields,
    }
    snapshot = (
        "com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot",
        {
            "urn": f"urn:li:dataset:(urn:li:dataPlatform:{platform},{dataset_name},PROD)",
            "aspects": [("com.linkedin.pegasus2avro.schema.SchemaMetadata", schema_metadata)],
        },
    )
    return {
        "auditHeader": None,
        "proposedSnapshot": snapshot,
        "proposedDelta": None,
    }
def build_guard(context_info):
    """Convert exceptions to BuildError with the given context information.

    Generator meant to be used as a context manager (presumably wrapped
    with ``contextlib.contextmanager`` where it is defined or used --
    TODO confirm).  BuildError passes through untouched; any other
    exception is wrapped in a BuildError carrying ``context_info``,
    chained to the original via ``from``.
    """
    try:
        yield
    except BuildError: # pylint: disable = try-except-raise
        raise
    except Exception as exc:
        raise BuildError(context_info) from exc
def do_host_stor_delete(cc, args):
    """Delete a stor, translating a 404 into a CommandError."""
    try:
        cc.istor.delete(args.stor)
    except exc.HTTPNotFound:
        msg = 'Delete failed, stor: %s not found' % args.stor
        raise exc.CommandError(msg)
def tag_source_file(path):
    """Returns a list of tuples: (line_text, list_of_tags, (path, line_num)).

    Scans *path* for ``// BEGIN <tag>`` / ``// END <tag>`` marker
    comments.  Marker lines and lines containing ``//-``, ``/*-`` or
    ``-*/`` are consumed; every other line is returned together with a
    snapshot of the tags active at that point and its origin location.
    """
    # Use this to store snapshots of current_tags.
    from copy import copy

    # Regexes for detecting when tags start and end.
    begin_re = re.compile(r".*?\/\/ BEGIN (.*).*")
    end_re = re.compile(r".*?\/\/ END (.*).*")
    tagged_lines = []   # collected (line, tags, (path, line_num)) tuples
    current_tags = []   # tags currently open
    # Context manager closes the handle (the original never closed it).
    with open(path, "r") as file:
        for line_num, line in enumerate(file):
            # Lines with "//-", "/*-" or "-*/" must not be rendered.
            if "/*-" in line or "-*/" in line or "//-" in line:
                pass
            # Entering a tag: record it.
            elif begin_re.search(line):
                tag = begin_re.search(line).group(1)
                if tag in current_tags:
                    logging.warning("{0}:{1}: \"{2}\" was entered twice without exiting it".format(path, line_num, tag))
                current_tags.append(tag)
            # Leaving a tag: remove it, but only if it was actually open
            # (the original called remove() unconditionally and raised
            # ValueError right after logging the warning).
            elif end_re.search(line):
                tag = end_re.search(line).group(1)
                if tag not in current_tags:
                    logging.warning("{0}:{1}: \"{2}\" was exited, but had not yet been entered".format(path, line_num, tag))
                else:
                    current_tags.remove(tag)
            # Ordinary line: keep it with a snapshot of the open tags.
            else:
                tagged_lines.append((line, copy(current_tags), (path, line_num)))
    # TODO: Error if we left a file with an unclosed tag
    return tagged_lines
def FindMissingReconstruction(X, track_i):
    """
    Find the points that will be newly added.

    A point qualifies when it has not been reconstructed yet
    (X[:, 0] == -1) but is observed in the new image (track_i[:, 0] != -1).

    Parameters
    ----------
    X : ndarray of shape (F, 3)
        3D points
    track_i : ndarray of shape (F, 2)
        2D points of the newly registered image

    Returns
    -------
    new_point : ndarray of shape (F,)
        Boolean indicator of points valid for the new image that are
        not reconstructed yet
    """
    not_reconstructed = X[:, 0] == -1
    observed_in_image = track_i[:, 0] != -1
    return np.logical_and(not_reconstructed, observed_in_image)
def deserialize(iodata):
    """
    Turn IOData back into a Python object of the appropriate kind.

    Resolution order:
    1) the typename is registered in SERIALIZABLE_REGISTRY and that
       class provides a `.deserialize` method
    2) a function `io_serializers.<typename>_deserialize` exists

    Parameters
    ----------
    iodata: IOData

    Returns
    -------
    class instance

    Raises
    ------
    NotImplementedError
        when no deserializer is known for the typename
    """
    typename = iodata.typename
    registry = io_serializers.SERIALIZABLE_REGISTRY
    if typename in registry:
        return registry[typename].deserialize(iodata)
    func = getattr(io_serializers, typename + '_deserialize', None)
    if func is not None:
        return func(iodata)
    raise NotImplementedError("No implementation for converting {} data to Python object.".format(typename))
def _constraints_affecting_columns(self, table_name, columns, type="UNIQUE"):
"""
Gets the names of the constraints affecting the given columns.
If columns is None, returns all constraints of the type on the table.
"""
if self.dry_run:
raise ValueError("Cannot get constraints for columns during a dry run.")
if columns is not None:
columns = set(columns)
db_name = self._get_setting('NAME')
# First, load all constraint->col mappings for this table.
rows = self.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s AND
c.constraint_type = %s
""", [db_name, table_name, type])
# Load into a dict
mapping = {}
for constraint, column in rows:
mapping.setdefault(constraint, set())
mapping[constraint].add(column)
# Find ones affecting these columns
for constraint, itscols in mapping.items():
if itscols == columns or columns is None:
yield constraint | 5,333,869 |
def business_day_offset(dates: DateOrDates, offsets: Union[int, Iterable[int]], roll: str= 'raise', calendars: Union[str, Tuple[str, ...]]=(), week_mask: Optional[str]=None) -> DateOrDates:
    """
    Apply offsets to the dates and move to the nearest business date

    :param dates: The input date or dates
    :param offsets: The number of days by which to adjust the dates
    :param roll: Which direction to roll, in order to get to the nearest business date
    :param calendars: Calendars to use for holidays
    :param week_mask: Which days are considered weekends (defaults to Saturday and Sunday)
    :return: A date (if dates is a single date) or tuple of dates, adjusted by the offsets

    **Examples**

    >>> import datetime as dt
    >>> prev_bus_date = business_day_offset(dt.date.today(), -1, roll='preceding')
    """
    busday_cal = GsCalendar.get(calendars).business_day_calendar(week_mask)
    shifted = np.busday_offset(dates, offsets, roll, busdaycal=busday_cal).astype(dt.date)
    if isinstance(shifted, np.ndarray):
        return tuple(shifted)
    return shifted
def set_user_agent(agent):
    """Set User-Agent in the HTTP requests.

    Overwrites the module-level ``user_agent`` value used for outgoing
    requests.

    :keyword param agent: string
        ex. 'test agent 1'
    """
    global user_agent
    user_agent = agent
def factorial(n):
    """
    Return n! - the factorial of n.

    The doctests below are the contract: 0! == 1.  (The previous
    implementation returned 0 for n == 0, contradicting its own
    doctest.)  Negative inputs still return 0, preserving the old
    behaviour for that case.

    >>> factorial(1)
    1
    >>> factorial(0)
    1
    >>> factorial(3)
    6
    """
    if n < 0:
        return 0
    elif n <= 1:
        # Covers both base cases: 0! == 1 and 1! == 1.
        return 1
    else:
        return n * factorial(n - 1)
def _load_absorption(freqs):
    """Load molar extinction coefficients.

    Data from https://omlc.org/spectra/hemoglobin/summary.html.  The
    table was copied to a text file (surrounding text removed) and
    converted in matlab::

        extinct_coef=importdata('extinction_coef.txt')
        save('extinction_coef.mat', 'extinct_coef')

    Returns data as [[HbO2(freq1), Hb(freq1)],
                     [HbO2(freq2), Hb(freq2)]]
    """
    from scipy.io import loadmat
    from scipy.interpolate import interp1d

    fname = op.join(op.dirname(__file__), '..', '..', 'data',
                    'extinction_coef.mat')
    table = loadmat(fname)['extinct_coef']
    hbo_at = interp1d(table[:, 0], table[:, 1], kind='linear')
    hb_at = interp1d(table[:, 0], table[:, 2], kind='linear')
    ext_coef = np.array([[hbo_at(freqs[0]), hb_at(freqs[0])],
                         [hbo_at(freqs[1]), hb_at(freqs[1])]])
    # Scale factor from the original implementation (approx. ln(10)/10).
    return ext_coef * 0.2303
def merge(config, revisions, **kwargs):
    """
    Merge one or more revisions.

    Takes one or more revisions or "heads" for all heads and merges them into
    a single revision.
    """
    engine = config.registry["sqlalchemy.engine"]
    # Hold the migration lock while alembic rewrites revision history.
    with alembic_lock(engine, config.alembic_config()) as alembic_config:
        alembic.command.merge(alembic_config, revisions, **kwargs)
def lower_volume_listen(vass: VoiceAssistant) -> None:
    """Lower volume when Voice Assistant is listening.

    The ``vass`` argument satisfies the callback signature but is not
    used; the volume change is delegated to ``_lower_volume``.
    """
    _lower_volume()
def pw_sin_relaxation(b, x, w, x_pts, relaxation_side=RelaxationSide.BOTH, pw_repn='INC', safety_tol=1e-10):
    """
    This function creates piecewise relaxations to relax "w=sin(x)" for -pi/2 <= x <= pi/2.

    Parameters
    ----------
    b: pyo.Block
    x: pyomo.core.base.var.SimpleVar or pyomo.core.base.var._GeneralVarData
        The "x" variable in sin(x). The lower bound on x must greater than or equal to
        -pi/2 and the upper bound on x must be less than or equal to pi/2.
    w: pyomo.core.base.var.SimpleVar or pyomo.core.base.var._GeneralVarData
        The auxillary variable replacing sin(x)
    x_pts: list of float
        A list of floating point numbers to define the points over which the piecewise
        representation will be generated. This list must be ordered, and it is expected
        that the first point (x_pts[0]) is equal to x.lb and the last point (x_pts[-1])
        is equal to x.ub
    relaxation_side: minlp.RelaxationSide
        Provide the desired side for the relaxation (OVER, UNDER, or BOTH)
    pw_repn: str
        This must be one of the valid strings for the peicewise representation to use (directly from the Piecewise
        component). Use help(Piecewise) to learn more.
    safety_tol: float
        amount to lift the overestimator or drop the underestimator. This is used to ensure none of the feasible
        region is cut off by error in computing the over and under estimators.

    Returns
    -------
    The (possibly trimmed) list of breakpoints actually used, or None
    when one of the early-exit branches is taken.
    """
    check_var_pts(x, x_pts)
    expr = pyo.sin(x)
    xlb = x_pts[0]
    xub = x_pts[-1]
    # Degenerate interval: x is fixed (or lb == ub), so w is a constant.
    if x.is_fixed() or xlb == xub:
        b.x_fixed_con = pyo.Constraint(expr=w == (pyo.value(expr)))
        return
    # Outside [-pi/2, pi/2] this relaxation is not valid; build nothing.
    if xlb < -np.pi / 2.0:
        return
    if xub > np.pi / 2.0:
        return
    # On [0, pi/2] sin is concave and on [-pi/2, 0] it is convex, so a
    # single univariate piecewise relaxation handles either case.
    if x_pts[0] >= 0:
        pw_univariate_relaxation(b=b, x=x, w=w, x_pts=x_pts, f_x_expr=expr,
                                 shape=FunctionShape.CONCAVE, relaxation_side=relaxation_side, pw_repn=pw_repn)
        return
    if x_pts[-1] <= 0:
        pw_univariate_relaxation(b=b, x=x, w=w, x_pts=x_pts, f_x_expr=expr,
                                 shape=FunctionShape.CONVEX, relaxation_side=relaxation_side, pw_repn=pw_repn)
        return
    # Mixed-sign interval: find the tangent points where the over/under
    # estimators switch between tangent and secant form.
    OE_tangent_x, OE_tangent_slope, OE_tangent_intercept = _compute_sine_overestimator_tangent_point(xlb)
    UE_tangent_x, UE_tangent_slope, UE_tangent_intercept = _compute_sine_underestimator_tangent_point(xub)
    non_piecewise_overestimators_pts = []
    non_piecewise_underestimator_pts = []
    # For one-sided relaxations, breakpoints beyond the tangent point are
    # served by plain (non-piecewise) tangent cuts and removed from x_pts.
    if relaxation_side == RelaxationSide.OVER:
        if OE_tangent_x < xub:
            new_x_pts = [i for i in x_pts if i < OE_tangent_x]
            new_x_pts.append(xub)
            non_piecewise_overestimators_pts = [OE_tangent_x]
            non_piecewise_overestimators_pts.extend(i for i in x_pts if i > OE_tangent_x)
            x_pts = new_x_pts
    elif relaxation_side == RelaxationSide.UNDER:
        if UE_tangent_x > xlb:
            new_x_pts = [xlb]
            new_x_pts.extend(i for i in x_pts if i > UE_tangent_x)
            non_piecewise_underestimator_pts = [i for i in x_pts if i < UE_tangent_x]
            non_piecewise_underestimator_pts.append(UE_tangent_x)
            x_pts = new_x_pts
    b.non_piecewise_overestimators = pyo.ConstraintList()
    b.non_piecewise_underestimators = pyo.ConstraintList()
    for pt in non_piecewise_overestimators_pts:
        b.non_piecewise_overestimators.add(w <= math.sin(pt) + safety_tol + (x - pt) * math.cos(pt))
    for pt in non_piecewise_underestimator_pts:
        b.non_piecewise_underestimators.add(w >= math.sin(pt) - safety_tol + (x - pt) * math.cos(pt))
    # Build consecutive breakpoint intervals; one binary lam[i] selects
    # the active piece (disaggregated x[i]/w[i] per interval).
    intervals = []
    for i in range(len(x_pts)-1):
        intervals.append((x_pts[i], x_pts[i+1]))
    b.interval_set = pyo.Set(initialize=range(len(intervals)), ordered=True)
    b.x = pyo.Var(b.interval_set)
    b.w = pyo.Var(b.interval_set)
    if len(intervals) == 1:
        # Single interval: lam is a fixed parameter, no binary needed.
        b.lam = pyo.Param(b.interval_set, mutable=True)
        b.lam[0].value = 1.0
    else:
        b.lam = pyo.Var(b.interval_set, within=pyo.Binary)
    b.x_lb = pyo.ConstraintList()
    b.x_ub = pyo.ConstraintList()
    b.x_sum = pyo.Constraint(expr=x == sum(b.x[i] for i in b.interval_set))
    b.w_sum = pyo.Constraint(expr=w == sum(b.w[i] for i in b.interval_set))
    b.lam_sum = pyo.Constraint(expr=sum(b.lam[i] for i in b.interval_set) == 1)
    b.overestimators = pyo.ConstraintList()
    b.underestimators = pyo.ConstraintList()
    for i, tup in enumerate(intervals):
        x0 = tup[0]
        x1 = tup[1]
        b.x_lb.add(x0 * b.lam[i] <= b.x[i])
        b.x_ub.add(b.x[i] <= x1 * b.lam[i])
        # Overestimators
        if relaxation_side in {RelaxationSide.OVER, RelaxationSide.BOTH}:
            if x0 < 0 and x1 <= 0:
                # Convex part: the secant overestimates.
                slope = (math.sin(x1) - math.sin(x0)) / (x1 - x0)
                intercept = math.sin(x0) - slope * x0
                b.overestimators.add(b.w[i] <= slope * b.x[i] + (intercept + safety_tol) * b.lam[i])
            elif (x0 < 0) and (x1 > 0):
                # Interval straddles zero: use tangent cuts when the
                # tangent point falls inside the interval, else the secant.
                tangent_x, tangent_slope, tangent_intercept = _compute_sine_overestimator_tangent_point(x0)
                if tangent_x <= x1:
                    b.overestimators.add(b.w[i] <= tangent_slope * b.x[i] + (tangent_intercept + safety_tol) * b.lam[i])
                    b.overestimators.add(b.w[i] <= math.cos(x1) * b.x[i] + (math.sin(x1) - x1 * math.cos(x1) + safety_tol) * b.lam[i])
                else:
                    slope = (math.sin(x1) - math.sin(x0)) / (x1 - x0)
                    intercept = math.sin(x0) - slope * x0
                    b.overestimators.add(b.w[i] <= slope * b.x[i] + (intercept + safety_tol) * b.lam[i])
            else:
                # Concave part: tangents at both endpoints overestimate.
                b.overestimators.add(b.w[i] <= math.cos(x0) * b.x[i] + (math.sin(x0) - x0 * math.cos(x0) + safety_tol) * b.lam[i])
                b.overestimators.add(b.w[i] <= math.cos(x1) * b.x[i] + (math.sin(x1) - x1 * math.cos(x1) + safety_tol) * b.lam[i])
        # Underestimators
        if relaxation_side in {RelaxationSide.UNDER, RelaxationSide.BOTH}:
            if x0 >= 0 and x1 > 0:
                # Concave part: the secant underestimates.
                slope = (math.sin(x1) - math.sin(x0)) / (x1 - x0)
                intercept = math.sin(x0) - slope * x0
                b.underestimators.add(b.w[i] >= slope * b.x[i] + (intercept - safety_tol) * b.lam[i])
            elif (x1 > 0) and (x0 < 0):
                tangent_x, tangent_slope, tangent_intercept = _compute_sine_underestimator_tangent_point(x1)
                if tangent_x >= x0:
                    b.underestimators.add(b.w[i] >= tangent_slope * b.x[i] + (tangent_intercept - safety_tol) * b.lam[i])
                    b.underestimators.add(b.w[i] >= math.cos(x0) * b.x[i] + (math.sin(x0) - x0 * math.cos(x0) - safety_tol) * b.lam[i])
                else:
                    slope = (math.sin(x1) - math.sin(x0)) / (x1 - x0)
                    intercept = math.sin(x0) - slope * x0
                    b.underestimators.add(b.w[i] >= slope * b.x[i] + (intercept - safety_tol) * b.lam[i])
            else:
                # Convex part: tangents at both endpoints underestimate.
                b.underestimators.add(b.w[i] >= math.cos(x0) * b.x[i] + (math.sin(x0) - x0 * math.cos(x0) - safety_tol) * b.lam[i])
                b.underestimators.add(b.w[i] >= math.cos(x1) * b.x[i] + (math.sin(x1) - x1 * math.cos(x1) - safety_tol) * b.lam[i])
    return x_pts
def getAsDateTimeStr(value, offset=0,fmt=_formatTimeStr()):
    """ Return time as a formatted string, e.g. 2004-01-10T00:13:50.000Z.

    Accepts a struct_time/tuple, an epoch number, a string already in
    ``fmt``, or a datetime.  Falls through (returning None implicitly)
    for any other combination.

    NOTE(review): the default ``fmt`` is evaluated once at definition
    time via ``_formatTimeStr()`` -- later changes to that function do
    not affect callers using the default.

    NOTE(review): the ``elif`` datetime branch only runs when ``offset``
    IS a string (the outer ``if`` checked "not isinstance(offset, str)"),
    yet it then passes ``offset`` to ``timedelta`` -- this looks like a
    bug; confirm the intended dispatch before changing it.
    """
    # The sys and types imports below are unused (kept for byte-identity).
    import sys,time
    import types
    from datetime import datetime
    if (not isinstance(offset,str)):
        # struct_time / time tuple: format directly (offset ignored).
        if isinstance(value, (tuple, time.struct_time,)):
            return time.strftime(fmt, value)
        # Epoch seconds: shift by offset and format as UTC.
        if isinstance(value, (int, float,)):
            secs = time.gmtime(value+offset)
            return time.strftime(fmt, secs)
        # String: round-trip through strptime to normalize; on any parse
        # failure fall back to "now + offset" (bare except is pre-existing).
        if isinstance(value, str):
            try:
                value = time.strptime(value, fmt)
                return time.strftime(fmt, value)
            except:
                secs = time.gmtime(time.time()+offset)
                return time.strftime(fmt, secs)
    elif (isinstance(value,datetime)):
        from datetime import timedelta
        # offset is interpreted as days by timedelta here.
        if (offset is not None):
            value += timedelta(offset)
        ts = time.strftime(fmt, value.timetuple())
        return ts
def doc_to_tokenlist_no_sents(doc):
    """ Serialize a spaCy Doc object into a flat python list of token dicts
    (no grouping by sentences).

    :param doc: spacy Doc element
    :return: a list of token objects/dicts
    """
    result = []
    for x in doc:
        token = {}
        # Bug fix: the original referenced undefined names `y` and
        # `parts` in this branch (NameError on the first token); the
        # loop variable is `x` and the target dict is `token`.
        if x.has_extension('tokenId'):
            token['tokenId'] = x._.tokenId
        else:
            token['tokenId'] = x.i
        token['value'] = x.text
        token['lemma'] = x.lemma_
        token['pos'] = x.pos_
        token['type'] = x.tag_
        token['dep'] = x.dep_
        token['shape'] = x.shape_
        token['is_alpha'] = x.is_alpha
        token['ent_iob'] = x.ent_iob_
        token['iob'] = format_iob_tag(x)
        token['ent_type'] = x.ent_type_
        result.append(token)
    return result
def _applychange(raw_text: Text, content_change: t.TextDocumentContentChangeEvent):
    """Apply a single content change to *raw_text* in place.

    :param raw_text: mutable sequence (list) of characters
    :param content_change: change event carrying range, range_length and text
    :return: the same *raw_text* object, mutated
    """
    start = content_change.range.start
    range_length = content_change.range_length
    index = _find_position(raw_text, start)
    # Delete the replaced span with one slice operation instead of
    # popping one character at a time (each pop shifts the tail, O(n)).
    del raw_text[index:index + range_length]
    # Splice the replacement characters in at the same position
    # (slice assignment inserts each character, like the old
    # reversed-insert loop, but in a single C-level operation).
    raw_text[index:index] = content_change.text
    return raw_text
def get_engine():
    """Return a SQLAlchemy engine built from FLAGS.sql_connection."""
    url = sqlalchemy.engine.url.make_url(FLAGS.sql_connection)
    kwargs = {
        "pool_recycle": FLAGS.sql_idle_timeout,
        "echo": False,
    }
    # SQLite back ends get NullPool instead of the default pool.
    if "sqlite" in url.drivername:
        kwargs["poolclass"] = sqlalchemy.pool.NullPool
    return sqlalchemy.create_engine(FLAGS.sql_connection, **kwargs)
def ccw(p0, p1, p2):
    """
    Judge whether the p0p2 vector is ccw relative to the p0p1 vector.

    Return value map:
        1: p0p2 is ccw to p0p1 (angle to x axis bigger)
        0: p0p2 and p0p1 lie on the same line
        -1: p0p2 is cw to p0p1 (angle to x axis smaller)

    Args:
        p0: base point; index 0 and 1 hold x and y. [x, y, ...]
        p1: first point; index 0 and 1 hold x and y. [x, y, ...]
        p2: second point; index 0 and 1 hold x and y. [x, y, ...]

    Returns:
        int: judgement value -1 or 0 or 1
    """
    return Comp(p0).compare_angle(p2, p1)
def get_ffmpeg_folder():
    # type: () -> str
    """Return the path to the folder that contains the ffmpeg executable."""
    ffmpeg_bin_dir = 'C:/ffmpeg/bin'
    return ffmpeg_bin_dir
def test_wrong_code(clean_collection: Callable[[], None]) -> None:
    """Testing when a wrong status code is given."""
    # Seed the collection with one phrase document (status unset).
    sample_res = [
        {
            "bag": "sample",
            "count": 1,
            "status": None,
            "_key": "dasd165asd46",
        }
    ]
    sample_res = pd.DataFrame(sample_res)
    integrate_phrase_data(sample_res)
    # Status "5" is invalid, so the endpoint is expected to raise.
    with pytest.raises(HTTPException):
        response = client.post("http://127.0.0.1:8000/api/status-updater/sample/5")
        # NOTE(review): this assert is unreachable when the line above
        # raises (which pytest.raises requires) -- confirm whether it
        # should live outside the `with` block instead.
        assert response.status_code == 400
def test_add_characteristic():
    """Test adding characteristics to a service."""
    service = Service(uuid1(), "Test Service")
    chars = get_chars()
    service.add_characteristic(*chars)
    # The service must hold the characteristics in insertion order.
    for added, expected in zip(service.characteristics, chars):
        assert added == expected
    # Re-adding an existing characteristic must not create a duplicate.
    service.add_characteristic(chars[0])
    assert len(service.characteristics) == 2
def decode_typeinfo(typeinfo):
    """Invoke c++filt to decode a typeinfo symbol into a type name."""
    try:
        demangled = subprocess.check_output(["c++filt", typeinfo], stdin=subprocess.DEVNULL)
    except FileNotFoundError:
        # Raised when c++filt (package binutils) is missing, and with
        # "wine python" on Linux systems.
        raise CxxFiltNotFoundException
    prefix = b"typeinfo name for "
    if not demangled.startswith(prefix):
        raise ValueError(f"Unexpected c++filt output for {typeinfo!r}: {demangled!r}")
    return demangled[len(prefix):].decode("ascii").strip()
def get_nums(image):
    """Get the digit strings from an image using pytesseract.

    The extracted text is cleaned so that only digits, newlines, spaces
    and '.' remain, then split on whitespace.

    :param image: input image
    :type image: cv2 image
    :return: extracted number strings
    :rtype: list
    """
    # pytesseract config: psm 6 (uniform text block), oem 3, and a
    # whitelist restricting recognition to digits and '/'.
    config = ('--psm 6 --oem 3 -c tessedit_char_whitelist=0123456789/')
    # extract text and keep only digits, separators and dots
    text = pytesseract.image_to_string(image, config=config)
    text = ''.join([c for c in text if c.isdigit() or c in ['\n', ' ', '.']])
    # return as a list of whitespace-separated tokens
    return text.split()
def create_bag_of_vocabulary_words():
    """Return the list of words which can be conceived during the game.

    The words are read from ``hangman/vocabulary.txt`` and split on any
    whitespace (spaces and newlines).

    :return: words from the vocabulary file, in file order
    :rtype: list
    """
    # "with" guarantees the file is closed even if reading raises,
    # unlike the previous explicit open()/close() pair.
    with open("./hangman/vocabulary.txt") as file_object:
        # str.split() with no argument splits on runs of any whitespace,
        # which matches the old per-line word splitting exactly.
        return file_object.read().split()
def assert_process_tensor_file(filename: Text) -> None:
    """Check that *filename* holds valid .processTensor data.

    Parameters
    ----------
    filename: str
        Path to the file to validate.

    Raises
    ------
    `AssertionError`:
        If the file content is not of the correct .processTensor form
        [see tempo.assert_process_tensor_file() for more details].
    """
    # Load the serialized dictionary and delegate the structural checks.
    assert_process_tensor_dict(load_object(filename))
def kato_ranking_candidates(identifier: Identifier, params=None):
    """Rank candidates with the method proposed by Kato, S. and Kano, M..

    Candidates are the noun phrases in the sentence where the identifier
    appeared first.

    Args:
        identifier (Identifier)
        params (dict)
    Returns:
        Definition_list (List[Definition])
    """
    if params is None:
        params = {
            'sigma_d': math.sqrt(12 / math.log(2)),
            'sigma_s': 2 / math.sqrt(math.log(2)),
            'alpha': 1,
            'beta': 1,
            'gamma': 0.1,
            'eta': 1,
        }
    definitions = []
    for cand in identifier.candidates:
        # Sentence distance from the identifier's first appearance.
        sent_dist = cand.included_sentence.id - identifier.sentences[0].id
        # Word distance between variable and candidate; minimum is 1.
        word_dist = cand.word_count_btwn_var_cand + 1
        # Term frequency of the candidate within its own sentence.
        tf = cand.candidate_count_in_sentence / len(cand.included_sentence.replaced.strip())
        # Gaussian-like decay terms for word and sentence distance.
        decay_word = math.exp(-1 / 2 * (word_dist ** 2 - 1) / params['sigma_d'] ** 2)
        decay_sent = math.exp(-1 / 2 * (sent_dist ** 2 - 1) / params['sigma_s'] ** 2)
        # Weighted average of the four features.
        weighted = (params['alpha'] * decay_word
                    + params['beta'] * decay_sent
                    + params['gamma'] * tf
                    + params['eta'] * cand.score_match_character)
        weighted /= (params['alpha'] + params['beta']
                     + params['gamma'] + params['eta'])
        definitions.append(
            Definition(
                definition=cand.text,
                score=weighted,
                params=params))
    if not definitions:
        return [Definition(definition='')]
    return sorted(definitions, key=lambda d: d.score, reverse=True)
def _get_bag_of_pos_with_dependency(words, index):
    """Return pos list surrounding index

    Collects dependency-related features of ``words[index]``: its governor,
    the governor's governor (ancestor), siblings and their children,
    its own children, and grandchildren.

    Args:
        words (list): stanfordnlp word list object having pos attributes.
        index (int): target index
    Return:
        pos_list (List[str]): xpos format string list
    """
    pos_list = []
    def _get_governor(_index, name):
        # Return (governor index in `words`, [feature string tagged with `name`]).
        governor_list = []
        if int(words[_index].governor) == 0:
            # case _index word has no governor (it is the sentence root)
            return -1, governor_list
        # `governor`/`index` are sentence-local, so translate the offset
        # into a position in the flat `words` list.
        governor_index = _index + (int(words[_index].governor) - int(words[_index].index))
        if governor_index < len(words):
            governor = words[governor_index]
            governor_list.append(_get_word_feature(governor) + '_' + name)
        else:
            # Governor falls outside the word list; emit a placeholder feature.
            governor_list.append(NONE_DEPENDENCY + '_' + name)
        return governor_index, governor_list
    def _get_children(_index, name):
        # Return (child indices in `words`, [feature strings tagged with `name`]).
        children = []
        child_list = []
        # Sentence boundaries: every word with sentence-local index 1 starts
        # a new sentence in the flat list.
        roots = [(i, w) for i, w in enumerate(words) if int(w.index) == 1]
        start_index = 0
        end_index = len(words) - 1
        # Narrow [start_index, end_index] to the sentence containing _index.
        for i, w in roots:
            if i <= _index:
                start_index = i
            else:
                end_index = i - 1
                break
        for i, w in enumerate(words[start_index:end_index + 1]):
            if int(w.governor) == int(words[_index].index):
                children.append(start_index + i)
                child_list.append(_get_word_feature(w) + '_' + name)
        return children, child_list
    # add governor
    governor_index, governor_list = _get_governor(index, 'governor')
    if 0 <= governor_index < len(words):
        # case index word has a governor
        pos_list.extend(governor_list)
        if int(words[governor_index].governor) != 0:
            # case the governor itself has a governor
            # add ancestor
            _, ancestor_list = _get_governor(governor_index, 'ancestor')
            pos_list.extend(ancestor_list)
        # add sibling (other children of the governor, excluding `index` itself)
        # NOTE(review): siblings.index(index) raises ValueError if `index` is
        # not among the governor's children — confirm this cannot happen.
        siblings, sibling_list = _get_children(governor_index, 'sibling')
        i_index = siblings.index(index)
        del sibling_list[i_index]
        del siblings[i_index]
        pos_list.extend(sibling_list)
        # add sibling list (children of each sibling)
        for i in siblings:
            # sibling_children (the indices) is intentionally unused here.
            sibling_children, sibling_child_list = _get_children(i, 'sibling_child')
            pos_list.extend(sibling_child_list)
    # add child
    children, child_list = _get_children(index, 'child')
    pos_list.extend(child_list)
    for i in children:
        # grandchildren (the indices) is intentionally unused here.
        grandchildren, grandchild_list = _get_children(i, 'grandchild')
        pos_list.extend(grandchild_list)
    return pos_list
def fix_time_individual(df):
    """
    1. pandas.apply a jit function that zero-pads the time field
    2. concat date + time
    3. change to np.datetime64
    """
    @jit
    def _pad_time(raw):
        # Left-pad the raw value to 8 digits (HHMMSSff), then insert
        # the ":" / "." separators.
        padded = "0" * (8 - len(str(raw))) + str(raw)
        return padded[:2] + ":" + padded[2:4] + ":" + padded[4:6] + "." + padded[6:]
    stamps = df["date"] + " " + df["time"].apply(_pad_time)
    return stamps.astype(np.datetime64)
def write_sigmf(data_file, data, buffer=None, append=True):
    """
    Pack and write binary array to file, with SigMF spec.
    Parameters
    ----------
    data_file : str
        Path of the file the packed data is written to.
    data : ndarray
        Binary array to be written to file.
    buffer : ndarray, optional
        Pinned memory buffer to use when copying data from GPU.
    append : bool, optional
        Append to file if created.
    Returns
    -------
    None
    """
    # Pack the array into its binary representation, then write it out.
    packed = pack_bin(data)
    write_bin(data_file, packed, buffer, append)
def prueba_flujo(regiones, recursos_flujo, flujos, flujos_err, dimension_mesh,
                 n_dimension=100, error=1):
    """Plot each flux row against its error-perturbed counterpart.

    Equivalent to f12_V_dV_Prueba(): for every flux in ``flujos``, the
    reference curve and the perturbed curve are drawn on the same axes,
    so a discrepancy is visible as two distinct curves.

    Output:
        Figures are saved under graficas/<env>/flujos. That folder must
        exist beforehand so saving the figures does not fail.
    """
    # Label used in titles/filenames; note the trailing space in 'sin error '.
    error_t = 'sin error ' if error == 1 else 'con error'
    # x axis values 1..dimension_mesh, for plotting purposes only.
    N = range(1, dimension_mesh + 1)
    for n_flujo in flujos.index:
        # Reg.1, Reg.2, ... Reg.n — map flux name "V<k>" to region "Reg.<k>".
        index_reg_actual = "Reg." + n_flujo.split('V')[1]
        fig = plt.Figure()
        ax = fig.add_subplot(111)
        fig.suptitle(f"Con nr={n_dimension}- Prueba del flujo {error_t} de la {index_reg_actual}-{regiones.loc[index_reg_actual, 'eps']}.")
        # Perturbed flux in red, reference flux in black; the check passes
        # when both curves coincide (only one curve is visible).
        ax.plot(N, flujos_err.loc[n_flujo], 'r', label='con error')
        ax.plot(N, flujos.loc[n_flujo], 'k', label='sin error')
        ax.legend(loc='lower right')
        filename = 'graficas/' + conf.data['env']['path'] + '/flujos/' +\
            'control ' + error_t + " " + n_flujo + ".png"
        # Render through a non-interactive canvas and write the PNG.
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        # Console trace of the flux being processed.
        print(f"* {n_flujo}={recursos_flujo.loc[n_flujo,'calcular_str']}")
def print_logs(redis_client, threads_stopped, job_id):
    """Prints log messages from workers on all of the nodes.

    Subscribes to the Redis log channel and forwards each message for this
    driver's job to the worker stdstream dispatcher, until signalled to stop.

    Args:
        redis_client: A client to the primary Redis shard.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
        job_id (JobID): The id of the driver's job
    """
    # Subscribe to the channel workers publish their log lines on.
    pubsub_client = redis_client.pubsub(ignore_subscribe_messages=True)
    pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)
    localhost = services.get_node_ip_address()
    try:
        # Keep track of the number of consecutive log messages that have been
        # received with no break in between. If this number grows continually,
        # then the worker is probably not able to process the log messages as
        # rapidly as they are coming in.
        num_consecutive_messages_received = 0
        while True:
            # Exit if we received a signal that we should stop.
            if threads_stopped.is_set():
                return
            msg = pubsub_client.get_message()
            if msg is None:
                # No message pending: reset the streak and poll again shortly.
                num_consecutive_messages_received = 0
                threads_stopped.wait(timeout=0.01)
                continue
            num_consecutive_messages_received += 1
            # Warn every 100th uninterrupted message.
            # NOTE(review): the "> 0" clause is redundant after the increment.
            if (num_consecutive_messages_received % 100 == 0
                    and num_consecutive_messages_received > 0):
                logger.warning(
                    "The driver may not be able to keep up with the "
                    "stdout/stderr of the workers. To avoid forwarding logs "
                    "to the driver, use 'ray.init(log_to_driver=False)'.")
            data = json.loads(ray._private.utils.decode(msg["data"]))
            # Don't show logs from other drivers.
            if data["job"] and ray._private.utils.binary_to_hex(
                    job_id.binary()) != data["job"]:
                continue
            data["localhost"] = localhost
            global_worker_stdstream_dispatcher.emit(data)
    except (OSError, redis.exceptions.ConnectionError) as e:
        logger.error(f"print_logs: {e}")
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        pubsub_client.close()
def convolve_fft(data, kernel, kernel_fft=False, return_fft=False):
    """
    Convolve data with a kernel.
    This is inspired by astropy.convolution.convolve_fft, but
    stripped down to what's needed for the expected application. That
    has the benefit of cutting down on the execution time, but limits
    its use.
    Beware:
        - ``data`` and ``kernel`` must have the same shape.
        - For the sum of all pixels in the convolved image to be the
          same as the input data, the kernel must sum to unity.
        - Padding is never added by default.
    Args:
        data (`numpy.ndarray`_):
            Data to convolve.
        kernel (`numpy.ndarray`_):
            The convolution kernel, which must have the same shape as
            ``data``. If ``kernel_fft`` is True, this is the FFT of
            the kernel image; otherwise, this is the direct kernel
            image with the center of the kernel at the center of the
            array.
        kernel_fft (:obj:`bool`, optional):
            Flag that the provided ``kernel`` array is actually the
            FFT of the kernel, not its direct image.
        return_fft (:obj:`bool`, optional):
            Flag to return the FFT of the convolved image, instead of
            the direct image.
    Returns:
        `numpy.ndarray`_: The convolved image, or its FFT, with the
        same shape as the provided ``data`` array.
    Raises:
        ValueError:
            Raised if ``data`` and ``kernel`` do not have the same
            shape or if any of their values are not finite.
    """
    if data.shape != kernel.shape:
        raise ValueError('Data and kernel must have the same shape.')
    # Report non-finite counts in the exception itself, replacing the
    # stray debug prints that used to precede the raise.
    n_bad_data = np.logical_not(np.isfinite(data)).sum()
    n_bad_kernel = np.logical_not(np.isfinite(kernel)).sum()
    if n_bad_data or n_bad_kernel:
        raise ValueError('Data and kernel must both have valid values '
                         f'({n_bad_data} non-finite in data, '
                         f'{n_bad_kernel} non-finite in kernel).')
    datafft = np.fft.fftn(data)
    # ifftshift moves the kernel center to the origin, as fftn expects.
    kernfft = kernel if kernel_fft else np.fft.fftn(np.fft.ifftshift(kernel))
    fftmult = datafft * kernfft
    return fftmult if return_fft else np.fft.ifftn(fftmult).real
def segment_nifti(fname_image, folder_model, fname_prior=None, param=None):
    """
    Segment a nifti file.
    :param fname_image: str: Filename of the image to segment.
    :param folder_model: str: Folder that encloses the deep learning model.
    :param fname_prior: str: Filename of a previous segmentation used here as a prior.
    :param param: dict: Dictionary of user's parameters.
    :return: fname_out: str: Output filename. If directory does not exist, it will be created.
    """
    user_param = {} if param is None else param
    nii_seg = imed.utils.segment_volume(folder_model, fname_image, fname_prior)
    # Postprocessing: merge defaults < model metadata < user parameters.
    metadata = sct.deepseg.models.get_metadata(folder_model)
    merged = {**DEFAULTS, **metadata, **user_param}
    nii_seg = postprocess(nii_seg, merged)
    # Output filename: an explicit 'o' option wins; otherwise derive
    # "<image>_seg.nii.gz" from the input name.
    if 'o' in merged:
        fname_out = merged['o']
    else:
        fname_out = sct.utils.splitext(fname_image)[0] + '_seg.nii.gz'
    # Create the destination folder when it is missing.
    path_out = os.path.dirname(fname_out)
    if path_out and not os.path.exists(path_out):
        os.makedirs(path_out)
    nib.save(nii_seg, fname_out)
    return fname_out
def invoke_alert(browser):
    """Fixture that triggers the sample alert and dismisses it afterwards."""
    browser.element('#alert_button').click()
    yield
    # Teardown: dismiss the alert if the test left it open.
    if browser.alert_present:
        browser.get_alert().dismiss()
def test_another_find():
    """Triangulate the search code: make sure the implementation can
    perform more than one search, at least."""
    tree = NoAho()
    for keyword in ("Python", "PLT Scheme"):
        tree.add(keyword)
    tree.compile()
    # Match in the middle of the haystack.
    mid_hit = tree.find_short("I am learning both Python and PLT Scheme")  # NOQA
    assert mid_hit == (19, 25, None)
    # Match at the very start of the haystack.
    start_hit = tree.find_short("PLT Scheme is an interesting language.")
    assert start_hit == (0, 10, None)
def test_mnnb():
    """
    Multinomial Naive Bayes classification.
    This checks that MultinomialNB implements fit and predict and returns
    correct values for a simple toy dataset.
    """
    # Exercise both the dense and the sparse representation of X2.
    for X in (X2, scipy.sparse.csr_matrix(X2)):
        clf = MultinomialNB()
        # Negative feature counts must be rejected.
        assert_raises(ValueError, clf.fit, -X, y2)
        # Check the ability to predict the learning set.
        assert_array_equal(clf.fit(X, y2).predict(X), y2)
        # Verify that np.log(clf.predict_proba(X)) gives the same results as
        # clf.predict_log_proba(X)
        assert_array_almost_equal(np.log(clf.predict_proba(X)),
                                  clf.predict_log_proba(X), 8)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.