| content | sha1 | id |
|---|---|---|
def map_cosh(process):
"""
"""
return map_default(process, 'cosh', 'apply') | fe853e23f8008bc5e767ef5af8b4efc6a04de407 | 24,500 |
from functools import wraps
from inspect import isfunction
def fs(func):
"""
This is the decorator which performs recursive AST substitution of
functions, and optional JIT-compilation using `numba`_.
This must only be used on functions with positional parameters
defined; this must not be used on functions with keyword parameters.
This decorator modifies the original function (and any nested
function calls) by replacing any functions passed in using keyword
arguments. It replaces them in the AST and returns a new function
object with a new code object that calls the replacement functions
instead.
For example, a function hierarchy such as:
>>> def calculate(x):
... return x * x
>>> def my_func(x):
... a = calculate(x)
... return a / 2
will take the input variable `x`, square it, and then halve the
result:
>>> my_func(6)
18.0
Six squared is 36, divided by two is 18.
If you wanted to replace the `calculate` function to
return a different calculation, you could use this `@fs`
decorator:
>>> @fs
... def my_func(x):
... a = calculate(x)
... return a / 2
Now the `my_func` callable is able to accept keyword arguments,
which it will replace recursively throughout its hierarchy.
If you wanted to change the `calculate` in this function to:
>>> def cube(x):
... return x * x * x
then after applying the `@fs` decorator you can do this:
>>> my_func(6, calculate=cube)
108.0
Six cubed is 216, divided by two is 108.0.
This parametrisation can be decided at runtime - every time a
new keyword argument is passed in, it generates a new function
object with a new code object.
To store the new function object instead of executing it, pass
`return_callable=True` to the decorated function:
>>> new_func = my_func(6, calculate=cube, return_callable=True)
>>> # At this point the new function has **not** been called.
>>> new_func(6)
108.0
"""
_func = func
replaced = {}
@wraps(func)
def fs_wrapper(*args, **kwargs):
return_callable = kwargs.pop('return_callable', None)
# This deliberately mutates the kwargs.
# We don't want to have a fs-decorated function
# as a kwarg to another, so we undecorate it first.
for k, v in kwargs.items():
if hasattr(v, 'undecorated'):
kwargs[k] = v.undecorated
# TODO : ensure jit function returned
if not kwargs:
return _func(*args)
# TODO : remove fastats keywords such as 'debug'
# before passing into AstProcessor
new_funcs = {}
for v in kwargs.values():
if isfunction(v) and v.__name__ not in kwargs:
inner_replaced = {}
processor = AstProcessor(v, kwargs, inner_replaced, new_funcs)
proc = processor.process()
new_funcs[v.__name__] = convert_to_jit(proc)
new_kwargs = {}
for k, v in kwargs.items():
if new_funcs.get(v.__name__):
new_kwargs[k] = new_funcs[v.__name__]
kwargs.update(new_kwargs)
processor = AstProcessor(_func, kwargs, replaced, new_funcs)
proc = processor.process()
if return_callable:
return convert_to_jit(proc)
return convert_to_jit(proc)(*args)
fs_wrapper.undecorated = _func
return fs_wrapper | 832a770e68501edb6df93c3fb7ef512be64f4e43 | 24,501 |
def cleanline(line):
"""去除讀入資料中的換行符與 ',' 結尾
"""
line = line.strip('\n')
line = line.strip(',')
return line | a4149663e2c3966c5d9be22f4aa009109e4a67ca | 24,502 |
import logging
import numpy as np
from onnx.helper import make_node
def convert_contrib_box_nms(node, **kwargs):
"""Map MXNet's _contrib_box_nms operator to ONNX
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
input_dtypes = get_input_dtypes(node, kwargs)
dtype = input_dtypes[0]
#dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
opset_version = kwargs['opset_version']
if opset_version < 11:
raise AttributeError('ONNX opset 11 or greater is required to export this operator')
overlap_thresh = float(attrs.get('overlap_thresh', '0.5'))
valid_thresh = float(attrs.get('valid_thresh', '0'))
topk = int(attrs.get('topk', '-1'))
coord_start = int(attrs.get('coord_start', '2'))
score_index = int(attrs.get('score_index', '1'))
id_index = int(attrs.get('id_index', '-1'))
force_suppress = attrs.get('force_suppress', 'True')
background_id = int(attrs.get('background_id', '-1'))
in_format = attrs.get('in_format', 'corner')
out_format = attrs.get('out_format', 'corner')
center_point_box = 0 if in_format == 'corner' else 1
if topk == -1:
topk = 2**31-1
if in_format != out_format:
        raise NotImplementedError('box_nms does not currently support in_format != out_format')
if background_id != -1:
raise NotImplementedError('box_nms does not currently support background_id != -1')
if id_index != -1 or force_suppress == 'False':
        logging.warning('box_nms: id_index != -1 and/or force_suppress == False detected. '
                        'However, due to ONNX limitations, boxes of different categories will NOT '
                        'be exempted from suppression. This might lead to different behavior than '
                        'native MXNet.')
create_tensor([coord_start], name+'_cs', kwargs['initializer'])
create_tensor([coord_start+4], name+'_cs_p4', kwargs['initializer'])
create_tensor([score_index], name+'_si', kwargs['initializer'])
create_tensor([score_index+1], name+'_si_p1', kwargs['initializer'])
create_tensor([topk], name+'_topk', kwargs['initializer'])
create_tensor([overlap_thresh], name+'_ot', kwargs['initializer'], dtype=np.float32)
create_tensor([valid_thresh], name+'_vt', kwargs['initializer'], dtype=np.float32)
create_tensor([-1], name+'_m1', kwargs['initializer'])
create_tensor([-1], name+'_m1_f', kwargs['initializer'], dtype=dtype)
create_tensor([0], name+'_0', kwargs['initializer'])
create_tensor([1], name+'_1', kwargs['initializer'])
create_tensor([2], name+'_2', kwargs['initializer'])
create_tensor([3], name+'_3', kwargs['initializer'])
create_tensor([0, 1, -1], name+'_scores_shape', kwargs['initializer'])
create_tensor([0, 0, 1, 0], name+'_pad', kwargs['initializer'])
create_tensor([0, -1], name+'_bat_spat_helper', kwargs['initializer'])
create_const_scalar_node(name+"_0_s", np.int64(0), kwargs)
create_const_scalar_node(name+"_1_s", np.int64(1), kwargs)
nodes = [
make_node('Shape', [input_nodes[0]], [name+'_shape']),
make_node('Shape', [name+'_shape'], [name+'_dim']),
make_node('Sub', [name+'_dim', name+'_2'], [name+'_dim_m2']),
make_node('Slice', [name+'_shape', name+'_dim_m2', name+'_dim'], [name+'_shape_last2']),
make_node('Concat', [name+'_m1', name+'_shape_last2'], [name+'_shape_3d'], axis=0),
make_node('Reshape', [input_nodes[0], name+'_shape_3d'], [name+'_data_3d']),
make_node('Slice', [name+'_data_3d', name+'_cs', name+'_cs_p4', name+'_m1'],
[name+'_boxes']),
make_node('Slice', [name+'_data_3d', name+'_si', name+'_si_p1', name+'_m1'],
[name+'_scores_raw']),
make_node('Reshape', [name+'_scores_raw', name+'_scores_shape'], [name+'_scores']),
make_node('Shape', [name+'_scores'], [name+'_scores_shape_actual']),
make_node('NonMaxSuppression',
[name+'_boxes', name+'_scores', name+'_topk', name+'_ot', name+'_vt'],
[name+'_nms'], center_point_box=center_point_box),
make_node('Slice', [name+'_nms', name+'_0', name+'_3', name+'_m1', name+'_2'],
[name+'_nms_sliced']),
make_node('GatherND', [name+'_data_3d', name+'_nms_sliced'], [name+'_candidates']),
make_node('Pad', [name+'_candidates', name+'_pad', name+'_m1_f'], [name+'_cand_padded']),
make_node('Shape', [name+'_nms'], [name+'_nms_shape']),
make_node('Slice', [name+'_nms_shape', name+'_0', name+'_1'], [name+'_cand_cnt']),
make_node('Squeeze', [name+'_cand_cnt'], [name+'_cc_s'], axes=[0]),
make_node('Range', [name+'_0_s', name+'_cc_s', name+'_1_s'], [name+'_cand_indices']),
make_node('Slice', [name+'_scores_shape_actual', name+'_0', name+'_3', name+'_m1',
name+'_2'], [name+'_shape_bat_spat']),
make_node('Slice', [name+'_shape_bat_spat', name+'_1', name+'_2'], [name+'_spat_dim']),
make_node('Expand', [name+'_cand_cnt', name+'_shape_bat_spat'], [name+'_base_indices']),
make_node('ScatterND', [name+'_base_indices', name+'_nms_sliced', name+'_cand_indices'],
[name+'_indices']),
make_node('TopK', [name+'_indices', name+'_spat_dim'], [name+'_indices_sorted', name+'__'],
largest=0, axis=-1, sorted=1),
make_node('Gather', [name+'_cand_padded', name+'_indices_sorted'], [name+'_gather']),
make_node('Reshape', [name+'_gather', name+'_shape'], [name+'0'])
]
return nodes | 22bc975bc35ebe8e50f4749f981859460f695596 | 24,503 |
from textwrap import fill  # assumption: the original module may define its own fill() helper
def fill76(text):
"""Any text. Wraps the text to fit in 76 columns."""
return fill(text, 76) | 953ed87d8cfbee7a10c752082783469e866e8540 | 24,504 |
def current_object(cursor_offset, line):
"""If in attribute completion, the object on which attribute should be
looked up."""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
matches = current_object_re.finditer(word)
s = ""
for m in matches:
if m.end(1) + start < cursor_offset:
if s:
s += "."
s += m.group(1)
if not s:
return None
return LinePart(start, start + len(s), s) | cba608811a2081b382a2c522bb9d0651569739dd | 24,505 |
from io import BytesIO
import os
import urllib3
from tqdm import tqdm
def download_zip(url: str) -> BytesIO:
"""Download data from url."""
logger.warning('start chromium download.\n'
'Download may take a few minutes.')
# disable warnings so that we don't need a cert.
# see https://urllib3.readthedocs.io/en/latest/advanced-usage.html for more
urllib3.disable_warnings()
with urllib3.PoolManager(cert_reqs='CERT_NONE') as http:
# Get data from url.
# set preload_content=False means using stream later.
data = http.request('GET', url, preload_content=False)
try:
total_length = int(data.headers['content-length'])
except (KeyError, ValueError, AttributeError):
total_length = 0
process_bar = tqdm(
total=total_length,
file=os.devnull if NO_PROGRESS_BAR else None,
)
# 10 * 1024
_data = BytesIO()
for chunk in data.stream(10240):
_data.write(chunk)
process_bar.update(len(chunk))
process_bar.close()
logger.warning('\nchromium download done.')
return _data | 4b7ff38a529084633969ce95ea0c5bfca3fd7542 | 24,506 |
def _is_match(option, useful_options, find_perfect_match):
"""
returns True if 'option' is between the useful_options
"""
for useful_option in useful_options:
if len(option) == sum([1 for o in option if o in useful_option]):
if not find_perfect_match or len(set(useful_option)) == len(set(option)):
return True
return False | bff60e1320744c16747926071afb3ee02022c55c | 24,507 |
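# A behavior sketch for _is_match: an option matches when each of its
# elements appears in some useful option; with find_perfect_match the two
# element sets must also be of equal size.
assert _is_match(['a', 'b'], [['a', 'b', 'c']], False)
assert not _is_match(['a', 'b'], [['a', 'b', 'c']], True)
assert _is_match(['a', 'b'], [['b', 'a']], True)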
import pysam
def pass_aligned_filtering(left_read, right_read, counter):
"""
Test if the two reads pass the additional filters such as check for soft-clipped end next to the variant region,
or overlapping region between the two reads.
:param left_read: the left (or 5') most read
:param right_read: the right (or 3') most read
:param counter: Counter to report the number of reads filtered.
:return: True or False
"""
# in CIGAR tuples the operation is coded as an integer
# https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment.cigartuples
if left_read.cigartuples[-1][0] == pysam.CSOFT_CLIP or right_read.cigartuples[0][0] == pysam.CSOFT_CLIP:
counter['Soft-clipped alignments'] += 1
elif left_read.reference_end > right_read.reference_start:
counter['Overlapping alignment'] += 1
elif left_read.is_reverse != right_read.is_reverse:
counter['Unexpected orientation'] += 1
else:
return True
return False | 78849f12541510216407b7b40fb29a0befc920d7 | 24,508 |
from collections import OrderedDict
import future.utils
import os
def load_metaconfig(file_path):
""" Loads a single metaconfig file and returns variable to expression dictionary """
definitions = OrderedDict()
if os.path.isfile(file_path):
with open(file_path) as f:
for line in [l.strip(os.linesep).strip() for l in f.readlines()]:
# skip comments
if line.startswith("#"):
continue
# skip empty lines
if not line:
continue
# parse line
try:
var_name, var_expression = parse_metaconfig_line(line)
definitions[var_name] = var_expression
except ValueError as e:
future.utils.raise_from(
ValueError("Cannot parse metaconfig; file=" + file_path), e)
return definitions | 6222cf9b589ea4162c080008fd25c0df78607a07 | 24,509 |
import pandas as pd
from wonambi import Dataset
from wonambi.detect import DetectSlowWave
def detect_slow_oscillation(data: Dataset, algo: str = 'AASM/Massimini2004', start_offset: float = None) -> pd.DataFrame:
    """
    Detect slow wave (slow oscillation) locations for each channel in a recording.
    :param data: wonambi Dataset containing the recording to analyse
    :param algo: which algorithm to use to detect slow waves. See wonambi methods: https://wonambi-python.github.io/gui/methods.html
    :param start_offset: offset between the first epoch and the recording start - onset is measured from this
    :return: dataframe of slow-wave locations, with columns for chan, onset, duration and other
        properties, sorted by onset; None if no events were detected
    """
detection = DetectSlowWave(algo)
sos_detected = detection(data)
sos_df = pd.DataFrame(sos_detected.events, dtype=float)
col_map = {'start': 'onset',
'end': None,
'trough_time': 'trough_time',
'zero_time': 'zero_time',
'peak_time': 'peak_time',
'trough_val': 'trough_uV',
'peak_val': 'peak_uV',
'dur': 'duration',
'ptp': None,
'chan': 'chan'}
cols_to_keep = set(sos_df.columns) - set([k for k, v in col_map.items() if v is None])
sos_df = sos_df.loc[:, cols_to_keep]
sos_df.columns = [col_map[k] for k in sos_df.columns]
if sos_df.shape[0] == 0:
return None #empty df
sos_df['peak_time'] = sos_df['peak_time'] - sos_df['onset']
sos_df['trough_time'] = sos_df['trough_time'] - sos_df['onset']
sos_df['zero_time'] = sos_df['zero_time'] - sos_df['onset']
sos_df['description'] = 'slow_osc'
if start_offset is not None:
sos_df['onset'] = sos_df['onset'] - start_offset
sos_df = sos_df.loc[sos_df['onset']>=0,:]
return sos_df.sort_values('onset') | a241196b56b6fb426fc9949ee82fca40c0c854f2 | 24,510 |
def _map_channels_to_measurement_lists(snirf):
"""Returns a map of measurementList index to measurementList group name."""
prefix = "measurementList"
data_keys = snirf["nirs"]["data1"].keys()
mls = [k for k in data_keys if k.startswith(prefix)]
def _extract_channel_id(ml):
return int(ml[len(prefix) :])
return {_extract_channel_id(ml): ml for ml in mls} | d6d83c01baec5f345d58fff8a0d0107a40b8db37 | 24,511 |
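# A minimal sketch: a plain nested dict stands in for the h5py file handle,
# since only .keys() lookups are used above.
snirf_like = {"nirs": {"data1": {"measurementList1": None,
                                 "measurementList2": None,
                                 "dataTimeSeries": None}}}
assert _map_channels_to_measurement_lists(snirf_like) == {
    1: "measurementList1", 2: "measurementList2"}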
def is_not_applicable_for_questionnaire(
value: QuestionGroup, responses: QuestionnaireResponses
) -> bool:
"""Returns true if the given group's questions are not answerable for the given responses.
That is, for all the questions in the given question group, only not
applicable answers have been provided for the provided questionnaire
response.
"""
return value.is_not_applicable_for_responses(responses) | a534ca5560193c81e18f4028bd032b4a8e5adf8a | 24,512 |
import numpy as np
def _chebnodes(a, b, n):
    """Chebyshev nodes of rank n on the interval [a,b]."""
if not a < b:
raise ValueError('Lower bound must be less than upper bound.')
return np.array([1/2*((a+b)+(b-a)*np.cos((2*k-1)*np.pi/(2*n))) for k in range(1,n+1)]) | 4378468aac0642f15b64dcdee75dcb970aab11f7 | 24,513 |
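# A quick usage sketch: on [-1, 1] the nodes reduce to the classic
# Chebyshev points of the first kind, cos((2k-1)*pi/(2n)).
nodes = _chebnodes(-1.0, 1.0, 4)
assert np.allclose(nodes, np.cos((2 * np.arange(1, 5) - 1) * np.pi / 8))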
import sys
def delcolumn(particles, columns, metadata):
"""
With dataframes, stating dataframe1 = dataframe2 only creates
a reference. Therefore, we must create a copy if we want to leave
the original dataframe unmodified.
"""
nocolparticles = particles.copy()
#Loop through each passed column to delete them
for c in columns:
#Check if the column doesn't exist.
#Consider doing the check in decisiontree.py
if c not in nocolparticles:
print("\n>> Error: the column \"" + c + "\" does not exist.\n")
sys.exit()
"""
The .drop can be used to drop a whole column.
The "1" tells .drop that it is the column axis that we want to drop
inplace means we want the dataframe to be modified instead of creating an assignment
"""
nocolparticles.drop(c, 1, inplace=True)
        #We need to remove that column header too. The headers are stored
        #in metadata[3].
metadata[3].remove(c)
return(nocolparticles, metadata) | cff587aa460d0478f750a3323b66e20d9c52f85a | 24,514 |
import numpy as np
def Rx_matrix(theta):
"""Rotation matrix around the X axis"""
return np.array([
[1, 0, 0],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]
]) | c7b689b9e6042aa84689003e2de6ffff2229eb69 | 24,515 |
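# Sanity-check sketch: a rotation matrix is orthogonal with determinant 1,
# and Rx(pi/2) maps the y axis onto the z axis.
R = Rx_matrix(np.pi / 2)
assert np.allclose(R @ R.T, np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)
assert np.allclose(R @ np.array([0.0, 1.0, 0.0]), [0.0, 0.0, 1.0])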
import carla
def spawn_actor(world: carla.World, blueprint: carla.ActorBlueprint, spawn_point: carla.Transform,
attach_to: carla.Actor = None, attachment_type=carla.AttachmentType.Rigid) -> carla.Actor:
"""Tries to spawn an actor in a CARLA simulator.
:param world: a carla.World instance.
:param blueprint: specifies which actor has to be spawned.
:param spawn_point: where to spawn the actor. A transform specifies the location and rotation.
:param attach_to: whether the spawned actor has to be attached (linked) to another one.
:param attachment_type: the kind of the attachment. Can be 'Rigid' or 'SpringArm'.
:return: a carla.Actor instance.
"""
actor = world.try_spawn_actor(blueprint, spawn_point, attach_to, attachment_type)
if actor is None:
raise ValueError(f'Cannot spawn actor. Try changing the spawn_point ({spawn_point.location}) to something else.')
return actor | 83d29b21e76f52f1928009e22cee6a635ef4d025 | 24,516 |
def partition(lst, size):
"""Partition list @lst into eveni-sized lists of size @size."""
return [lst[i::size] for i in range(size)] | af7071a5aac36a51f449f153df145d9218808a4a | 24,517 |
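# Behavior sketch: partition() stripes elements round-robin into `size`
# sublists; it does not chunk the list into blocks of length `size`.
assert partition([1, 2, 3, 4, 5, 6], 2) == [[1, 3, 5], [2, 4, 6]]
assert partition(list(range(5)), 3) == [[0, 3], [1, 4], [2]]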
from django.utils.safestring import mark_safe
def form_errors_json(form=None):
    """Return the form errors as JSON."""
if form:
return mark_safe(dict(form.errors.items())) # noqa: S703, S308
return {} | d9748d5ce4578855775af24d1a758030ad3fa432 | 24,518 |
def get_semantic_ocs_version_from_config():
"""
Returning OCS semantic version from config.
Returns:
semantic_version.base.Version: Object of semantic version for OCS.
"""
return get_semantic_version(config.ENV_DATA["ocs_version"], True) | 346aa6aacff9a758cf06b4a3dc4977e98e9ca501 | 24,519 |
import logging
import os
import os.path as osp
from typing import Mapping, Optional

import sacred
def train_rl(
*,
_run: sacred.run.Run,
_seed: int,
total_timesteps: int,
normalize: bool,
normalize_kwargs: dict,
reward_type: Optional[str],
reward_path: Optional[str],
rollout_save_final: bool,
rollout_save_n_timesteps: Optional[int],
rollout_save_n_episodes: Optional[int],
policy_save_interval: int,
policy_save_final: bool,
) -> Mapping[str, float]:
"""Trains an expert policy from scratch and saves the rollouts and policy.
Checkpoints:
At applicable training steps `step` (where step is either an integer or
"final"):
- Policies are saved to `{log_dir}/policies/{step}/`.
- Rollouts are saved to `{log_dir}/rollouts/{step}.pkl`.
Args:
total_timesteps: Number of training timesteps in `model.learn()`.
normalize: If True, then rescale observations and reward.
normalize_kwargs: kwargs for `VecNormalize`.
reward_type: If provided, then load the serialized reward of this type,
wrapping the environment in this reward. This is useful to test
whether a reward model transfers. For more information, see
`imitation.rewards.serialize.load_reward`.
reward_path: A specifier, such as a path to a file on disk, used by
reward_type to load the reward model. For more information, see
`imitation.rewards.serialize.load_reward`.
rollout_save_final: If True, then save rollouts right after training is
finished.
rollout_save_n_timesteps: The minimum number of timesteps saved in every
file. Could be more than `rollout_save_n_timesteps` because
trajectories are saved by episode rather than by transition.
Must set exactly one of `rollout_save_n_timesteps`
and `rollout_save_n_episodes`.
rollout_save_n_episodes: The number of episodes saved in every
file. Must set exactly one of `rollout_save_n_timesteps` and
`rollout_save_n_episodes`.
        policy_save_interval: The number of training timesteps between
            intermediate policy saves. If the argument is nonpositive, then
            don't save intermediate checkpoints.
policy_save_final: If True, then save the policy right after training is
finished.
Returns:
The return value of `rollout_stats()` using the final policy.
"""
custom_logger, log_dir = common.setup_logging()
rollout_dir = osp.join(log_dir, "rollouts")
policy_dir = osp.join(log_dir, "policies")
os.makedirs(rollout_dir, exist_ok=True)
os.makedirs(policy_dir, exist_ok=True)
venv = common.make_venv(
post_wrappers=[lambda env, idx: wrappers.RolloutInfoWrapper(env)],
)
callback_objs = []
if reward_type is not None:
reward_fn = load_reward(reward_type, reward_path, venv)
venv = RewardVecEnvWrapper(venv, reward_fn)
callback_objs.append(venv.make_log_callback())
logging.info(f"Wrapped env in reward {reward_type} from {reward_path}.")
vec_normalize = None
if normalize:
venv = vec_normalize = VecNormalize(venv, **normalize_kwargs)
if policy_save_interval > 0:
save_policy_callback = serialize.SavePolicyCallback(policy_dir, vec_normalize)
save_policy_callback = callbacks.EveryNTimesteps(
policy_save_interval,
save_policy_callback,
)
callback_objs.append(save_policy_callback)
callback = callbacks.CallbackList(callback_objs)
rl_algo = rl.make_rl_algo(venv)
rl_algo.set_logger(custom_logger)
rl_algo.learn(total_timesteps, callback=callback)
# Save final artifacts after training is complete.
if rollout_save_final:
save_path = osp.join(rollout_dir, "final.pkl")
sample_until = rollout.make_sample_until(
rollout_save_n_timesteps,
rollout_save_n_episodes,
)
rollout.rollout_and_save(save_path, rl_algo, venv, sample_until)
if policy_save_final:
output_dir = os.path.join(policy_dir, "final")
serialize.save_stable_model(output_dir, rl_algo, vec_normalize)
# Final evaluation of expert policy.
return train.eval_policy(rl_algo, venv) | fdc8c2203752038313cae79b077310b61db3b5c2 | 24,520 |
from typing import List
import pandas as pd
def get_non_ntile_cols(frame: pd.DataFrame) -> List[str]:
"""
:param frame: data frame to get columns of
    :return: all columns in the frame that don't contain 'Ntile'
"""
return [col for col in frame.columns if 'Ntile' not in col] | 93970b576381aa668ce75d77f03793380445d9e4 | 24,521 |
import datetime
import ciso8601
from typing import Any, Optional
def deserialize_date(value: Any) -> Optional[datetime.datetime]:
"""A flexible converter for str -> datetime.datetime"""
if value is None:
return None
if isinstance(value, datetime.datetime):
return value
if isinstance(value, str):
# datetime.datetime.fromisoformat(...) can't parse Notion's dates,
# and, anyway, this is faster
return ciso8601.parse_datetime(value)
raise TypeError(f'Invalid type {type(value)} for date property') | 15cdd07ad4bd5873d8ed01d3eb9ce3b4e780ca44 | 24,522 |
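# Usage sketch (requires the ciso8601 package): ISO-8601 strings, including
# a trailing 'Z', parse to timezone-aware datetimes; None passes through.
parsed = deserialize_date('2021-03-04T05:06:07Z')
assert parsed.year == 2021 and parsed.tzinfo is not None
assert deserialize_date(None) is None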
def intersect(x1, x2, y1, y2, a1, a2, b1, b2):
""" Return True if (x1,x2,y1,y2) rectangles intersect. """
return overlap(x1, x2, a1, a2) & overlap(y1, y2, b1, b2) | 1e9c530b1d5e085df073b8c32d874ef457e2246a | 24,523 |
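# `overlap` is not defined in this snippet; a 1-D interval test consistent
# with how intersect() uses it might look like this (an assumption, not
# necessarily the original helper):
def overlap(lo1, hi1, lo2, hi2):
    """Return True if intervals [lo1, hi1] and [lo2, hi2] overlap."""
    return (lo1 <= hi2) & (lo2 <= hi1)

assert intersect(0, 2, 0, 2, 1, 3, 1, 3)        # overlapping squares
assert not intersect(0, 1, 0, 1, 2, 3, 2, 3)    # disjoint squares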
import functools
import sys
import logging
def BestEffort(func):
"""Decorator to log and dismiss exceptions if one if already being handled.
Note: This is largely a workaround for the lack of support of exception
chaining in Python 2.7, this decorator will no longer be needed in Python 3.
Typical usage would be in |Close| or |Disconnect| methods, to dismiss but log
any further exceptions raised if the current execution context is already
handling an exception. For example:
class Client(object):
def Connect(self):
# code to connect ...
@exc_util.BestEffort
def Disconnect(self):
# code to disconnect ...
client = Client()
try:
client.Connect()
except:
client.Disconnect()
raise
If an exception is raised by client.Connect(), and then a second exception
is raised by client.Disconnect(), the decorator will log the second exception
and let the original one be re-raised.
Otherwise, in Python 2.7 and without the decorator, the second exception is
the one propagated to the caller; while information about the original one,
usually more important, is completely lost.
Note that if client.Disconnect() is called in a context where an exception
is *not* being handled, then any exceptions raised within the method will
get through and be passed on to callers for them to handle in the usual way.
The decorator can also be used on cleanup functions meant to be called on
a finally block, however you must also include an except-raise clause to
properly signal (in Python 2.7) whether an exception is being handled; e.g.:
@exc_util.BestEffort
def cleanup():
# do cleanup things ...
try:
process(thing)
except:
raise # Needed to let cleanup know if an exception is being handled.
finally:
cleanup()
Failing to include the except-raise block has the same effect as not
including the decorator at all. Namely: exceptions during |cleanup| are
raised and swallow any prior exceptions that occurred during |process|.
"""
@functools.wraps(func)
def Wrapper(*args, **kwargs):
exc_type = sys.exc_info()[0]
if exc_type is None:
# Not currently handling an exception; let any errors raise exceptions
# as usual.
func(*args, **kwargs)
else:
# Otherwise, we are currently handling an exception, dismiss and log
# any further cascading errors. Callers are responsible to handle the
# original exception.
try:
func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
logging.exception(
'While handling a %s, the following exception was also raised:',
exc_type.__name__)
return Wrapper | dec08ab8fc1d367203df2e6c2f0507bf880ba503 | 24,524 |
from typing import List
import numpy as np
def recording_to_chunks(fingerprints: np.ndarray,
samples_per_chunk: int) -> List[np.ndarray]:
"""Breaks fingerprints of a recording into fixed-length chunks."""
chunks = []
for pos in range(0, len(fingerprints), samples_per_chunk):
chunk = fingerprints[pos:pos + samples_per_chunk]
# exclude partial chunks (at end)
if chunk.shape[0] == samples_per_chunk:
chunks.append(chunk)
return chunks | eae1a3b882e545a8dc08f029ddb5113dcdf1bca4 | 24,525 |
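# A shape sketch: 10 fingerprint frames in chunks of 4 give two full
# chunks; the trailing partial chunk of 2 frames is dropped.
fingerprints = np.zeros((10, 32))  # (n_frames, fingerprint_dim), illustrative
chunks = recording_to_chunks(fingerprints, samples_per_chunk=4)
assert len(chunks) == 2 and all(c.shape == (4, 32) for c in chunks)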
from itertools import chain
def coset_enumeration_c(fp_grp, Y):
"""
>>> from sympy.combinatorics.free_group import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_c
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
>>> C = coset_enumeration_c(f, [x])
>>> C.table
[[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]
"""
# Initialize a coset table C for < X|R >
C = CosetTable(fp_grp, Y)
X = fp_grp.generators
R = fp_grp.relators()
A = C.A
# replace all the elements by cyclic reductions
R_cyc_red = [rel.identity_cyclic_reduction() for rel in R]
R_c = list(chain.from_iterable((rel.cyclic_conjugates(), (rel**-1).cyclic_conjugates()) \
for rel in R_cyc_red))
R_set = set()
for conjugate in R_c:
R_set = R_set.union(conjugate)
# a list of subsets of R_c whose words start with "x".
R_c_list = []
for x in C.A:
r = set([word for word in R_set if word[0] == x])
R_c_list.append(r)
R_set.difference_update(r)
for w in Y:
C.scan_and_fill_f(0, w)
for x in A:
C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
i = 0
while i < len(C.omega):
alpha = C.omega[i]
i += 1
for x in C.A:
if C.table[alpha][C.A_dict[x]] is None:
C.define_f(alpha, x)
C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
return C | 0efeacfeeb2b20275378c58a3aacaed07ade57be | 24,526 |
import numpy as np
# `rf` is assumed to be sigpy.mri.rf (providing dzrf and multiband) in the original module.
def slr_pulse(
num=N, time_bw=TBW,
ptype=PULSE_TYPE, ftype=FILTER_TYPE,
d_1=PBR, d_2=SBR,
root_flip=ROOT_FLIP,
multi_band = MULTI_BAND,
n_bands = N_BANDS,
phs_type = PHS_TYPE,
band_sep = BAND_SEP
):
"""Use Shinnar-Le Roux algorithm to generate pulse"""
if root_flip is False:
complex_pulse = rf.dzrf(n=num, tb=time_bw, ptype=ptype, ftype=ftype, d1=d_1, d2=d_2)
amp_arr = complex_pulse
else:
amp_arr, b_rootflip = slr_rootflip(ROOT_FLIP_ANGLE)
phs_arr = np.zeros(num)
for idx in range(num):
if amp_arr[idx] < 0:
phs_arr[idx] = 180
else:
phs_arr[idx] = 0
if multi_band is True:
amp_arr = rf.multiband.mb_rf(amp_arr, n_bands, band_sep, phs_type)
# prepare pulse for instrument, which takes absolute only
# cast negative values to positive
amp_arr_abs = np.abs(amp_arr)
# shift amplitude such that the lowest value is 0
amp_arr_abs = amp_arr_abs - amp_arr_abs.min()
# fold back phase when it exceeds 360
phs_arr = phs_arr % 360
freq_arr = (np.diff(phs_arr)/num)/360
return amp_arr, freq_arr, phs_arr, amp_arr_abs | 0986b6ea8adffd90c108308365ebf3172a6459d0 | 24,527 |
import numpy as np
def policy_options(state, Q_omega, epsilon=0.1):
""" Epsilon-greedy policy used to select options """
if np.random.uniform() < epsilon:
return np.random.choice(range(Q_omega.shape[1]))
else:
return np.argmax(Q_omega[state]) | 66e36b81fdec06822ebb958611deca23bd64191b | 24,528 |
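# Usage sketch: with epsilon=0 the policy is purely greedy, so it must
# return the argmax option of the Q_omega row for the given state.
Q_omega = np.array([[0.1, 0.9, 0.3],
                    [0.5, 0.2, 0.4]])
assert policy_options(0, Q_omega, epsilon=0.0) == 1
assert policy_options(1, Q_omega, epsilon=0.0) == 0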
import tempfile
import time
def test_ps_s3_creation_triggers_on_master():
""" test object creation s3 notifications in using put/copy/post on master"""
if skip_push_tests:
return SkipTest("PubSub push tests don't run in teuthology")
hostname = get_ip()
proc = init_rabbitmq()
if proc is None:
return SkipTest('end2end amqp tests require rabbitmq-server installed')
zones, _ = init_env(require_ps=False)
realm = get_realm()
zonegroup = realm.master_zonegroup()
# create bucket
bucket_name = gen_bucket_name()
bucket = zones[0].create_bucket(bucket_name)
topic_name = bucket_name + TOPIC_SUFFIX
# start amqp receiver
exchange = 'ex1'
task, receiver = create_amqp_receiver_thread(exchange, topic_name)
task.start()
# create s3 topic
endpoint_address = 'amqp://' + hostname
endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker'
topic_conf = PSTopicS3(zones[0].conn, topic_name, zonegroup.name, endpoint_args=endpoint_args)
topic_arn = topic_conf.set_config()
# create s3 notification
notification_name = bucket_name + NOTIFICATION_SUFFIX
topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn,
'Events': ['s3:ObjectCreated:Put', 's3:ObjectCreated:Copy']
}]
s3_notification_conf = PSNotificationS3(zones[0].conn, bucket_name, topic_conf_list)
response, status = s3_notification_conf.set_config()
assert_equal(status/100, 2)
# create objects in the bucket using PUT
key = bucket.new_key('put')
key.set_contents_from_string('bar')
# create objects in the bucket using COPY
bucket.copy_key('copy', bucket.name, key.name)
# create objects in the bucket using multi-part upload
fp = tempfile.TemporaryFile(mode='w')
fp.write('bar')
fp.close()
uploader = bucket.initiate_multipart_upload('multipart')
fp = tempfile.NamedTemporaryFile(mode='r')
uploader.upload_part_from_file(fp, 1)
uploader.complete_upload()
fp.close()
print('wait for 5sec for the messages...')
time.sleep(5)
# check amqp receiver
keys = list(bucket.list())
receiver.verify_s3_events(keys, exact_match=True)
# cleanup
stop_amqp_receiver(receiver, task)
s3_notification_conf.del_config()
topic_conf.del_config()
for key in bucket.list():
key.delete()
# delete the bucket
zones[0].delete_bucket(bucket_name)
clean_rabbitmq(proc) | bb0770cd80968d8878f0a3c379f5ce2da9863c8f | 24,529 |
import math
from torch import nn
def weights_init(init_type='gaussian'):
"""
from https://github.com/naoto0804/pytorch-inpainting-with-partial-conv/blob/master/net.py
"""
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find(
'Linear') == 0) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, "Unsupported initialization: {}".format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun | d65dee3744daf59a2db832b5c4866bee2131b4d6 | 24,530 |
from functools import wraps
import streamlit as st  # assumption: `st` is streamlit, which provides header/subheader/markdown
def title(default=None, level="header"):
"""
A decorator that add an optional title argument to component.
"""
def decorator(fn):
loc = get_argument_default(fn, "where", None) or st
@wraps(fn)
def wrapped(
*args,
title=default,
level=level,
header=None,
subheader=None,
where=loc,
**kwargs,
):
if header:
where.header(str(header))
elif subheader:
where.subheader(str(subheader))
elif title:
if level == "header":
where.header(str(title))
elif level == "subheader":
where.subheader(str(title))
elif level == "bold":
where.markdown(f"**{title}**")
else:
raise ValueError(f"invalid title level: {level!r}")
kwargs["where"] = where
return fn(*args, **kwargs)
return wrapped
return decorator | c11a3ee7ccff5e6934fba857d438743464dd653e | 24,531 |
import os
def get_ext(path):
"""
Given a path return the file extension.
**Positional Arguments:**
path: The file whose path we assess
"""
return os.path.splitext(path)[1] | f088e63bde8924fc2bac50950e05384878f637b7 | 24,532 |
def _rect_to_css(rect):
"""
Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order
:param rect: a dlib 'rect' object
:return: a plain tuple representation of the rect in (top, right, bottom, left) order
"""
return rect.top(), rect.right(), rect.bottom(), rect.left() | e3439cc0eb30186b8fc905f518ff21883175b3e2 | 24,533 |
def client():
"""Client Fixture."""
client_obj = Client(base_url=BASE_URL)
return client_obj | bac2ccd038eb587b4dd67ce0cc63bef63af9c365 | 24,534 |
import numpy as np
def encode_one_hot(s):
    """One-hot encode all characters of the given string."""
    encoded = []
    for c in s:
        x = np.zeros(INPUT_VOCAB_SIZE)
        index = char_indices[c]
        x[index] = 1
        encoded.append(x)
    return encoded | e4bc2b02cea4dbf74346cbd672cb58246abe4edc | 24,535 |
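# A usage sketch; INPUT_VOCAB_SIZE and char_indices are module-level globals
# in the original code, so the illustrative values below are assumptions.
INPUT_VOCAB_SIZE = 4
char_indices = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
vectors = encode_one_hot('ab')
assert vectors[0].tolist() == [1.0, 0.0, 0.0, 0.0]
assert vectors[1].tolist() == [0.0, 1.0, 0.0, 0.0]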
import datetime
from django.utils import timezone
def date_to_datetime(date, time_choice='min'):
"""
Convert date to datetime.
:param date: date to convert
:param time_choice: max or min
:return: datetime
"""
choice = getattr(datetime.datetime, 'min' if time_choice == 'min' else 'max').time()
return timezone.make_aware(
datetime.datetime.combine(date, choice),
timezone.get_current_timezone(),
) | 9e429bf71288ffc3bd56b682f2e24fceb0ff49d4 | 24,536 |
def standardize_cell(atoms, cell_type):
""" Standardize the cell of the atomic structure.
Parameters:
atoms: `ase.Atoms`
Atomic structure.
cell_type: { 'standard', 'standard_no_symmetries', 'primitive', None}
Starting from the input cell, creates a standard cell according to same standards
before the supercell generation. \n
`cell_type` = 'standard' creates a standard conventional cell.
See :py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell`. \n
`cell_type` = 'standard_no_symmetries' creates a standard conventional cell without using symmetries.
See :py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell_no_sym`. \n
`cell_type` = 'primitive' creates a standard primitive cell.
See :py:mod:`ai4materials.utils.utils_crystals.get_primitive_std_cell`. \n
        `cell_type` = `None` does not create any cell.
It simply uses the unit cell as input for the supercell generation.
Returns:
`ase.Atoms`
Atomic structure in the standard cell of the selected type.
.. codeauthor:: Angelo Ziletti <angelo.ziletti@gmail.com>
"""
if cell_type == 'standard':
atoms = get_conventional_std_cell(atoms)
elif cell_type == 'standard_no_symmetries':
atoms = get_conventional_std_cell_no_sym(atoms)
elif cell_type == 'primitive':
atoms = get_primitive_std_cell(atoms)
elif cell_type is None:
pass
else:
raise ValueError("Unrecognized cell_type value.")
return atoms | 4005cf7afd6f4992f3cc271608f0b8c84649d6b1 | 24,537 |
def get_biggan_stats():
""" precomputed biggan statistics """
center_of_mass = [137 / 255., 127 / 255.]
object_size = [213 / 255., 210 / 255.]
return center_of_mass, object_size | 6576e13b7a68369e90b2003171d946453bafd212 | 24,538 |
def get_input_var_value(soup, var_id):
"""Get the value from text input variables.
Use when you see this HTML format:
<input id="wired_config_var" ... value="value">
Args:
soup (soup): soup pagetext that will be searched.
var_id (string): The id of a var, used to find its value.
Returns:
(string): The value of the variable
"""
try:
var_value = soup.find('input', {'id': var_id}).get('value')
return var_value
except AttributeError:
print('\nERROR: <' + var_id + '> not found!\nPagesoup:\n\n', soup)
raise LookupError | 5a9dd65a285c62e0e5e79584858634cb7b0ece75 | 24,539 |
import os
def _create_file(path):
"""Opens file in write mode. It also creates intermediate directories if
necessary.
"""
    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
return open(path, 'w') | 448e26c24c48bf654402a9fe35ef28eb7906dd31 | 24,540 |
from typing import List
from typing import Any
import logging
def get_top(metric: str, limit: int) -> List[List[Any]]:
"""Get top stocks based on metric from sentimentinvestor [Source: sentimentinvestor]
Parameters
----------
metric : str
Metric to get top tickers for
    limit : int
        Number of tickers to get
Returns
-------
List[List[Any]]
List of tickers and scores
"""
data = sentipy.sort(metric, limit)
table: List[List[Any]] = []
for index, stock in enumerate(data):
if not hasattr(stock, "symbol") or not hasattr(stock, metric):
logging.warning("data for stock %s is incomplete, ignoring", index + 1)
table.append([])
else:
            table.append([index + 1, stock.symbol, getattr(stock, metric)])
return table | c203fcbe24ccf3d0c2253961d36ec7b556c8651c | 24,541 |
import numpy as np
def test_add_single_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test the addition of SEPTs"""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
result = tensor2 + tensor1
assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
assert (
result.max_vals == 2 * upper_bound
).all(), "Addition of two SEPTs results in incorrect max_val"
assert (
result.min_vals == 2 * lower_bound
).all(), "Addition of two SEPTs results in incorrect min_val"
# Try with negative values
tensor3 = SEPT(
child=reference_data * -1.5,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
result = tensor3 + tensor1
assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
assert (
result.max_vals == tensor3.max_vals + tensor1.max_vals
).all(), "SEPT + SEPT results in incorrect max_val"
assert (
result.min_vals == tensor3.min_vals + tensor1.min_vals
).all(), "SEPT + SEPT results in incorrect min_val"
return None | 48531867a74d7267ae65d4350e82d26cae8bef44 | 24,542 |
def prob_get_expected_after_certain_turn(turns_later: int, turns_remain: int,
tiles_expect: int) -> float:
"""The probability of get expected tile after `turns_later` set of turns.
:param turns_later: Get the expected tile after `turns_after` set of turns
:param turns_remain: The remaining turns
:param tiles_expect: The number of expected tiles
:return: Probability
"""
tiles_remain = 4 * turns_remain + 14
if tiles_expect > turns_later:
greater = tiles_remain - turns_later
less = tiles_remain - tiles_expect
else:
greater = tiles_remain - tiles_expect
less = tiles_remain - turns_later
    numerator, denominator = 1, 1
    # numerator accumulates less! / (tiles_remain - turns_later - tiles_expect)!;
    # denominator accumulates tiles_remain! / greater!, so j must start at tiles_remain
    i, j = less, tiles_remain
while i > tiles_remain - turns_later - tiles_expect:
numerator = numerator * i
i = i - 1
while j > greater:
denominator = denominator * j
j = j - 1
return numerator / denominator | 6575c22302b73b58b2bd9aad5068ffe723fb5fe3 | 24,543 |
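# A worked sanity check: with 1 remaining turn there are 4*1 + 14 = 18
# unseen tiles, so a single expected tile survives one draw with
# probability 17/18.
assert abs(prob_get_expected_after_certain_turn(1, 1, 1) - 17 / 18) < 1e-12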
def get_gpcr_calpha_distances(pdb, xtc, gpcr_name, res_dbnum,
first_frame=0, last_frame=-1, step=1):
"""
Load distances between all selected atoms.
Parameters
----------
pdb : str
File name for the reference file (PDB or GRO format).
xtc : str
File name for the trajectory (xtc format).
gpcr_name : str
Name of the GPCR as in the GPCRdb.
res_dbnum : list
Relative GPCR residue numbers.
first_frame : int, default=0
First frame to return of the features. Zero-based.
last_frame : int, default=-1
Last frame to return of the features. Zero-based.
step : int, default=1
Subsampling step width when reading the frames.
Returns
-------
feature_names : list of str
Names of all C-alpha distances.
feature_labels : list of str
Labels containing GPCRdb numbering of the residues.
features_data : numpy array
Data for all C-alpha distances [Å].
"""
# Select residues from relative residue numbers
resnums, reslabels = select_gpcr_residues(gpcr_name, res_dbnum)
# Create the selection string
selection = 'name CA and resid'
for rn in resnums:
selection += ' %i'%rn
# Create the GPCRdb distance labels
distlabels = []
k = -1
for i in range(len(reslabels)):
for j in range(i + 1, len(reslabels)):
k += 1
_dl = 'CA DIST: %s - %s'%(reslabels[i], reslabels[j])
distlabels.append(_dl)
# Calculate the distances and get the sequential names
names, data = get_atom_self_distances(pdb, xtc,
selection=selection,
first_frame=first_frame,
last_frame=last_frame,
step=step)
return names, distlabels, data | 3465246d610510f2976813fcc69c394e98452292 | 24,544 |
def main(yumrepomap=None,
**kwargs):
"""
Checks the distribution version and installs yum repo definition files
that are specific to that distribution.
:param yumrepomap: list of dicts, each dict contains two or three keys.
'url': the url to the yum repo definition file
'dist': the linux distribution to which the repo should
be installed. one of 'amazon', 'redhat',
'centos', or 'all'. 'all' is a special keyword
that maps to all distributions.
'epel_version': optional. match the major version of the
epel-release that applies to the
system. one of '6' or '7'. if not
specified, the repo is installed to all
systems.
Example: [ {
'url' : 'url/to/the/yum/repo/definition.repo',
'dist' : 'amazon' or 'redhat' or 'centos' or 'all',
'epel_version' : '6' or '7',
},
]
"""
scriptname = __file__
print('+' * 80)
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters...')
print(' yumrepomap = {0}'.format(yumrepomap))
if not yumrepomap:
print('`yumrepomap` is empty. Nothing to do!')
return None
if not isinstance(yumrepomap, list):
raise SystemError('`yumrepomap` must be a list!')
# Read first line from /etc/system-release
release = None
try:
with open(name='/etc/system-release', mode='rb') as f:
release = f.readline().strip()
except Exception as exc:
raise SystemError('Could not read /etc/system-release. '
'Error: {0}'.format(exc))
# Search the release file for a match against _supported_dists
m = _match_supported_dist.search(release.lower())
if m is None:
# Release not supported, exit with error
raise SystemError('Unsupported OS distribution. OS must be one of: '
'{0}.'.format(', '.join(_supported_dists)))
# Assign dist,version from the match groups tuple, removing any spaces
dist,version = (x.translate(None, ' ') for x in m.groups())
# Determine epel_version
epel_version = None
if 'amazon' == dist:
epel_version = _amazon_epel_versions.get(version, None)
else:
epel_version = version.split('.')[0]
if epel_version is None:
raise SystemError('Unsupported OS version! dist = {0}, version = {1}.'
.format(dist, version))
for repo in yumrepomap:
# Test whether this repo should be installed to this system
if repo['dist'] in [dist, 'all'] and repo.get('epel_version', 'all') \
in [epel_version, 'all']:
# Download the yum repo definition to /etc/yum.repos.d/
url = repo['url']
repofile = '/etc/yum.repos.d/{0}'.format(url.split('/')[-1])
download_file(url, repofile)
print('{0} complete!'.format(scriptname))
print('-' * 80) | 1caed81f53cd0dc2e1963aa1b53bc48c1ef71dd3 | 24,545 |
import tvm
def zero_pad1d(inputs, padding=0):
"""Zero padding for 1d tensor
Args:
-----------------------------
inputs : tvm.te.tensor.Tensor
shape [batch, channel, length]
padding: (optional:0) int or tuple
-----------------------------
Returns:
-----------------------------
tvm.te.tensor.Tensor
shape [batch, channel, padded_length]
-----------------------------
"""
padding = (padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
assert_print(isinstance(padding, tuple), "type(padding)={}".format(type(padding)))
assert_print(len(padding) == 2)
padding_zero = tvm.tir.expr.const(0, inputs.dtype)
batch_size, in_channel, in_len = inputs.shape
return tvm.te.compute(
(batch_size, in_channel, in_len + padding[0] + padding[1]),
lambda b, c, l: tvm.te.if_then_else(
tvm.te.all(l >= padding[0], l < in_len + padding[0]),
inputs[b, c, l - padding[0]],
padding_zero
)
) | 8135ffd8447d5fbc84988953a2bfca14b51d3f83 | 24,546 |
import torch
import math
def gelu(x):
"""gelu activation function copied from pytorch-pretrained-BERT."""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | 35c0f45f904b2381acc95f5a2b4f28cec9fa924b | 24,547 |
import pandas as pd
import requests
def stock_fund_stock_holder(stock: str = "600004") -> pd.DataFrame:
"""
    Sina Finance - equity and shareholders - fund holdings
    https://vip.stock.finance.sina.com.cn/corp/go.php/vCI_FundStockHolder/stockid/600004.phtml
    :param stock: stock code
    :type stock: str
    :return: Sina Finance fund-holdings data
:rtype: pandas.DataFrame
"""
url = f"https://vip.stock.finance.sina.com.cn/corp/go.php/vCI_StockStructure/stockid/{stock}.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[13].iloc[:, :5]
temp_df.columns = [*range(5)]
big_df = pd.DataFrame()
need_range = temp_df[temp_df.iloc[:, 0].str.find("截止日期") == 0].index.tolist() + [len(temp_df)]
for i in range(len(need_range)-1):
truncated_df = temp_df.iloc[need_range[i]: need_range[i + 1], :]
truncated_df = truncated_df.dropna(how="all")
temp_truncated = truncated_df.iloc[2:, :]
temp_truncated.reset_index(inplace=True, drop=True)
concat_df = pd.concat([temp_truncated, truncated_df.iloc[0, 1:]], axis=1)
concat_df.columns = truncated_df.iloc[1, :].tolist() + ["截止日期"]
concat_df["截止日期"] = concat_df["截止日期"].fillna(method="ffill")
concat_df["截止日期"] = concat_df["截止日期"].fillna(method="bfill")
big_df = pd.concat([big_df, concat_df], axis=0, ignore_index=True)
big_df.dropna(inplace=True)
big_df.reset_index(inplace=True, drop=True)
return big_df | acde3d06b9fabd9a22223401b6b9b947a1e248ff | 24,548 |
from decimal import Decimal
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
def set_to_available(request, slug, version):
"""
Updates the video status.
Sets the version already encoded to available.
"""
video = get_object_or_404(Video, slug=slug)
status, created = VideoStatus.objects.get_or_create(video_slug=slug)
if version == 'web':
status.web_available = True
elif version == 'cocreate':
status.cocreate_available = True
else:
status.mobile_available = True
status.is_encoding = False
status.encode_duration = Decimal(str(status.encode_duration))
status.save()
# If the video is part of a cocreate project, auto-compile the cocreate project.
try:
if video.section and video.section.cocreate:
cocreate_obj = video.section.cocreate
init_cocreate(cocreate_obj, generate_slug)
except Section.DoesNotExist:
pass
return HttpResponse("OK") | ead832327d733b82b0d1bc38efd241baab039ed2 | 24,549 |
import pathlib
import json
import requests
import sys
def run_test(test):
""" Make the request """
print(bcolors.HEADER + "Running test: "+ test + bcolors.ENDC)
results = dict()
with open(pathlib.Path(test,"test.ini"), "r") as testini:
testini_json = json.loads(testini.read())
if "IGNORE" in testini_json.keys():
results[testini_json["file_name"]] = "IGNORED"
return results
expected_result = (testini_json["result"],testini_json["body"])
with test_setup(pathlib.Path(test,testini_json["file_name"]), testini_json["file_name"]):
try:
r = requests.get("http://127.0.0.1:8080/"+testini_json["test_name"])
except requests.exceptions.ConnectionError:
print ( "Test: " + bcolors.BOLD + test + bcolors.ENDC + " " + bcolors.FAIL + "FAILED" + bcolors.ENDC )
results[testini_json["file_name"]] = "FAILED"
if ( "STOPONFAIL" in sys.argv ):
exit(1)
return results
print("Status Code:" + str(r.status_code))
print("Body:" + r.text)
print("Expected Status code: "+ str(expected_result[0]))
print("Expected text: "+ expected_result[1])
try:
assert r.status_code == expected_result[0]
assert r.text == expected_result[1]
results[testini_json["file_name"]] = "PASSED"
except AssertionError:
print ( "Test: " + bcolors.BOLD + test + bcolors.ENDC + " " + bcolors.FAIL + "FAILED" + bcolors.ENDC )
results[testini_json["file_name"]] = "FAILED"
if ( "STOPONFAIL" in sys.argv ):
exit(1)
return results
print ( "Test: " + bcolors.BOLD + test + bcolors.ENDC + " " + bcolors.OKGREEN + "PASSED" + bcolors.ENDC )
return results | 6a5af4a5c2e964dc97f2875c60187827a4431537 | 24,550 |
def generate_solve_c():
"""Generate C source string for the recursive solve() function."""
piece_letters = 'filnptuvwxyz'
stack = []
lines = []
add = lines.append
add('#define X_PIECE_NUM {}'.format(piece_letters.index('x')))
add("""
void solve(char* board, int pos, unsigned int used) {
if (used == (1 << NUM_PIECES) - 1) {
display_solution(board);
return;
}
while (board[pos]) {
pos++;
}
""")
indent = ' ' * 4
for c in ORIENTATIONS:
if c == '.':
indent = indent[:-4]
add(indent + '}')
stack.pop()
elif c > 'a':
# Found a piece that fits: if it's not yet used, place it and
# solve rest of board recursively
piece_num = piece_letters.index(c)
add(indent + 'if ((used & (1<<{})) == 0) {{'.format(piece_num))
add(indent + ' _num_tries++;')
add(indent + ' used ^= 1<<{};'.format(piece_num))
for offset in stack:
add(indent + ' board[pos + {}] = {!r};'.format(offset, c))
add(indent + ' solve(board, pos, used);')
for offset in stack:
add(indent + ' board[pos + {}] = 0;'.format(offset))
add(indent + ' used ^= 1<<{};'.format(piece_num))
add(indent + '}')
indent = indent[:-4]
add(indent + '}')
stack.pop()
else:
i = ord(c) - ord('A') + 3
x, y = i % 8, i // 8
offset = y * TOTAL_WIDTH + x - 3
add(indent + 'if (board[pos + {}] == 0) {{'.format(offset))
indent += ' ' * 4
stack.append(offset)
add('}')
return '\n'.join(lines) | dde70d4cdbeb8b691c1ffcb61ba524b2c1df9b2c | 24,551 |
def get_permission_info(room):
"""
Fetches permissions about the room, like ban info etc.
# Return Value
dict of session_id to current permissions,
a dict containing the name of the permission mapped to a boolean value.
"""
return jsonify({k: addExtraPermInfo(v) for k, v in room.permissions.items()}) | aab7aa691e1e34e1bf20e3de744f8d4352a2421e | 24,552 |
from numpy import reshape
def ravel(m):
    """ravel(m) returns a 1d array corresponding to all the elements of its
argument.
"""
return reshape(m, (-1,)) | 728204f77737750783fef9818c102522f17c472e | 24,553 |
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
# My additions
print ("Printing this unstripped text:", line)
index.append(int(line.strip()))
return index | a76c4e94c593a234fd858d369f0133a5170ec8bf | 24,554 |
import click
import socket
def init():
"""Top level command handler."""
@click.command()
@click.option('--port', type=int, help='Port to listen.', default=0)
@click.option('--tun-dev', type=str, required=True,
help='Device to use when establishing tunnels.')
@click.option('--tun-addr', type=str, required=False,
help='Local IP address to use when establishing tunnels.')
@click.option('--tun-cidrs', type=cli.LIST, required=True,
help='CIDRs block assigned to the tunnels.')
@click.option('--policies-dir', type=str, required=True,
help='Directory where to look for policies')
@click.option('--state-dir', type=str, required=False,
default='/var/run/warpgate',
help='Directory where running state is kept')
def warpgate_policy_server(port, tun_dev, tun_addr, tun_cidrs,
policies_dir, state_dir):
"""Run warpgate policy server."""
myhostname = socket.getfqdn()
policy_server.run_server(
admin_address=myhostname,
admin_port=port,
tun_devname=tun_dev,
tun_address=(
tun_addr if tun_addr else socket.gethostbyname(myhostname)
),
tun_cidrs=tun_cidrs,
policies_dir=policies_dir,
state_dir=state_dir
)
return warpgate_policy_server | ba660e7f6698457951e766ce402857a6a5e4bc86 | 24,555 |
def check_collision(bird_rect:object, pipes:list, collide_sound:object):
""" Checks for collision with the Pipe and the Base """
for pipe in pipes:
if bird_rect.colliderect(pipe):
collide_sound.play()
return False
if bird_rect.bottom >= gv.BASE_TOP:
return False
return True | 080c8a6142397e3c1b91b0e3a4dfbd3ed7f1acde | 24,556 |
import numpy as np
def compute_ranking_scores(ranking_scores,
global_ranks_to_save,
rank_per_query):
""" Compute ranking scores (MRR and MAP) and a bunch of interesting ranks to save to file from a list of ranks.
Args:
ranking_scores: Ranking scores previously computed
global_ranks_to_save: Global interesting ranks to save to file
rank_per_query: List of ranks computed by the model evaluation procedure
Returns:
ranking scores (in a dict) and a dict of global interesting ranks to save to file
"""
# compute binarized (0/1) relevance scores
rs = [np.asarray([i == rank['ground_truth_label'] for i in rank['rank_labels']], dtype=np.dtype(int))
for rank in rank_per_query]
# compute and log MRR and MAP scores
ranking_scores['MRR'].append(mean_reciprocal_rank(rs))
ranking_scores['MAP'].append(mean_average_precision(rs))
# compute a bunch of indexes for interesting queries to save in csv files as examples
max_rr, max_rr_idx = max_reciprocal_rank(rs)
min_rr, min_rr_idx = min_reciprocal_rank(rs)
max_ap, max_ap_idx = max_average_precision(rs)
min_ap, min_ap_idx = min_average_precision(rs)
# save indexes (and values) just computed to a dict
queries_indexes = {
'max_rr': {'value': max_rr, 'index': max_rr_idx},
'min_rr': {'value': min_rr, 'index': min_rr_idx},
'max_ap': {'value': max_ap, 'index': max_ap_idx},
'min_ap': {'value': min_ap, 'index': min_ap_idx}
}
# get interesting queries
ranks_to_save = {
key: {
'value': scores['value'],
'rank': rank_per_query[scores['index']]
}
for key, scores in queries_indexes.items()
}
# if the global ranks to save dict is none set it to the current ranks to save
if global_ranks_to_save is None:
global_ranks_to_save = ranks_to_save
else:
# otherwise select from the current ranks to save the ones that are more 'interesting' than those
# already in the global ranks to save dict
if ranks_to_save['max_rr']['value'] > global_ranks_to_save['max_rr']['value']:
global_ranks_to_save['max_rr']['value'] = ranks_to_save['max_rr']['value']
global_ranks_to_save['max_rr']['rank'] = ranks_to_save['max_rr']['rank']
if ranks_to_save['min_rr']['value'] < global_ranks_to_save['min_rr']['value']:
global_ranks_to_save['min_rr']['value'] = ranks_to_save['min_rr']['value']
global_ranks_to_save['min_rr']['rank'] = ranks_to_save['min_rr']['rank']
if ranks_to_save['max_ap']['value'] > global_ranks_to_save['max_ap']['value']:
global_ranks_to_save['max_ap']['value'] = ranks_to_save['max_ap']['value']
global_ranks_to_save['max_ap']['rank'] = ranks_to_save['max_ap']['rank']
if ranks_to_save['min_ap']['value'] < global_ranks_to_save['min_ap']['value']:
global_ranks_to_save['min_ap']['value'] = ranks_to_save['min_ap']['value']
global_ranks_to_save['min_ap']['rank'] = ranks_to_save['min_ap']['rank']
# return computed ranking scores and global ranks to save dict
return ranking_scores, global_ranks_to_save | a25a664b67e35ff9b35327b364e84eaf9ae37aaa | 24,557 |
import numpy as np
def AirAbsorptionRelaxationFrequencies(T, p, H, T0, p_r):
"""
Calculates the relaxation frequencies for air absorption conforming to
ISO 9613-1. Called by :any:`AirAbsorptionCoefficient`.
Parameters
----------
T : float
Temperature in K.
p : float
Pressure in Pa.
    H : float
        Humidity as molar concentration of water vapour, in percent.
T0 : float
Reference temperature in K, 293.15 K.
    p_r : float
        Reference ambient atmospheric pressure in Pa, 101.325e3 Pa.
Returns
-------
f_rO : float
Relaxation frequency of oxygen.
f_rN : float
Relaxation frequency of nitrogen.
"""
f_rO = p / p_r * (24 + 4.04 * 10**4 * H * (0.02+H) / (0.391+H))
f_rN = p / p_r * (T/T0)**(-0.5) * (9+280*H*np.exp(-4.17*((T/T0)**(-1/3)-1)))
return f_rO, f_rN | c8c047ed4d9a7fc62b2cdb6d19f0d3c8b1b4c570 | 24,558 |
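# A worked sketch at T == T0 and p == p_r, where both formulas simplify to
# f_rO = 24 + 4.04e4*H*(0.02 + H)/(0.391 + H) and f_rN = 9 + 280*H.
f_rO, f_rN = AirAbsorptionRelaxationFrequencies(
    T=293.15, p=101.325e3, H=1.0, T0=293.15, p_r=101.325e3)
assert np.isclose(f_rO, 24 + 4.04e4 * 1.0 * 1.02 / 1.391)
assert np.isclose(f_rN, 9 + 280 * 1.0)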
def table_from_bool(ind1, ind2):
"""
    Given two boolean arrays, return the 2x2 contingency table as a flat
    list of counts: [both, only ind1, only ind2, neither].
    ind1, ind2 : array-like
        Boolean arrays of the same length
"""
return [
sum(ind1 & ind2),
sum(ind1 & ~ind2),
sum(~ind1 & ind2),
sum(~ind1 & ~ind2),
] | 497ce6ad1810386fedb6ada9ba87f0a5baa6318a | 24,559 |
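# A small sketch, assuming numpy boolean arrays: the four cells count
# (both, only ind1, only ind2, neither), in that order.
import numpy as np
a = np.array([True, True, False, False])
b = np.array([True, False, True, False])
assert table_from_bool(a, b) == [1, 1, 1, 1]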
import pandas as pd
def preprocess_skills(month_kpi_skills: pd.DataFrame, quarter_kpi_skills: pd.DataFrame) -> pd.DataFrame:
"""
Функция принимает на вход два DataFrame:
- с данными по KPI сотрудников ВЭД за последний месяц
- с данными по KPI сотрудников ВЭД за последний квартал
Возвращает объединенный DataFrame по двум таблицам с дополнительными признаками отношений выполненных работ
к нормам сотрудников
:param month_kpi_skills: pd.DataFrame
:param quarter_kpi_skills: pd.DataFrame
:return: pd.DataFrame
"""
month_kpi_skills.fillna(0, inplace=True)
quarter_kpi_skills.fillna(0, inplace=True)
    # Merge the monthly skill data into a single dataframe
month_kpi_skills.columns = month_skills_columns
quarter_kpi_skills.columns = quarter_skills_columns
    assert sorted(month_kpi_skills['ВЭД'].unique()) == sorted(quarter_kpi_skills['ВЭД'].unique()), 'The monthly and quarterly KPI tables contain different ВЭД staff'
kpi_skills = month_kpi_skills.merge(quarter_kpi_skills, on='ВЭД', how='inner')
    # Compute ratios between the 3-month results and the quotas
kpi_skills['Звонки / Норма'] = kpi_skills['Звонки (3 мес)'] / kpi_skills['Звонки норма (3 мес)']
kpi_skills['Обработанные заявки / Норма'] = kpi_skills['Обработанные заявки (3 мес)'] / kpi_skills['Норма 88% (3 мес)']
kpi_skills['48 часов / Норма'] = kpi_skills['Обработка не позднее 48 часов (3 мес)'] / kpi_skills['Норма 85% (3 мес)']
kpi_skills['Полнота сбора / Норма'] = kpi_skills['Полнота сбора (3 мес)'] / kpi_skills['Норма 95% (3 мес)']
kpi_skills['Встречи / Норма'] = kpi_skills['Встречи (3 мес)'] / kpi_skills['Встречи норма (3 мес)']
    kpi_skills.fillna(0.0, inplace=True)  # Fill the NaNs produced where division by zero occurred
kpi_skills.drop(['Звонки норма', 'Встречи норма', 'Звонки норма (3 мес)', 'Встречи норма (3 мес)'], axis=1, inplace=True)
kpi_skills = kpi_skills.reindex(columns=skills_final_columns)
return kpi_skills | 6bcbc1b93c99acbef04bf0962678c35a3abd3faa | 24,560 |
import numpy as np
from scipy import interpolate
def bias_col_spline(im, overscan, dymin=5, dymax=2, statistic=np.mean, **kwargs):
    """Compute the offset by fitting a spline to the mean of each column in the
    parallel overscan region.
Args:
im: A masked (lsst.afw.image.imageLib.MaskedImageF) or unmasked
(lsst.afw.image.imageLib.ImageF) afw image.
overscan: A bounding box for the parallel overscan region.
dymin: The number of rows to skip at the beginning of the parallel
overscan region.
dymax: The number of rows to skip at the end of the parallel overscan region.
        statistic: The statistic to use to calculate the offset for each column.
Keyword Arguments:
k: The degree of the spline fit. The default is: 3.
s: The amount of smoothing to be applied to the fit. The default is: 18000.
t: The number of knots. If None, finds the number of knots to use
for a given smoothing factor, s. The default is: None.
Returns:
A tuple (t,c,k) containing the vector of knots, the B-spline coefficients,
and the degree of the spline.
"""
try:
imarr = im.Factory(im, overscan).getArray()
except AttributeError: # Dealing with a MaskedImage
imarr = im.Factory(im, overscan).getImage().getArray()
ny, nx = imarr.shape
cols = np.arange(nx)
values = np.array([statistic(imarr[dymin:-dymax,j]) for j in cols])
rms = 7 # Expected read noise per pixel
weights = np.ones(nx) * (rms / np.sqrt(nx))
return interpolate.splrep(cols, values, w=1/weights, k=kwargs.get('k', 3),
s=kwargs.get('s', 18000), t=kwargs.get('t', None)) | d157275dd8337b81c9f4c67efe1c033512f963d3 | 24,561 |
import os
import json
def read_config():
""" Returns the decoded config data in 'db_config.json'
Will return the decoded config file if 'db_config.json' exists and is a valid JSON format.
Otherwise, it will return a False.
"""
# Check if file exists
if not os.path.isfile('db_config.json'):
return False
# Check if file is a valid JSON format.
try:
with open('db_config.json') as json_data:
config = json.load(json_data)
except ValueError:
        print('[WARN] Error decoding db_config.json')
return False
return config | 36b0ccdbd653b654663c7a3c6cf47cb3f68bc399 | 24,562 |
import pandas
def get_sub_title_from_series(ser: pandas.Series, decimals: int = 3) -> str:
"""pandas.Seriesから、平均値、標準偏差、データ数が記載されたSubTitleを生成する。"""
mean = round(ser.mean(), decimals)
std = round(ser.std(), decimals)
sub_title = f"μ={mean}, α={std}, N={len(ser)}"
return sub_title | 45c227e7ddd203872f015e4a95532c8acb80d54f | 24,563 |
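A short usage sketch; pandas' default sample standard deviation gives 1.0 here.
ser = pandas.Series([1.0, 2.0, 3.0])
print(get_sub_title_from_series(ser))  # 'μ=2.0, σ=1.0, N=3'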
import numpy
from numpy.typing import ArrayLike
def atand2(delta_y: ArrayLike, delta_x: ArrayLike) -> ArrayLike:
    """Return the arctan2 of the given deltas, expressed in degrees.
Returns
-------
float
An angle, in degrees.
"""
return numpy.degrees(numpy.arctan2(delta_y, delta_x)) | 14d825d9886a2a62e36748eb9660ee27e6ba6827 | 24,564 |
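A few worked values for illustration:
print(atand2(1.0, 1.0))    # 45.0
print(atand2(1.0, 0.0))    # 90.0
print(atand2(-1.0, -1.0))  # -135.0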
from typing import Union
import xarray as xr
def adjust_doy_calendar(
source: xr.DataArray, target: Union[xr.DataArray, xr.Dataset]
) -> xr.DataArray:
"""Interpolate from one set of dayofyear range to another calendar.
Interpolate an array defined over a `dayofyear` range (say 1 to 360) to another `dayofyear` range (say 1
to 365).
Parameters
----------
source : xr.DataArray
Array with `dayofyear` coordinate.
target : xr.DataArray or xr.Dataset
Array with `time` coordinate.
Returns
-------
xr.DataArray
Interpolated source array over coordinates spanning the target `dayofyear` range.
"""
doy_max_source = source.dayofyear.max()
doy_max = max_doy[get_calendar(target)]
if doy_max_source == doy_max:
return source
return _interpolate_doy_calendar(source, doy_max) | d55da217c6b6e3b2947e992611da4e1fdacf7f5f | 24,565 |
import numpy as np
def iou(box_a, box_b):
"""Calculates intersection area / union area for two bounding boxes."""
assert area(box_a) > 0
assert area(box_b) > 0
intersect = np.array(
[[max(box_a[0][0], box_b[0][0]),
max(box_a[0][1], box_b[0][1])],
[min(box_a[1][0], box_b[1][0]),
min(box_a[1][1], box_b[1][1])]])
return area(intersect) / (area(box_a) + area(box_b) - area(intersect)) | 9722673c7cc5b636d698453224cf3f06d1aa3678 | 24,566 |
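A minimal sketch; area is assumed to be a module-level helper for boxes in [[x0, y0], [x1, y1]] corner form, so a hypothetical version is defined here.
def area(box):  # hypothetical helper matching the assumed box layout
    return max(box[1][0] - box[0][0], 0) * max(box[1][1] - box[0][1], 0)
box_a = np.array([[0., 0.], [2., 2.]])
box_b = np.array([[1., 1.], [3., 3.]])
print(iou(box_a, box_b))  # intersection 1, union 7 -> ~0.143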
def poll():
"""
The send buffer is flushed and any outstanding CA background activity is processed.
.. note:: same as pend_event(1e-12)
"""
status = libca.ca_pend_event(1e-12)
return ECA(status) | 96052229179a0188a3bb63a6e3ab35aa3d6cc5f7 | 24,567 |
def TopLevelWindow_GetDefaultSize(*args):
"""TopLevelWindow_GetDefaultSize() -> Size"""
return _windows_.TopLevelWindow_GetDefaultSize(*args) | e9a04052461bf64b7b3e4962a7df052e1f63de4b | 24,568 |
def human_size(numbytes):
"""converts a number of bytes into a readable string by humans"""
KB = 1024
MB = 1024*KB
GB = 1024*MB
TB = 1024*GB
if numbytes >= TB:
amount = numbytes / TB
unit = "TiB"
elif numbytes >= GB:
amount = numbytes / GB
unit = "GiB"
elif numbytes >= MB:
amount = numbytes / MB
unit = "MiB"
elif numbytes >= KB:
amount = numbytes / KB
unit = "KiB"
else:
amount = numbytes
unit = "B"
return "%.3f%s" % (amount, unit) | 733fdff47350072b9cfcaf72a2de85f8a1d58cc6 | 24,569 |
import argparse
import time
def parse_args():
"""
Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.
:return: Populated namespace.
"""
parser = argparse.ArgumentParser(description='MM-Fit Demo')
parser.add_argument('--data', type=str, default='mm-fit/',
help='location of the dataset')
parser.add_argument('--unseen_test_set', default=False, action='store_true',
help='if set to true the unseen test set is used for evaluation')
parser.add_argument('--epochs', type=int, default=25,
help='number of training epochs')
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate')
parser.add_argument('--batch_size', type=int, default=128,
help='batch size')
parser.add_argument('--eval_every', type=int, default=1,
help='how often to eval model (in epochs)')
parser.add_argument('--early_stop', type=int, default=20,
                        help='stop after this number of epochs if the validation loss did not improve')
parser.add_argument('--checkpoint', type=int, default=10,
help='how often to checkpoint model parameters (epochs)')
parser.add_argument('--multimodal_ae_wp', type=str, default='',
help='file path for the weights of the multimodal autoencoder part of the model')
parser.add_argument('--model_wp', type=str, default='',
help='file path for weights of the full model')
parser.add_argument('--window_length', type=int, default=5,
help='length of data window in seconds')
parser.add_argument('--window_stride', type=float, default=0.2,
help='length of window stride in seconds')
parser.add_argument('--target_sensor_sampling_rate', type=float, default=50,
help='Sampling rate of sensor input signal (Hz)')
parser.add_argument('--skeleton_sampling_rate', type=float, default=30,
help='sampling rate of input skeleton data (Hz)')
parser.add_argument('--layers', type=int, default=3,
help='number of FC layers')
parser.add_argument('--hidden_units', type=int, default=200,
help='number of hidden units')
parser.add_argument('--ae_layers', type=int, default=3,
help='number of autoencoder FC layers')
parser.add_argument('--ae_hidden_units', type=int, default=200,
help='number of autoencoder hidden units')
parser.add_argument('--embedding_units', type=int, default=100,
help='number of hidden units')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout percentage')
parser.add_argument('--ae_dropout', type=float, default=0.0,
help='multimodal autoencoder dropout percentage')
parser.add_argument('--num_classes', type=int, default=None,
help='number of output classes')
parser.add_argument('--name', type=str, default='mmfit_demo_' + str(int(time.time())),
help='name of experiment')
parser.add_argument('--output', type=str, default='output/',
help='path to output folder')
return parser.parse_args() | 6be79c2b83a294dc9f34da4acdbd6c8b0e568a8b | 24,570 |
from typing import Callable
from typing import Any
def node_definitions(
id_fetcher: Callable[[str, GraphQLResolveInfo], Any],
type_resolver: GraphQLTypeResolver = None,
) -> GraphQLNodeDefinitions:
"""
Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field object to be used as a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `is_type_of` method on object types, as with any GraphQL
interface without a provided `resolve_type` method.
"""
node_interface = GraphQLInterfaceType(
"Node",
description="An object with an ID",
fields=lambda: {
"id": GraphQLField(
GraphQLNonNull(GraphQLID), description="The id of the object."
)
},
resolve_type=type_resolver,
)
# noinspection PyShadowingBuiltins
node_field = GraphQLField(
node_interface,
description="Fetches an object given its ID",
args={
"id": GraphQLArgument(
GraphQLNonNull(GraphQLID), description="The ID of an object"
)
},
resolve=lambda _obj, info, id: id_fetcher(id, info),
)
nodes_field = GraphQLField(
GraphQLNonNull(GraphQLList(node_interface)),
args={
"ids": GraphQLArgument(
GraphQLNonNull(GraphQLList(GraphQLNonNull(GraphQLID))),
description="The IDs of objects",
)
},
resolve=lambda _obj, info, ids: [id_fetcher(id_, info) for id_ in ids],
)
return GraphQLNodeDefinitions(node_interface, node_field, nodes_field) | 4e041edacbd7e5d6c82dd7df8616a694aa00181a | 24,571 |
def get_image_from_request(request):
"""
This function is used to extract the image from a POST or GET request.
Usually it is a url of the image and, in case of the POST is possible
to send it as a multi-part data.
Returns a tuple with (ok:boolean, error:string, image:ndarray)
"""
if request.method == 'POST':
content_type = parse_content_type(request)
if content_type == "multipart/form-data":
if 'image' in request.files:
try:
image = read_image_from_stream(request.files['image'])
return (True, '', image)
                except Exception:
return (False, "Unable to read uploaded file", None)
else:
return (False, "No image provided in form-data request", None)
elif content_type == 'application/json':
try:
input_params = request.get_json(True)
        except Exception:
return (False, 'No valid JSON present', None)
if 'imageUrl' in input_params:
image_url = input_params['imageUrl']
try:
image = read_image_from_url(image_url)
return (True, '', image)
            except Exception:
return (False, 'Unable to read image from url', None)
elif 'imageB64' in input_params:
image_b64 = input_params['imageB64']
try:
image = read_image_b64(image_b64)
return (True, '', image)
            except Exception:
return (False, 'Unable to read base 64 image', None)
else:
return (False, 'Image url or base 64 string not informed', None)
elif request.method == 'GET':
        if request.args.get('imageUrl') is None:
return (False, 'Image url not informed', None)
else:
image_url = request.args.get('imageUrl')
try:
image = read_image_from_url(image_url)
return (True, '', image)
            except Exception:
return (False, 'Unable to read image from url', None) | 0af18d65664e1c7dc264ac112b42e001ac293fd6 | 24,572 |
import ibis
def con_external():
"""Define a connection fixture.
Returns
-------
ibis.omniscidb.OmniSciDBClient
"""
omnisci_client = ibis.omniscidb.connect(
user=EXT_OMNISCIDB_USER,
password=EXT_OMNISCIDB_PASSWORD,
host=EXT_OMNISCIDB_HOST,
port=EXT_OMNISCIDB_PORT,
database=EXT_OMNISCIDB_DATABASE,
protocol=EXT_OMNISCIDB_PROTOCOL
)
return omnisci_client | e5a57ebdf8640bd96a2e28678fe4d0b285fe8408 | 24,573 |
import pandas as pd
def parse_risk(data_byte_d):
"""Parse and arrange risk lists.
Parameters
----------
data_byte_d : object
Decoded StringIO object.
Returns
-------
neocc_lst : *pandas.Series* or *pandas.DataFrame*
Data frame with risk list data parsed.
"""
# Read data as csv
neocc_lst = pd.read_csv(data_byte_d, sep='|', skiprows=[3],
header=2)
# Remove redundant white spaces
neocc_lst.columns = neocc_lst.columns.str.strip()
neocc_lst = neocc_lst.replace(r'\s+', ' ', regex=True)
df_obj = neocc_lst.select_dtypes(['object'])
neocc_lst[df_obj.columns] = df_obj.apply(lambda x:
x.str.strip())
# Rename columns
col_dict = {"Num/des. Name": 'Object Name',
"m": 'Diameter in m',
"Vel km/s": 'Vel in km/s'}
neocc_lst.rename(columns=col_dict, inplace=True)
# Remove last column
neocc_lst = neocc_lst.drop(neocc_lst.columns[-1], axis=1)
# Convert column with date to datetime variable
neocc_lst['Date/Time'] = pd.to_datetime(neocc_lst['Date/Time'])
# Split Years into 2 columns to avoid dashed between integers
# Check dataframe is not empty (for special list)
if len(neocc_lst.index.values) != 0:
neocc_lst[['First year', 'Last year']] = neocc_lst['Years']\
.str.split("-",
expand=True)\
.astype(int)
# Drop split column
neocc_lst = neocc_lst.drop(['Years'], axis=1)
# Reorder columns
neocc_lst = neocc_lst[['Object Name', 'Diameter in m', '*=Y',
'Date/Time', 'IP max', 'PS max', 'TS',
'Vel in km/s', 'First year', 'Last year',
'IP cum', 'PS cum']]
# Adding metadata
neocc_lst.help = ('Risk lists contain a data frame with the '
'following information:\n'
'-Object Name: name of the NEA\n'
                      '-Diameter in m: approximate diameter in meters\n'
'-*=Y: recording an asterisk if the value has '
'been estimated from the absolute magnitude\n'
'-Date/Time: predicted impact date in datetime '
'format\n'
'-IP max: Maximum Impact Probability\n'
'-PS max: Palermo scale rating\n'
'-Vel in km/s: Impact velocity at atmospheric entry'
' in km/s\n'
'-First year: first year of possible impacts\n'
'-Last year: last year of possible impacts\n'
'-IP cum: Cumulative Impact Probability\n'
'-PS cum: Cumulative Palermo Scale')
return neocc_lst | cf8761e46df621ffcf69dba9e2c359c25da02234 | 24,574 |
def plot_step_w_variable_station_filters(df, df_stations=None, options=None):
"""
"""
p = PlotStepWithControls(df, df_stations, options)
return p.plot() | a1faa31c90f4c00103148aa50648f040849984b1 | 24,575 |
import numpy as np
def pick_random_element(count):
"""
Parameters
----------
count: {string: int}
A dictionary of all transition
counts from some state
we're in to all other states
Returns
-------
The next character, randomly sampled
from the empirical probabilities
determined from the counts
"""
keys = list(count.keys())
counts = np.array(list(count.values()))
counts = np.cumsum(counts)
r = np.random.rand()*counts[-1]
idx = np.searchsorted(counts, r)
return keys[idx] | 90388526b0a3a663f4f8d2ef6530484ddcf6fde2 | 24,576 |
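A usage sketch; the seed is only for reproducibility of this illustration. With these counts, 'b' is drawn roughly three times as often as 'a'.
np.random.seed(0)
counts = {'a': 1, 'b': 3}
draws = [pick_random_element(counts) for _ in range(1000)]
print(draws.count('b') / len(draws))  # ~0.75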
def do_flake8() -> str:
"""
Flake8 Checks
"""
command = "flake8"
check_command_exists(command)
command_text = f"flake8 --config {settings.CONFIG_FOLDER}/.flake8"
command_text = prepinform_simple(command_text)
execute(*(command_text.split(" ")))
return "flake 8 succeeded" | 1ffaf0ecfd5905f68a9136c597f56c6c86b8d5cb | 24,577 |
def counter_current_heat_exchange(s0_in, s1_in, s0_out, s1_out,
dT, T_lim0=None, T_lim1=None,
phase0=None, phase1=None,
H_lim0=None, H_lim1=None):
"""
Allow outlet streams to exchange heat until either the given temperature
limits or the pinch temperature and return the total heat transfer
[Q; in kJ/hr].
"""
# Counter current heat exchange setup:
# First find the hot inlet, cold inlet, hot outlet and cold outlet streams
# along with the maximum temperature approaches for the hotside and the
# cold side.
if s0_in.T > s1_in.T:
s_hot_in = s0_in
s_cold_in = s1_in
s_hot_out = s0_out
s_cold_out = s1_out
T_lim_coldside = T_lim0
T_lim_hotside = T_lim1
H_lim_coldside = H_lim0
H_lim_hotside = H_lim1
phase_coldside = phase0
phase_hotside = phase1
else:
s_cold_in = s0_in
s_hot_in = s1_in
s_cold_out = s0_out
s_hot_out = s1_out
T_lim_hotside = T_lim0
T_lim_coldside = T_lim1
H_lim_hotside = H_lim0
H_lim_coldside = H_lim1
phase_hotside = phase0
phase_coldside = phase1
if (s_hot_in.T - s_cold_in.T) <= dT: return 0. # No heat exchange
T_pinch_coldside = s_cold_in.T + dT
if T_lim_coldside:
if T_lim_coldside > s_hot_in.T:
return 0. # No heat exchange
else:
T_lim_coldside = max(T_pinch_coldside, T_lim_coldside)
else:
T_lim_coldside = T_pinch_coldside
T_pinch_hotside = s_hot_in.T - dT
if T_lim_hotside:
if T_lim_hotside < s_cold_in.T:
return 0. # No heat exchange
else:
T_lim_hotside = min(T_pinch_hotside, T_lim_hotside)
else:
T_lim_hotside = T_pinch_hotside
# Find which side reaches the pinch first by selecting the side that needs
# the least heat transfer to reach the pinch.
# Pinch on the cold side
Q_hot_stream = heat_exchange_to_condition(s_hot_in, s_hot_out,
T_lim_coldside, phase_coldside,
H_lim_coldside, heating=False)
# Pinch on the hot side
Q_cold_stream = heat_exchange_to_condition(s_cold_in, s_cold_out,
T_lim_hotside, phase_hotside,
H_lim_hotside, heating=True)
if Q_hot_stream == Q_cold_stream == 0.:
        s0_out.copy_like(s0_in)
        s1_out.copy_like(s1_in)
return 0.
if Q_hot_stream > 0 or Q_cold_stream < 0:
# Sanity check
if Q_hot_stream / s_hot_in.C < 0.1 or Q_cold_stream / s_cold_in.C > -0.1:
            s0_out.copy_like(s0_in)
            s1_out.copy_like(s1_in)
return 0.
raise RuntimeError('inlet stream not in vapor-liquid equilibrium')
if Q_cold_stream < -Q_hot_stream:
# Pinch on the hot side
Q = Q_cold_stream
if phase_coldside:
s_hot_out.H = s_hot_in.H - Q
else:
s_hot_out.vle(H=s_hot_in.H - Q, P=s_hot_out.P)
else:
# Pinch on the cold side
Q = Q_hot_stream
if phase_hotside:
s_cold_out.H = s_cold_in.H - Q
else:
s_cold_out.vle(H=s_cold_in.H - Q, P=s_cold_out.P)
return abs(Q) | e5654666a56ebd0e32fd3abcde472e138a510d6e | 24,578 |
import numpy as np
def ReadCOSx1dsumSpectrum(filename):
    """
    filename with full path
    Purpose is to allow other variations
    of files and different ways of reading them in.
    """
wave,flux,dfp,dfm = np.loadtxt(filename,unpack=True,usecols=[0,1,4,5])
return np.array([wave,flux,dfp,dfm]) | a74a76a787ba3f0665c8f73d602e5259fa4828ac | 24,579 |
import argparse
def parse_args():
"""Use argparse to get command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--task', '-t', choices=['seg', 'det', 'drivable',
'det-tracking'])
parser.add_argument('--gt', '-g', help='path to ground truth')
parser.add_argument('--result', '-r',
help='path to results to be evaluated')
parser.add_argument('--categories', '-c', nargs='+',
help='categories to keep')
args = parser.parse_args()
return args | f2478bb73f5f255d832a25800b6fddfbfd9ec734 | 24,580 |
import cmath
import math
def op_atanh(x):
"""Returns the inverse hyperbolic tangent of this mathematical object."""
if isinstance(x, list):
return [op_atanh(a) for a in x]
elif isinstance(x, complex):
return cmath.atanh(x)
else:
return math.atanh(x) | 515da3d653f9ab4df6d87f5cec7d021ac2c98da9 | 24,581 |
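Illustrative calls showing the type dispatch:
print(op_atanh(0.5))         # ~0.5493
print(op_atanh([0.1, 0.2]))  # applied elementwise over lists
print(op_atanh(0.5 + 0j))    # complex branch via cmath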
from typing import Mapping
from typing import Any
from typing import Callable
import warnings
def find_intersections(
solutions: Mapping[Any, Callable],
ray_direction: Array,
target_center: Array,
) -> dict:
"""
find intersections between ray_direction and target_center given a mapping
of functions (like output of `solutions.make_ray_sphere_lambdas`)
"""
# suppress irrelevant warnings about imaginary values
    with warnings.catch_warnings():
warnings.simplefilter("ignore")
return {
coordinate: solution(*ray_direction, *target_center)
for coordinate, solution in solutions.items()
} | a738fd0521a853c0be52bbf26d63ef515208b37a | 24,582 |
import numpy as np
from scipy.spatial.distance import pdist
def Circum_O_R(vertex_pos, tol):
    """
    Function finds the center and the radius of the circumsphere of a given tetrahedron.
Reference:
Fiedler, Miroslav. Matrices and graphs in geometry. No. 139. Cambridge University Press, 2011.
Parameters
-----------------
vertex_pos :
The position of vertices of a tetrahedron
tol :
Tolerance defined to identify co-planar tetrahedrons
Returns
----------
circum_center :
The center of the circum-sphere
circum_rad :
The radius of the circum-sphere
"""
dis_ij = pdist(vertex_pos, 'euclidean')
sq_12, sq_13, sq_14, sq_23, sq_24, sq_34 = np.power(dis_ij, 2)
MatrixC = np.array([[0, 1, 1, 1, 1], [1, 0, sq_12, sq_13, sq_14], [1, sq_12, 0, sq_23, sq_24],
[1, sq_13, sq_23, 0, sq_34], [1, sq_14, sq_24, sq_34, 0]])
det_MC = (np.linalg.det(MatrixC))
if (det_MC < tol):
return [0, 0, 0], 0
else:
M = -2*np.linalg.inv(MatrixC)
circum_center = (M[0, 1]*vertex_pos[0, :] + M[0, 2]*vertex_pos[1, :] + M[0, 3]*vertex_pos[2, :] +
M[0, 4] * vertex_pos[3, :]) / (M[0, 1] + M[0, 2] + M[0, 3] + M[0, 4])
circum_rad = np.sqrt(M[0, 0])/2
return circum_center, circum_rad | 800ee6e56088a1c4df7149e911d4acbc175e2771 | 24,583 |
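A worked check (a sketch): the right tetrahedron on the unit axes has circumcenter (0.5, 0.5, 0.5) and circumradius sqrt(3)/2 ~ 0.866.
verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
center, radius = Circum_O_R(verts, tol=1e-10)
print(center, radius)  # ~[0.5 0.5 0.5], ~0.8660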
import numpy as np
def reverse_one_hot(image):
"""
Transform a 2D array in one-hot format (depth is num_classes),
to a 2D array with only 1 channel, where each pixel value is
the classified class key.
#Arguments
image: The one-hot format image
#Returns
A 2D array with the same width and height as the input, but
        with a depth size of 1, where each pixel value is the classified
class key.
"""
x = np.argmax(image, axis=-1)
return x | 912d4a5f9fbb3711b1af9dcd9c2092e6d71869bd | 24,584 |
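For example:
one_hot = np.array([[[1, 0, 0], [0, 0, 1]],
                    [[0, 1, 0], [1, 0, 0]]])
print(reverse_one_hot(one_hot))  # [[0 2]
                                 #  [1 0]]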
import numpy as np
import torch
from torch.nn.functional import interpolate
from sklearn.cluster import KMeans
def get_feature_clusters(x: torch.Tensor, output_size: int, clusters: int = 8):
""" Applies KMeans across feature maps of an input activations tensor """
if not isinstance(x, torch.Tensor):
raise NotImplementedError(f"Function supports torch input tensors only, but got ({type(x)})")
if x.ndim == 3:
x = x.unsqueeze(0)
b, c, h, w = x.shape
assert h == w, f"image should be square, but got h = {h} and w = {w}"
scale_factor = int(np.ceil(output_size / h))
x = interpolate(x, scale_factor=scale_factor, mode='bilinear', align_corners=True)
x = torch2np(x, squeeze=True).reshape((output_size * output_size), c)
x = KMeans(n_clusters=clusters).fit_predict(x).reshape(output_size, output_size)
return x | a43c2b98239f7474bf70747f464e29f4800159d8 | 24,585 |
def get_phone_operator(phonenumber):
"""
Get operator type for a given phonenumber.
>>> get_phone_operator('+959262624625')
<Operator.Mpt: 'MPT'>
>>> get_phone_operator('09970000234')
<Operator.Ooredoo: 'Ooredoo'>
>>> get_phone_operator('123456789')
<Operator.Unknown: 'Unknown'>
"""
phonenumber = str(phonenumber).strip()
if mpt_re.match(phonenumber):
return (Operator.Mpt)
if ooredoo_re.match(phonenumber):
return (Operator.Ooredoo)
if telenor_re.match(phonenumber):
return (Operator.Telenor)
if mytel_re.match(phonenumber):
return (Operator.Mytel)
return (Operator.Unknown) | 01ec72a935b6fec466ab3113a61959d316d8f4b4 | 24,586 |
def projectpoints(P, X):
""" Apply full projection matrix P to 3D points X in cartesian coordinates.
Args:
P: projection matrix
X: 3d points in cartesian coordinates
Returns:
x: 2d points in cartesian coordinates
"""
X_hom = cart2hom(X)
    X_pro = P.dot(X_hom)  # homogeneous 3D coordinates in the pixel coordinate system
x = hom2cart(X_pro)
return x | a16df6083a567215b474ec29d2b065c8a200c22c | 24,587 |
import os
def getDMI():
"""
Read hardware information from DMI.
This function attempts to read from known files in /sys/class/dmi/id/. If
any are missing or an error occurs, those fields will be omitted from the
result.
Returns: a dictionary with fields such as bios_version and product_serial.
"""
dmi = dict()
for field in DMI_FIELDS:
path = os.path.join("/sys/class/dmi/id", field)
try:
with open(path, 'r') as source:
value = source.read().strip()
dmi[field] = value
        except Exception:
pass
return dmi | bd82c18f82a7ecf2681c5c769bee81e9127eeaef | 24,588 |
import numpy as np
def mdot(a,b):
"""
Computes a contraction of two tensors/vectors. Assumes
the following structure: tensor[m,n,i,j,k] OR vector[m,i,j,k],
where i,j,k are spatial indices and m,n are variable indices.
"""
if (a.ndim == 3 and b.ndim == 3) or (a.ndim == 4 and b.ndim == 4):
c = (a*b).sum(0)
elif a.ndim == 5 and b.ndim == 4:
c = np.empty(np.maximum(a[:,0,:,:,:].shape,b.shape),dtype=b.dtype)
for i in range(a.shape[0]):
c[i,:,:,:] = (a[i,:,:,:,:]*b).sum(0)
elif a.ndim == 4 and b.ndim == 5:
c = np.empty(np.maximum(b[0,:,:,:,:].shape,a.shape),dtype=a.dtype)
for i in range(b.shape[1]):
c[i,:,:,:] = (a*b[:,i,:,:,:]).sum(0)
elif a.ndim == 5 and b.ndim == 5:
c = np.empty((a.shape[0],b.shape[1],a.shape[2],a.shape[3],max(a.shape[4],b.shape[4])),dtype=a.dtype)
for i in range(c.shape[0]):
for j in range(c.shape[1]):
c[i,j,:,:,:] = (a[i,:,:,:,:]*b[:,j,:,:,:]).sum(0)
elif a.ndim == 5 and b.ndim == 6:
c = np.empty((a.shape[0],b.shape[1],b.shape[2],max(a.shape[2],b.shape[3]),max(a.shape[3],b.shape[4]),max(a.shape[4],b.shape[5])),dtype=a.dtype)
for mu in range(c.shape[0]):
for k in range(c.shape[1]):
for l in range(c.shape[2]):
c[mu,k,l,:,:,:] = (a[mu,:,:,:,:]*b[:,k,l,:,:,:]).sum(0)
else:
raise Exception('mdot', 'wrong dimensions')
return c | 36b8242bf8c643ff35362c4d19f3a222297a1eee | 24,589 |
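A sketch of the simplest branch, contracting two 4-D arrays over the leading variable index:
a = np.ones((3, 2, 2, 2))
b = 2 * np.ones((3, 2, 2, 2))
print(mdot(a, b).shape)  # (2, 2, 2), every entry equal to 6.0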
def sample_duration(sample):
"""Returns the duration of the sample (in seconds)
:param sample:
:return: number
"""
return sample.duration | 9aaddb69b106ad941e3d1172c8e789b4969da99d | 24,590 |
import numpy as np
import pandas as pd
def fetch_commons_memberships(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN):
"""Fetch Commons memberships for all MPs.
fetch_commons_memberships fetches data from the data platform showing
Commons memberships for each MP. The memberships are processed to impose
consistent rules on the start and end dates for memberships.
The from_date and to_date arguments can be used to filter the memberships
returned. The on_date argument is a convenience that sets the from_date and
to_date to the same given date. The on_date has priority: if the on_date is
set, the from_date and to_date are ignored.
The filtering is inclusive: a membership is returned if any part
of it falls within the period specified with the from and to dates.
Note that a membership with a NaN end date is still open.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
Returns
-------
out : DataFrame
A pandas dataframe of Commons memberships for each MP, with one row
per Commons membership.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch the Commons memberships
commons_memberships = fetch_commons_memberships_raw()
# Get elections and fix the end dates of memberships
end_dates = commons_memberships['seat_incumbency_end_date'].values
general_elections = elections.get_general_elections().values
general_elections_count = len(general_elections)
# If the end date for a membership falls after dissolution adjust it
for i in range(len(end_dates)):
date = end_dates[i]
if pd.isna(date): continue
for j in range(general_elections_count):
dissolution = general_elections[j, 1]
election = general_elections[j, 2]
if date > dissolution and date <= election:
end_dates[i] = dissolution
                break
commons_memberships['seat_incumbency_end_date'] = end_dates
# Filter on dates if requested
if not pd.isna(from_date) or not pd.isna(to_date):
commons_memberships = filter.filter_dates(
commons_memberships,
start_col='seat_incumbency_start_date',
end_col='seat_incumbency_end_date',
from_date=from_date,
to_date=to_date)
# Tidy up and return
commons_memberships.sort_values(
by=['family_name',
'seat_incumbency_start_date'],
inplace=True)
commons_memberships.reset_index(drop=True, inplace=True)
return commons_memberships | 0c9f72f9b2b1bdc090597a69a598ef638383fcf1 | 24,591 |
import win32com.client as win32
def excel_col_w_fitting(excel_path, sheet_name_list):
"""
    This function makes all column widths of an Excel file auto-fit the column content.
    :param excel_path: The Excel file's path.
    :param sheet_name_list: The sheet names of the Excel file.
    :return: None; the file is saved with its column widths auto-fitted.
"""
excel = win32.gencache.EnsureDispatch('Excel.Application')
work_book = excel.Workbooks.Open(excel_path)
for sheet_name in sheet_name_list:
work_sheet = work_book.Worksheets(sheet_name)
work_sheet.Columns.AutoFit()
work_book.Save()
excel.Application.Quit()
return None | 57de5aa63317d4fae4c1f60b607082b8de1f5f91 | 24,592 |
import os
def load_meetings(root=public_meetings.data_root,
ext=public_meetings.file_ext):
"""
Load all meetings from `root` ending with `ext`
Args:
root(str): root meeting directory
ext(str): file extension
Returns:
        meetings(dict): a dictionary {hash: meeting_data}
"""
meetings = {}
for filename in os.listdir(root):
if not filename.endswith(ext):
continue
path = os.path.join(root, filename)
h = filename.replace(ext, '')
meetings[h] = load_meeting(path)
return meetings | 098def3266d699ac9b5ddfcb5b655bafe2c711d6 | 24,593 |
import numpy as np
def padding_reflect(image, pad_size):
"""
Padding with reflection to image by boarder
Parameters
----------
image: NDArray
Image to padding. Only support 2D(gray) or 3D(color)
pad_size: tuple
        Padding sizes for the height and width axes respectively
Returns
-------
ret: NDArray
Image after padding
"""
shape = image.shape
assert len(shape) in [2, 3], 'image must be 2D or 3D'
is_3D = True
if len(shape) == 2:
image = np.expand_dims(image, axis=2)
shape = image.shape
is_3D = False
h, w = pad_size
ret = np.zeros((shape[0]+2*h, shape[1]+2*w, shape[2]))
    for i in range(shape[0]+2*h):
        for j in range(shape[1]+2*w):
if i < h:
if j < w:
ret[i, j, :] = image[h-1-i, w-1-j, :]
elif w <= j <= w + shape[1] - 1:
ret[i, j, :] = image[h-1-i, j-w, :]
else:
ret[i, j, :] = image[h-1-i, w+2*shape[1]-1-j, :]
elif h <= i <= h + shape[0] - 1:
if j < w:
ret[i, j, :] = image[i-h, w-1-j, :]
elif w <= j <= w + shape[1] - 1:
ret[i, j, :] = image[i-h, j-w, :]
else:
ret[i, j, :] = image[i-h, w+2*shape[1]-1-j, :]
else:
if j < w:
ret[i, j, :] = image[h+2*shape[0]-1-i, w-1-j, :]
elif w <= j <= w + shape[1] - 1:
ret[i, j, :] = image[h+2*shape[0]-1-i, j-w, :]
else:
ret[i, j, :] = image[h+2*shape[0]-1-i, w+2*shape[1]-1-j, :]
return ret if is_3D else np.squeeze(ret, axis=2) | eb9f00bee89cb9a13fef0aa77e2c3eb0bfc8c92c | 24,594 |
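The mirror includes the edge row/column, so on this example the result matches numpy's 'symmetric' pad mode (an observed equivalence on this sketch, not a guarantee):
img = np.arange(9, dtype=float).reshape(3, 3)
padded = padding_reflect(img, (1, 1))
print(np.allclose(padded, np.pad(img, 1, mode='symmetric')))  # True on this example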
def check_if_all_elements_have_geometry(geodataframes_list):
"""
Iterates over a list and checks if all members of the list have geometry
information associated with them.
Parameters
----------
geodataframes_list : A list object
A list object that contains one or more geopandas.GeoDataFrame objects
Returns
-------
bool
Returns True if all elements within geodataframes_list have geometry info associated with them
        Returns False if at least one element within geodataframes_list does not have geometry info associated with it
"""
valerror_text = "geodataframes_list must be of list type. Got {}".format(type(geodataframes_list))
if not isinstance(geodataframes_list, list):
raise ValueError(valerror_text)
valerror_text = "Elements of the list should be of type geopandas.GeoDataFrame. Got at least one value that is not."
if check_if_all_elements_are_gdf(geodataframes_list) is False:
raise ValueError(valerror_text)
for geodataframe in geodataframes_list:
if has_geometry(geodataframe) is False:
return False
return True | 4ca7bcdd405c407a0a15be81876627e88a0d9c80 | 24,595 |
def conference_schedule(parser, token):
"""
{% conference_schedule conference schedule as var %}
"""
contents = token.split_contents()
tag_name = contents[0]
try:
conference = contents[1]
schedule = contents[2]
var_name = contents[4]
except IndexError:
raise template.TemplateSyntaxError("%r tag had invalid arguments" % tag_name)
class ScheduleNode(TNode):
def __init__(self, conference, schedule, var_name):
self.var_name = var_name
self.conference = self._set_var(conference)
self.schedule = self._set_var(schedule)
def render(self, context):
schedule = models.Schedule.objects.get(
conference = self._get_var(self.conference, context),
slug = self._get_var(self.schedule, context),
)
context[self.var_name] = schedule_context(schedule)
return ''
return ScheduleNode(conference, schedule, var_name) | 037e000488a204a9d0094ccda72067ed70e5aa53 | 24,596 |
from typing import Tuple
import http
def run_delete_process() -> Tuple[str, http.HTTPStatus]:
"""Handles deleting tasks pushed from Task Queue."""
return _run_process(constants.Operation.DELETE) | 94a8c459ed67695894c28973f4a04faa2f2782aa | 24,597 |
def annotate(f, expr, ctxt):
"""
f: function argument
expr: expression
ctxt: context
:returns: type of expr
"""
t = f(expr, ctxt)
expr.type = t
return t | d8fb524f6ca2fbddef78aa150733e768d0e3da01 | 24,598 |
import numpy as np
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes | 6b0e412f4aa8d4204530ebeca8a45928213847aa | 24,599 |
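For example, with boxes given as [x1, y1, x2, y2] rows and im_shape as (height, width):
boxes = np.array([[-5., 10., 400., 250.]])
print(_clip_boxes(boxes, (224, 320)))  # [[  0.  10. 319. 223.]]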