content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS); only the names that exist here are
    resolved, in the order given, without duplicates.
    """
    available = set(dir(errno))
    # dict.fromkeys de-duplicates while preserving first-seen order.
    unique_nums = dict.fromkeys(
        getattr(errno, name) for name in errnames if name in available)
    return list(unique_nums)
def fft(array, nfft=None, dim=None, dx=None, detrend=None, tapering=False,
        shift=True, sym=False, chunks=None):
    """Compute the spectrum on several dimensions of xarray.DataArray objects
    using the Fast Fourier Transform parallelized with dask.

    Parameters
    ----------
    array : xarray.DataArray
        Array from which compute the spectrum
    dim : str or sequence
        Dimensions along which to compute the spectrum
    dx : float or sequence, optional
        Define the resolution of the dimensions. If not precised,
        the resolution is computed directly from the coordinates associated
        to the dimensions.
    detrend : {None, 'mean', 'linear'}, optional
        Remove the mean or a linear trend before the spectrum computation
    tapering : bool, optional
        If True, taper the data with a Tukey window
    shift : bool, optional
        If True, the frequency axes are shifted to center the 0 frequency,
        otherwise negative frequencies follow positive frequencies as in
        numpy.fft.fft
    sym : bool, optional
        If True, force the spectrum to be symmetrical even if the input data
        is real
    chunks : int, tuple or dict, optional
        Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
        ``{'x': 5, 'y': 5}``

    Returns
    -------
    res : DataArray
        A multi-dimensional complex DataArray with the corresponding
        dimensions transformed in the Fourier space.

    Notes
    -----
    If the input data is real, a real fft is performed over the first
    dimension, which is faster. Then the transform over the remaining
    dimensions are computed with the classic fft.
    """
    temp_nfft, new_dim = _utils.infer_n_and_dims(array, nfft, dim)
    # NOTE(review): infer_arg is fed the user-supplied `dim`, not `new_dim`;
    # assumed intentional -- confirm against the _utils API.
    new_nfft = _utils.infer_arg(temp_nfft, dim)
    new_dx = _utils.infer_arg(dx, dim)
    # Bug fix: `detrend is 'mean'` compared string identity, which is
    # implementation-dependent; use equality instead.
    if detrend == 'mean':
        # Bug fix: the original loop recomputed `array - mean` from scratch
        # for each dimension, so only the last dimension's mean was removed.
        # Remove the mean over all transformed dimensions at once; loading
        # here keeps the dask graph small.
        preproc_array = array - array.mean(dim=new_dim).load()
    elif detrend == 'linear':
        preproc_array = _detrend(array, new_dim)
    else:
        preproc_array = array
    if tapering:
        # Bug fix: taper the detrended data; the original tapered the raw
        # input, silently discarding the detrending step above.
        preproc_array = _tapper(preproc_array, new_dim)
    # TODO: Check if this part may work with dask using np.iscomplexobj
    # If the array is complex, set the symmetry parameters to True
    if np.any(np.iscomplex(array)):
        sym = True
    spectrum_array, spectrum_coords, spectrum_dims = \
        _fft(preproc_array, new_nfft, new_dim, new_dx, shift=shift,
             chunks=chunks, sym=sym)
    spec = xr.DataArray(spectrum_array, coords=spectrum_coords,
                        dims=spectrum_dims, name='spectrum')
    _compute_norm_factor(spec, new_nfft, new_dim, new_dx, tapering, sym=sym)
    return spec
def make_symmetric_matrix(d: Union[list, float]) -> list:
    """Build a 3x3 matrix from a scalar or a flattened list.

    Args:
        d (list, int or float):
            scalar: Suppose cubic system (d placed on the diagonal)
            len(d) == 1: Suppose cubic system
            len(d) == 3: Suppose tetragonal or orthorhombic system
            len(d) == 6: Suppose the other system, given as the upper
                triangle [A_xx, A_yy, A_zz, A_xy, A_xz, A_yz]
            len(d) == 9: Full matrix in row-major order

    Returns:
        3x3 nested list representing the matrix.

    Raises:
        ValueError: if d is a sequence of an unsupported length.
    """
    # Generalized from float-only: an int scalar (previously a TypeError on
    # len()) is treated the same way as a float scalar.
    if isinstance(d, (int, float)):
        tensor = [[d, 0, 0], [0, d, 0], [0, 0, d]]
    elif len(d) == 9:
        tensor = [[d[0], d[1], d[2]], [d[3], d[4], d[5]], [d[6], d[7], d[8]]]
    elif len(d) == 1:
        tensor = [[d[0], 0, 0], [0, d[0], 0], [0, 0, d[0]]]
    elif len(d) == 3:
        tensor = [[d[0], 0, 0], [0, d[1], 0], [0, 0, d[2]]]
    elif len(d) == 6:
        from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
        # Expands [A_xx, A_yy, A_zz, A_xy, A_xz, A_yz] into
        # [[A_xx, A_xy, A_xz], [A_xy, A_yy, A_yz], [A_xz, A_yz, A_zz]]
        tensor = make_symmetric_matrix_from_upper_tri(d).tolist()
    else:
        raise ValueError("{} is not valid to make symmetric matrix".format(d))
    return tensor
def convert_lds_to_block_tridiag(As, bs, Qi_sqrts, ms, Ri_sqrts):
    """
    Parameterize the LDS in terms of pairwise linear Gaussian dynamics
    and per-timestep Gaussian observations.
        p(x_{1:T}; theta)
            = [prod_{t=1}^{T-1} N(x_{t+1} | A_t x_t + b_t, Q_t)]
            * [prod_{t=1}^T N(x_t | m_t, R_t)]
    We can rewrite this as a Gaussian with a block tridiagonal precision
    matrix J. The blocks of this matrix are:
        J_{t,t} = A_t.T Q_t^{-1} A_t + Q_{t-1}^{-1} + R_t^{-1}
        J_{t,t+1} = -Q_t^{-1} A_t
    The linear term is h_t:
        h_t = -A_t.T Q_t^{-1} b_t + Q_{t-1}^{-1} b_{t-1} + R_t^{-1} m_t
    We parameterize the model in terms of
        theta = {A_t, b_t, Q_t^{-1/2}}_{t=1}^{T-1}, {m_t, R_t^{-1/2}}_{t=1}^T
    """
    T, D = ms.shape
    assert As.shape == (T - 1, D, D)
    assert bs.shape == (T - 1, D)
    assert Qi_sqrts.shape == (T - 1, D, D)
    assert Ri_sqrts.shape == (T, D, D)
    # Recover the inverse covariances from their square roots.
    Qis = np.matmul(Qi_sqrts, np.swapaxes(Qi_sqrts, -1, -2))
    Ris = np.matmul(Ri_sqrts, np.swapaxes(Ri_sqrts, -1, -2))
    AsT = np.swapaxes(As, -1, -2)
    zero_block = np.zeros((1, D, D))
    zero_vec = np.zeros((1, D))
    # Off-diagonal blocks of the block-tridiagonal precision matrix.
    J_lower_diag = -np.matmul(Qis, As)
    # Diagonal blocks: dynamics term + previous-step term + observation term.
    dynamics_term = np.concatenate([-np.matmul(AsT, J_lower_diag), zero_block])
    prev_step_term = np.concatenate([zero_block, Qis])
    J_diag = dynamics_term + prev_step_term + Ris
    # Linear term, assembled from the same three contributions.
    h_dynamics = np.concatenate(
        [np.matmul(J_lower_diag, bs[:, :, None])[:, :, 0], zero_vec])
    h_prev_step = np.concatenate(
        [zero_vec, np.matmul(Qis, bs[:, :, None])[:, :, 0]])
    h_obs = np.matmul(Ris, ms[:, :, None])[:, :, 0]
    h = h_dynamics + h_prev_step + h_obs
    return J_diag, J_lower_diag, h
def tearDownModule():
    # pylint: disable=invalid-name
    """TearDown for the whole unittests.

    - Remove all the created persisted data.
    """
    print("tearDownModule Begin.")
    cleanup_steps = (
        CF.util.clear_face_lists,
        CF.util.clear_person_groups,
        CF.util.clear_large_face_lists,
        CF.util.clear_large_person_groups,
    )
    for cleanup in cleanup_steps:
        cleanup()
    print("tearDownModule End.")
def extract_string_from_tensor(input_ids, mode="single", config=None, tokenizer=None):
    """Decode a batch of token-id tensors into text segments.

    Args:
        input_ids (Tensor): input sentences with shape [batch_size, seq_len].
        mode (str): ["pair", "single"]
            "pair" for tasks with paired inputs `<bos> A <eos> B <eos>`,
            such as summarization task, the dataset format `<bos> Article <eos> Summary <eos>`,
            reading comprehension task, the dataset format `<bos> Passage Question <eos> Answer <eos>`.
            "single" for tasks with single input `<bos> A <eos>`, such as Language Modeling, Lambada task.
        config: the configuration of GPT-2 model.
        tokenizer: the tokenizer of GPT-2 model.

    Returns:
        "pair" mode: (prompt_list, reference_list) of decoded strings.
        "single" mode: prompt_list only.

    Raises:
        NotImplementedError: if mode is neither "pair" nor "single".
    """
    batch_size = config.batch_size
    seq_length = config.seq_length
    prompt_list = [""] * batch_size
    reference_list = [""] * batch_size
    eos_text = tokenizer.eos_token
    len_eos_text = len(eos_text)
    # Convert the whole batch to numpy once; each row below is a plain
    # numpy array, not a framework tensor.
    input_ids_np = input_ids.asnumpy()
    input_ids_np = input_ids_np.reshape((batch_size, seq_length))
    if mode == "pair":
        for batch_idx in range(batch_size):
            sentence_tensor = input_ids_np[batch_idx]
            # Bug fix: the row is already a numpy array, so calling
            # .asnumpy() on it raised AttributeError.
            sentence_list = sentence_tensor.tolist()[1:]  # drop <bos>
            sentence = tokenizer.decode(sentence_list)
            prompt_start = 0
            prompt_end = sentence.find(eos_text, 0)
            reference_start = prompt_end + len_eos_text
            reference_end = sentence[reference_start:].find(
                eos_text, 0) + reference_start
            prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
            reference_list[batch_idx] = sentence[reference_start:reference_end]
        return prompt_list, reference_list
    # For single output datasets such as WikiText, etc.
    if mode == "single":
        for batch_idx in range(batch_size):
            sentence_tensor = input_ids_np[batch_idx]
            # Bug fix: same .asnumpy() issue as above.
            sentence_list = sentence_tensor.tolist()[1:]  # drop <bos>
            sentence = tokenizer.decode(sentence_list)
            prompt_start = 0
            prompt_end = sentence.find(eos_text, 0)
            prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
    else:
        raise NotImplementedError('mode:{} not supported.'.format(mode))
    return prompt_list
def schema_validation_matching(source_fields, target_fields):
    """Compare schemas between two dictionary objects"""
    results = []
    # Walk source fields first: matched, mismatched, or missing in target.
    for field_name, source_type in source_fields.items():
        if field_name not in target_fields:
            results.append(
                [
                    field_name,
                    "N/A",
                    "1",
                    "0",
                    consts.VALIDATION_STATUS_FAIL,
                    "Target doesn't have a matching field name",
                ]
            )
            continue
        target_type = target_fields[field_name]
        if source_type == target_type:
            status = consts.VALIDATION_STATUS_SUCCESS
            message = "Source_type:{} Target_type:{}".format(
                source_type, target_type
            )
        else:
            status = consts.VALIDATION_STATUS_FAIL
            message = (
                "Data type mismatch between source and target. "
                "Source_type:{} Target_type:{}".format(source_type, target_type)
            )
        results.append([field_name, field_name, "1", "1", status, message])
    # Then report target fields that have no source counterpart.
    for field_name in target_fields:
        if field_name not in source_fields:
            results.append(
                [
                    "N/A",
                    field_name,
                    "0",
                    "1",
                    consts.VALIDATION_STATUS_FAIL,
                    "Source doesn't have a matching field name",
                ]
            )
    return results
def find_thirdparty_marshaller_plugins():
    """ Find, but don't load, all third party marshaller plugins.

    Third party marshaller plugins declare the entry point
    ``'hdf5storage.marshallers.plugins'`` with the name being the
    Marshaller API version and the target being a function that returns
    a ``tuple`` or ``list`` of all the marshallers provided by that
    plugin when given the hdf5storage version (``str``) as its only
    argument.

    .. versionadded:: 0.2

    Returns
    -------
    plugins : dict
        The marshaller obtaining entry points from third party
        plugins. The keys are the Marshaller API versions (``str``) and
        the values are ``dict`` of the entry points, with the module
        names as the keys (``str``) and the values being the entry
        points (``pkg_resources.EntryPoint``).

    See Also
    --------
    supported_marshaller_api_versions
    """
    entry_points = tuple(pkg_resources.iter_entry_points(
        'hdf5storage.marshallers.plugins'))
    plugins = {}
    # Group the discovered entry points by the API version they declare.
    for version in supported_marshaller_api_versions():
        plugins[version] = {ep.module_name: ep
                            for ep in entry_points if ep.name == version}
    return plugins
def justTransportResponse(transport):
    """
    Helper function for creating a Response which uses the given transport.

    All of the other parameters to L{Response.__init__} are filled with
    arbitrary values. Only use this method if you don't care about any of
    them.
    """
    version = (b'HTTP', 1, 1)
    code, phrase = 200, b'OK'
    return Response(version, code, phrase, _boringHeaders, transport)
def get_tools_location() -> str:
    """Get the path to the Alteryx Python SDK Tools directory."""
    # Check the per-user (APPDATA) install first, then the machine-wide one.
    candidates = (
        os.path.join(os.environ["APPDATA"], "Alteryx", "Tools"),
        os.path.join(os.environ["PROGRAMDATA"], "Alteryx", "Tools"),
    )
    for candidate in candidates:
        if contains_path(__file__, candidate):
            return candidate
    raise RuntimeError("Tool is not located in Alteryx install locations.")
def test_url_raise_error(tmpdir):
    """ValueError is raised if there is no valid character to be used as filename."""
    triggers = (
        lambda: download.sanitize_url(_URL_RAISE_ERROR),
        lambda: download.maybe_download_and_extract(tmpdir, _URL_RAISE_ERROR),
    )
    for trigger in triggers:
        with pytest.raises(ValueError):
            trigger()
def object_comparator_lookup(src_obj, dst_obj):
    """
    Compare an object with another entry by entry.

    Each entry is a single-key dict; returns 1 when any destination entry
    is missing upstream (except "last-modified") or has a different value,
    0 otherwise.
    """
    dont_match = []
    no_upstream = []
    for dst_entry in dst_obj:
        dst_key = list(dst_entry.keys())[0]
        same_name = [src_entry for src_entry in src_obj
                     if list(src_entry.keys())[0] == dst_key]
        if not same_name:
            # Missing upstream; "last-modified" is expected to differ.
            if dst_key != "last-modified":
                print(dst_entry.keys(), list(dst_entry.keys())[0])
                no_upstream.append(dst_entry)
            continue
        value_matches = any(
            src_entry[list(src_entry.keys())[0]] == dst_entry[dst_key]
            for src_entry in same_name)
        if not value_matches:
            dont_match.append(dst_entry)
    return 1 if (no_upstream or dont_match) else 0
def line(
    data_frame=None,
    x=None,
    y=None,
    line_group=None,
    color=None,
    line_dash=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    line_dash_sequence=None,
    line_dash_map=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    line_shape=None,
    render_mode="auto",
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a 2D line plot, each row of `data_frame` is represented as vertex of
    a polyline mark in 2D space.
    """
    # ``locals()`` captures every keyword argument above by name, so the
    # whole argument set is forwarded to make_figure unchanged. Do not
    # introduce any local variable before this call.
    return make_figure(args=locals(), constructor=go.Scatter)
def is_primitive(v):
    """Return True when ``v`` is an int, float, bool or str instance."""
    primitive_types = (int, float, bool, str)
    return isinstance(v, primitive_types)
def linkify_only_full_urls(attrs, new=False):
    """Linkify only full links, containing the scheme."""
    if not new:
        # An existing <a> tag: leave it untouched.
        return attrs
    # Only linkify when the original text carries an explicit scheme.
    if attrs['_text'].startswith(('http:', 'https:')):
        return attrs
    return None
def update_learning_rate_rel(optimizer, cur_lr, new_lr):
    """Update the learning rate of every param group in ``optimizer``.

    Bias groups (indices 1 and 3 when DOUBLE_BIAS is on) get twice the base
    rate, backbone groups (indices <= 1) a tenth of theirs, and SGD momentum
    buffers are rescaled when the relative change exceeds the threshold.
    """
    if cur_lr == new_lr:
        return
    ratio = _get_lr_change_ratio(cur_lr, new_lr)
    if ratio > cfg.TRAIN.LOG_LR_CHANGE_THRESHOLD:
        # Bug fix: print() does not support logging-style lazy %-args; the
        # original printed the raw format string and a tuple-like tail.
        print('Changing learning rate %.6f -> %.6f' % (cur_lr, new_lr))
    # Update learning rate; different parameter groups may use different rates
    param_keys = []
    for ind, param_group in enumerate(optimizer.param_groups):
        if (ind == 1 or ind == 3) and cfg.TRAIN.DOUBLE_BIAS:  # bias params
            param_group['lr'] = new_lr * 2
        else:
            param_group['lr'] = new_lr
        if ind <= 1:  # backbone params run at a tenth of the base rate
            param_group['lr'] = 0.1 * param_group['lr']
        param_keys += param_group['params']
    if cfg.TRAIN.TYPE in ['SGD'] and cfg.TRAIN.SCALE_MOMENTUM and cur_lr > 1e-7 and \
            ratio > cfg.TRAIN.SCALE_MOMENTUM_THRESHOLD:
        _CorrectMomentum(optimizer, param_keys, new_lr / cur_lr)
def _oauth_error(handler, error_msg, error):
"""Set expected status and error formatting for Oauth2 style error
Parameters
----------
error_msg : str
Human parsable error message
error : str
Oauth2 controlled vocab error
Returns
-------
Writes out Oauth2 formatted error JSON of
{error: error,
error_description: error_msg}
Notes
-----
Expects handler to be a tornado RequestHandler or subclass
"""
handler.set_status(400)
handler.write({'error': error,
'error_description': error_msg})
handler.finish() | 27,716 |
def brain_viz_nodes(nodes, title, method, node_size):
    """Render the given nodes on a brain plot via nilearn-style plotting.

    Node sizes are z-scored, then rescaled for the chosen backend:
    '2d' draws a connectome figure, '3d' opens interactive markers in the
    browser. Assumes the module-level `coordinates` array is indexable by
    `nodes` -- TODO confirm its shape against the caller.
    """
    global coordinates
    selected_coords = coordinates[nodes, :]
    sizes = np.array(node_size).astype('float32')
    # Standardize sizes (guard against zero variance).
    sizes -= np.mean(sizes)
    spread = np.std(sizes)
    if spread != 0:
        sizes /= spread
    if method == '2d':
        print(title)
        sizes = sizes * 15.0 + 36
        view = plotting.plot_connectome(
            np.zeros((len(nodes), len(nodes))),
            selected_coords, edge_threshold=0, node_size=sizes,
            node_color='red', axes=(0, 0, 3, 3))
        view
    elif method == '3d':
        sizes = sizes * 5.0 + 10
        view = plotting.view_markers(
            selected_coords, title=title, marker_size=sizes)
        view.open_in_browser()
        view
    return
def test_captchasolution_obj_get_string(captcha_class):
"""
Checks get_type() function of CaptchaSolution class.
"""
solution_class = captcha_class.get_solution_class()
fields_count = len(solution_class.__dataclass_fields__)
field_values = [f'field{n}' for n in range(fields_count)]
solution_obj = solution_class(*field_values)
assert str(solution_obj) == '\n'.join(field_values) | 27,718 |
def retrieve_model_list(opt):
    """
    retrive the model information from form a directory.
    :param opt: parser
    :return: list of Checkpoint object.
    """
    files = os.listdir(os.path.join(opt.dir, 'models'))
    files.sort()
    # file name format "address/$NAME_acc_XX.YY_ppl_XX.YY_eZZ.pt"
    valid_address = []
    for address in files:
        name = os.path.basename(address)
        # The fixed suffix contributes 5 underscores; `step` shifts the
        # split indices when $NAME itself contains underscores.
        step = calc_us_in_name(name) - 5
        if valid_model_name(name, opt, step=step):
            lst = name.strip().split('_')
            valid_address.append(
                Checkpoints(
                    os.path.join(
                        os.path.join(opt.dir, 'models'),
                        address
                    ),
                    # lst[0+step]: model name; lst[2+step]: accuracy;
                    # lst[4+step]: perplexity; lst[5+step]: epoch token.
                    str(lst[0+step]),
                    float(lst[2+step]),
                    float(lst[4+step]),
                    Checkpoints.str2epoch(lst[5+step])
                )
            )
    try:
        # At least one checkpoint must match the requested name/options.
        assert len(valid_address) != 0
    except AssertionError as e:
        print("{0}\nNo valid model found in {1} with name={2}."
              .format(e, opt.dir, opt.name))
        raise
    return valid_address
def plotTopFiveContributors():
    """Plot a pie chart of the five biggest contributors by commit count
    and save it as /static/image/TopFiveContributors.png.
    """
    authorDict = getAuthorCommitDict()
    # Sort authors by commit count, descending.
    sortedList = Counter(authorDict).most_common()
    size, labels = selectTopFive(sortedList)
    # Small offset so the five wedges are visually separated.
    explode = [0.02, 0.02, 0.02, 0.02, 0.02]
    plt.pie(size, labels=labels, explode=explode)
    plt.title('The top five contributors for PyTorch')
    # Renamed from `dir`, which shadowed the builtin of the same name.
    output_path = os.path.join(os.getcwd(), "static", "image", "TopFiveContributors.png")
    plt.savefig(output_path, format='png', dpi=300)
def _read_filenames_in_dir(path, extension):
"""Returns the name of the Yaml files in a certain directory
Arguments
---------
path: str
Path to directory
extension: str
Extension of files (such as: '.yml' or '.csv')
Returns
-------
list
The list of files in `path` with extension
"""
files = []
for filename in os.listdir(path):
if filename.endswith(extension):
files.append(os.path.splitext(filename)[0])
return files | 27,721 |
def setup_cccc_tool_plugin(use_plugin_context=True, binary=None, cccc_config=None):
    """Create an instance of the CCCC plugin."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--cccc-bin", dest="cccc_bin")
    parser.add_argument("--cccc-config", dest="cccc_config")
    plugin_dir = os.path.join(os.path.dirname(statick_tool.__file__), "plugins")
    resources = Resources([plugin_dir])
    config = Config(resources.get_file("config.yaml"))
    plugin = CCCCToolPlugin()
    context = PluginContext(parser.parse_args([]), resources, config)
    context.args.output_directory = os.path.dirname(__file__)
    # Optional overrides supplied by the individual tests.
    if binary:
        context.args.cccc_bin = binary
    if cccc_config:
        context.args.cccc_config = cccc_config
    if use_plugin_context:
        plugin.set_plugin_context(context)
    return plugin
def submit_script_to_scheduler(
    script: str,
    proc_type: int,
    queue_folder: str,
    sim_dir: str,
    run_name: str,
    target_machine: str = None,
    logger: Logger = get_basic_logger(),
):
    """
    Submits the slurm script and updates the management db.
    Calling the scheduler submitter may result in an error being raised.
    This is not caught in order to get immediate attention of broken runs.
    :param sim_dir: The simulation directory the script runs in
    :param script: The location of the script to be run
    :param proc_type: The process type of the job being run
    :param queue_folder: Where the folder for database updates is
    :param run_name: The name of the realisation
    :param target_machine: The machine the job is submitted to (scheduler
        default when None)
    :param logger: Logger for progress messages
    :return: None; the job id is recorded in the update queue
    """
    # NOTE(review): the `logger` default is evaluated once at definition
    # time, so every call shares one get_basic_logger() instance -- confirm
    # this is intended.
    job_id = Scheduler.get_scheduler().submit_job(sim_dir, script, target_machine)
    # Record the queued job so the management db picks it up.
    add_to_queue(
        queue_folder,
        run_name,
        proc_type,
        const.Status.queued.value,
        job_id=job_id,
        logger=logger,
    )
def _valid_dir(path, description):
    """ Check if the path is a valid directory.

    @param path: Path which should be checked.
    @type path: str
    @param description: Description which is used for the error
        message if necessary.
    @type description: str
    @raise: ValueError, if path is not valid.
    """
    _path_exists(path, description)
    if os.path.isdir(path):
        return
    raise ValueError('{0} is not directory.'.format(description))
def Precedence(op):
    """The numeric precedence of a binary operator."""
    # Particularly convenient during layout of binary operators.
    total = 0
    for rank, group in enumerate(precedence):
        if op in group[1:]:
            total += rank
    return float(total) / len(precedence)
def _FeastToExampleTransform(
    pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str
) -> beam.pvalue.PCollection:
    """Read from BigQuery and transform to TF examples.

    Args:
        pipeline: beam pipeline.
        exec_properties: A dict of execution properties.
        split_pattern: Split.pattern in Input config, a BigQuery sql string.

    Returns:
        PCollection of TF examples.

    Raises:
        NotImplementedError: for non-BigQuery retrieval jobs or output
            formats other than tf.Example / tf.SequenceExample.
    """
    # Load custom config dictionary
    custom_config = _load_custom_config(exec_properties["custom_config"])
    # Get Feast retrieval job
    retrieval_job = _get_retrieval_job(
        entity_query=split_pattern, custom_config=custom_config
    )
    # Setup datasource and converter.
    if isinstance(retrieval_job, BigQueryRetrievalJob):
        query = retrieval_job.to_sql()
        # Internally Beam creates a temporary table and exports from the query.
        datasource = utils.ReadFromBigQuery(query=query)
        converter = converters._BigQueryConverter(
            query, _get_gcp_project(exec_properties)
        )
    else:
        raise NotImplementedError(
            f"Support for {type(retrieval_job)} is not available yet. For now we only support BigQuery source."
        )
    # Setup converter from dictionary of str -> value to bytes
    map_function = None
    out_format = exec_properties.get(
        "output_data_format", example_gen_pb2.FORMAT_TF_EXAMPLE
    )
    if out_format == example_gen_pb2.FORMAT_TF_EXAMPLE:
        map_function = converter.RowToExampleBytes
    elif out_format == example_gen_pb2.FORMAT_TF_SEQUENCE_EXAMPLE:
        map_function = converter.RowToSequenceExampleBytes
    else:
        raise NotImplementedError(
            f"Format {out_format} is not currently supported. Currently we only support tfexample"
        )
    # Setup pipeline: pull rows from BigQuery, then serialize each row.
    return (
        pipeline
        | "DataRetrieval" >> datasource
        | f"To{out_format.capitalize()}Bytes" >> beam.Map(map_function)
    )
def fluxional_mode(atom_indices, span=360.0):
    """ Writes the string for each fluxional mode
    """
    # Render the Mako template for a Monte Carlo fluxional-mode section,
    # formatting the atom indices string first.
    template_keys = {
        'atom_indices': util.format_flux_mode_indices(atom_indices),
        'span': span,
    }
    template_file_path = os.path.join(MONTE_CARLO_PATH, 'fluxional_mode.mako')
    return Template(filename=template_file_path).render(**template_keys)
def test_create_geom_init_and_pass_vars():
    """init with an object and pass vars - should raise warning"""
    import warnings
    g = vol.IsoSurfGeom(data='fake_data', dbname='fake_db')
    # pytest.warns(None) is deprecated and removed in pytest 7; record
    # the emitted warnings with the stdlib instead and count them.
    with warnings.catch_warnings(record=True) as warn_info:
        warnings.simplefilter("always")
        g.create_geometry(data=dataname, dbname=exp_db)
    assert(len(warn_info) == 2)  # initial warning, plus data and db variables
    assert(g.data == dataname)
    assert(g.db == exp_db)
    assert(g.levels == exp_levels)
    assert(isfile(exp_db + '/isogeom.h5m'))
    remove(exp_db + '/isogeom.h5m')
def reply_handler(bot, update):
    """Reply message."""
    """check with from id"""
    # Module-level timestamp used to rate-limit replies to one per 30 min.
    global last_reply_time
    username = update.message.from_user.username
    text = update.message.text
    logger.info(text)
    logger.info(username)
    # Only respond to this one specific user.
    if username == 'cming_ou':
        now = datetime.datetime.now()
        diff = now - last_reply_time
        logger.info(repr(diff))
        logger.info(repr(diff > datetime.timedelta(minutes=30)))
        if diff > datetime.timedelta(minutes=30):
            logger.info("reply")
            last_reply_time = now
            # Days remaining until the hard-coded date 2021-03-04.
            remain_time = datetime.datetime(2021, 3, 4) - now
            #update.message.reply_text("距離Jimmy退伍還有{}天".format('XX'))
            update.message.reply_text("距離Jimmy退伍還有{}天".format(remain_time.days))
def parse_host_info(qhost_tree, queues_tree, queues_to_ignore=None):
    """Parse qhost/qstat XML trees into per-host information.

    :param qhost_tree: ElementTree of qhost output (per-host values).
    :param queues_tree: ElementTree of queue-instance output.
    :param queues_to_ignore: optional list of queue names to skip.
    :return: dictionary key: host, value HostInfo
    :raises Exception: when two queues report incompatible states for a host.
    """
    # Bug fix: the original used a mutable default argument ([]).
    if queues_to_ignore is None:
        queues_to_ignore = []
    dctRet = {}
    for host_node in qhost_tree.findall('host'):
        host_name = host_node.get('name')
        dct_hostvalues = dict([(hostvalue_node.get('name'), hostvalue_node.text)
                               for hostvalue_node in host_node.findall('hostvalue')])
        if dct_hostvalues['num_proc'] != '-':
            slots = int(dct_hostvalues['num_proc'])
            slots_used = sum([int(slots_used_node.text)
                              for slots_used_node in host_node.findall(".//queuevalue[@name='slots_used']")])
            memory = dehumanize_memory(dct_hostvalues['mem_total'])
            mem_used = 0 if dct_hostvalues['mem_used'] == '-' else dehumanize_memory(dct_hostvalues['mem_used'])
            dctRet[host_name] = HostInfo(host=host_name, slots=slots, memory=memory, state=None,
                                         slots_used=slots_used, mem_used=mem_used, queues=set())
        else:
            # Host reported no processor count: record a placeholder entry.
            dctRet[host_name] = HostInfo(host=host_name, slots=None, memory=None, state=None,
                                         slots_used=None, mem_used=None, queues=set())
    for queue_info in queues_tree.findall('*/Queue-List'):
        state = queue_info.findtext('state')
        if state is None:
            state = ''
        # Ignore suspended state
        state = re.sub('s', '', state)
        # Ignore configuration ambiguous state
        state = re.sub('c', '', state)
        # If disabled, ignore other state flags, because they can vary between queues on a host
        if 'd' in state:
            state = 'd'
        queue = queue_info.findtext('name')
        queue_name, host = queue.split('@', 1)
        if queue_name in queues_to_ignore:
            continue
        host_info = dctRet.get(host)
        # Bug fix: check for unknown hosts *before* touching host_info;
        # previously host_info.queues.add() raised AttributeError first,
        # making the logging branch below unreachable.
        if host_info is None:
            logging.log_message(host + " found in qstat but not qhost")
            continue
        host_info.queues.add(queue_name)
        if len(state) > 0:
            if host_info.state is None:
                dctRet[host] = host_info._replace(state=state)
            elif not is_host_state_compatible(host_info.state, state):
                raise Exception("Conflicting states for %s: %s != %s" % (host, host_info.state, state))
    return dctRet
def select(pTable, purged=False, proxyService=None):
    """
    for all OBJECT cells select the candidate whose value is most similar to the original one
    ties are broken by popularity, i.e. the entity with the most incoming links
    can be executed either on the remaining candidates or the purged candidates

    :param pTable: table object exposing getCells() and an audit recorder
    :param purged: when True, operate on 'purged_cand' and collapse 'cand'
        to the selected candidate
    :param proxyService: service forwarded to get_most_similar for lookups
    """
    # get all object cells
    cells = [cell for cell in pTable.getCells(unsolved=True, onlyObj=True)]
    # [Audit] How many cells should be solved a.k.a must have sel_cand
    target_cells_cnt = len(cells)
    # [Audit] unsolved cells a.k. cells with no sel_cand
    remaining = []
    # [Audit] How many cells with modified sel_cand by this method
    solved_cnt = 0
    # get the selected candidate
    for cell in cells:
        # select the candidates to consider
        if purged:
            cands = cell['purged_cand']
        else:
            cands = cell['cand']
        # skip cells without candidates
        if not cands:
            # [Audit] cells with no candidates are still remaining!
            remaining.extend([cell])
            continue
        # if there is only one candidate, we select that one
        if len(cands) == 1:
            cell['sel_cand'] = cands[0]
            # [Audit] special case solution
            solved_cnt = solved_cnt + 1
            if purged:
                cell['cand'] = [cell['sel_cand']]
            continue
        # for all others check the string similarity
        best_match = get_most_similar(cands, cell['value'], proxyService)
        # add match to candidate
        cell['sel_cand'] = best_match
        # [Audit] if change detected then count as solved otherwise, add to remaining
        solved_cnt = solved_cnt + 1
        if purged:
            cell['cand'] = [cell['sel_cand']]
    # [Audit] calculate remaining cnt
    remaining_cnt = target_cells_cnt - solved_cnt
    # [Audit] get important keys only
    remaining = [pTable.audit.getSubDict(cell, ['value', 'clean_val', 'row_id', 'col_id'])
                 for cell in remaining]
    # [Audit] add audit record
    pTable.audit.addRecord(tasks.CEA, steps.selection,
                           methods.stringSimilarity, solved_cnt, remaining_cnt, remaining)
def download_artifacts(file_name, file_type, file_extension, file_url):
    """
    get run artifacts

    Downloads the file at ``file_url`` into "<RUN_NAME> artifacts" under
    the current working directory, creating the directory if necessary.

    Arguments:
        file_name (str): file name (without extension)
        file_type (str): file type (used only in the log message)
        file_extension (str): file extension (without the dot)
        file_url (str): file url
    """
    work_directory = os.getcwd()
    artifacts_directory = work_directory + "/" + RUN_NAME + " artifacts"
    if not os.path.exists(artifacts_directory):
        try:
            os.mkdir(artifacts_directory)
            print('Artifacts directory created - {} '.format(
                artifacts_directory
            )
            )
        except OSError as exc:
            # A concurrent creation (EEXIST) is fine; re-raise anything else.
            if exc.errno != errno.EEXIST:
                raise
    target_file_name = artifacts_directory + "/" + file_name + "." + file_extension
    server_response = requests.request("GET", file_url)
    # Bug fix: the explicit close() inside the with-block was redundant;
    # the context manager already closes the file.
    with open(target_file_name, "wb") as target_file:
        target_file.write(server_response.content)
    print('File type - {} with name - {} is downloaded successfully'.format(
        file_type,
        target_file_name
    )
    )
def marc2rf_researcherFormat(marc_path, request_path, output_folder, options, debug=False):
    """Convert MARC records to Researcher Format.

    :rtype: object
    :param marc_path: Path to file of MARC records.
    :param request_path: Path to Outlook message containing details of the request.
    :param output_folder: Folder to save Researcher Format output files.
    :param options: Options to set default transformation parameters.
    :param debug: Display additional output to assist with debugging.
    """
    converter = Converter(marc_path, request_path, output_folder, options, debug)
    if debug:
        print('Converting MARC records with the following parameters:')
        for label, value in (('marc_path', marc_path),
                             ('request_path', request_path),
                             ('output_folder', output_folder),
                             ('options', options)):
            print('{}: {}'.format(label, str(value)))
    converter.marc2rf_researcherFormat()
def _get_param_type_from_str(
    type_name: str = None,
    param_doc: docstring_parser.DocstringParam = None,
) -> t.Tuple[_ParamArgs, t.Union[click.ParamType, None]]:
    """Guess parameter type from parameter type name."""
    name = type_name or ""
    description = param_doc.description if param_doc else ""
    if name == "int":
        return _ParamArgs.single, int
    if name == "float":
        return _ParamArgs.single, float
    if name == "bytes":
        return _ParamArgs.single, bytes
    if name == "bool":
        # Booleans become flags rather than valued options.
        return _ParamArgs.flag, None
    if name.startswith("list"):
        # Recurse on the element type, e.g. "list[int]" -> "int".
        args, element = _get_param_type_from_str(name[5:-1], param_doc)
        assert args is _ParamArgs.single
        return _ParamArgs.multiple, element
    if name.startswith("tuple"):
        members = (_get_param_type_from_str(part)[1]
                   for part in name[6:-1].split(", "))
        return _ParamArgs.single, click.Tuple(members)
    if name == "io.FileIO":
        return _ParamArgs.single, _build_file_param_type(description)
    if name == "pathlib.Path":
        return _ParamArgs.single, _build_path_param_type(description)
    if name == "datetime.datetime":
        return _ParamArgs.single, click.DateTime()
    if name == "uuid.UUID":
        return _ParamArgs.single, click.UUID
    logger.warning("Cannot guess parameter type from name: %s", type_name)
    return _ParamArgs.single, None
def solve(task: str) -> int:
    """How many differently colored bags can contain shiny gold?"""
    parents = process_data(task)
    # Breadth-first walk over the "contained-by" relation, tracking every
    # distinct ancestor color of "shiny gold".
    seen = set()
    frontier = list(parents["shiny gold"])
    while frontier:
        color = frontier.pop()
        if color in seen:
            continue
        seen.add(color)
        frontier.extend(parents[color])
    return len(seen)
def create_users_table():
    """Creates the users table."""
    con, cur = create_con()
    ddl = ('CREATE TABLE IF NOT EXISTS users('
           'username TEXT PRIMARY KEY,'
           'password TEXT NOT NULL,'
           'role TEXT NOT NULL);')
    cur.execute(ddl)
    con.commit()
    # Release the cursor before the connection.
    cur.close()
    con.close()
def cityDesc(codePostal):
    """Look up the cinemas of a city (by postal code) via the Allocine API.

    Return codes:
        100 : everything went fine
        200 : the request did not succeed
        300 : no cinema in the city
        400 : the city does not exist

    :return: (listeCine, codeRetour) where listeCine is the list of theater
        dicts from the API response.
    """
    headersUA = init_connect()
    YMDstr = getDate()
    searchField = codePostal
    filterField = ''
    countField = '500'
    pageField = '1'
    url = 'q=' + searchField + '&filter=' + filterField + '&count=' + countField + '&page=' + pageField + '&format=json&partner=' + allocine_partner + '&sed=' + YMDstr
    # The API requires a SHA1-based signature of the query string.
    toEncrypt = allocine_secret_key + url
    sig = urllib.parse.quote_plus(base64.b64encode(hashlib.sha1(toEncrypt.encode('utf-8')).digest()))
    urlComplete = 'http://api.allocine.fr/rest/v3/search?' + url + "&sig=" + sig
    codeRetour = 200
    listeCine = []
    try:
        req = requests.get(urlComplete, headers=headersUA)
    except requests.RequestException:
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only network-level failures should fall back here.
        return listeCine, codeRetour
    if req.status_code == 200:
        codeRetour = 100
        if 'location' in req.json()['feed']:
            if 'theater' in req.json()['feed']:
                for theaterCity in req.json()['feed']['theater']:
                    listeCine.append(theaterCity)
            else:
                codeRetour = 300
        else:
            codeRetour = 400
    return listeCine, codeRetour
async def test_atomic_update(client: Client) -> None:
    """Atomically updating a float value"""
    record = await client.types.create({'id': 1, 'float_': 1})
    assert record.float_ == 1
    # Each step applies one atomic operation and checks the resulting value;
    # the steps are order-dependent (each builds on the previous result).
    steps = [
        ({'increment': 5}, 6),
        ({'set': 20}, 20),
        ({'decrement': 5}, 15),
        ({'multiply': 2}, 30),
        ({'divide': 3}, 10),
    ]
    for operation, expected in steps:
        result = await client.types.update(
            where={
                'id': 1,
            },
            data={
                'float_': operation,
            },
        )
        assert result is not None
        assert result.float_ == expected
async def get_reposet(request: AthenianWebRequest, id: int) -> web.Response:
    """List a repository set.
    :param id: Numeric identifier of the repository set to list.
    :type id: repository set ID.
    """
    columns = [
        RepositorySet.name,
        RepositorySet.items,
        RepositorySet.precomputed,
        RepositorySet.tracking_re,
    ]
    reposet, _ = await fetch_reposet(id, columns, request.uid, request.sdb, request.cache)
    model = RepositorySetWithName(
        name=reposet.name,
        items=reposet.items,
        precomputed=reposet.precomputed,
    )
    return model_response(model)
def test_delete(cursor, run):
    """Verify that delete emits the expected SQL template and bound query."""
    row = MockTable(the_key=100, name='foo')
    run(row.delete, cursor)
    # The cursor records both the parameterized statement and the statement
    # after parameter substitution.
    expected_template = "DELETE FROM 'tester' WHERE 'the_key'=%s"
    expected_bound = "DELETE FROM 'tester' WHERE 'the_key'=100"
    assert cursor.query == expected_template
    assert cursor.query_after == expected_bound
def _train(params: Dict,
           dtrain: RayDMatrix,
           model_factory: Type[LGBMModel],
           boost_rounds_left: int,
           *args,
           evals=(),
           ray_params: RayParams,
           cpus_per_actor: int,
           gpus_per_actor: int,
           _training_state: _TrainingState,
           machine_addresses: Optional[List[Tuple[str, str]]] = None,
           listen_port: Optional[int] = None,
           **kwargs) -> Tuple[LGBMModel, Dict, Dict]:
    """This is the local train function wrapped by :func:`train() <train>`.
    This function can be thought of one invocation of a multi-actor lightgbm
    training run. It starts the required number of actors, triggers data
    loading, collects the results, and handles (i.e. registers) actor failures
    - but it does not handle fault tolerance or general training setup.
    Generally, this function is called one or multiple times by the
    :func:`train() <train>` function. It is called exactly once if no
    errors occur. It is called more than once if errors occurred (e.g. an
    actor died) and failure handling is enabled.
    """
    from xgboost_ray.elastic import _maybe_schedule_new_actors, \
        _update_scheduled_actor_states, _get_actor_alive_status
    # Un-schedule possible scheduled restarts
    _training_state.restart_training_at = None
    params = deepcopy(params)
    if "n_jobs" in params:
        if params["n_jobs"] > cpus_per_actor:
            raise ValueError(
                "Specified number of threads greater than number of CPUs. "
                "\nFIX THIS by passing a lower value for the `n_jobs` "
                "parameter or a higher number for `cpus_per_actor`.")
    else:
        params["n_jobs"] = cpus_per_actor
    _check_cpus_per_actor_at_least_2(
        params["n_jobs"], getattr(ray_params, "allow_less_than_two_cpus",
                                  False))
    # This is a callback that handles actor failures.
    # We identify the rank of the failed actor, add this to a set of
    # failed actors (which we might want to restart later), and set its
    # entry in the actor list to None.
    def handle_actor_failure(actor_id):
        rank = _training_state.actors.index(actor_id)
        _training_state.failed_actor_ranks.add(rank)
        _training_state.actors[rank] = None
    # Here we create new actors. In the first invocation of _train(), this
    # will be all actors. In future invocations, this may be less than
    # the num_actors setting, depending on the failure mode.
    newly_created = 0
    for i in list(_training_state.failed_actor_ranks):
        if _training_state.actors[i] is not None:
            raise RuntimeError(
                f"Trying to create actor with rank {i}, but it already "
                f"exists.")
        ip = None
        port = None
        if machine_addresses:
            ip = machine_addresses[i][0]
            port = machine_addresses[i][1]
        elif listen_port:
            port = listen_port
        actor = _create_actor(
            rank=i,
            num_actors=ray_params.num_actors,
            model_factory=model_factory,
            num_cpus_per_actor=cpus_per_actor,
            num_gpus_per_actor=gpus_per_actor,
            resources_per_actor=ray_params.resources_per_actor,
            placement_group=_training_state.placement_group,
            queue=_training_state.queue,
            checkpoint_frequency=ray_params.checkpoint_frequency,
            distributed_callbacks=ray_params.distributed_callbacks,
            ip=ip,
            port=port)
        # Set actor entry in our list
        _training_state.actors[i] = actor
        # Remove from this set so it is not created again
        _training_state.failed_actor_ranks.remove(i)
        newly_created += 1
    alive_actors = sum(1 for a in _training_state.actors if a is not None)
    logger.info(f"[RayLightGBM] Created {newly_created} new actors "
                f"({alive_actors} total actors). Waiting until actors "
                f"are ready for training.")
    # For distributed datasets (e.g. Modin), this will initialize
    # (and fix) the assignment of data shards to actor ranks
    dtrain.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
    dtrain.assign_shards_to_actors(_training_state.actors)
    for deval, _ in evals:
        deval.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
        deval.assign_shards_to_actors(_training_state.actors)
    # NOTE: renamed the loop variable to avoid shadowing the builtin `eval`.
    load_data = [dtrain] + [eval_set[0] for eval_set in evals]
    prepare_actor_tasks = [
        _PrepareActorTask(
            actor,
            # Maybe we got a new Queue actor, so send it to all actors.
            queue=_training_state.queue,
            # Maybe we got a new Event actor, so send it to all actors.
            stop_event=_training_state.stop_event,
            # Trigger data loading
            load_data=load_data) for actor in _training_state.actors
        if actor is not None
    ]
    start_wait = time.time()
    last_status = start_wait
    try:
        # Construct list before calling any() to force evaluation
        ready_states = [task.is_ready() for task in prepare_actor_tasks]
        while not all(ready_states):
            if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
                wait_time = time.time() - start_wait
                logger.info(f"Waiting until actors are ready "
                            f"({wait_time:.0f} seconds passed).")
                last_status = time.time()
            time.sleep(0.1)
            ready_states = [task.is_ready() for task in prepare_actor_tasks]
    except Exception as exc:
        _training_state.stop_event.set()
        _get_actor_alive_status(_training_state.actors, handle_actor_failure)
        raise RayActorError from exc
    logger.info("[RayLightGBM] Starting LightGBM training.")
    # # Start Rabit tracker for gradient sharing
    # rabit_process, env = _start_rabit_tracker(alive_actors)
    # rabit_args = [("%s=%s" % item).encode() for item in env.items()]
    # Load checkpoint if we have one. In that case we need to adjust the
    # number of training rounds.
    if _training_state.checkpoint.value:
        booster = Booster(
            model_str=pickle.loads(_training_state.checkpoint.value))
        kwargs["init_model"] = booster
        if _training_state.checkpoint.iteration == -1:
            # -1 means training already finished.
            # (Fixed: the concatenated message was missing separator spaces.)
            logger.error(
                "Trying to load continue from checkpoint, but the checkpoint "
                "indicates training already finished. Returning last "
                "checkpointed model instead.")
            return kwargs["init_model"], {}, _training_state.additional_results
    # The callback_returns dict contains actor-rank indexed lists of
    # results obtained through the `put_queue` function, usually
    # sent via callbacks.
    callback_returns = _training_state.additional_results.get(
        "callback_returns")
    if callback_returns is None:
        callback_returns = [list() for _ in range(len(_training_state.actors))]
        _training_state.additional_results[
            "callback_returns"] = callback_returns
    _training_state.training_started_at = time.time()
    # Trigger the train function
    live_actors = [
        actor for actor in _training_state.actors if actor is not None
    ]
    # LightGBM specific: handle actor addresses
    # if neither local_listening_port nor machines are set
    # get the ips and a random port from the actors, and then
    # assign them back so the lgbm params are updated.
    # do this in a loop to ensure that if there is a port
    # confilict, it can try and choose a new one. Most of the times
    # it will complete in one iteration
    machines = None
    attempt = 0
    # NOTE: the loop now tracks its own attempt counter. Previously the
    # success log below read the stale `i` from the actor-creation loop
    # (NameError when no actors were recreated in this invocation).
    for attempt in range(1, 6):
        addresses = ray.get(
            [actor.find_free_address.remote() for actor in live_actors])
        if addresses:
            _, ports = zip(*addresses)
            ports = list(ports)
            machine_addresses_new = [f"{ip}:{port}" for ip, port in addresses]
            if len(machine_addresses_new) == len(set(machine_addresses_new)):
                machines = ",".join(machine_addresses_new)
                break
            if machine_addresses:
                raise ValueError(
                    "Machine addresses contains non-unique entries.")
            else:
                logger.debug("Couldn't obtain unique addresses, trying again.")
    if machines:
        logger.debug(f"Obtained unique addresses in {attempt} attempts.")
    else:
        raise ValueError(
            f"Couldn't obtain enough unique addresses for {len(live_actors)}."
            " Try reducing the number of actors.")
    for i, actor in enumerate(live_actors):
        actor.set_network_params.remote(machines, ports[i], len(live_actors),
                                        params.get("time_out", 120))
    training_futures = [
        actor.train.remote(
            i == 0,  # return_bst
            params,
            dtrain,
            evals,
            boost_rounds_left,
            *args,
            **kwargs) for i, actor in enumerate(live_actors)
    ]
    # Failure handling loop. Here we wait until all training tasks finished.
    # If a training task fails, we stop training on the remaining actors,
    # check which ones are still alive, and raise the error.
    # The train() wrapper function will then handle the error.
    start_wait = time.time()
    last_status = start_wait
    try:
        not_ready = training_futures
        while not_ready:
            if _training_state.queue:
                _handle_queue(
                    queue=_training_state.queue,
                    checkpoint=_training_state.checkpoint,
                    callback_returns=callback_returns)
            if ray_params.elastic_training \
                    and not ELASTIC_RESTART_DISABLED:
                _maybe_schedule_new_actors(
                    training_state=_training_state,
                    num_cpus_per_actor=cpus_per_actor,
                    num_gpus_per_actor=gpus_per_actor,
                    resources_per_actor=ray_params.resources_per_actor,
                    ray_params=ray_params,
                    load_data=load_data)
                # This may raise RayXGBoostActorAvailable
                _update_scheduled_actor_states(_training_state)
            if time.time() >= last_status + ENV.STATUS_FREQUENCY_S:
                wait_time = time.time() - start_wait
                logger.info(f"Training in progress "
                            f"({wait_time:.0f} seconds since last restart).")
                last_status = time.time()
            ready, not_ready = ray.wait(
                not_ready, num_returns=len(not_ready), timeout=1)
            ray.get(ready)
        # Get items from queue one last time
        if _training_state.queue:
            _handle_queue(
                queue=_training_state.queue,
                checkpoint=_training_state.checkpoint,
                callback_returns=callback_returns)
    # The inner loop should catch all exceptions
    except Exception as exc:
        logger.debug(f"Caught exception in training loop: {exc}")
        # Stop all other actors from training
        _training_state.stop_event.set()
        # Check which actors are still alive
        _get_actor_alive_status(_training_state.actors, handle_actor_failure)
        raise RayActorError from exc
    # Training is now complete.
    # # Stop Rabit tracking process
    # _stop_rabit_tracker(rabit_process)
    # Get all results from all actors.
    all_results: List[Dict[str, Any]] = ray.get(training_futures)
    # All results should be the same. But only
    # the first one actually returns its bst object.
    bst: LGBMModel = all_results[0]["bst"]
    evals_result = all_results[0]["evals_result"]
    if not listen_port:
        for param in _ConfigAliases.get("local_listen_port"):
            bst._other_params.pop(param, None)
    if not machine_addresses:
        for param in _ConfigAliases.get("machines"):
            bst._other_params.pop(param, None)
    for param in _ConfigAliases.get("num_machines", "time_out"):
        bst._other_params.pop(param, None)
    if callback_returns:
        _training_state.additional_results[
            "callback_returns"] = callback_returns
    total_n = sum(res["train_n"] or 0 for res in all_results)
    _training_state.additional_results["total_n"] = total_n
    return bst, evals_result, _training_state.additional_results
def web_entity_detection(inputpath, outputpath):
    """Run Google web-entity detection on every image under ``inputpath``.

    Mirrors the one-level folder structure of ``inputpath`` into
    ``outputpath`` and stores each API response as a ``.json`` file named
    after the source image.

    :param inputpath: directory containing one sub-folder per image group
    :param outputpath: directory where per-folder JSON responses are written
    """
    for folder in os.listdir(inputpath):
        # get the absolute path of input
        folder_in = os.path.join(inputpath, folder)
        # get the absolute path of output, creating it if needed
        folder_out = os.path.join(outputpath, folder)
        os.makedirs(folder_out, exist_ok=True)
        # iterate over all the images in input
        for image in os.listdir(folder_in):
            img = os.path.join(folder_in, image)
            # make google api call
            resp = send_to_google(inputfile=img, filetype='web')
            # save response next to the mirrored folder, as <stem>.json
            filename = os.path.splitext(image)[0] + '.json'
            fileout = os.path.join(folder_out, filename)
            with open(fileout, 'w') as f:
                json.dump(resp, f)
def is_called_at_module_level() -> bool:
    """
    Check if the current function is being called at the module level.
    Raise `RuntimeError` if `is_called_at_module_level()` is not called in a function.
    """
    caller_frame = getcallerframe().f_back
    if not caller_frame:
        raise RuntimeError(
            "is_called_at_module_level() expects to be called in a function"
        )
    # Python offers no reliable, officially-supported way to tell whether a
    # function was invoked from module level, so this is a best-effort
    # heuristic: module-level code runs in a frame whose code object is
    # named "<module>".
    #
    # It can be fooled by exotic reflection tricks (e.g. rewriting
    # func.__code__ with co_name="<module>"), but such cases are rare
    # enough that the heuristic is acceptable in practice.
    return caller_frame.f_code.co_name == "<module>"
def raise_for_failed_build(module_build_ids):
    """
    Raises an exception if any module build from `module_build_ids` list is in failed state.
    This function also calls "failed" handler before raises an exception.
    :param list module_build_ids: List of module build IDs (int) to build locally.
    """
    failed_state = models.BUILD_STATES["failed"]
    query = db_session.query(models.ModuleBuild).filter(
        models.ModuleBuild.id.in_(module_build_ids))
    any_failed = False
    for build in query.all():
        if build.state == failed_state:
            # Run the "failed" handler for every failed build before raising.
            modules_failed_handler("fake_msg_id", build.id, "failed")
            any_failed = True
    if any_failed:
        raise ValueError("Local module build failed.")
def extract_dominant_keypoints2D(keypoint_2D, dominant_hand):
    """Select the dominant hand's 2D keypoints.

    # Arguments
        keypoint_2D: Numpy array of shape (num_keypoints, 1).
        dominant_hand: List of size (2) with booleans.

    # Returns
        keypoint_visibility_2D_21: Numpy array of shape (num_keypoints, 1).
    """
    # Slice out the left- and right-hand keypoint ranges, then pick
    # element-wise according to the dominant-hand mask.
    left_hand = keypoint_2D[:LEFT_PINKY_TIP, :]
    right_hand = keypoint_2D[RIGHT_WRIST:RIGHT_PINKY_TIP, :]
    return np.where(dominant_hand[:, :2], left_hand, right_hand)
def NN_regressor(X_train, Y_train, X_valid, Y_valid):
    """
    Trains a neural network to perform regression on the dataset. Makes predictions
    on the validation set, and prints out the validation targets/predictions and
    the neural network's validation loss.
    Input:
        X_train, Y_train, X_valid, Y_valid - NumPy arrays representing the
        training and validation sets.
    Output:
        None - prints out validation loss of the NN regression.
    """
    clf = MLPRegressor(hidden_layer_sizes=(32, 64, 128, 64, 32), learning_rate_init=0.0008, max_iter=400, verbose=True).fit(X_train, Y_train)
    predictions = clf.predict(X_valid)
    # Side-by-side comparison of ground truth and predictions.
    print(np.concatenate((Y_valid.reshape(Y_valid.shape[0], 1), predictions.reshape(predictions.shape[0], 1)), axis=1))
    # Fixed typo in the printed message ("Valiation" -> "Validation").
    print("NN Validation Loss: {}".format(np.sqrt(np.sum(np.square( predictions - Y_valid )))))
def verify_mailgun_request(timestamp, token, signature):
    """
    Ensures that a webhook request from Mailgun is valid.
    Raises an exception if the request is invalid.
    """
    global cached_mailgun_token
    # Reject a token identical to the previous one to prevent replay attacks.
    if token == cached_mailgun_token:
        raise ValueError('Mailgun token is identical to the previous one')
    cached_mailgun_token = token
    # Reject requests whose timestamp is more than a minute old.
    age = time.time() - int(timestamp)
    if age > 60:
        raise ValueError('Mailgun timestamp is older than 60 seconds')
    # Recompute the HMAC-SHA256 signature and compare in constant time.
    api_key = bytes(config['mailgun-key'], 'utf-8')
    message = (timestamp + token).encode('utf-8')
    expected = hmac.new(api_key, message, hashlib.sha256).hexdigest()
    if not hmac.compare_digest(expected, signature):
        raise ValueError('Computed signature does not match request signature')
def translation_ev(h, t, tol=1e6):
    """Compute the eigenvalues of the translation operator of a lead.
    Adapted from kwant.physics.leads.modes.
    Parameters
    ----------
    h : numpy array, real or complex, shape (N, N) The unit cell
        Hamiltonian of the lead unit cell.
    t : numpy array, real or complex, shape (N, M)
        The hopping matrix from a lead cell to the one on which self-energy
        has to be calculated (and any other hopping in the same direction).
    tol : float
        Numbers and differences are considered zero when they are smaller
        than `tol` times the machine precision.
    Returns
    -------
    ev : numpy array
        Eigenvalues of the translation operator in the form lambda=r*exp(i*k),
        for |r|=1 they are propagating modes.
    """
    # Build the generalized eigenproblem for the lead and solve it with
    # kwant's unified solver; only the eigenvalues are needed here.
    lhs, rhs = kwant.physics.leads.setup_linsys(h, t, tol, None).eigenproblem
    return kwant.physics.leads.unified_eigenproblem(lhs, rhs, tol=tol)[0]
def bsearch(n, pred):
    """
    Given a boolean function pred that takes index arguments in [0, n).
    Assume the boolean function pred returns all False and then all True for
    values. Return the index of the first True, or n if that does not exist.
    """
    if pred(0):
        return 0
    # Invariant: pred(lo) is False, and the first True (if any) lies in
    # (lo, hi]. Narrow the window until only one candidate remains.
    lo, hi = 0, n
    while hi - lo > 1:
        mid = lo + (hi - lo) // 2
        if pred(mid):
            hi = mid
        else:
            lo = mid
    # lo is the last False index, so lo + 1 is the first True (or n).
    return lo + 1
def test_rotating_file_handler_interval(tmpdir, logger, monkeypatch):
    """Test the rotating file handler when the rollover return a time smaller
    than the current time.
    """
    def fake_rollover(self, current_time):
        # Force the computed rollover time into the past.
        return current_time - 0.1
    monkeypatch.setattr(DayRotatingTimeHandler, 'computeRollover', fake_rollover)
    handler = DayRotatingTimeHandler(str(tmpdir.join('test.log')))
    handler.interval = 0.2
    logger.addHandler(handler)
    # Probably because we gives a negative time.
    assert len(tmpdir.listdir()) == 1
    logger.info('test')
    sleep(1)
    logger.info('test')
    assert len(tmpdir.listdir()) == 3
def compute_zero_crossing_wavelength(period, water_depth, gravity=GRAVITY):
    """Computes zero-crossing wavelength from given period.
    This uses the dispersion relation for linear waves.
    """
    # Convert period -> frequency -> wavenumber, then back to wavelength.
    wavenumber = frequency_to_wavenumber(1. / period, water_depth, gravity)
    return wavenumber_to_wavelength(wavenumber)
def problem1b(m, f):
    """
    What comes in: Positive integers m and f such that m >= 2.
    What goes out:
      -- Returns the number of integers from m to (f * m),
           inclusive, that are prime.
    Side effects: None.
    Examples:
      -- If m is 3 and f is 5, this function returns 5,
           since 3, 5, 7, 11, and 13 are the integers between 3 and 15,
           inclusive, that are prime.
      -- If m is 2 and f is 1, this function returns 1,
           since there is one prime (namely, 2) between 2 and 2.
      -- If m is 5 and f is 40, the correct answer is 44,
           since there are 44 primes between 5 and 200.
    """
    # Count primes in [m, f * m] using the is_prime helper defined
    # earlier in this file (as required by the exercise).
    count = 0
    for candidate in range(m, f * m + 1):
        if is_prime(candidate):
            count = count + 1
    return count
def run(is_cpp):
    """Passes its arguments directly to pnacl-clang.
    If -fsanitize-address is specified, extra information is passed to
    pnacl-clang to ensure that later instrumentation in pnacl-sz can be
    performed. For example, clang automatically inlines many memory allocation
    functions, so this script will redefine them at compile time to make sure
    they can be correctly instrumented by pnacl-sz.
    """
    pnacl_root = FindBaseNaCl()
    dummy_subs = {'calloc': {'sig': ['void *', 'size_t', 'size_t'],
                             'sub': '__asan_dummy_calloc'},
                  '_calloc': {'sig': ['void *', 'size_t', 'size_t'],
                              'sub': '__asan_dummy_calloc'}}
    subs_src = (
        '{root}/toolchain_build/src/subzero/pydir/sz_clang_dummies.c'
    ).format(root=pnacl_root)
    clang = (
        '{root}/toolchain/linux_x86/pnacl_newlib_raw/bin/pnacl-clang{pp}'
    ).format(root=pnacl_root, pp='++' if is_cpp else '')
    args = sys.argv
    args[0] = clang
    tmp_dir = ''
    if '-fsanitize-address' in args:
        args.remove('-fsanitize-address')
        include_dirs = set()
        tmp_dir = tempfile.mkdtemp()
        for i, arg in enumerate(args[1:], 1):
            if not os.path.isfile(arg):
                continue
            src = os.path.basename(arg)
            ext = os.path.splitext(arg)[1]
            if ext in ['.c', '.cc', '.cpp']:
                include_dirs |= {os.path.dirname(arg)}
                dest_name = os.path.join(tmp_dir, src)
                with open(dest_name, 'w') as dest:
                    dest.write(subsToMacros(dummy_subs, arg))
                    with open(arg) as src:
                        for line in src:
                            dest.write(line)
                args[i] = dest_name
        # If linking (not single file compilation) then add dummy definitions
        if not ('-o' in args and
                ('-c' in args or '-S' in args or '-E' in args)):
            args.append(subs_src)
        for d in include_dirs:
            # NOTE(review): appended as a single "-iquote <dir>" token —
            # assumes shellcmd() splits or joins its argument list; confirm.
            args.append('-iquote {d}'.format(d=d))
        if '-fno-inline' not in args:
            args.append('-fno-inline')
    err_code = 0
    try:
        shellcmd(args, echo=True)
    except subprocess.CalledProcessError as e:
        # Fixed: was a Python 2 print statement (`print e.output`); the
        # call form works under both Python 2 and 3.
        print(e.output)
        err_code = e.returncode
    if tmp_dir != '':
        shutil.rmtree(tmp_dir)
    exit(err_code)
def launch_transport_listener(transport, bindaddr, role, remote_addrport, pt_config, ext_or_cookie_file=None):
    """
    Launch a listener for 'transport' in role 'role' (socks/client/server/ext_server).
    If 'bindaddr' is set, then listen on bindaddr. Otherwise, listen
    on an ephemeral port on localhost.
    'remote_addrport' is the TCP/IP address of the other end of the
    circuit. It's not used if we are in 'socks' role.
    'pt_config' contains configuration options (such as the state location)
    which are of interest to the pluggable transport.
    'ext_or_cookie_file' is the filesystem path where the Extended
    ORPort Authentication cookie is stored. It's only used in
    'ext_server' mode.
    Return a tuple (addr, port) representing where we managed to bind.
    Throws obfsproxy.transports.transports.TransportNotFound if the
    transport could not be found.
    Throws twisted.internet.error.CannotListenError if the listener
    could not be set up.
    """
    if bindaddr:
        host, port = bindaddr[0], int(bindaddr[1])
    else:
        # No explicit bind address: use an ephemeral port on localhost.
        host, port = 'localhost', 0
    if role == 'socks':
        transport_class = FTETransportClient
        if hasattr(socks, "OBFSSOCKSv5Factory"):
            # obfsproxy >= 0.2.7 provides SOCKS5.
            factory = socks.OBFSSOCKSv5Factory(transport_class, pt_config)
            pt_config.fte_client_socks_version = 5
        elif hasattr(socks, "SOCKSv4Factory"):
            # obfsproxy < 0.2.7 provides SOCKS4.
            factory = socks.SOCKSv4Factory(transport_class, pt_config)
            pt_config.fte_client_socks_version = 4
        else:
            # This will only happen if the obfsproxy people change the socks
            # code again. This really is a dependency issue, so raise an
            # ImportError.
            raise ImportError("Failed to setup an obfsproxy SOCKS server factory")
    elif role == 'ext_server':
        assert(remote_addrport and ext_or_cookie_file)
        transport_class = FTETransportServer
        factory = extended_orport.ExtORPortServerFactory(
            remote_addrport, ext_or_cookie_file, transport, transport_class, pt_config)
    elif role == 'client':
        assert(remote_addrport)
        transport_class = FTETransportClient
        factory = network.StaticDestinationServerFactory(
            remote_addrport, role, transport_class, pt_config)
    elif role == 'server':
        assert(remote_addrport)
        transport_class = FTETransportServer
        factory = network.StaticDestinationServerFactory(
            remote_addrport, role, transport_class, pt_config)
    else:
        raise InvalidRoleException()
    listener = twisted.internet.reactor.listenTCP(
        port, factory, interface=host)
    bound = listener.getHost()
    return (bound.host, bound.port)
def div(style, render=False, label=''):
    """Render divider."""
    if len(style) == 1:
        # Single-character style: a plain filled rule, optionally prefixed
        # with the label.
        prefix = '' if label == '' else style * 2 + ' ' + label + ' '
        res = hfill(prefix, style)
    elif style[0] == style[-1]:
        # Windows does line wrapping weird
        gap_top = '\n' * (len(style) - (2 if os.name == 'nt' else 1))
        gap_bottom = '\n' * (len(style) - 1)
        res = (
            hfill('', style[0]) + gap_top + center(label)
            + gap_bottom + hfill('', style[0]))
    else:
        raise ValueError("""Style not recognized. Available styles:
    > '-', '=', or any other single character sequence
    > '- -', '= =', or any other repeated character with n>=0 separating spaces
    """)
    if render:
        return res
    print(res)
def send_array(A, flags=0, copy=True, track=False):
    """send a numpy array with metadata
    Inputs
    ------
    A: (subplots,dim) np array to transmit
        subplots - the amount of subplots that are
        defined in the current plot
        dim - the amount of data that you want to plot.
        This is not fixed
    """
    # Scalars arrive as plain floats; promote them to a 1x1 array.
    if isinstance(A, float):
        A = np.array(A).reshape(1, 1)
    # One-dimensional arrays become a single column.
    if len(A.shape) == 1:
        A = A.reshape(-1, 1)
    # Metadata the receiver needs to reconstruct the array.
    md = dict(dtype=str(A.dtype), shape=A.shape)
    # Announce the message category, then the metadata, then the raw buffer.
    socket.send_string(SENDING_DATA)
    socket.send_json(md, flags | zmq.SNDMORE)
    return socket.send(A, flags, copy=copy, track=track)
async def test_single_group_role():
    """Test with single group role for happy path when multiples users are granted group role."""
    grants = {"group": [("group", "user"), ("group", "user2"), ("group", "user3")]}
    service = FakeRoleService(grants)
    result = await get_users_with_roles(service, "group")
    assert result == ["user", "user2", "user3"]
def test__job_create_manual_rollback(client):
    """
    Start a job update, and half-way to a manual rollback
    """
    def assert_has_events():
        # Both update- and instance-level events should have been recorded.
        details = client.get_job_update_details(
            None, api.JobUpdateQuery(jobKey=job_key)
        )
        assert len(details.detailsList[0].updateEvents) > 0
        assert len(details.detailsList[0].instanceEvents) > 0
    resp = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    job_update_key = resp.key
    job_key = resp.key.job
    # wait for few instances running
    time.sleep(5)
    assert_has_events()
    # rollback update
    client.rollback_job_update(job_update_key)
    wait_for_rolled_back(client, job_update_key)
    assert_has_events()
def api_file_upload(request):
    """Upload a file to the storage system.

    Expects a multipart upload named "file" whose filename is
    "<checksum>.<ext>". Returns 400 on a malformed request, 403 when the
    user's staging quota is exceeded, 500 on unexpected errors.
    """
    try:
        fobj = request.FILES["file"]
        # rsplit so filenames containing additional dots (e.g.
        # "abc123.tar.gz") don't raise ValueError on unpacking.
        checksum, ext = fobj._name.rsplit(".", 1)
        try:
            request.user.check_staged_space(fobj._size, checksum)
        except Exception as e:
            return HttpResponseForbidden(str(e))
        write_file_to_storage(fobj, check_valid=True)
        StagedFile.objects.get_or_create(
            checksum=checksum,
            file_size=fobj._size,
            uploaded_by=request.user
        )
        return HttpResponse(json.dumps({
            "success": True,
        }))
    except KeyError:
        return HttpResponseBadRequest("Invalid file upload request")
    except Exception as e:
        handle_server_error(request)
        return HttpResponseServerError(content=str(e), reason=str(e))
def calculateAggregateInterferenceForGwpz(gwpz_record, grants):
    """Calculates per-channel aggregate interference for GWPZ.
    Args:
        gwpz_record: A GWPZ record dict.
        grants: An iterable of CBSD grants of type |data.CbsdGrantInfo|.
    Returns:
        Aggregate interference to GWPZ in the nested dictionary format.
        {latitude : {longitude: [aggr_interf1(mW), ..., aggr_interfK(mW)]}}
        The list contains the value per protected channel.
    """
    region_type = gwpz_record['landCategory']
    # Grid the GWPZ protection area into fine-grained protection points.
    points = utils.GridPolygon(gwpz_record['zone']['features'][0]['geometry'],
                               GWPZ_GRID_RES_ARCSEC)
    freq_range = (gwpz_record['record']['deploymentParam'][0]
                  ['operationParam']['operationFrequencyRange'])
    low_freq = freq_range['lowFrequency']
    high_freq = freq_range['highFrequency']
    # Channels over which the area incumbent needs partial/full protection.
    channels = interf.getProtectedChannels(low_freq, high_freq)
    logging.info('Computing aggregateInterferenceForPoint for PPA (%s), channels (%s), '
                 'nPoints (%d), grants (%s), region_type (%s)',
                 gwpz_record, channels, len(points), grants, region_type)
    logging.debug(' points: %s', points)
    # Evaluate every protection point with a pool of parallel processes.
    point_interference = partial(aggregateInterferenceForPoint,
                                 channels=channels,
                                 grants=grants,
                                 fss_info=None,
                                 esc_antenna_info=None,
                                 protection_ent_type=data.ProtectedEntityType.GWPZ_AREA,
                                 region_type=region_type)
    pool = mpool.Pool()
    return InterferenceDict(pool.map(point_interference, points))
def from_pandas_ephemeral(
    engine: Engine,
    df: pandas.DataFrame,
    convert_objects: bool,
    name: str
) -> DataFrame:
    """
    Instantiate a new DataFrame based on the content of a Pandas DataFrame. The data will be represented
    using a `select * from values()` query, or something similar depending on the database dialect.
    Warning: This method is only suited for small quantities of data.
    For anything over a dozen kilobytes of data it is recommended to store the data in a table in
    the database, e.g. by using the from_pd_store_table() function.
    Supported dtypes are 'int64', 'float64', 'string', 'datetime64[ns]', 'bool'
    :param engine: db connection
    :param df: Pandas DataFrame to instantiate as DataFrame
    :param convert_objects: If True, columns of type 'object' are converted to 'string' using the
        pd.convert_dtypes() method where possible.
    :param name: name for the generated SQL model underlying the DataFrame
    :return: DataFrame whose data is inlined into the query itself (no table created)
    :raises DatabaseNotSupportedException: if the engine is neither Postgres nor BigQuery
    """
    # todo add dtypes argument that explicitly let's you set the supported dtypes for pandas columns
    df_copy, index_dtypes, all_dtypes = _from_pd_shared(
        dialect=engine.dialect,
        df=df,
        convert_objects=convert_objects,
        cte=True
    )
    column_series_type = [get_series_type_from_dtype(dtype) for dtype in all_dtypes.values()]
    # Render every cell into a dialect-specific SQL literal expression, one
    # expression list per row.
    per_row_expr = []
    for row in df_copy.itertuples():
        per_column_expr = []
        # Access the columns in `row` by index rather than by name. Because if a name starts with an
        # underscore (e.g. _index_skating_order) it will not be available as attribute.
        # so we use `row[i]` instead of getattr(row, column_name).
        # start=1 is to account for the automatic index that pandas adds
        for i, series_type in enumerate(column_series_type, start=1):
            val = row[i]
            per_column_expr.append(
                series_type.value_to_expression(dialect=engine.dialect, value=val, dtype=series_type.dtype)
            )
        row_expr = Expression.construct('({})', join_expressions(per_column_expr))
        per_row_expr.append(row_expr)
    all_values_str = join_expressions(per_row_expr, join_str=',\n').to_sql(engine.dialect)
    if is_postgres(engine):
        # We are building sql of the form:
        # select * from (values
        #   ('row 1', cast(1234 as bigint), cast(-13.37 as double precision)),
        #   ('row 2', cast(1337 as bigint), cast(31.337 as double precision))
        # ) as t("a", "b", "c")
        column_names_expr = join_expressions(
            [Expression.raw(quote_identifier(engine.dialect, column_name)) for column_name in
             all_dtypes.keys()]
        )
        column_names_str = column_names_expr.to_sql(engine.dialect)
        sql = f'select * from (values \n{all_values_str}\n) as t({column_names_str})\n'
    elif is_bigquery(engine):
        # We are building sql of the form:
        # select * from UNNEST([
        #   STRUCT<`a` STRING, `b` INT64, `c` FLOAT64>
        #   ('row 1', 1234, cast(-13.37 as FLOAT64))
        #   ('row 2', 1337, cast(31.337 as FLOAT64))
        # ])
        sql_column_name_types = []
        for col_name, dtype in all_dtypes.items():
            db_col_name = quote_identifier(dialect=engine.dialect, name=col_name)
            db_dtype = get_series_type_from_dtype(dtype).get_db_dtype(dialect=engine.dialect)
            sql_column_name_types.append(f'{db_col_name} {db_dtype}')
        sql_struct = f'STRUCT<{", ".join(sql_column_name_types)}>'
        sql = f'select * from UNNEST([{sql_struct} \n{all_values_str}\n])\n'
    else:
        raise DatabaseNotSupportedException(engine)
    # Wrap the generated SQL into a model so the DataFrame machinery can
    # compose further operations on top of it.
    model_builder = CustomSqlModelBuilder(sql=sql, name=name)
    sql_model = model_builder()
    index = list(index_dtypes.keys())
    return DataFrame.from_model(engine=engine, model=sql_model, index=index, all_dtypes=all_dtypes)
def cmdline_upload():
    """Pipe a saved docker image into glance from the command line."""
    # Image upload is restricted to the admin account.
    os.environ.update({'OS_USERNAME': 'admin'})
    cmd = (
        "docker save {name} | "
        "glance image-create "
        "--is-public=True "
        "--container-format=docker "
        "--disk-format=raw "
        "--name {name}"
    ).format(name=CONF.tag)
    logging.info("Executing %s" % cmd)
    subprocess.call([cmd], shell=True)
def set_vars(api, file: str, tess_profile: dict):
    """
    Apply the user-specific variables from a tesseract profile to the API
    and run recognition on the given image file.
    :param api: tesserocr API handle
    :param file: path of the image to recognize
    :param tess_profile: profile dict, optionally holding a 'variables' map
    :return: 0 on completion
    """
    api.SetImageFile(file)
    # Always keep blob choices so alternatives can be inspected later.
    api.SetVariable("save_blob_choices", "T")
    for name, spec in tess_profile.get('variables', {}).items():
        api.SetVariable(name, str(spec['value']))
    api.Recognize()
    return 0
def qtrfit(numpoints, defcoords, refcoords, nrot):
    """Find the quaternion q (and equivalent left rotation matrix u) that
    minimizes | qTXq - Y | ^ 2  [|uX - Y| ^ 2], i.e. maximizes Re (qTXTqY).
    The left rotation matrix is obtained from q via u = qT1q.

    Parameters
        numpoints: number of points used from each coordinate list (int)
        defcoords: definition coordinates, each entry an [x, y, z] list
        refcoords: fitted/reference coordinates, each entry an [x, y, z] list
        nrot: maximum number of jacobi sweeps
    Returns
        quat: the best-fit quaternion
        lrot: the best-fit left rotation matrix
    """
    # Accumulate the 3x3 cross-correlation matrix between the two point sets.
    corr = [[0.0, 0.0, 0.0] for _ in range(3)]
    for k in range(numpoints):
        d = defcoords[k]
        r = refcoords[k]
        for a in range(3):
            for b in range(3):
                corr[a][b] += d[a] * r[b]
    xxyx, xxyy, xxyz = corr[0]
    xyyx, xyyy, xyyz = corr[1]
    xzyx, xzyy, xzyz = corr[2]
    # Assemble the upper triangle of the symmetric 4x4 quaternion matrix.
    cmat = [[0.0, 0.0, 0.0, 0.0] for _ in range(4)]
    cmat[0][0] = xxyx + xyyy + xzyz
    cmat[0][1] = xzyy - xyyz
    cmat[0][2] = xxyz - xzyx
    cmat[0][3] = xyyx - xxyy
    cmat[1][1] = xxyx - xyyy - xzyz
    cmat[1][2] = xxyy + xyyx
    cmat[1][3] = xzyx + xxyz
    cmat[2][2] = xyyy - xzyz - xxyx
    cmat[2][3] = xyyz + xzyy
    cmat[3][3] = xzyz - xxyx - xyyy
    # Diagonalize; the eigenvector in column 3 is the best-fit quaternion.
    _, vmat = jacobi(cmat, nrot)
    quat = [vmat[row][3] for row in range(4)]
    return quat, q2mat(quat)
def _embedded_bundles_partial_impl(
        ctx,
        bundle_embedded_bundles,
        embeddable_targets,
        frameworks,
        plugins,
        watch_bundles):
    """Implementation for the embedded bundles processing partial.

    Collects frameworks/plugins/watch bundles propagated by dependencies
    (via _AppleEmbeddableInfo) and either packages them into this bundle
    (when bundle_embedded_bundles is True) or re-propagates them upward.
    """
    _ignore = [ctx]
    # Gather the embeddable providers from dependencies that advertise one.
    embeddable_providers = [
        x[_AppleEmbeddableInfo]
        for x in embeddable_targets
        if _AppleEmbeddableInfo in x
    ]
    transitive_frameworks = []
    transitive_plugins = []
    transitive_watch_bundles = []
    for provider in embeddable_providers:
        transitive_frameworks.append(provider.frameworks)
        transitive_plugins.append(provider.plugins)
        transitive_watch_bundles.append(provider.watch_bundles)
    bundle_zips = []
    if bundle_embedded_bundles:
        # This bundle is the embedding root: place the transitive artifacts
        # into their respective bundle locations.
        bundle_zips.extend([
            (processor.location.framework, None, depset(transitive = transitive_frameworks)),
            (processor.location.plugin, None, depset(transitive = transitive_plugins)),
            (processor.location.watch, None, depset(transitive = transitive_watch_bundles)),
        ])
        # Clear the transitive lists to avoid propagating them, since they will be packaged in the
        # bundle processing this partial and do not need to be propagated.
        transitive_frameworks = []
        transitive_plugins = []
        transitive_watch_bundles = []
    return struct(
        bundle_zips = bundle_zips,
        providers = [
            # Direct artifacts of this target plus whatever is still being
            # propagated from dependencies.
            _AppleEmbeddableInfo(
                frameworks = depset(frameworks, transitive = transitive_frameworks),
                plugins = depset(plugins, transitive = transitive_plugins),
                watch_bundles = depset(watch_bundles, transitive = transitive_watch_bundles),
            ),
        ],
    )
def load_instrument(yml):
    """
    Instantiate an instrument from a YAML spec.

    Parameters
    ----------
    yml : str
        Filename of the instrument configuration in YAML format.

    Returns
    -------
    hexrd.instrument.HEDMInstrument
        Instrument instance built from the parsed configuration.
    """
    with open(yml, 'r') as cfg_file:
        config = yaml.safe_load(cfg_file)
    return instrument.HEDMInstrument(instrument_config=config)
def serialize_model(self: models.Model, excludes: List[str] = None) -> dict:
    """
    Serialize a Django model instance using only data already fetched via
    select_related / prefetch_related; fields can be narrowed at query time
    with only/defer.  It never issues queries of its own, so it avoids the
    N+1 query problem entirely.
    # See:
    https://aber.sh/articles/A-new-idea-of-serializing-Django-model/
    """
    excludes = excludes or []
    serialized = set()
    # Optionally convert field names to camelCase, controlled by settings.
    if getattr(settings, "DSA_SERIALIZE_TO_CAMELCASE", False):
        to_camel_case_func = string_convert
    else:
        to_camel_case_func = do_nothing
    def _serialize_model(model) -> dict:
        # One-to-one relations can recurse forever; the closure-level
        # `serialized` set records models already emitted, and a second
        # visit returns just the primary key instead of recursing again.
        nonlocal serialized
        if model in serialized:
            return model.pk
        else:
            serialized.add(model)
        # A cached one-to-one / foreign-key relation may be None; return {}
        # instead of failing on the attribute accesses below.
        if model is None:
            return {}
        # Recurse into relations already cached by select_related
        # (Django stashes them in _state.fields_cache).
        result = {
            to_camel_case_func(name): _serialize_model(foreign_key)
            for name, foreign_key in model.__dict__["_state"]
            .__dict__.get("fields_cache", {})
            .items()
        }
        buried_fields = getattr(model, "buried_fields", [])
        for name, value in model.__dict__.items():
            # Sensitive ("buried") fields are not serialized.
            if name in buried_fields:
                continue
            # Private attributes are not serialized.
            if name.startswith("_"):
                continue
            result[to_camel_case_func(name)] = value
        # Include reverse / many-to-many relations cached by prefetch_related.
        for name, queryset in model.__dict__.get(
            "_prefetched_objects_cache", {}
        ).items():
            result[to_camel_case_func(name)] = [_serialize_model(model) for model in queryset]  # type: ignore
        return result
    results = _serialize_model(self)
    # Drop explicitly excluded fields.
    for field_name in excludes:
        del results[to_camel_case_func(field_name)]
    return results
def p_object_list_path(p):
    """
    object_list_path : object_path object_path_expr
    """
    # NOTE: the docstring above is the yacc grammar production consumed by
    # PLY — it is code, not documentation; do not reword it.
    # The production's value is the concatenation of its two child results.
    p[0] = p[1] + p[2]
def isint(x):
    """
    For an ``mpf`` *x*, or any type that can be converted
    to ``mpf``, determines whether *x* is exactly
    integer-valued::
        >>> from sympy.mpmath import *
        >>> isint(3), isint(mpf(3)), isint(3.2)
        (True, True, False)
    """
    if isinstance(x, int_types):
        return True
    try:
        x = mpmathify(x)
    except Exception:
        # Conversion failed: not representable as an mpf, hence not an int.
        # (Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt / SystemExit.)
        return False
    if isinstance(x, mpf):
        # nan/inf are never integer-valued.
        if isnan(x) or isinf(x):
            return False
        return x == int(x)
    return False
def update_header(file):
    """
    Create a standard WCS header from the HDF5 header. To do this we clean up the
    header data (which is initially stored in individual arrays). We then create
    a new header dictionary with the old cleaned header info. Finally, we use
    astropy.wcs.WCS to create an updated WCS header for the 2 spatial dimensions.
    Args:
        file: hdf5 File object containing HDF5 file
    Returns:
        tuple: (astropy Header covering the 2 spatial axes, cleaned header dict)
    """
    hdr_dict = {}
    # The HDF5 'header' dataset stores (key, value, ?, type) tuples as bytes;
    # strip the b''-repr artifacts to recover plain strings.
    header_cols = [str(val[0]).replace("'b", '').replace("'", "").replace("b", '') for val in
                   list(file['header'][()])]
    header_vals = [str(val[1]).replace("'b", '').replace("'", "").replace("b", '') for val in
                   list(file['header'][()])]
    header_types = [val[3] for val in list(file['header'][()])]
    for header_col, header_val, header_type in zip(header_cols, header_vals, header_types):
        # Coerce each value back to its recorded type.  These branches must be
        # mutually exclusive: the original used independent `if` statements, so
        # bool- and float-typed values also fell through to the final `else`
        # and were overwritten (bools ended up re-parsed as str).
        if 'bool' in str(header_type):
            # NOTE(review): bool() of any non-empty string is True, even
            # "False" — confirm how booleans are actually stored.
            hdr_dict[header_col] = bool(header_val)
        elif 'float' in str(header_type):
            hdr_dict[header_col] = float(header_val)
        elif 'int' in str(header_type):
            hdr_dict[header_col] = int(header_val)
        else:
            # Unknown type tag: best-effort numeric parse, else keep as str.
            try:
                hdr_dict[header_col] = float(header_val)
            except ValueError:
                hdr_dict[header_col] = str(header_val)
    hdr_dict['CTYPE3'] = 'WAVE-SIP'
    hdr_dict['CUNIT3'] = 'm'
    # Build a WCS restricted to the 2 spatial axes and convert it back into a
    # FITS-style header, re-adding the mandatory leading keywords.
    wcs_data = WCS(hdr_dict, naxis=2)
    header = wcs_data.to_header()
    header.insert('WCSAXES', ('SIMPLE', 'T'))
    header.insert('SIMPLE', ('NAXIS', 2), after=True)
    return header, hdr_dict
def test_fltruncate(doctest):
    """
    ! (require racket/flonum)
    > (fltruncate 2.5)
    2.0
    > (fltruncate -2.5)
    -2.0
    > (fltruncate +inf.0)
    +inf.0
    """
    # The docstring is an executable Racket session run by the `doctest`
    # fixture; the function body is intentionally empty.
def get_MD_psat():
    """MD data for saturation densities.

    Source:
    Thermodynamic properties of the 3D Lennard-Jones/spline model
    Bjørn Hafskjold and Karl Patrick Travis and Amanda Bailey Hass and
    Morten Hammer and Ailo Aasen and Øivind Wilhelmsen
    doi: 10.1080/00268976.2019.1664780

    Returns:
        dict: keys "T" (temperatures) and "P" (saturation pressures),
        both numpy arrays of equal length.
    """
    T = np.array([0.5501, 0.5499, 0.5496, 0.5997, 0.6500, 0.7000, 0.7504,
                  0.8000, 0.8202, 0.8407, 0.8596, 0.8688, 0.8771, 0.8775,
                  0.6898, 0.7723, 0.8070, 0.8407, 0.8437, 0.8570, 0.8687,
                  0.8723, 0.8762, 0.8770])
    p = np.array([0.002158, 0.002084, 0.002123, 0.004656, 0.008804, 0.015332,
                  0.025052, 0.038927, 0.045588, 0.054326, 0.063949, 0.069529,
                  0.075501, 0.075752, 0.014112, 0.031532, 0.042154, 0.055300,
                  0.056660, 0.062675, 0.070558, 0.070944, 0.072616, 0.073748])
    data = {}
    data["T"] = T
    # Bug fix: the original assigned `data["P"] = P`, but the local variable
    # is lowercase `p`, raising NameError on every call.
    data["P"] = p
    return data
def get_reddit_client():
    """Build an authenticated praw Reddit client from module-level credentials."""
    logging.info("Logged in as user (%s).." % redditUsername)
    return praw.Reddit(
        client_id=redditClientID,
        client_secret=redditClientSecret,
        password=redditPassword,
        user_agent=redditUserAgent,
        username=redditUsername,
    )
def handle_leet(command: Command) -> None:
    """
    `!leet [`easy` | `medium` | `hard`] - Retrieves a set of questions from online coding
    websites, and posts in channel with a random question from this set. If a difficulty
    is provided as an argument, the random question will be restricted to this level of
    challenge. Else, a random difficulty is generated to choose.
    """
    was_random = True  # Controls the wording of the response title below
    if command.has_arg():
        if (command.arg not in {"easy", "medium", "hard"}):
            bot.post_message(command.channel_id, "Usage: !leet [`easy` | `medium` | `hard`]")
            return
        else:
            difficulty = command.arg.lower()
            was_random = False
    else:
        # No difficulty specified, pick one at random
        # (assumes LC_DIFFICULTY_MAP is a sequence of difficulty names — TODO confirm)
        difficulty = random.choice(LC_DIFFICULTY_MAP)
    # List to store questions collected
    questions: List[Tuple[str, str]] = []
    # Go fetch questions from APIs
    collect_questions(questions, difficulty)
    selected_question = select_question(questions)  # Get a random question
    # If we didn't find any questions for this difficulty, try again,
    # probably timeout on all sources
    if (selected_question is None):
        bot.post_message(command.channel_id,
                         "Hmm, the internet pipes are blocked. Try that one again.")
        return
    # Leetcode difficulty colors (keyed by the lowercase difficulty)
    color = COLORS[difficulty]
    if (was_random):
        title_text = f"Random {difficulty} question generated!"
    else:
        # Style this a bit nicer
        difficulty = difficulty.title()
        title_text = f"{difficulty} question generated!"
    # (Removed a dead `difficulty = difficulty.title()` here: the variable is
    # never used again after building the title text.)
    msg_text = f"Here's a new question for you! <{selected_question[1]}|{selected_question[0]}>"
    bot.post_message(command.channel_id, text=title_text,
                     attachments=[Attachment(SectionBlock(msg_text), color=color)._resolve()])
def get_member_id():
    """
    Retrieve the member id for the current process, built from hostname and pid.
    :rtype: ``bytes``
    """
    info = system_info.get_process_info()
    return six.b('%s_%d' % (info['hostname'], info['pid']))
def get_table_arn():
    """Look up the DynamoDB table ARN.

    Returns
    -------
    dict
        ``{"table_arn": <ARN string>}`` for the module-level ``table_name``.
    """
    description = dynamodb_client.describe_table(TableName=table_name)
    return {"table_arn": description['Table']['TableArn']}
def replace_characters(request):
    """Validate request parameters and dispatch to the replace_characters backend."""
    keys = ['text', 'characters', 'replacement']
    values = get_data(request, keys)
    # The text to operate on is mandatory.
    if not values[0]:
        abort(400, 'missing text parameter')
    # The replacement defaults to the empty string (i.e. plain removal).
    values[2] = values[2] or ''
    return _call('replace_characters', keys, values)
def serialize_to_jsonable(obj):
    """Fall back to the object's repr() so any value becomes JSON-encodable text."""
    return repr(obj)
def move():
    """Move namespaces/files from one parent folder to another
    """
    # Intentionally empty: presumably a CLI command-group stub whose
    # subcommands are registered elsewhere — TODO confirm.
    pass
def test_godlikea(err, xpi_package):
    """Test to make sure that the godlikea namespace is not in use."""
    if 'chrome/godlikea.jar' in xpi_package:
        err.error(
            err_id=('testcases_packagelayout',
                    'test_godlikea'),
            error="Banned 'godlikea' chrome namespace",
            # Fixed typo in the user-facing description: "namepsace" -> "namespace".
            description="The 'godlikea' chrome namespace is generated from a "
                        'template and should be replaced with something '
                        'unique to your add-on to avoid name conflicts.',
            filename='chrome/godlikea.jar')
def compare(optimizers, problems, runs=20, all_kwargs={}):
    """Compare a set of optimizers.
    Args:
        optimizers: list/Optimizer; Either a list of optimizers to compare,
            or a single optimizer to test on each problem.
        problems: list/Problem; Either a problem instance or a list of problem instances,
            one for each optimizer.
        all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
            for all optimizers, or a list of keyword arguments, one for each optimizer.
        runs: int; How many times to run each optimizer (smoothness)
    Returns:
        dict; mapping optimizer identifier to stats.
    """
    # NOTE(review): Python 2 code (print statements below); also
    # collections.Iterable moved to collections.abc in Python 3.
    # NOTE(review): the mutable default `all_kwargs={}` is only safe because it
    # is never mutated here; prefer None + fallback if this is ever edited.
    if not (isinstance(optimizers, collections.Iterable)
            or isinstance(problems, collections.Iterable)):
        raise TypeError('optimizers or problems must be iterable')
    # If optimizers is not a list, repeat into list for each problem
    if not isinstance(optimizers, collections.Iterable):
        optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]
    # If problems is not a list, repeat into list for each optimizer
    if not isinstance(problems, collections.Iterable):
        problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]
    # If all_kwargs is not a list, repeat it into a list
    if isinstance(all_kwargs, dict):
        all_kwargs = [all_kwargs] * len(optimizers)
    elif not isinstance(all_kwargs, collections.Iterable):
        raise TypeError('all_kwargs must be dict or list of dict')
    stats = {}
    key_counts = {}
    for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
        # For nice human readable dictionaries, extract useful names from
        # optimizer
        class_name = optimizer.__class__.__name__
        fitness_func_name = problem._fitness_function.__name__
        key_name = '{} {}'.format(class_name, fitness_func_name)
        # Keep track of how many optimizers of each class / fitness func
        # for better keys in stats dict
        try:
            key_counts[key_name] += 1
        except KeyError:
            key_counts[key_name] = 1
        # Foo 1, Foo 2, Bar 1, etc.
        key = '{} {}'.format(key_name, key_counts[key_name])
        # Py2 print with trailing comma: stay on the same line as the
        # progress output emitted by benchmark().
        print key + ': ',
        # Finally, get the actual stats
        stats[key] = benchmark(optimizer, problem, runs=runs, **kwargs)
        print
    return stats
def az_el2norm(az: float, el: float):
    """Return the solar angle (azimuth/elevation, degrees) as a unit vector."""
    # Convert elevation to polar angle and azimuth to radians.
    theta = np.pi / 2 - el * np.pi / 180
    phi = az * np.pi / 180
    sin_theta = np.sin(theta)
    return np.asarray([
        sin_theta * np.cos(phi),
        sin_theta * np.sin(phi),
        np.cos(theta),
    ])
async def api_get_user(user_id: int, db: Session = Depends(get_db)):
    """
    Fetch a single user entity, translating a missing user into an HTTP 404.
    - **user_id**: the user id
    - **db**: current database session object
    """
    try:
        return await User.get_by_id(id=user_id, db=db)
    except UserNotFoundException as exc:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=exc.detail)
def remove_comments(json_like):
    """
    Strip C-style ``//`` and ``/* */`` comments from *json_like* and return
    the result.  String literals are matched by the same pattern so that
    comment markers inside quoted strings are left untouched.
    Adapted from: https://gist.github.com/liftoff/ee7b81659673eca23cd9fc0d8b8e68b7
    """
    pattern = re.compile(
        r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
        re.DOTALL | re.MULTILINE,
    )
    # Comments (matches starting with '/') are dropped; quoted strings are
    # re-emitted unchanged.
    return pattern.sub(
        lambda match: "" if match.group(0).startswith("/") else match.group(0),
        json_like,
    )
def test_extract_multiple_ranges_for_file():
    """Check multiple ranges in a single file.
    Note: Final region is at end of file and does not have a trailing
    context line.
    """
    # Fixture: a unified diff touching three separate hunks of whodunit.py.
    diffs = """diff --git a/whodunit.py b/whodunit.py
index 2af698e..79da120 100644
--- a/whodunit.py
+++ b/whodunit.py
@@ -475,8 +484,8 @@ def build_owner(args):
                      args.verbose, args.max)
-def main(args):
-    args.root = os.path.abspath(args.root)
+def main(parser):
+    args = validate(parser)
     owners = build_owner(args)
     # Generators to get the owner info
@@ -491,7 +500,7 @@ def main(args):
             all_authors += top_n
             # Don't alter ordering, as names in sort (date/size) order
             print("(%s)" % ', '.join(top_n))
-        if args.details:
+        if owners.details:
             owners.show_details(args.max)
     print("\\n\\nAll authors: %s" % ', '.join(sort_by_name(all_authors)))
@@ -517,5 +526,4 @@ def setup_parser():
     return parser
 if __name__ == "__main__":
-    parser = setup_parser()
-    main(parser.parse_args())
+    main(setup_parser())
"""
    source_file, lines = cover.parse_diffs(diffs)
    # Expected: new-side line numbers per hunk; SourceLine(n, False) marks
    # unchanged context lines, SourceLine(n) marks added/modified lines.
    expected = [
        cover.SourceLine(484), cover.SourceLine(485), cover.SourceLine(486),
        cover.SourceLine(487, False), cover.SourceLine(488, False),
        cover.SourceLine(489), cover.SourceLine(490), cover.SourceLine(491),
        cover.SourceLine(500), cover.SourceLine(501), cover.SourceLine(502),
        cover.SourceLine(503, False),
        cover.SourceLine(504), cover.SourceLine(505), cover.SourceLine(506),
        cover.SourceLine(526), cover.SourceLine(527), cover.SourceLine(528),
        cover.SourceLine(529, False)
    ]
    assert source_file == 'whodunit.py'
    assert len(lines) == 19
    # NOTE(review): leftover debug prints; harmless under pytest but could be removed.
    print("%r" % lines)
    print("%r" % expected)
    assert lines == expected
def install_editable(projectroot, **kwargs):
    """Run ``pip install -e`` on *projectroot* and return the pip result."""
    return run_pip('install', '-e', projectroot, **kwargs)
def retry_get(tap_stream_id, url, config, params=None):
    """Retry wrapper around authed_get for endpoints that intermittently 500."""
    retries = 20
    delay = 120
    backoff = 1.5
    # Retry with exponential backoff; return on the first 200 response.
    for attempt in range(1, retries + 1):
        r = authed_get(tap_stream_id, url, config, params)
        if r.status_code == 200:
            return r
        logger.info(f'Got a status code of {r.status_code}, attempt '
                    f'{attempt} of {retries}. Backing off for {delay} '
                    f'seconds')
        time.sleep(delay)
        delay *= backoff
    # Exhausted every attempt: surface the last response before failing.
    logger.error(f'Status code of latest attempt: {r.status_code}')
    logger.error(f'Latest attempt response {r.content}')
    raise ValueError(f'Failed {retries} times trying to hit endpoint {url}')
def build_trie_from_to(template_dictionary: Mapping, from_timestamp: datetime.datetime, to_timestamp: datetime.datetime) -> Tuple[ahocorasick.Automaton, Mapping]:
    """Build an Aho-Corasick trie over all template words whose revisions fall
    between from_timestamp and to_timestamp (inclusive of the revision active
    at from_timestamp).

    Returns (trie, words_mapping), or (None, None) if no words were collected.
    """
    trie = ahocorasick.Automaton()
    words_mapping = dict()  # template -> list of (words_list, timestamp) revisions kept
    word_templates = dict()  # word -> list of templates that contain it
    # collect the words and the template associated (a list of them if multiple template is associated)
    for template in template_dictionary:
        # Start from the revision that was current at from_timestamp
        # (fall back to the first revision when none precedes it).
        index_first_timestamp = find_previous_timestamp(template_dictionary[template], from_timestamp) or 0
        # for all the revisions of that template starting from the first date possible
        for index in range(index_first_timestamp, len(template_dictionary[template])):
            words_list, t_stamp = template_dictionary[template][index]
            # stop the iteration because we overcome the to_timestamp limit
            if t_stamp > to_timestamp:
                break
            if not template in words_mapping:
                words_mapping[template] = list()
            words_mapping[template].append(template_dictionary[template][index])  # word lists for that template
            # Invert the mapping: remember which templates each word belongs to.
            for word in words_list:
                if not word in word_templates:
                    word_templates[word] = list()
                word_templates[word].append(template)
    for word in word_templates:
        trie.add_word(word, (word_templates[word], word))  # key is the word to search, value is the template
    trie.make_automaton()
    # NOTE(review): the emptiness check happens after make_automaton(); it could
    # be hoisted above the build, but behavior is unchanged either way.
    if not word_templates:
        return None, None
    return trie, words_mapping
def check_monotonicity_at_split(
    tree_df, tree_no, trend, variable, node, child_nodes_left, child_nodes_right
):
    """Check that a monotonic trend holds at a given split in a single tree.

    Args:
        tree_df (pd.DataFrame): tree structure with 'nodeid' and 'weight' columns.
        tree_no (int): index of the tree being checked (reporting only).
        trend (int): expected direction; 1 for increasing, -1 for decreasing.
        variable: name of the split variable (reporting only).
        node (int): id of the split node being checked.
        child_nodes_left (list): node ids in the left subtree.
        child_nodes_right (list): node ids in the right subtree.

    Returns:
        pd.DataFrame: single-row frame (indexed by node) summarising the check.

    Raises:
        TypeError: if an argument has the wrong type.
        ValueError: if trend is not 1/-1, or child nodes are missing from the tree.
    """
    if not isinstance(tree_df, pd.DataFrame):
        raise TypeError("tree_df should be a pd.DataFrame")
    if not isinstance(tree_no, int):
        raise TypeError("tree_no should be an int")
    if not isinstance(trend, int):
        raise TypeError("trend should be an int")
    if not isinstance(node, int):
        raise TypeError("node should be an int")
    if not isinstance(child_nodes_left, list):
        raise TypeError("child_nodes_left should be a list")
    if not isinstance(child_nodes_right, list):
        raise TypeError("child_nodes_right should be a list")
    # Validate trend up front, before any computation (the original only
    # raised after the node predictions had already been computed).
    if trend not in (1, -1):
        raise ValueError(
            "unexpected value for trend; "
            + str(trend)
            + " variable; "
            + str(variable)
            + " node:"
            + str(node)
        )
    all_child_nodes = child_nodes_left + child_nodes_right
    tree_nodes = tree_df["nodeid"].tolist()
    child_nodes_not_in_tree = list(set(all_child_nodes) - set(tree_nodes))
    if len(child_nodes_not_in_tree) > 0:
        raise ValueError(
            "the following child nodes do not appear in tree; "
            + str(child_nodes_not_in_tree)
        )
    left_nodes_max_pred = tree_df.loc[
        tree_df["nodeid"].isin(child_nodes_left), "weight"
    ].max()
    right_nodes_min_pred = tree_df.loc[
        tree_df["nodeid"].isin(child_nodes_right), "weight"
    ].min()
    # Increasing trend: every left-child prediction must sit at or below every
    # right-child prediction; decreasing trend is the mirror condition.
    if trend == 1:
        monotonic = left_nodes_max_pred <= right_nodes_min_pred
    else:
        monotonic = left_nodes_max_pred >= right_nodes_min_pred
    results = {
        "variable": variable,
        "tree": tree_no,
        "nodeid": node,
        "monotonic_trend": trend,
        "monotonic": monotonic,
        "child_nodes_left_max_prediction": left_nodes_max_pred,
        "child_nodes_right_min_prediction": right_nodes_min_pred,
        "child_nodes_left": str(child_nodes_left),
        "child_nodes_right": str(child_nodes_right),
    }
    return pd.DataFrame(results, index=[node])
def allocate_usda_ers_mlu_land_in_urban_areas(df, attr, fbs_list):
    """
    This function is used to allocate the USDA_ERS_MLU activity 'land in
    urban areas' to NAICS 2012 sectors. Allocation is dependent on
    assumptions defined in 'literature_values.py' as well as results from
    allocating 'EIA_CBECS_Land' and 'EIA_MECS_Land' to land based sectors.
    Methodology is based on the manuscript:
    Lin Zeng and Anu Ramaswami
    Impact of Locational Choices and Consumer Behaviors on Personal
    Land Footprints: An Exploration Across the Urban–Rural Continuum in the
    United States
    Environmental Science & Technology 2020 54 (6), 3091-3102
    DOI: 10.1021/acs.est.9b06024
    :param df: df, USDA ERA MLU Land
    :param attr: dictionary, attribute data from method yaml for activity set
    :param fbs_list: list, FBS dfs for activities created prior
    to the activity set that calls on this fxn
    :return: df, allocated USDS ERS MLU Land, FBS format
    """
    # define sector column to base calculations
    sector_col = 'SectorConsumedBy'
    vLogDetailed.info('Assuming total land use from MECS and CBECS included '
                      'in urban land area, so subtracting out calculated '
                      'MECS and CBECS land from MLU urban land area')
    # read in the cbecs and mecs df from df_list
    # (each prior FBS is identified by its MetaSources column)
    for df_i in fbs_list:
        if (df_i['MetaSources'] == 'EIA_CBECS_Land').all():
            cbecs = df_i
        elif (df_i['MetaSources'] == 'EIA_MECS_Land').all():
            mecs = df_i
    # load the federal highway administration fees dictionary
    fha_dict = get_transportation_sectors_based_on_FHA_fees()
    df_fha = pd.DataFrame.from_dict(
        fha_dict, orient='index').rename(
        columns={'NAICS_2012_Code': sector_col})
    # calculate total residential area from the American Housing Survey
    residential_land_area = get_area_of_urban_land_occupied_by_houses_2013()
    df_residential = df[df[sector_col] == 'F01000']
    df_residential = df_residential.assign(FlowAmount=residential_land_area)
    # make an assumption about the percent of urban area that is open space
    openspace_multiplier = get_open_space_fraction_of_urban_area()
    df_openspace = df[df[sector_col] == '712190']
    df_openspace = df_openspace.assign(
        FlowAmount=df_openspace['FlowAmount'] * openspace_multiplier)
    # sum all uses of urban area that are NOT transportation
    # first concat dfs for residential, openspace, commercial,
    # and manufacturing land use
    df_non_urban_transport_area = pd.concat(
        [df_residential, df_openspace, cbecs, mecs], sort=False,
        ignore_index=True)
    df_non_urban_transport_area = \
        df_non_urban_transport_area[['Location', 'Unit', 'FlowAmount']]
    non_urban_transport_area_sum = df_non_urban_transport_area.groupby(
        ['Location', 'Unit'], as_index=False).agg(
        {'FlowAmount': sum}).rename(columns={'FlowAmount': 'NonTransport'})
    # compare units
    compare_df_units(df, df_non_urban_transport_area)
    # calculate total urban transportation by subtracting
    # calculated areas from total urban land
    df_transport = df.merge(non_urban_transport_area_sum, how='left')
    df_transport = df_transport.assign(
        FlowAmount=df_transport['FlowAmount'] - df_transport['NonTransport'])
    df_transport.drop(columns=['NonTransport'], inplace=True)
    # make an assumption about the percent of urban transport
    # area used by airports
    airport_multiplier = get_urban_land_use_for_airports()
    df_airport = df_transport[df_transport[sector_col] == '488119']
    df_airport = df_airport.assign(
        FlowAmount=df_airport['FlowAmount'] * airport_multiplier)
    # make an assumption about the percent of urban transport
    # area used by railroads
    railroad_multiplier = get_urban_land_use_for_railroads()
    df_railroad = df_transport[df_transport[sector_col] == '482112']
    df_railroad = df_railroad.assign(
        FlowAmount=df_railroad['FlowAmount'] * railroad_multiplier)
    # further allocate the remaining urban transportation area using
    # Federal Highway Administration fees
    # first subtract area for airports and railroads
    air_rail_area = pd.concat([df_airport, df_railroad], sort=False)
    air_rail_area = air_rail_area[['Location', 'Unit', 'FlowAmount']]
    air_rail_area_sum = air_rail_area.groupby(
        ['Location', 'Unit'], as_index=False).agg(
        {'FlowAmount': sum}).rename(columns={'FlowAmount': 'AirRail'})
    df_highway = df_transport.merge(air_rail_area_sum, how='left')
    df_highway = df_highway.assign(
        FlowAmount=df_highway['FlowAmount'] - df_highway['AirRail'])
    df_highway.drop(columns=['AirRail'], inplace=True)
    # add fed highway administration fees
    # (each sector's share of fees apportions the remaining highway area)
    df_highway2 = df_highway.merge(df_fha, how='left')
    df_highway2 = df_highway2[df_highway2['ShareOfFees'].notna()]
    df_highway2 = df_highway2.assign(
        FlowAmount=df_highway2['FlowAmount'] * df_highway2['ShareOfFees'])
    df_highway2.drop(columns=['ShareOfFees'], inplace=True)
    # concat all df subsets
    allocated_urban_areas_df = pd.concat(
        [df_residential, df_openspace, df_airport, df_railroad, df_highway2],
        ignore_index=True, sort=False).reset_index(drop=True)
    # aggregate because multiple rows to household data due to residential
    # land area and highway fee shares
    groupcols = list(df.select_dtypes(include=['object', 'int']).columns)
    allocated_urban_areas_df_2 = aggregator(allocated_urban_areas_df,
                                            groupcols)
    return allocated_urban_areas_df_2
def verify_flash(octowire_ser, fw):
    """
    Verify Flash contents (against a given firmware image)
    :param octowire_ser: Octowire serial instance
    :param fw: Buffer containing the firmware image
    :return: Nothing
    """
    print(f"{Colors.OKBLUE}Verifying flash...{Colors.ENDC}")
    # Read back the flash one page at a time over the serial protocol.
    pages = math.ceil(len(fw) / _PAGE_SIZE)
    check_fw = bytearray()
    for i in range(0, pages):
        # 'r<hex offset>,<length>' requests a hex dump of one flash page.
        octowire_ser.write(bytes("r%s,%i\n" % (hex(i * _PAGE_SIZE)[2:], _PAGE_SIZE), "ascii"))
        res = octowire_ser.readline().strip().decode()
        if res.startswith("ERROR"):
            # NOTE(review): exits without printing what went wrong —
            # consider logging `res` before exiting.
            exit(1)
        check_fw += binascii.unhexlify(res)
    # Compare only the first len(fw) bytes: the final page read may overrun
    # the image length.
    if check_fw[:len(fw)] != fw:
        # NOTE(review): the message says "Exiting..." but the function returns
        # normally here — confirm whether a non-zero exit is intended.
        print(f"{Colors.FAIL}Flash verification failed! Exiting...{Colors.ENDC}")
    else:
        print(f"{Colors.OKGREEN}Flash verification OK!{Colors.ENDC}")
def start():
    """
    Launch the background scheduler that re-runs `job` every
    ``settings.INTERVAL_SECONDS`` seconds.
    """
    background_scheduler = BackgroundScheduler()
    background_scheduler.add_job(job, 'interval', seconds=settings.INTERVAL_SECONDS)
    background_scheduler.start()
def get_distances_between_points(ray_points3d, last_bin_width=1e10):
    """Estimates the distance between consecutive points in a ray.

    Args:
        ray_points3d: A tensor of shape `[A1, ..., An, M, 3]`,
            where M is the number of points in a ray.
        last_bin_width: A scalar giving the width assigned to the last bin.

    Returns:
        A tensor of shape `[A1, ..., An, M]` holding the distances between the
        M points; the final entry is set to `last_bin_width` (a large value)
        when that width is positive.
    """
    shape.check_static(tensor=ray_points3d,
                       tensor_name="ray_points3d",
                       has_dim_equals=(-1, 3))
    shape.check_static(tensor=ray_points3d,
                       tensor_name="ray_points3d",
                       has_rank_greater_than=1)
    # Pairwise deltas between consecutive samples along the ray.
    deltas = ray_points3d[..., 1:, :] - ray_points3d[..., :-1, :]
    dists = tf.norm(deltas, axis=-1)
    if last_bin_width > 0.0:
        tail = tf.broadcast_to([last_bin_width], dists[..., :1].shape)
        dists = tf.concat([dists, tail], axis=-1)
    return dists
def test_form__DatetimeDataConverter__toWidgetValue__1(DatetimeDataConverter):
    """`toWidgetValue` renders datetime with timezone localized."""
    value = datetime(2013, 2, 1, 17, 20, tzinfo=utc)
    assert DatetimeDataConverter.toWidgetValue(value) == u'13/02/01 21:20'
def reclassification_heavy_duty_trucks_to_light_commercial_vehicles(register_df: pd.DataFrame) -> pd.DataFrame:
    """
    Reclassify 'Heavy Duty Trucks' lighter than 3500 kg as 'Light Commercial Vehicles'.

    Vehicles registered with TIPUS 'CAMIONS' (trucks) are categorized as
    'Heavy Duty Trucks' even when their empty weight (PES_BUIT) is below
    3500 kg, where they actually belong to 'Light Commercial Vehicles'.

    :param register_df: vehicle register with TIPUS, PES_BUIT and Category columns.
    :return: the register with the misclassified rows recategorized.
    """
    anti = register_df[(register_df['TIPUS'] == 'CAMIONS') &
                       (register_df['PES_BUIT'] < 3500) &
                       (register_df['Category'] == 'Heavy Duty Trucks')]
    info_logger.info(f'Total number of Heavy Duty Trucks converted to Light Commercial Vehicles loaded: {anti.shape[0]}')
    result = anti_join_all_cols(register_df, anti)
    recategorized_rows = anti.assign(Category='Light Commercial Vehicles')
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported, behaviorally-equivalent replacement.
    return pd.concat([result, recategorized_rows])
def test_create_update_and_delete_room_team_roles(
    db: Session,
    client: TestClient,
    data: Data,
) -> None:
    """
    This test is responsible for checking the ability
    to create, update and delete team roles for a dataroom
    """
    auth_header = create_access_header(data.admin_user_room_2)
    # --- Create: POST a MEMBER role for team_1 on room_2 ---
    new_room_role_request = {
        "team_id": str(data.team_1.id),
        "team_role": "MEMBER",
    }
    response = client.post(
        f"{settings.API_V1_STR}/orgs/{data.first_org.id}/datarooms/{data.room_2.id}/team_roles",
        headers=auth_header,
        json=new_room_role_request,
    )
    assert response.status_code == 201
    # Verify the role was persisted with the requested name.
    role = (
        db.query(DataRoomRole)
        .filter_by(team_id=new_room_role_request["team_id"], dataroom_id=data.room_2.id)
        .first()
    )
    assert role
    assert role.name == new_room_role_request["team_role"]
    # --- Update: PATCH the role from MEMBER to ADMIN ---
    update_room_role_request = {
        "team_id": str(data.team_1.id),
        "team_role": "ADMIN",
    }
    response = client.patch(
        f"{settings.API_V1_STR}/orgs/{data.first_org.id}/datarooms/{data.room_2.id}/team_roles/{role.id}",
        headers=auth_header,
        json=update_room_role_request,
    )
    assert response.status_code == 200
    # Re-fetch to confirm the rename took effect.
    role = (
        db.query(DataRoomRole)
        .filter_by(
            team_id=update_room_role_request["team_id"], dataroom_id=data.room_2.id
        )
        .first()
    )
    assert role
    assert role.name == update_room_role_request["team_role"]
    # --- Delete: remove the role and confirm it is gone ---
    response = client.delete(
        f"{settings.API_V1_STR}/orgs/{data.first_org.id}/datarooms/{data.room_2.id}/team_roles/{role.id}",
        headers=auth_header,
    )
    assert response.status_code == 200
    role = (
        db.query(DataRoomRole)
        .filter_by(
            team_id=update_room_role_request["team_id"], dataroom_id=data.room_2.id
        )
        .first()
    )
    assert role is None
def SpComp(rho, U, mesh, fea, penal):
    """Alias SpCompFunction class with the apply method"""
    # Thin functional wrapper so callers can invoke SpCompFunction
    # (presumably a torch.autograd.Function — TODO confirm) without
    # spelling out `.apply` at every call site.
    return SpCompFunction.apply(rho, U, mesh, fea, penal)
def remove_event_class(request):
    """
    Delete the event class identified by the POSTed `classId` for the
    current user; unauthenticated requests get the login page instead.
    """
    if not request.user.is_authenticated:
        # Session expired or never logged in: show the login template.
        return render_to_response(
            "todo_login.html",
            {"error_info": constants.SESSION_EXPIRED_MSG,},
            RequestContext(request),
        )
    class_id = request.POST.get("classId", None)
    event_class = EventClass.get_classes_by_user(request.user.id).get(id=class_id)
    event_class.delete()
    return HttpResponse(json.dumps({}), content_type="application/json")
def get_landmark_position_from_state(x, ind):
    """
    Extract the position of landmark *ind* from the full state vector,
    which stores the robot pose followed by LM_SIZE entries per landmark.
    """
    start = STATE_SIZE + LM_SIZE * ind
    return x[start:start + LM_SIZE, :]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.