def get_name_convert_func():
"""
Get the function to convert Caffe2 layer names to PyTorch layer names.
Returns:
(func): function to convert parameter name from Caffe2 format to PyTorch
format.
"""
pairs = [
# ------------------------------------------------------------
# 'nonlocal_conv3_1_theta_w' -> 's3.pathway0_nonlocal1.conv_theta.weight'
[
r"^nonlocal_conv([0-9]*)_([0-9]*)_(.*)",
r"s\1.pathway0_nonlocal\2_\3",
],
# 'theta' -> 'conv_theta'
[r"^(.*)_nonlocal([0-9]*)_(theta)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'g' -> 'conv_g'
[r"^(.*)_nonlocal([0-9]*)_(g)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'phi' -> 'conv_phi'
[r"^(.*)_nonlocal([0-9]*)_(phi)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'out' -> 'conv_out'
[r"^(.*)_nonlocal([0-9]*)_(out)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'nonlocal_conv4_5_bn_s' -> 's4.pathway0_nonlocal5.bn.weight'
[r"^(.*)_nonlocal([0-9]*)_(bn)_(.*)", r"\1_nonlocal\2.\3.\4"],
# ------------------------------------------------------------
# 't_pool1_subsample_bn_rm' -> 's1_fuse.bn.running_mean'
[r"^t_pool1_subsample_bn_(.*)", r"s1_fuse.bn.\1"],
# 't_pool1_subsample' -> 's1_fuse.conv_f2s'
[r"^t_pool1_subsample_(.*)", r"s1_fuse.conv_f2s.\1"],
# 't_res4_5_branch2c_bn_subsample_bn_rm' -> 's4_fuse.bn.running_mean'
[
r"^t_res([0-9]*)_([0-9]*)_branch2c_bn_subsample_bn_(.*)",
r"s\1_fuse.bn.\3",
],
# 't_res4_5_branch2c_bn_subsample_w' -> 's4_fuse.conv_f2s.weight'
[
r"^t_res([0-9]*)_([0-9]*)_branch2c_bn_subsample_(.*)",
r"s\1_fuse.conv_f2s.\3",
],
# ------------------------------------------------------------
# 'res4_4_branch2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
[
r"^res([0-9]*)_([0-9]*)_branch([0-9]*)([a-z])_(.*)",
r"s\1.pathway0_res\2.branch\3.\4_\5",
],
# 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
[r"^res_conv1_bn_(.*)", r"s1.pathway0_stem.bn.\1"],
# 'conv1_w_momentum' -> 's1.pathway0_stem.conv.'
[r"^conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
# 'res4_0_branch1_w' -> 's4.pathway0_res0.branch1.weight'
[
r"^res([0-9]*)_([0-9]*)_branch([0-9]*)_(.*)",
r"s\1.pathway0_res\2.branch\3_\4",
],
# 'res_conv1_' -> 's1.pathway0_stem.conv.'
[r"^res_conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
# ------------------------------------------------------------
# 't_res4_4_branch2c_bn_b' -> 's4.pathway1_res4.branch2.c_bn_b'
[
r"^t_res([0-9]*)_([0-9]*)_branch([0-9]*)([a-z])_(.*)",
r"s\1.pathway1_res\2.branch\3.\4_\5",
],
# 't_res_conv1_bn_' -> 's1.pathway1_stem.bn.'
[r"^t_res_conv1_bn_(.*)", r"s1.pathway1_stem.bn.\1"],
# 't_conv1_' -> 's1.pathway1_stem.conv.'
[r"^t_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
# 't_res4_0_branch1_w' -> 's4.pathway1_res0.branch1.weight'
[
r"^t_res([0-9]*)_([0-9]*)_branch([0-9]*)_(.*)",
r"s\1.pathway1_res\2.branch\3_\4",
],
# 'res_conv1_' -> 's1.pathway0_stem.conv.'
[r"^t_res_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
# ------------------------------------------------------------
# pred_ -> head.projection.
[r"pred_(.*)", r"head.projection.\1"],
# '_bn_b' -> '.bias'
[r"(.*)bn.b\Z", r"\1bn.bias"],
# '.bn_s' -> '.weight'
[r"(.*)bn.s\Z", r"\1bn.weight"],
# '_bn_rm' -> '.running_mean'
[r"(.*)bn.rm\Z", r"\1bn.running_mean"],
# '_bn_riv' -> '.running_var'
[r"(.*)bn.riv\Z", r"\1bn.running_var"],
# '_b' -> '.bias'
[r"(.*)[\._]b\Z", r"\1.bias"],
# '_w' -> '.weight'
[r"(.*)[\._]w\Z", r"\1.weight"],
]
def convert_caffe2_name_to_pytorch(caffe2_layer_name):
"""
Convert the caffe2_layer_name to pytorch format by applying the list of
regular expressions.
Args:
caffe2_layer_name (str): caffe2 layer name.
Returns:
(str): pytorch layer name.
"""
for source, dest in pairs:
caffe2_layer_name = re.sub(source, dest, caffe2_layer_name)
return caffe2_layer_name
return convert_caffe2_name_to_pytorch
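# --- Hedged usage sketch (added for illustration; not part of the original
# snippet). It assumes `re` is imported at module level, which the function
# body already relies on, and simply exercises the returned converter on a
# few Caffe2 parameter names.
import re

_convert = get_name_convert_func()
assert _convert("res4_0_branch1_w") == "s4.pathway0_res0.branch1.weight"
assert _convert("res_conv1_bn_s") == "s1.pathway0_stem.bn.weight"
assert _convert("pred_b") == "head.projection.bias"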
| 23,500
|
def update_workflow_modal(
user_id: str,
user_email: str,
channel_id: str,
incident_id: int,
action: dict,
db_session=None,
slack_client=None,
):
"""Pushes an updated view to the run workflow modal."""
trigger_id = action["trigger_id"]
incident_id = action["view"]["private_metadata"]["incident_id"]
workflow_id = action["actions"][0]["selected_option"]["value"]
selected_workflow = workflow_service.get(db_session=db_session, workflow_id=workflow_id)
workflows = workflow_service.get_enabled(db_session=db_session)
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
modal_template = run_workflow_view(
incident=incident, workflows=workflows, selected_workflow=selected_workflow
)
modal_template["blocks"].append(
{
"type": "section",
"text": {"type": "mrkdwn", "text": f"*Description* \n {selected_workflow.description}"},
},
)
modal_template["blocks"].append(
{
"block_id": RunWorkflowBlockId.run_reason,
"type": "input",
"element": {
"type": "plain_text_input",
"multiline": True,
"action_id": RunWorkflowBlockId.run_reason,
},
"label": {"type": "plain_text", "text": "Run Reason"},
},
)
modal_template["blocks"].append(
{"type": "section", "text": {"type": "mrkdwn", "text": "*Parameters*"}}
)
if selected_workflow.parameters:
for p in selected_workflow.parameters:
modal_template["blocks"].append(
{
"block_id": f"{RunWorkflowBlockId.param}-{p['key']}",
"type": "input",
"element": {
"type": "plain_text_input",
"placeholder": {"type": "plain_text", "text": "Value"},
},
"label": {"type": "plain_text", "text": p["key"]},
}
)
else:
modal_template["blocks"].append(
{
"type": "section",
"text": {"type": "mrkdwn", "text": "This workflow has no parameters."},
}
)
modal_template["callback_id"] = RunWorkflowCallbackId.submit_form
update_modal_with_user(
client=slack_client,
trigger_id=trigger_id,
view_id=action["view"]["id"],
modal=modal_template,
)
| 23,501
|
def generate_dict_entry(key, wordlist):
"""Generate one entry of the python dictionary"""
entry = " '{}': {},\n".format(key, wordlist)
return entry
| 23,502
|
def test_download_task_saves_file_for_valid_request(tmp_path: Path) -> None:
"""Download on valid HTTP request.
Args:
tmp_path (Path): see https://docs.pytest.org/en/stable/tmpdir.html
"""
filename = "data.zip"
responses.add(
responses.GET,
"http://www.urltodata.ie",
content_type="application/zip",
status=200,
)
download = Download(
url="http://www.urltodata.ie", dirpath=str(tmp_path), filename=filename,
)
download.run()
filepath = tmp_path / filename
assert filepath.exists()
| 23,503
|
def record_to_dict(record):
"""
Transform string into bovespa.Record
:param record: (string) position string from bovespa.
:return: parsed Record
"""
try:
record = bovespa.Record(record)
except Exception:
return None
return {
'date': record.date, 'year': record.date.year,
'month': record.date.month, 'day': record.date.day,
'money_volume': record.volume, 'volume': record.quantity,
'stock_code': record.stock_code, 'company_name': record.company_name,
'price_open': record.price_open, 'price_close': record.price_close,
'price_mean': record.price_mean, 'price_high': record.price_high,
'price_low': record.price_low
}
| 23,504
|
def sample_product(user, **params):
"""Create and return a custom product"""
defaults = {
'name': 'Ron Cacique',
'description': 'El ron cacique es...',
'price': 20,
'weight': '0.70',
'units': 'l',
'featured': True,
}
defaults.update(params)
return Products.objects.create(user=user, **defaults)
| 23,505
|
def _find_op_path_(block, outputs, inputs, no_grad_set):
"""
no_grad_set will also be changed
"""
input_names = set([inp.name for inp in inputs])
output_names = set([out.name for out in outputs])
relevant_op_flags = [True] * len(block.ops)
# All the inputs of the block are used if inputs is empty,
if inputs:
for i, op in enumerate(block.ops):
if _some_in_set_(op.desc.input_arg_names(), input_names):
for name in op.desc.output_arg_names():
if name not in no_grad_set:
input_names.add(name)
else:
relevant_op_flags[i] = False
for i, op in reversed(list(enumerate(block.ops))):
if _some_in_set_(op.desc.output_arg_names(), output_names):
for name in op.desc.input_arg_names():
if name not in no_grad_set:
output_names.add(name)
else:
relevant_op_flags[i] = False
op_path = [
block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
]
if inputs:
for op in op_path:
for name in op.desc.input_arg_names():
if name not in input_names and block.vars[name].stop_gradient:
no_grad_set.add(name)
return op_path
| 23,506
|
def clean_gltf_materials(gltf):
"""
Remove unused glTF materials.
:param gltf: glTF object
:return: new list of materials
"""
return filter(lambda m: m['name'] in used_material_names(gltf), gltf['materials'])
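# `used_material_names` is not defined in this snippet. Below is a hypothetical
# sketch of what it might look like, assuming materials are referenced by index
# from mesh primitives as in glTF 2.0 (illustrative only; the real helper may
# differ).
def used_material_names(gltf):
    """Collect the names of materials referenced by any mesh primitive."""
    used = set()
    for mesh in gltf.get('meshes', []):
        for prim in mesh.get('primitives', []):
            idx = prim.get('material')
            if idx is not None:
                used.add(gltf['materials'][idx].get('name'))
    return used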
| 23,507
|
def create_root(request):
"""
Returns a new traversal tree root.
"""
r = Root()
r.add('api', api.create_root(request))
r.add('a', Annotations(request))
r.add('t', TagStreamFactory())
r.add('u', UserStreamFactory())
return r
| 23,508
|
def test_flatten_preserve_nulls(minion_opts, local_salt):
"""
Test the `flatten` Jinja filter.
"""
rendered = render_jinja_tmpl(
"{{ [1, 2, [None, 3, [4]]] | flatten(preserve_nulls=True) }}",
dict(opts=minion_opts, saltenv="test", salt=local_salt),
)
assert rendered == "[1, 2, None, 3, 4]"
| 23,509
|
def sort_basis_functions(basis_functions):
"""Sorts a set of basis functions by their distance to the
function with the smallest two-norm.
Args:
basis_functions: The set of basis functions to sort.
Expected shape is (-1, basis_function_length).
Returns:
sorted_basis: The sorted basis functions
sorted_ids: Indices of the original (unsorted) basis functions in
sorted order, i.e. sorted_basis[k] == basis_functions[sorted_ids[k]].
"""
min_norm_idx = np.argmin(np.linalg.norm(basis_functions, axis=-1), axis=0)
min_norm_fn = basis_functions[min_norm_idx]
ids = list(range(len(basis_functions)))
sorted_ids = sorted(ids, key=lambda x: np.linalg.norm(basis_functions[x] - min_norm_fn))
sorted_basis = np.array(basis_functions)[sorted_ids]
return sorted_basis, sorted_ids
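# Hedged usage sketch (added for illustration): with NumPy imported as np, sort
# a small random basis and check that `sorted_ids` maps each sorted position
# back to a row of the original array.
import numpy as np

_rng = np.random.default_rng(0)
_basis = _rng.normal(size=(5, 8))
_sorted_basis, _sorted_ids = sort_basis_functions(_basis)
for _k, _idx in enumerate(_sorted_ids):
    assert np.allclose(_sorted_basis[_k], _basis[_idx])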
| 23,510
|
def to_zgrid(roms_file, z_file, src_grid=None, z_grid=None, depth=None,
records=None, threads=2, reftime=None, nx=0, ny=0, weight=10,
vmap=None, cdl=None, dims=2, pmap=None):
"""
Given an existing ROMS history or average file, create (if it does not exist)
a new z-grid file. Use the given z_grid or otherwise build one with the
same horizontal extent and the specified depths and interpolate the
ROMS fields onto the z-grid.
Parameters
----------
roms_file : string,
File name of src file to interpolate from
z_file : string,
Name of destination file to write to
src_grid : (string or seapy.model.grid), optional:
Name or instance of source grid. If nothing is specified,
derives grid from the roms_file
z_grid: (string or seapy.model.grid), optional:
Name or instance of output definition
depth: numpy.ndarray, optional:
array of depths to use for z-level
records : numpy.ndarray, optional:
Record indices to interpolate
threads : int, optional:
number of processing threads
reftime: datetime, optional:
Reference time as the epoch for z-grid file
nx : float, optional:
decorrelation length-scale for OA (same units as source data,
typically twice the difference in the source data)
ny : float, optional:
decorrelation length-scale for OA (same units as source data,
typically twice the difference in the source data)
weight : int, optional:
number of points to use in weighting matrix
vmap : dictionary, optional
mapping source and destination variables
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
dims : int, optional
number of dimensions to use for lat/lon arrays (default 2)
pmap : numpy.ndarray, optional:
use the specified pmap rather than compute it
Returns
-------
pmap : ndarray
the weighting matrix computed during the interpolation
"""
if src_grid is None:
src_grid = seapy.model.asgrid(roms_file)
else:
src_grid = seapy.model.asgrid(src_grid)
ncsrc = seapy.netcdf(roms_file)
src_ref, time = seapy.roms.get_reftime(ncsrc)
if reftime is not None:
src_ref = reftime
records = np.arange(0, ncsrc.variables[time].shape[0]) \
if records is None else np.atleast_1d(records)
# Load the grid
if z_grid is not None:
z_grid = seapy.model.asgrid(z_grid)
elif os.path.isfile(z_file):
z_grid = seapy.model.asgrid(z_file)
if not os.path.isfile(z_file):
if z_grid is None:
lat = src_grid.lat_rho.shape[0]
lon = src_grid.lat_rho.shape[1]
if depth is None:
raise ValueError("depth must be specified")
ncout = seapy.roms.ncgen.create_zlevel(z_file, lat, lon,
len(depth), src_ref, "ROMS z-level",
cdl=cdl, dims=dims)
if dims == 1:
ncout.variables["lat"][:] = src_grid.lat_rho[:, 0]
ncout.variables["lon"][:] = src_grid.lon_rho[0, :]
else:
ncout.variables["lat"][:] = src_grid.lat_rho
ncout.variables["lon"][:] = src_grid.lon_rho
ncout.variables["depth"][:] = depth
ncout.variables["mask"][:] = src_grid.mask_rho
ncout.sync()
z_grid = seapy.model.grid(z_file)
else:
lat = z_grid.lat_rho.shape[0]
lon = z_grid.lat_rho.shape[1]
dims = z_grid.spatial_dims
ncout = seapy.roms.ncgen.create_zlevel(z_file, lat, lon,
len(z_grid.z), src_ref, "ROMS z-level",
cdl=cdl, dims=dims)
if dims == 1:
ncout.variables["lat"][:] = z_grid.lat_rho[:, 0]
ncout.variables["lon"][:] = z_grid.lon_rho[0, :]
else:
ncout.variables["lat"][:] = z_grid.lat_rho
ncout.variables["lon"][:] = z_grid.lon_rho
ncout.variables["depth"][:] = z_grid.z
ncout.variables["mask"][:] = z_grid.mask_rho
else:
ncout = netCDF4.Dataset(z_file, "a")
ncout.variables["time"][:] = seapy.roms.date2num(
seapy.roms.num2date(ncsrc, time, records), ncout, "time")
# Call the interpolation
try:
src_grid.set_east(z_grid.east())
pmap = __interp_grids(src_grid, z_grid, ncsrc, ncout, records=records,
threads=threads, nx=nx, ny=ny, vmap=vmap, weight=weight,
z_mask=True, pmap=pmap)
except TimeoutError:
print("Timeout: process is hung, deleting output.")
# Delete the output file
os.remove(z_file)
finally:
# Clean up
ncsrc.close()
ncout.close()
return pmap
| 23,511
|
def info_materials_groups_get():
"""
info_materials_groups_get
Get **array** of information for all materials, or if an array of `type_ids` is included, information on only those materials.
:rtype: List[Group]
"""
session = info_map.Session()
mat = aliased(info_map.Material)
grp = aliased(info_map.Group)
q = session.query(mat.group_id,grp.name).join(grp).distinct()
groups = [Group(group=row.group_id,name=row.name) for row in q.all()]
return groups, 200
| 23,512
|
def TestFSSH():
""" molcas test
1. FSSH calculation
"""
pyrai2mddir = os.environ['PYRAI2MD']
testdir = '%s/fssh' % (os.getcwd())
record = {
'coord' : 'FileNotFound',
'energy' : 'FileNotFound',
'energy1' : 'FileNotFound',
'energy2' : 'FileNotFound',
'energy3' : 'FileNotfound',
'kinetic1' : 'FileNotFound',
'kinetic2' : 'FileNotFound',
'velo1' : 'FileNotFound',
'velo2' : 'FileNotFound',
'nac1' : 'FileNotFound',
'nac2' : 'FileNotFound',
'soc1' : 'FileNotFound',
'soc2' : 'FileNotFound',
'pop2' : 'FileNotFound',
}
filepath = '%s/TEST/fssh/fssh_data/c3h2o.xyz' % (pyrai2mddir)
if os.path.exists(filepath):
record['coord'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.energy' % (pyrai2mddir)
if os.path.exists(filepath):
record['energy'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.energy.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['energy1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.energy.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['energy2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.energy.3' % (pyrai2mddir)
if os.path.exists(filepath):
record['energy3'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.kinetic.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['kinetic1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.kinetic.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['kinetic2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.nac.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['nac1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.nac.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['nac2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.soc.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['soc1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.soc.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['soc2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.velo.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['velo1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.velo.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['velo2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.pop.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['pop2'] = filepath
summary = """
*---------------------------------------------------*
| |
| FSSH Test Calculation |
| |
*---------------------------------------------------*
Check files and settings:
-------------------------------------------------------
"""
for key, location in record.items():
summary += ' %-10s %s\n' % (key, location)
for key, location in record.items():
if location == 'FileNotFound':
summary += '\n Test files are incomplete, please download them again, skip test\n\n'
return summary, 'FAILED(test file unavailable)'
if location == 'VariableNotFound':
summary += '\n Environment variables are not set, cannot find program, skip test\n\n'
return summary, 'FAILED(environment variable missing)'
CopyInput(record, testdir)
Setup(record, testdir)
summary += """
Copy files:
%-10s --> %s/c3h2o.xyz
%-10s --> %s/c3h2o.energy
%-10s --> %s/c3h2o.energy.1
%-10s --> %s/c3h2o.energy.2
%-10s --> %s/c3h2o.energy.3
%-10s --> %s/c3h2o.kinetic
%-10s --> %s/c3h2o.kinetic.1
%-10s --> %s/c3h2o.nac
%-10s --> %s/c3h2o.nac.1
%-10s --> %s/c3h2o.soc
%-10s --> %s/c3h2o.soc.1
%-10s --> %s/c3h2o.velo
%-10s --> %s/c3h2o.velo.1
%-10s --> %s/c3h2o.pop.1
Run FSSH Calculation:
""" % ('coord', testdir,
'energy1', testdir,
'energy2', testdir,
'energy', testdir,
'energy3', testdir,
'kinetic1', testdir,
'kinetic2', testdir,
'nac1', testdir,
'nac2', testdir,
'soc1', testdir,
'soc2', testdir,
'velo1', testdir,
'velo2', testdir,
'pop2', testdir)
results, code = RunFSSH(record, testdir, pyrai2mddir)
if code == 'PASSED':
summary += """
-------------------------------------------------------
FSSH OUTPUT (NAC)
-------------------------------------------------------
%s
-------------------------------------------------------
nactype == nac test done, entering nactype == ktdc test
""" % (results)
else:
summary += """
nactype == nac test failed, stop here
"""
return summary, code
results, code = RunFSSH2(record, testdir, pyrai2mddir)
summary += """
-------------------------------------------------------
FSSH OUTPUT (kTDC)
-------------------------------------------------------
%s
-------------------------------------------------------
""" % (results)
return summary, code
| 23,513
|
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0,x)
return s
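# Hedged usage sketch (added): with NumPy imported as np, relu clips negative
# entries to zero element-wise.
import numpy as np

assert relu(-3.0) == 0.0
assert (relu(np.array([-1.0, 0.5, 2.0])) == np.array([0.0, 0.5, 2.0])).all()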
| 23,514
|
def _disable_flavor(flavor):
"""Completely disable the given `flavor` (no checks)."""
_deregister_aliases(flavor)
_deregister_description(flavor)
_deregister_identifier(flavor)
_deregister_converters(flavor)
all_flavors.remove(flavor)
| 23,515
|
def validate_config(config: TrainerConfigDict) -> None:
"""Checks and updates the config based on settings.
Rewrites rollout_fragment_length to take into account n_step truncation.
"""
if config["exploration_config"]["type"] == "ParameterNoise":
if config["batch_mode"] != "complete_episodes":
logger.warning(
"ParameterNoise Exploration requires `batch_mode` to be "
"'complete_episodes'. Setting batch_mode=complete_episodes.")
config["batch_mode"] = "complete_episodes"
if config.get("noisy", False):
raise ValueError(
"ParameterNoise Exploration and `noisy` network cannot be "
"used at the same time!")
# Update effective batch size to include n-step
adjusted_batch_size = max(config["rollout_fragment_length"],
config.get("n_step", 1))
config["rollout_fragment_length"] = adjusted_batch_size
if config.get("prioritized_replay"):
if config["multiagent"]["replay_mode"] == "lockstep":
raise ValueError("Prioritized replay is not supported when "
"replay_mode=lockstep.")
elif config["replay_sequence_length"] > 1:
raise ValueError("Prioritized replay is not supported when "
"replay_sequence_length > 1.")
| 23,516
|
def test_adjective_predicates(r):
""" Test adjectives with an associated predicate """
# Accusative case (þolfall)
s = r.parse_single(
"""
Hundurinn var viðstaddur sýninguna sem fjallaði um hann.
"""
)
assert "NP-PRD lo_sb_nf_sþf_et_kk NP-ADP no_et_þf_kvk" in s.tree.flat
# Dative case (þágufall)
s = r.parse_single(
"""
Hundurinn var málkunnugur kettinum frá fyrri tíð.
"""
)
assert (
"NP-PRD lo_sb_nf_sþgf_et_kk NP-ADP no_et_þgf_kk /NP-ADP /NP-PRD" in s.tree.flat
)
# Possessive case (eignarfall)
s = r.parse_single(
"""
Kötturinn þóttist vera frjáls ferða sinna.
"""
)
assert (
"NP-PRD lo_sb_nf_sef_et_kk NP-ADP no_ft_ef_kvk fn_ft_ef_kvk /NP-ADP /NP-PRD"
in s.tree.flat
)
s = r.parse_single(
"""
Kötturinn hafði verið fullur sjálfstrausts.
"""
)
assert "NP-PRD lo_sb_nf_sef_et_kk NP-ADP no_et_ef_hk /NP-ADP /NP-PRD" in s.tree.flat
s = r.parse_single(
"""
Verkamaðurinn er verður launa sinna.
"""
)
assert (
"NP-PRD lo_sb_nf_sef_et_kk NP-ADP no_ft_ef_hk fn_ft_ef_hk /NP-ADP /NP-PRD"
in s.tree.flat
)
| 23,517
|
def kane_frstar_alt(bodies, coordinates, speeds, kdeqs, inertial_frame, uaux=Matrix(), udep=None, Ars=None):
"""Form the generalized inertia force."""
t = dynamicsymbols._t
N = inertial_frame
# Derived inputs
q = Matrix(coordinates) # q
u = Matrix(speeds) # u
udot = u.diff(t)
qdot_u_map,_,_,_k_kqdot = _initialize_kindiffeq_matrices(q, u, kdeqs, uaux=Matrix())
# Dicts setting things to zero
udot_zero = dict((i, 0) for i in udot)
uaux_zero = dict((i, 0) for i in uaux)
uauxdot = [diff(i, t) for i in uaux]
uauxdot_zero = dict((i, 0) for i in uauxdot)
# Dictionary of q' and q'' to u and u'
q_ddot_u_map = dict((k.diff(t), v.diff(t)) for (k, v) in qdot_u_map.items())
q_ddot_u_map.update(qdot_u_map)
# Fill up the list of partials: format is a list with num elements
# equal to number of entries in body list. Each of these elements is a
# list - either of length 1 for the translational components of
# particles or of length 2 for the translational and rotational
# components of rigid bodies. The innermost list is the list of
# partial velocities.
def get_partial_velocity(body):
if isinstance(body,YAMSRigidBody) or isinstance(body, RigidBody):
vlist = [body.masscenter.vel(N), body.frame.ang_vel_in(N)]
elif isinstance(body, Particle):
vlist = [body.point.vel(N),]
elif isinstance(body,YAMSFlexibleBody):
print('>>>> FlexibleBody TODO, Jv Jo to partials')
vlist=[body.masscenter.vel(N), body.frame.ang_vel_in(N)]
else:
raise TypeError('The body list may only contain either ' 'RigidBody or Particle as list elements.')
v = [msubs(vel, qdot_u_map) for vel in vlist]
return partial_velocity(v, u, N)
partials = [get_partial_velocity(body) for body in bodies]
# Compute fr_star in two components:
# fr_star = -(MM*u' + nonMM)
o = len(u)
MM = zeros(o, o)
nonMM = zeros(o, 1)
zero_uaux = lambda expr: msubs(expr, uaux_zero)
zero_udot_uaux = lambda expr: msubs(msubs(expr, udot_zero), uaux_zero)
for i, body in enumerate(bodies):
bodyMM = zeros(o, o)
bodynonMM = zeros(o, 1)
if isinstance(body,YAMSRigidBody) or isinstance(body, RigidBody):
# Rigid Body (see sympy.mechanics.kane)
M = zero_uaux( body.mass )
I = zero_uaux( body.central_inertia )
vel = zero_uaux( body.masscenter.vel(N) )
omega = zero_uaux( body.frame.ang_vel_in(N) )
acc = zero_udot_uaux( body.masscenter.acc(N) )
# --- Mass Matrix
for j in range(o):
tmp_vel = zero_uaux(partials[i][0][j])
tmp_ang = zero_uaux(I & partials[i][1][j])
for k in range(o):
# translational
bodyMM[j, k] += M * (tmp_vel & partials[i][0][k])
# rotational
bodyMM[j, k] += (tmp_ang & partials[i][1][k])
# --- Full inertial loads Matrix
inertial_force = (M.diff(t) * vel + M * acc)
inertial_torque = zero_uaux((I.dt(body.frame) & omega) + msubs(I & body.frame.ang_acc_in(N), udot_zero) + (omega ^ (I & omega))) # "&" = dot, "^"=cross
for j in range(o):
bodynonMM[j] += inertial_force & partials[i][0][j]
bodynonMM[j] += inertial_torque & partials[i][1][j]
elif isinstance(body,YAMSFlexibleBody):
print('>>>> FlexibleBody TODO')
M = zero_uaux(body.mass)
#I = zero_uaux(body.central_inertia)
vel = zero_uaux(body.origin.vel(N))
omega = zero_uaux(body.frame.ang_vel_in(N))
acc = zero_udot_uaux(body.origin.acc(N))
inertial_force=0 # Fstar !<<<< TODO
inertial_torque=0 # Tstar !<<<< TODO
else:
# Particles
M = zero_uaux(body.mass)
vel = zero_uaux(body.point.vel(N))
acc = zero_udot_uaux(body.point.acc(N))
inertial_force = (M.diff(t) * vel + M * acc)
inertial_torque=0 # Tstar
for j in range(o):
temp = zero_uaux(partials[i][0][j])
for k in range(o):
bodyMM[j, k] += M * (temp & partials[i][0][k])
bodynonMM[j] += inertial_force & partials[i][0][j]
# Perform important substitution and store body contributions
body.MM_alt = zero_uaux(msubs(bodyMM, q_ddot_u_map))
body.nonMM_alt_bef = bodynonMM
#body.nonMM_alt = msubs(msubs(bodynonMM, q_ddot_u_map), udot_zero, uauxdot_zero, uaux_zero)
# Cumulative MM and nonMM over all bodies
MM += bodyMM
nonMM += bodynonMM
# --- Storing for debug
body.acc_alt = acc
body.vel_alt = vel
body.omega_alt = omega
body.inertial_force_alt = inertial_force
body.inertial_torque_alt = inertial_torque
body.Jv_vect_alt=partials[i][0]
body.Jo_vect_alt=partials[i][1]
# End loop on bodies
nonMM = msubs(msubs(nonMM, q_ddot_u_map), udot_zero, uauxdot_zero, uaux_zero)
# Compose fr_star out of MM and nonMM
fr_star = -(MM * msubs(Matrix(udot), uauxdot_zero) + nonMM)
# If there are dependent speeds, we need to find fr_star_tilde
if udep:
p = o - len(udep)
fr_star_ind = fr_star[:p, 0]
fr_star_dep = fr_star[p:o, 0]
fr_star = fr_star_ind + (Ars.T * fr_star_dep)
# Apply the same to MM
MMi = MM[:p, :]
MMd = MM[p:o, :]
MM = MMi + (Ars.T * MMd)
MM_full= mass_matrix_full(MM,_k_kqdot)
#self._bodylist = bodies
#self._frstar = fr_star
#self._k_d = MM
#self._f_d = -msubs(self._fr + self._frstar, udot_zero)
return fr_star, MM, MM_full
| 23,518
|
def line(value):
"""
| Line which can be used to cross with functions like RSI or MACD.
| Name: line\_\ **value**\
:param value: Value of the line
:type value: float
"""
def return_function(data):
column_name = f'line_{value}'
if column_name not in data.columns:
data[column_name] = value
return data[column_name].copy()
return return_function
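# Hedged usage sketch (added): with pandas imported as pd, the returned
# function lazily adds a constant column named 'line_<value>' to the frame and
# returns a copy of that column.
import pandas as pd

_df = pd.DataFrame({"close": [10.0, 11.0, 12.0]})
_level = line(30)(_df)
assert (_level == 30).all()
assert "line_30" in _df.columns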
| 23,519
|
def goodsGetSku(spuId,regionId):
"""
:param spuId:
:param regionId:
:return:
"""
reqUrl = req_url('goods', "/goods/getGoodsList")
if reqUrl:
url = reqUrl
else:
return "服务host匹配失败"
headers = {
'Content-Type': 'application/json',
'X-Region-Id': regionId,
}
body = json.dumps(
{
"spuId": spuId,
"groundStatus": "",
"environment": "",
"page": 1,
"limit": 20
}
)
result = requests.post(url=url,headers=headers,data=body)
resultJ = json.loads(result.content)
return resultJ
| 23,520
|
def build_model(config, model_dir=None, weight=None):
"""
Inputs:
config: train_config, see train_celery.py
model_dir: a trained model's output dir, None if model has not been trained yet
weight: class weights
"""
contents = os.listdir(model_dir)
print(contents)
return ClassificationModel(
"roberta",
model_dir or "roberta-base",
use_cuda=USE_CUDA,
args={
# https://github.com/ThilinaRajapakse/simpletransformers/#sliding-window-for-long-sequences
"sliding_window": config.get("sliding_window", False),
"reprocess_input_data": True,
"overwrite_output_dir": True,
"use_cached_eval_features": False,
"no_cache": True,
"num_train_epochs": config["num_train_epochs"],
"weight": weight,
# TODO I don't need checkpoints yet - disable this to save disk space
"save_eval_checkpoints": False,
"save_model_every_epoch": False,
"save_steps": 999999,
# Bug in the library, need to specify it here and in the .train_model kwargs
"output_dir": config.get("model_output_dir"),
# Maybe a bug in the library, need to turn off multiprocessing for prediction
# We may also want to look at the process_count config. It may use too many cpus
"use_multiprocessing": False,
# Note: 512 requires 16g of GPU mem. You can try 256 for 8g.
"max_seq_length": config.get("max_seq_length", 512),
},
)
| 23,521
|
def get_scheduler(config, optimizer):
"""
:param config: 配置参数
:param optimizer: 优化器
:return: 学习率衰减策略
"""
# 加载学习率衰减策略
if config.scheduler_name == 'StepLR':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=config.StepLR['decay_step'],
gamma=config.StepLR["gamma"])
elif config.scheduler_name == 'Cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=config.Cosine['restart_step'],
eta_min=config.Cosine['eta_min'])
elif config.scheduler_name == 'author':
scheduler = WarmupMultiStepLR(optimizer,
config.WarmupMultiStepLR["steps"],
config.WarmupMultiStepLR["gamma"],
config.WarmupMultiStepLR["warmup_factor"],
config.WarmupMultiStepLR["warmup_iters"],
config.WarmupMultiStepLR["warmup_method"]
)
return scheduler
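# Hedged usage sketch (added; the project's config object is not shown in this
# snippet). Any object exposing `scheduler_name` plus the matching per-scheduler
# dict works, so a SimpleNamespace stands in for it here (illustrative only).
from types import SimpleNamespace
import torch
from torch import optim

_model = torch.nn.Linear(4, 2)
_opt = optim.SGD(_model.parameters(), lr=0.1)
_cfg = SimpleNamespace(scheduler_name="StepLR",
                       StepLR={"decay_step": 10, "gamma": 0.5})
_sched = get_scheduler(_cfg, _opt)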
| 23,522
|
def main():
"""
Pairwise identity is a script which takes a file of protein sequences and calculates
the percent identity of all possible pairwise combinations (the Cartesian product) of the proteins
"""
sh_parse = argparse.ArgumentParser(description="Cartesian product pairwise identity of a protein sequence file")
sh_parse.add_argument("-i", "--input", dest="infile", help="Input FILE", metavar="FILE", required=True)
sh_parse.add_argument("-o", "--output", dest="outfile", help="Output FILE", metavar="FILE", required=True)
sh_parse.add_argument("-t", "--type", dest="type", help="Input file TYPE", metavar="TYPE", default="fasta")
sh_parse.add_argument("-c", "--cpu", dest="cores", help="Number of CORES to use", metavar="CORES", default=2,
type=int)
sh_args = sh_parse.parse_args()
pairwise_id(sh_args.infile, sh_args.outfile, in_type=sh_args.type, cores=sh_args.cores)
| 23,523
|
def pipeline_dict() -> dict:
"""Pipeline config dict. You need to update the labels!"""
pipeline_dictionary = {
"name": "german_business_names",
"features": {
"word": {"embedding_dim": 16, "lowercase_tokens": True},
"char": {
"embedding_dim": 16,
"encoder": {
"type": "gru",
"num_layers": 1,
"hidden_size": 32,
"bidirectional": True,
},
"dropout": 0.1,
},
},
"head": {
"type": "TextClassification",
"labels": [
"Unternehmensberatungen",
"Friseure",
"Tiefbau",
"Dienstleistungen",
"Gebrauchtwagen",
"Restaurants",
"Architekturbüros",
"Elektriker",
"Vereine",
"Versicherungsvermittler",
"Sanitärinstallationen",
"Edv",
"Maler",
"Physiotherapie",
"Werbeagenturen",
"Apotheken",
"Vermittlungen",
"Hotels",
"Autowerkstätten",
"Elektrotechnik",
"Allgemeinärzte",
"Handelsvermittler Und -vertreter",
],
"pooler": {
"type": "gru",
"num_layers": 1,
"hidden_size": 16,
"bidirectional": True,
},
"feedforward": {
"num_layers": 1,
"hidden_dims": [16],
"activations": ["relu"],
"dropout": [0.1],
},
},
}
return pipeline_dictionary
| 23,524
|
def test_output_should_conform_to_hocr(tmp_path):
"""Test if an exported file conform to hOCR."""
html_path = os.path.join(tmp_path, "md.html")
pdftotree.parse("tests/input/md.pdf", html_path)
with Popen(["hocr-check", html_path], stderr=PIPE) as proc:
assert all([line.decode("utf-8").startswith("ok") for line in proc.stderr])
# Check detailed things that hocr-check does not check.
with open(html_path) as fp:
soup = BeautifulSoup(fp, "lxml")
capabilities = soup.find("meta", attrs={"name": "ocr-capabilities"})
# Check the list as hocr-check only checks the existence of "ocr-capabilities".
assert capabilities["content"].split() == [
"ocr_page",
"ocr_table",
"ocrx_block",
"ocrx_line",
"ocrx_word",
]
| 23,525
|
def _is_hangul_syllable(i):
"""
Function for determining if a Unicode scalar value i is within the range of Hangul syllables.
:param i: Unicode scalar value to lookup
:return: Boolean: True if the lookup value is within the range of Hangul syllables, otherwise False.
"""
if i in range(0xAC00, 0xD7A3 + 1): # Range of Hangul characters as defined in UnicodeData.txt
return True
return False
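# Hedged usage sketch (added): code points in U+AC00..U+D7A3 are Hangul
# syllables, anything outside that block is not.
assert _is_hangul_syllable(ord("한"))
assert not _is_hangul_syllable(ord("A"))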
| 23,526
|
def shape14_4(tik_instance, input_x, res, input_shape, shape_info):
"""input_shape == ((32, 16, 14, 14, 16), 'float16', (1, 1), (1, 1))"""
stride_w, stride_h, filter_w, filter_h, dilation_filter_w, dilation_filter_h = shape_info
pad = [0, 0, 0, 0]
l1_h = 14
l1_w = 14
c1_index = 0
jump_stride = 1
repeat_mode = 1
with tik_instance.for_range(0, 32, block_num=32) as block_index:
eeb0 = block_index % 2
eeb1 = block_index // 2
input_1_1_local_l1 = tik_instance.Tensor("float16", (196 * 32 * 16,), scope=tik.scope_cbuf,
name="input_1_1_local_l1")
input_1_1_fractal_l1_local_ub = tik_instance.Tensor("float16", (106496 // 2,), scope=tik.scope_ubuf,
name="input_1_1_fractal_l1_local_ub")
input_1_2_fractal_l1_local_ub = tik_instance.Tensor("float16", (196 * 16 * 16,), scope=tik.scope_ubuf,
name="input_1_2_fractal_l1_local_ub")
with tik_instance.for_range(0, 32) as i:
tik_instance.data_move(input_1_1_local_l1[i * 3136], input_x[i, eeb1, 0, 0, 0], 0, 1, 196, 0, 0)
with tik_instance.for_range(0, 16) as i:
fetch_filter_w = 0
fetch_filter_h = 0
left_top_h = 0
left_top_w = 0
tik_instance.load3dv1(input_1_1_fractal_l1_local_ub[i * 3328],
input_1_1_local_l1[i * 3136 + eeb0 * 16 * 3136],
pad, l1_h, l1_w, c1_index, fetch_filter_w, fetch_filter_h,
left_top_w, left_top_h, stride_w, stride_h, filter_w,
filter_h, dilation_filter_w, dilation_filter_h,
jump_stride, repeat_mode, 13)
with tik_instance.for_range(0, 16) as i:
tik_instance.data_move(input_1_2_fractal_l1_local_ub[i * 196 * 16],
input_1_1_fractal_l1_local_ub[i * 3328], 0, 1, 196, 0, 0)
with tik_instance.for_range(0, 196) as i:
tik_instance.data_move(res[eeb1, i + 196 * eeb0, 0, 0], input_1_2_fractal_l1_local_ub[256 * i], 0, 1,
16, 0, 0)
return tik_instance, res
| 23,527
|
def read_json(
downloader: Download, datasetinfo: Dict, **kwargs: Any
) -> Optional[Iterator[Union[List, Dict]]]:
"""Read data from json source allowing for JSONPath expressions
Args:
downloader (Download): Download object for downloading JSON
datasetinfo (Dict): Dictionary of information about dataset
**kwargs: Variables to use when evaluating template arguments
Returns:
Optional[Iterator[Union[List,Dict]]]: Iterator or None
"""
url = get_url(datasetinfo["url"], **kwargs)
response = downloader.download(url)
json = response.json()
expression = datasetinfo.get("jsonpath")
if expression:
expression = parse(expression)
json = expression.find(json)
if isinstance(json, list):
return iter(json)
return None
| 23,528
|
def json_dumps_safer(obj, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dumps(obj, cls=WandBJSONEncoder, **kwargs)
| 23,529
|
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
# raise NotImplementedError()
full_data = pd.read_csv(filename).drop_duplicates()
data = full_data.drop(['id', 'date', 'lat', 'long'],
axis=1)
data = data.dropna()
for f in ZERO_AND_ABOVE:
data = data[data[f] >= 0]
for f in ONLY_POSITIVE:
data = data[data[f] > 0]
data['yr_renovated'] = np.where(data['yr_renovated'] == 0.0,
data['yr_built'], data['yr_renovated'])
data = pd.get_dummies(data, columns=['zipcode'],
drop_first=True)
features, label = data.drop("price", axis=1), data['price']
return features, label
| 23,530
|
def test(runner):
"""Test the environment
* Verify redis connectivity independent of moi
* Verify database connectivity
* Verify submission via moi
Tests are performed both on the server and ipengines.
"""
def redis_test(**kwargs):
"""Put and get a key from redis"""
from uuid import uuid4
from redis import Redis
from qiita_core.configuration_manager import ConfigurationManager
config = ConfigurationManager()
r_client = Redis(host=config.redis_host,
port=config.redis_port,
password=config.redis_password,
db=config.redis_db)
key = str(uuid4())
r_client.set(key, 42, ex=1)
return int(r_client.get(key))
def postgres_test(**kwargs):
"""Open a connection and query postgres"""
from qiita_db.sql_connection import SQLConnectionHandler
c = SQLConnectionHandler()
return c.execute_fetchone("SELECT 42")[0]
def moi_test(**kwargs):
"""Submit a function via moi"""
from moi.job import submit_nouser
def inner(a, b, **kwargs):
return a + b
_, _, ar = submit_nouser(inner, 7, 35)
state, result = _ipy_wait(ar)
return result
if runner == 'all':
runner = ('local', 'remote', 'moi')
else:
runner = [runner]
for name in runner:
_test_runner(name, "redis", redis_test, 42)
_test_runner(name, "postgres", postgres_test, 42)
_test_runner(name, "submit via moi", moi_test, 42)
| 23,531
|
def reset_database():
"""仅限开发阶段使用,请不要在发布阶段开启这样的危险命令
"""
if app.config['ADMIN_KEY']:
if request.args.get('key') == app.config['ADMIN_KEY']:
if request.args.get('totp') == pyotp.TOTP(app.config['TOTP_SECRET']).now():
os.remove(app.config['SQLALCHEMY_DATABASE_PATH'])
db.create_all()
return 'Success!'
abort(401)
| 23,532
|
def test_construct_with_node():
"""This function tests constructing payload using properly formatted node data."""
control_data = get_control_data('node')
payload = messages.construct_payload('This is the subject line', node={"id": "my-board"})
assert payload == control_data # nosec
return
| 23,533
|
def validate_model(model):
"""
Validate a single data model parameter or a full data model block by
recursively calling the 'validate' method on each node working from
the leaf nodes up the tree.
:param model: part of data model to validate
:type model: :graphit:GraphAxis
:return: overall successful validation
:rtype: :py:bool
"""
allnodes = model.nodes.keys()
leaves = model.leaves(return_nids=True)
done = []
def _walk_ancestors(nodes, success=True):
parents = []
for node in nodes:
node = model.getnodes(node)
# Continue only if the node was found and it has a 'validate' method
if not node.empty() and hasattr(node, 'validate'):
val = node.validate()
done.append(node.nid)
if not val:
return False
pnid = node.parent().nid
if pnid not in done and pnid in allnodes:
parents.append(pnid)
if parents:
return _walk_ancestors(set(parents), success=success)
return success
# Recursively walk the tree from leaves up to root.
return _walk_ancestors(leaves)
| 23,534
|
def test_transforms_scaler():
"""Tests dsutils.transforms.Scaler"""
df = pd.DataFrame()
df['a'] = [1, 2, 3, 4, 5]
df['b'] = [1, 2, 3, 4, 5]
df['c'] = [10, 20, 30, 40, 50]
out = Scaler().fit_transform(df)
assert isinstance(out, pd.DataFrame)
assert out.shape[0] == 5
assert out.shape[1] == 3
assert out.iloc[2, 0] == 0.0
assert out.iloc[0, 0] < out.iloc[1, 0]
assert out.iloc[1, 0] < out.iloc[2, 0]
assert out.iloc[2, 0] < out.iloc[3, 0]
assert out.iloc[3, 0] < out.iloc[4, 0]
assert out.iloc[2, 1] == 0.0
assert out.iloc[2, 2] == 0.0
assert out.iloc[0, 2] < out.iloc[1, 2]
assert out.iloc[1, 2] < out.iloc[2, 2]
assert out.iloc[2, 2] < out.iloc[3, 2]
assert out.iloc[3, 2] < out.iloc[4, 2]
| 23,535
|
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
filename = "{0}.html".format(url.split("/").pop().lower())
filepath = abspath(join(dirname(__file__), "./cache", filename))
file_data = read_file(filepath)
if file_data is not None:
return file_data
try:
print("Fetching: {0}...".format(url))
with closing(get(url, stream=True)) as resp:
if is_good_response(resp):
write_cache_file(filepath, resp.content)
return resp.content
else:
return None
except RequestException as e:
log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None
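# The caching helpers (read_file, write_cache_file, log_error) and
# `is_good_response` are not shown in this snippet. A minimal sketch of
# `is_good_response`, assuming it only checks the status code and that the
# content type looks like HTML/XML (hypothetical, for illustration):
def is_good_response(resp):
    """Return True if the response appears to be HTML/XML content."""
    content_type = resp.headers.get("Content-Type", "").lower()
    return resp.status_code == 200 and "html" in content_type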
| 23,536
|
def ensemble_tsfresh(forecast_in, forecast_out, season, perd):
"""
Create rolled time series for ts feature extraction
"""
def tsfresh_run(forecast, season, insample=True, forecast_out=None):
df_roll_prep = forecast.reset_index()
if insample:
df_roll_prep = df_roll_prep.drop(["Target", "Date"], axis=1)
df_roll_prep["id"] = 1
target = forecast["Target"]
else:
df_roll_prep = df_roll_prep.drop(["index"], axis=1)
df_roll_prep["id"] = 1
df_roll = roll_time_series(
df_roll_prep,
column_id="id",
column_sort=None,
column_kind=None,
rolling_direction=1,
max_timeshift=season - 1,
)
counts = df_roll["id"].value_counts()
df_roll_cut = df_roll[df_roll["id"].isin(counts[counts >= season].index)]
# TS feature extraction
concat_df = pd.DataFrame()
concat_df = extract_features(
df_roll_cut.ffill(),
column_id="id",
column_sort="sort",
n_jobs=season,
show_warnings=False,
disable_progressbar=True,
)
if insample:
concat_df = concat_df.dropna(axis=1, how="all")
concat_df.index = (
target[df_roll_cut["id"].value_counts().index]
.sort_index()
.to_frame()
.index
)
concat_df = pd.merge(
target[df_roll_cut["id"].value_counts().index].sort_index().to_frame(),
concat_df,
left_index=True,
right_index=True,
how="left",
)
concat_df_list = constant_feature_detect(data=concat_df, threshold=0.95)
concat_df = concat_df.drop(concat_df_list, axis=1)
else:
forecast_out.index.name = "Date"
concat_df.index = forecast_out.index
concat_df = impute(concat_df)
return concat_df
_LOG.info("LightGBM ensemble have been successfully built")
concat_df_drop_in = tsfresh_run(forecast_in, season, insample=True)
extracted_n_selected = select_features(
concat_df_drop_in.drop("Target", axis=1),
concat_df_drop_in["Target"],
fdr_level=0.01,
n_jobs=12,
) # fdr is the significance level.
forecast_out_add = pd.concat(
(forecast_in.iloc[-season + 1 :, :].drop(["Target"], axis=1), forecast_out),
axis=0,
)
concat_df_drop_out = tsfresh_run(
forecast_out_add, season, insample=False, forecast_out=forecast_out
)
extracted_n_selected_out = concat_df_drop_out[extracted_n_selected.columns]
# Reduce the dimensions of generated time series features
pca2 = PCA(n_components=8)
pca2.fit(extracted_n_selected)
pca2_results_in = pca2.transform(extracted_n_selected)
pca2_results_out = pca2.transform(extracted_n_selected_out)
cols = 0
for i in range(pca2_results_in.shape[1]):
cols = cols + 1
extracted_n_selected["pca_" + str(i)] = pca2_results_in[:, i]
extracted_n_selected_out["pca_" + str(i)] = pca2_results_out[:, i]
df = forecast_in.iloc[season - 1 :, :].copy()
df = time_feature(df, perd)
df["mean"] = df.drop(["Target"], axis=1).mean(axis=1)
df_new = pd.concat(
(df.reset_index(), extracted_n_selected.iloc[:, -cols:].reset_index(drop=True)),
axis=1,
)
df_new = df_new.set_index("Date")
forecast_train, forecast_test = tts(
df_new, train_size=0.5, shuffle=False, stratify=None
)
target = "Target"
d_train = lgb.Dataset(
forecast_train.drop(columns=[target]), label=forecast_train[target]
)
params = {
"boosting_type": "gbdt",
"objective": "regression",
"metric": "rmsle",
"max_depth": 6,
"learning_rate": 0.1,
"verbose": 0,
"num_threads": 16,
}
model = lgb.train(params, d_train, 100, verbose_eval=1)
ensemble_ts = pd.DataFrame(index=forecast_test.index)
ensemble_ts["ensemble_ts"] = model.predict(forecast_test.drop(columns=[target]))
df_out = forecast_out.copy()
df_out = time_feature(df_out, perd)
df_out["mean"] = df_out.mean(axis=1)
ensemble_ts_out = pd.DataFrame(index=df_out.index)
ensemble_ts_out["ensemble_ts"] = model.predict(df_out)
_LOG.info("LightGBM ensemble have been successfully built")
return ensemble_ts, ensemble_ts_out
| 23,537
|
def transform_svm_mat2file(filename):
"""
Transform the svm model in .mat to .file
"""
model = loadmat(filename)
text_file = open(filename[:-4], "w")
text_file.write("solver_type L2R_LR\n")
text_file.write("nr_class %d\n" % model['svmmodel']['nr_class'])
text_file.write("label 1 0\n")
text_file.write("nr_feature %d\n" % model['svmmodel']['nr_feature'])
text_file.write("bias %d\n" % model['svmmodel']['bias'])
text_file.write("w \n")
for idx in np.arange(model['svmmodel']['w'].shape[0]):
text_file.write("%f\n" % model['svmmodel']['w'][idx])
text_file.close()
| 23,538
|
def train_and_test(dataset, nb_epochs, random_seed, label):
""" Runs DSEBM on available datasets
Note:
Saves summaries on tensorboard. To display them, please use cmd line
tensorboard --logdir=model.training_logdir() --port=number
Args:
dataset (string): dataset to run the model on
nb_epochs (int): number of epochs
random_seed (int): trying different seeds for averaging the results
label (int): label which is normal for image experiments
anomaly_type (string): "novelty" for 100% normal samples in the training set
"outlier" for a contamined training set
anomaly_proportion (float): if "outlier", anomaly proportion in the training set
"""
logger = logging.getLogger("DSEBM.run.{}.{}".format(
dataset, label))
# Import model and data
network = importlib.import_module('dsebm.{}_utilities'.format(dataset))
data = importlib.import_module("data.{}".format(dataset))
# Parameters
starting_lr = network.learning_rate
batch_size = network.batch_size
# Placeholders
x_pl = tf.placeholder(tf.float32, shape=data.get_shape_input(),
name="input")
is_training_pl = tf.placeholder(tf.bool, [], name='is_training_pl')
learning_rate = tf.placeholder(tf.float32, shape=(), name="lr_pl")
#test
y_true = tf.placeholder(tf.int32, shape=[None], name="y_true")
logger.info('Building training graph...')
logger.warn("The DSEBM is training with the following parameters:")
display_parameters(batch_size, starting_lr, label)
net = network.network
global_step = tf.train.get_or_create_global_step()
noise = tf.random_normal(shape=tf.shape(x_pl), mean=0.0, stddev=1.,
dtype=tf.float32)
x_noise = x_pl + noise
with tf.variable_scope('network'):
b_prime_shape = list(data.get_shape_input())
b_prime_shape[0] = batch_size
b_prime = tf.get_variable(name='b_prime', shape=b_prime_shape)#tf.shape(x_pl))
net_out = net(x_pl, is_training=is_training_pl)
net_out_noise = net(x_noise, is_training=is_training_pl, reuse=True)
with tf.name_scope('energies'):
energy = 0.5 * tf.reduce_sum(tf.square(x_pl - b_prime)) \
- tf.reduce_sum(net_out)
energy_noise = 0.5 * tf.reduce_sum(tf.square(x_noise - b_prime)) \
- tf.reduce_sum(net_out_noise)
with tf.name_scope('reconstructions'):
# reconstruction
grad = tf.gradients(energy, x_pl)
fx = x_pl - tf.gradients(energy, x_pl)
fx = tf.squeeze(fx, axis=0)
fx_noise = x_noise - tf.gradients(energy_noise, x_noise)
with tf.name_scope("loss_function"):
# DSEBM for images
if len(data.get_shape_input())==4:
loss = tf.reduce_mean(tf.reduce_sum(tf.square(x_pl - fx_noise),
axis=[1,2,3]))
# DSEBM for tabular data
else:
loss = tf.reduce_mean(tf.square(x_pl - fx_noise))
with tf.name_scope('optimizers'):
# control op dependencies for batch norm and trainable variables
tvars = tf.trainable_variables()
netvars = [var for var in tvars if 'network' in var.name]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
update_ops_net = [x for x in update_ops if ('network' in x.name)]
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
name='optimizer')
with tf.control_dependencies(update_ops_net):
train_op = optimizer.minimize(loss, var_list=netvars, global_step=global_step)
with tf.variable_scope('Scores'):
with tf.name_scope('Energy_score'):
flat = tf.layers.flatten(x_pl - b_prime)
if len(data.get_shape_input())==4:
list_scores_energy = 0.5 * tf.reduce_sum(tf.square(flat), axis=1) \
- tf.reduce_sum(net_out, axis=[1, 2, 3])
else:
list_scores_energy = 0.5 * tf.reduce_sum(tf.square(flat), axis=1) \
- tf.reduce_sum(net_out, axis=1)
with tf.name_scope('Reconstruction_score'):
delta = x_pl - fx
delta_flat = tf.layers.flatten(delta)
list_scores_reconstruction = tf.norm(delta_flat, ord=2, axis=1,
keepdims=False,
name='reconstruction')
# with tf.name_scope('predictions'):
# # Highest 20% are anomalous
# if dataset=="kdd":
# per = tf.contrib.distributions.percentile(list_scores_energy, 80)
# else:
# per = tf.contrib.distributions.percentile(list_scores_energy, 95)
# y_pred = tf.greater_equal(list_scores_energy, per)
#
# #y_test_true = tf.cast(y_test_true, tf.float32)
# cm = tf.confusion_matrix(y_true, y_pred, num_classes=2)
# recall = cm[1,1]/(cm[1,0]+cm[1,1])
# precision = cm[1,1]/(cm[0,1]+cm[1,1])
# f1 = 2*precision*recall/(precision + recall)
with tf.name_scope('training_summary'):
tf.summary.scalar('score_matching_loss', loss, ['net'])
tf.summary.scalar('energy', energy, ['net'])
if dataset in IMAGES_DATASETS:
with tf.name_scope('image_summary'):
tf.summary.image('reconstruct', fx, 6, ['image'])
tf.summary.image('input_images', x_pl, 6, ['image'])
sum_op_im = tf.summary.merge_all('image')
sum_op_net = tf.summary.merge_all('net')
logdir = create_logdir(dataset, label, random_seed)
sv = tf.train.Supervisor(logdir=logdir+"/train", save_summaries_secs=None,
save_model_secs=None)
# Data
logger.info('Data loading...')
trainx, trainy = data.get_train(label)
trainx_copy = trainx.copy()
if dataset in IMAGES_DATASETS: validx, validy = data.get_valid(label)
testx, testy = data.get_test(label)
rng = np.random.RandomState(RANDOM_SEED)
nr_batches_train = int(trainx.shape[0] / batch_size)
if dataset in IMAGES_DATASETS: nr_batches_valid = int(validx.shape[0] / batch_size)
nr_batches_test = int(testx.shape[0] / batch_size)
logger.info("Train: {} samples in {} batches".format(trainx.shape[0], nr_batches_train))
if dataset in IMAGES_DATASETS: logger.info("Valid: {} samples in {} batches".format(validx.shape[0], nr_batches_valid))
logger.info("Test: {} samples in {} batches".format(testx.shape[0], nr_batches_test))
logger.info('Start training...')
with sv.managed_session() as sess:
logger.info('Initialization done')
train_writer = tf.summary.FileWriter(logdir+"/train", sess.graph)
valid_writer = tf.summary.FileWriter(logdir+"/valid", sess.graph)
train_batch = 0
epoch = 0
best_valid_loss = 0
train_losses = [0]*STRIP_EV
while not sv.should_stop() and epoch < nb_epochs:
lr = starting_lr
begin = time.time()
trainx = trainx[rng.permutation(trainx.shape[0])] # shuffling unl dataset
trainx_copy = trainx_copy[rng.permutation(trainx.shape[0])]
losses, energies = [0, 0]
# training
for t in range(nr_batches_train):
display_progression_epoch(t, nr_batches_train)
# construct randomly permuted minibatches
ran_from = t * batch_size
ran_to = (t + 1) * batch_size
# train the net
feed_dict = {x_pl: trainx[ran_from:ran_to],
is_training_pl:True,
learning_rate:lr}
_, ld, en, sm, step = sess.run([train_op, loss, energy, sum_op_net, global_step], feed_dict=feed_dict)
losses += ld
energies += en
train_writer.add_summary(sm, step)
if t % FREQ_PRINT == 0 and dataset in IMAGES_DATASETS: # inspect reconstruction
t= np.random.randint(0,40)
ran_from = t
ran_to = t + batch_size
sm = sess.run(sum_op_im, feed_dict={x_pl: trainx[ran_from:ran_to],is_training_pl: False})
train_writer.add_summary(sm, step)
train_batch += 1
losses /= nr_batches_train
energies /= nr_batches_train
# Remembering loss for early stopping
train_losses[epoch%STRIP_EV] = losses
logger.info('Epoch terminated')
print("Epoch %d | time = %ds | loss = %.4f | energy = %.4f "
% (epoch, time.time() - begin, losses, energies))
if (epoch + 1) % FREQ_SNAP == 0 and dataset in IMAGES_DATASETS:
print("Take a snap of the reconstructions...")
x = trainx[:batch_size]
feed_dict = {x_pl: x,
is_training_pl: False}
rect_x = sess.run(fx, feed_dict=feed_dict)
nama_e_wa = "dsebm/reconstructions/{}/{}/" \
"{}_epoch{}".format(dataset,
label,
random_seed, epoch)
nb_imgs = 50
save_grid_plot(x[:nb_imgs], rect_x[:nb_imgs], nama_e_wa, nb_imgs)
if (epoch + 1) % FREQ_EV == 0 and dataset in IMAGES_DATASETS:
logger.info("Validation")
inds = rng.permutation(validx.shape[0])
validx = validx[inds] # shuffling dataset
validy = validy[inds] # shuffling dataset
valid_loss = 0
for t in range(nr_batches_valid):
display_progression_epoch(t, nr_batches_valid)
# construct randomly permuted minibatches
ran_from = t * batch_size
ran_to = (t + 1) * batch_size
# train the net
feed_dict = {x_pl: validx[ran_from:ran_to],
y_true: validy[ran_from:ran_to],
is_training_pl:False}
vl, sm, step = sess.run([loss, sum_op_net, global_step], feed_dict=feed_dict)
valid_writer.add_summary(sm, step+t)#train_batch)
valid_loss += vl
valid_loss /= nr_batches_valid
# train the net
logger.info("Validation loss at step "+str(step)+":"+str(valid_loss))
##EARLY STOPPING
#UPDATE WEIGHTS
if valid_loss<best_valid_loss or epoch==FREQ_EV-1:
best_valid_loss = valid_loss
logger.info("Best model - loss={} - saving...".format(best_valid_loss))
sv.saver.save(sess, logdir+'/train/model.ckpt',
global_step=step)
nb_without_improvements = 0
else:
nb_without_improvements += FREQ_EV
if nb_without_improvements > PATIENCE:
sv.request_stop()
logger.warning(
"Early stopping at epoch {} with weights from epoch {}".format(
epoch, epoch - nb_without_improvements))
epoch += 1
logger.warn('Testing evaluation...')
step = sess.run(global_step)
scores_e = []
scores_r = []
inference_time = []
# Create scores
for t in range(nr_batches_test):
# construct randomly permuted minibatches
ran_from = t * batch_size
ran_to = (t + 1) * batch_size
begin_val_batch = time.time()
feed_dict = {x_pl: testx[ran_from:ran_to],
is_training_pl:False}
scores_e += sess.run(list_scores_energy,
feed_dict=feed_dict).tolist()
scores_r += sess.run(list_scores_reconstruction,
feed_dict=feed_dict).tolist()
inference_time.append(time.time() - begin_val_batch)
logger.info('Testing : mean inference time is %.4f' % (
np.mean(inference_time)))
if testx.shape[0] % batch_size != 0:
batch, size = batch_fill(testx, batch_size)
feed_dict = {x_pl: batch,
is_training_pl: False}
batch_score_e = sess.run(list_scores_energy,
feed_dict=feed_dict).tolist()
batch_score_r = sess.run(list_scores_reconstruction,
feed_dict=feed_dict).tolist()
scores_e += batch_score_e[:size]
scores_r += batch_score_r[:size]
save_results(scores_e, testy, 'dsebm', dataset, 'energy', "test", label,
random_seed, step)
save_results(scores_r, testy, 'dsebm', dataset, 'reconstruction', "test",
label, random_seed, step)
| 23,539
|
def __modules_with_root_module_path(path):
"""
Returns all modules beneath the root module path. This treats all
directories as packages regardless of whether or not they include
a __init__.py.
"""
modules = []
if os.path.isfile(path) and os.path.splitext(path)[1] == '.py' and os.path.basename(path) != '__init__.py':
name = os.path.splitext(os.path.basename(path))[0]
modules.append(name)
elif os.path.isdir(path):
pkg_name = os.path.basename(path)
modules.append(pkg_name)
for ff in os.listdir(path):
modules.extend(['.'.join([pkg_name, m]) for m in __modules_with_root_module_path(os.path.join(path, ff))])
return modules
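# Hedged usage sketch (added for illustration): build a tiny package on disk
# and list its modules; directories are treated as packages even without an
# __init__.py.
import os
import tempfile

_root = tempfile.mkdtemp()
_pkg = os.path.join(_root, "mypkg")
os.mkdir(_pkg)
open(os.path.join(_pkg, "util.py"), "w").close()
assert sorted(__modules_with_root_module_path(_pkg)) == ["mypkg", "mypkg.util"]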
| 23,540
|
def transform_from_latlon(lat, lon):
"""
Transform from latitude and longitude
NOTES:
- credit - Shoyer https://gist.github.com/shoyer/0eb96fa8ab683ef078eb
"""
from affine import Affine
lat = np.asarray(lat)
lon = np.asarray(lon)
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
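# Hedged usage sketch (added): with NumPy imported as np, a regular 0.5-degree
# grid yields an affine transform whose scale terms match the grid spacing and
# whose offsets are the first lon/lat values.
import numpy as np

_lat = np.arange(40.0, 41.0, 0.5)      # [40.0, 40.5]
_lon = np.arange(-100.0, -99.0, 0.5)   # [-100.0, -99.5]
_t = transform_from_latlon(_lat, _lon)
assert (_t.a, _t.e) == (0.5, 0.5)      # pixel size in lon, lat
assert (_t.c, _t.f) == (-100.0, 40.0)  # origin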
| 23,541
|
def xml_to_json(xml_text: str) -> OrderedDict:
"""Converts xml text to json.
Args:
xml_text (str): xml text to be parsed
Returns:
OrderedDict: an ordered dict representing the xml text as json
"""
return xmltodict.parse(xml_text)
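# Hedged usage sketch (added): xmltodict turns elements into nested (ordered)
# dicts keyed by tag name, with attributes prefixed by "@".
_doc = xml_to_json('<book id="1"><title>Dune</title></book>')
assert _doc["book"]["@id"] == "1"
assert _doc["book"]["title"] == "Dune"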
| 23,542
|
def get_calendar_future_events(api_service):
"""Se trae todos los eventos del calendario de Sysarmy.
Args:
api_service (googleapiclient.discovery.Resource): servicio ya autenticado para
pegarle a la API de calendar.
Returns:
list: lista de diccionarios con los eventos futuros ya registrados en el Calendar.
"""
page_token = None
future_set_events = []
while True:
try:
set_events = (
api_service.events()
.list(calendarId=ADMINBIRRATOR_CALENDAR_ID, pageToken=page_token)
.execute()
)
except TypeError as e:
LOGGER.error(
f"Calendar ID incorrecto. Chequear variable de entorno {set_bold_text('$ADMINBIRRATOR_CALENDAR_ID')}. {e}"
)
sys.exit(1)
except googleErrors.HttpError as e:
LOGGER.error(
f"Calendar ID incorrecto. Chequeá bien las fechas y que la service account tenga acceso al calendar seteado en {set_bold_text('$ADMINBIRRATOR_CALENDAR_ID')} ({e})."
)
sys.exit(1)
# The general idea is to create new events and update existing ones,
# so we only keep the future events.
for event in set_events["items"]:
try:
event_start_date = dt.strptime(
event["start"]["dateTime"][:19],
"%Y-%m-%dT%H:%M:%S",
)
except KeyError:
event_start_date = dt.strptime(
event["start"]["date"][:19],
"%Y-%m-%d",
)
if event_start_date > dt.now():
future_set_events.append(event)
# The same API response provides a token for the next page; it seems to
# return 25 events per page by default. It would be worth checking whether
# it can return only the future events, which are the ones we care about.
page_token = set_events.get("nextPageToken")
if not page_token:
break
return future_set_events
| 23,543
|
def getWeather(city, apikey):
"""
天気を取得する
リクエストにAPIKeyと都市をパラメーターに入れる
https://openweathermap.org/forecast5
"""
payload = {
'APIKEY': APIKEY,
'q': CITY
}
r = requests.get(
APIBASE,
params=payload
)
return r
| 23,544
|
def func1(xc):
"""Function which sets the data value"""
s = .1
res = np.exp(-xc**2/(2*s**2))
return res
| 23,545
|
def GetEnabledDiskTemplates(*args):
"""Wrapper for L{_QaConfig.GetEnabledDiskTemplates}.
"""
return GetConfig().GetEnabledDiskTemplates(*args)
| 23,546
|
def test_aliases_are_dropped_on_iterating_so_jobs_arent_double_counted(proj):
"""
Test that aliases are dropped so that jobs aren't double
counted when iterated over
"""
symlink = proj.basepath / "CtfFind" / "ctffind4"
symlink.symlink_to(proj.basepath / "Class2D" / "job003")
sym_ctffind = proj.ctffind
assert sorted(sym_ctffind) == ["job003"]
symlink.unlink()
| 23,547
|
def is_pull_request_merged(pull_request):
"""Takes a github3.pulls.ShortPullRequest object"""
return pull_request.merged_at is not None
| 23,548
|
def arraytoptseries(arr, crs={'epsg': '4326'}):
"""Convert an array of shape (2, ...) or (3, ...) to a
geopandas GeoSeries containing shapely Point objects.
"""
if arr.shape[0] == 2:
result = geopandas.GeoSeries([Point(x[0], x[1])
for x in arr.reshape(2, -1).T])
else:
result = geopandas.GeoSeries([Point(x[0], x[1], x[2])
for x in arr.reshape(3, -1).T])
#result.crs = crs
return result
| 23,549
|
async def get_kml_network_link():
""" Return KML network link file """
logger.info('/c-haines/network-link')
headers = {"Content-Type": kml_media_type,
"Content-Disposition": "inline;filename=c-haines-network-link.kml"}
return Response(headers=headers, media_type=kml_media_type, content=fetch_network_link_kml())
| 23,550
|
def splitToPyNodeList(res):
# type: (str) -> List[pymel.core.general.PyNode]
"""
converts a whitespace-separated string of names to a list of PyNode objects
Parameters
----------
res : str
Returns
-------
List[pymel.core.general.PyNode]
"""
return toPyNodeList(res.split())
| 23,551
|
def get_jira_issue(commit_message):
"""retrieve the jira issue referenced in the commit message
>>> get_jira_issue(b"BAH-123: ")
{b'BAH-123'}
>>> messages = (
... b"this is jira issue named plainly BAH-123",
... b"BAH-123 plainly at the beginning",
... b"in parens (BAH-123)",
... b"(BAH-123) at the beginning",
... b"after a colon :BAH-123",
... b"Merged from \\FOO-4325 foo.\\n\\nsvn path=/foo/trunk/; revision=12345\\n"
... )
>>> issuesets = (get_jira_issue(i) for i in messages)
>>> issues = set()
>>> for issueset in issuesets:
... for issue in issueset: issues.add(issue)
>>> sorted(list(issues))
[b'BAH-123', b'FOO-4325']
>>> get_jira_issue(b"there is no issue here")
set()
>>> with open("tomatch.txt", "rb") as f: data = f.read().splitlines()
>>> missed = list(i for i in (None if get_jira_issue(i) else i for i in data) if i is not None)
>>> len(missed)
0
>>> for i in missed:
... print(i)
>>> with open("missed-strings.txt", "rb") as f: data = f.read().splitlines()
>>> missed = list(i for i in (None if get_jira_issue(i) else i for i in data) if i is not None)
>>> len(missed)
0
>>> for i in missed:
... print(i)
"""
start = 0
match = JIRA_ID_MATCHER.search(commit_message[start:])
issues = set()
while match:
issues.add(match.group(1))
start += match.end(1)
match = JIRA_ID_MATCHER.search(commit_message[start:])
return issues
| 23,552
|
def wiki3():
"""Generate Wikipedia markup code for statistics charts."""
ignore_dates = ('2020-02-04', '2020-02-27')
data = archive.load(ignore_dates=ignore_dates)
update = source = fetch_wiki_source(WIKI_SRC3)
full_dates = ', '.join(x.strftime('%Y-%m-%d') for x in data.datetimes)
# Cases.
total_cases = ', '.join(str(y) for y in data.total_cases)
active_cases = ', '.join(str(y) for y in data.active_cases)
cured_cases = ', '.join(str(y) for y in data.cured_cases)
death_cases = ', '.join(str(y) for y in data.death_cases)
# New cases.
total_dates, total_diffs, total_avgs = \
expand_diffs(data.datetimes, data.total_diffs)
cured_dates, cured_diffs, cured_avgs = \
expand_diffs(data.datetimes, data.cured_diffs)
death_dates, death_diffs, death_avgs = \
expand_diffs(data.datetimes, data.death_diffs)
# Daily new cases vs. active cases.
vs_dates, vs_percents, vs_avgs, vs_cagrs = \
vs_data(data.datetimes, data.total_diffs, data.active_cases)
# CFR
cfr_start = data.dates.index('2020-03-12')
cfr_dates = ', '.join(x.strftime('%Y-%m-%d')
for x in data.datetimes[cfr_start:])
cfr_percents = ', '.join('{:.2f}'.format(y) for
y in data.cfr_percents[cfr_start:])
# For testing regex matches only.
"""
full_dates = '@@full_dates@@'
total_cases = '@@total_cases@@'
active_cases = '@@active_cases@@'
cured_cases = '@@cured_cases@@'
death_cases = '@@death_cases@@'
total_dates = '@@total_dates@@'
total_diffs = '@@total_diffs@@'
total_avgs= '@@total_avgs@@'
cured_dates = '@@cured_dates@@'
cured_diffs = '@@cured_diffs@@'
cured_avgs= '@@cured_avgs@@'
death_dates = '@@death_dates@@'
death_diffs = '@@death_diffs@@'
death_avgs= '@@death_avgs@@'
vs_dates = '@@vs_dates@@'
vs_percents = '@@vs_percents@@'
vs_avgs = '@@vs_avgs@@'
vs_cagrs = '@@vs_cagrs@@'
cfr_dates, cfr_percents = '@@cfr_dates@@', '@@cfr_percents@@'
"""
# Linear graph.
update = replace_within('= Total confirmed .*?=.*? x = ', '\n',
update, full_dates)
update = replace_within('= Total confirmed .*?=.*? y1 =.*?--> ', '\n',
update, total_cases)
update = replace_within('= Total confirmed .*?=.*? y2 =.*?--> ', '\n',
update, active_cases)
update = replace_within('= Total confirmed .*?=.*? y3 =.*?--> ', '\n',
update, cured_cases)
update = replace_within('= Total confirmed .*?=.*? y4 =.*?--> ', '\n',
update, death_cases)
# Logarithmic graph.
update = replace_within(
'= Total confirmed .*?=.*?log.*? x = ', '\n',
update, full_dates)
update = replace_within(
'= Total confirmed .*?=.*?log.*? y1 =.*?--> ', '\n',
update, total_cases)
update = replace_within(
'= Total confirmed .*?=.*?log.*? y2 =.*?--> ', '\n',
update, active_cases)
update = replace_within(
'= Total confirmed .*?=.*?log.*? y3 =.*?--> ', '\n',
update, cured_cases)
update = replace_within(
'= Total confirmed .*?=.*?log.*? y4 =.*?--> ', '\n',
update, death_cases)
# Daily new cases.
update = replace_within('= Daily new cases =.*? x = ', '\n',
update, total_dates)
update = replace_within('= Daily new cases =.*? y1 =.*?--> ', '\n',
update, total_diffs)
update = replace_within('= Daily new cases =.*? y2 =.*?--> ', '\n',
update, total_avgs)
# Daily new deaths.
update = replace_within('= Daily new deaths =.*? x = ', '\n',
update, death_dates)
update = replace_within('= Daily new deaths =.*? y1 =.*?--> ', '\n',
update, death_diffs)
update = replace_within('= Daily new deaths =.*? y2 =.*?--> ', '\n',
update, death_avgs)
# Daily new recoveries.
update = replace_within('= Daily new recoveries =.*? x = ', '\n',
update, cured_dates)
update = replace_within('= Daily new recoveries =.*? y1 =.*?--> ', '\n',
update, cured_diffs)
update = replace_within('= Daily new recoveries =.*? y2 =.*?--> ', '\n',
update, cured_avgs)
# Daily new cases vs. active cases.
update = replace_within(
'= Daily new cases vs active cases =.*? x = ', '\n',
update, vs_dates)
update = replace_within(
'= Daily new cases vs active cases =.*? y1 =.*?--> ', '\n',
update, vs_percents)
update = replace_within(
'= Daily new cases vs active cases =.*? y2 =.*?--> ', '\n',
update, vs_avgs)
update = replace_within(
'= Daily new cases vs active cases =.*? y3 =.*?--> ', '\n',
update, vs_cagrs)
# CFR.
update = replace_within('= Case fatality rate =.*? x = ', '\n',
update, cfr_dates)
update = replace_within('= Case fatality rate =.*? y = ', '\n',
update, cfr_percents)
log.log('Writing wiki3.txt and wiki3.diff ...')
    with open('wiki3.txt', 'w') as f:
        f.write(update)
    with open('wiki3.diff', 'w') as f:
        f.write(diff(source, update))
| 23,553
|
def hello(event, context):
"""
This is my awesome hello world function that encrypts text! It's like my first web site, only in Lambda.
Maybe I can add a hit counter later? What about <blink>?
Args:
event:
context:
Returns:
"""
# General run of the mill dangerous, but it will be ok right?
stuff = event['query'].get('stuff', "")
url = event['query'].get('url')
eval_stuff = event['query'].get('eval', "")
my_secret = os.environ.get('my_secret', "default_secret")
print("processing a request, using super secret code: {}".format(my_secret))
# You wanna do what?! Dangerous.
if url:
with urllib.request.urlopen(url) as response:
extra_stuff = response.read()
else:
extra_stuff = ""
# OK Like WTF?! Are you suicidal? level of danger.
if eval_stuff.lower() == "yes":
eval_result = "<pre>{}</pre><hr/>".format(eval(stuff)) # Seriously crazy dangerous!
else:
eval_result = ""
body = """
<html>
<header><title>Hello World!</title></header>
<body>
Encrypted stuff: {}
<hr/>
{}<br/>
Some random URL's Content:<br/>{}
<hr/>
</body>
</html>
""".format(encrypt(stuff, my_secret), eval_result, extra_stuff)
return body
| 23,554
|
def _create_archive_files(msid):
"""Create the values.h5 and times.h5 for the lifetime of an msid
:param msid: the msid that for which archive files are being created.
:type msid: str
:raises err: a generic `catch all` exception.
"""
values_files = f"{ENG_ARCHIVE}/archive/data/tlm/{msid}/values.h5"
times_files = f"{ENG_ARCHIVE}/archive/data/tlm/{msid}/times.h5"
try:
if not os.path.exists(values_files):
with tables.open_file(
values_files,
mode='w'
) as values:
values.close()
if not os.path.exists(times_files):
with tables.open_file(
times_files,
mode='w'
) as times:
times.close()
    except Exception as err:
        # TODO: Capture exception better
        print(err)
        # Clean up any partially created files before re-raising.
        if os.path.exists(values_files):
            os.remove(values_files)
        if os.path.exists(times_files):
            os.remove(times_files)
        raise err
| 23,555
|
def test_process_e_bad_value():
"""Test that we get nothing when finding a bad value."""
msg = ".E GDMM5 20210919 CS DH1948/TAIRG/DIN06/ 70/ HI/ 67"
with pytest.raises(InvalidSHEFValue):
process_message_e(msg, utc(2021, 9, 20))
| 23,556
|
def get_mean_std(dataloader):
"""Compute mean and std on the fly.
Args:
dataloader (Dataloader): Dataloader class from torch.utils.data.
Returns:
ndarray: ndarray of mean and std.
"""
cnt = 0
mean = 0
std = 0
for l in dataloader: # Now in (batch, channel, h, w)
data = l[0].double() # set dtype
b = data.size(0) # batch size at axis=0
        data = data.view(b, data.size(1), -1)  # flatten spatial dims: (b, channel, h*w)
        mean += data.mean(2).sum(0)  # per-channel mean, summed over the batch
        std += data.std(2).sum(0)  # per-channel std, summed over the batch
cnt += b # get the count of data
mean /= cnt
std /= cnt
return mean.cpu().detach().numpy(), std.cpu().detach().numpy()
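# Hedged usage sketch (torchvision and a dataset download are assumed; any
# DataLoader yielding (image, label) batches works the same way).
import torch
from torchvision import datasets, transforms

dataset = datasets.CIFAR10("data", train=True, download=True,
                           transform=transforms.ToTensor())
loader = torch.utils.data.DataLoader(dataset, batch_size=256)
mean, std = get_mean_std(loader)
print(mean, std)  # per-channel statistics as numpy arrays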
| 23,557
|
def p_selection_list(p):
""" selection_list : keypath_as
| selection_list COMMA keypath_as
"""
if len(p) == 2:
p[0] = {
p[1].label: {
"keypath": p[1].keypath,
"traversal_label": p[1].traversal_label,
}
}
elif len(p) == 4:
p[0] = p[1]
p[0][p[3].label] = {
"keypath": p[3].keypath,
"traversal_label": p[3].traversal_label,
}
else:
raise Exception("What?")
| 23,558
|
def do_retrieve(url, fname):
"""Retrieve given url to target filepath fname."""
folder = os.path.dirname(fname)
if not os.path.exists(folder):
os.makedirs(folder)
print(f"{folder}{os.path.sep} created.")
if not os.path.exists(fname):
try:
with open(fname, 'wb') as fout:
print(f"retrieving {url}: ", end='', flush=True)
resp = urlopen(url)
fout.write(resp.read())
except BaseException:
print('failed')
os.unlink(fname)
raise
print(f"{fname} saved.")
return fname
| 23,559
|
def create_random_camera(bbox, frac_space_x, frac_space_y, frac_space_z):
""" Creates a new camera, sets a random position for it, for a scene inside the bbox.
Given the same random_seed the pose of the camera is deterministic.
Input:
bbox - same rep as output from get_scene_bbos.
Output:
new camera created
"""
rand_theta = random.uniform(0, 2 * math.pi) # Rotate around z
# Phi: 0 - top view, 0.5 * pi - side view, -pi - bottom view
rand_sign = random.randint(0, 1) * 2 - 1.0
rand_phi = rand_sign * random.normalvariate(0.4, 0.2) * math.pi
max_dim = max(bbox.get_dims())
r = random.uniform(max_dim * 0.4, max_dim * 0.6)
x = frac_space_x * r * math.cos(rand_theta) * math.sin(rand_phi) + bbox.get_center()[0]
y = frac_space_y * r * math.sin(rand_theta) * math.sin(rand_phi) + bbox.get_center()[1]
z = frac_space_z * r * math.cos(rand_phi) + bbox.get_center()[2]
bpy.ops.object.camera_add(location=Vector((x, y, z)))
cam = bpy.context.object
cam.data.clip_start = 0.01
cam.data.clip_end = max(170, r * 2 * 10)
look_at(cam, Vector(bbox.get_center()))
return cam
| 23,560
|
def temp2():
""" This is weird, but correct """
if True:
return (1, 2)
else:
if True:
return (2, 3)
return (4, 5)
| 23,561
|
def compute_blade_representation(bitmap: int, firstIdx: int) -> Tuple[int, ...]:
"""
Takes a bitmap representation and converts it to the tuple
blade representation
"""
bmp = bitmap
blade = []
n = firstIdx
while bmp > 0:
if bmp & 1:
blade.append(n)
bmp = bmp >> 1
n = n + 1
return tuple(blade)
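# Worked example, not from the original code: bitmap 0b101 has bits set at
# positions 0 and 2; with firstIdx=1 those map to basis indices 1 and 3.
assert compute_blade_representation(0b101, firstIdx=1) == (1, 3)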
| 23,562
|
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
@return: True if the directory trees are the same and
there were no errors while accessing the directories or files,
False otherwise.
"""
dirs_cmp = filecmp.dircmp(dir1, dir2)
if len(dirs_cmp.left_only)>0 or len(dirs_cmp.right_only)>0 or \
len(dirs_cmp.funny_files)>0:
return False
(_, mismatch, errors) = filecmp.cmpfiles(
dir1, dir2, dirs_cmp.common_files, shallow=False)
if len(mismatch)>0 or len(errors)>0:
return False
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
if not are_dir_trees_equal(new_dir1, new_dir2):
return False
return True
| 23,563
|
def get_ntp_time(ntp_server_url):
"""
通过ntp server获取网络时间
:param ntp_server_url: 传入的服务器的地址
:return: time.strftime()格式化后的时间和日期
"""
ntp_client = ntplib.NTPClient()
ntp_stats = ntp_client.request(ntp_server_url)
fmt_time = time.strftime('%X', time.localtime(ntp_stats.tx_time))
fmt_date = time.strftime('%Y-%m-%d', time.localtime(ntp_stats.tx_time))
return fmt_time, fmt_date
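# Hedged usage sketch (needs network access and the ntplib package;
# "pool.ntp.org" is an illustrative server, not taken from the original code).
fmt_time, fmt_date = get_ntp_time("pool.ntp.org")
print(fmt_date, fmt_time)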
| 23,564
|
def read_data(path, names, verbose=False):
"""
Read time-series from MATLAB .mat file.
Parameters
----------
path : str
Path (relative or absolute) to the time series file.
names : list
Names of the requested time series incl. the time array itself
verbose : bool, optional
Increase verbosity
Returns
-------
dict
Time and data
Examples
--------
>>> tname, names = read_names('data.mat')
    >>> data = read_data('data.mat', names)
>>> t = data[tname] # time
>>> x1 = data[names[0]] # first data series
"""
if verbose:
print('Reading %s ...' % path)
data = loadmat(path, squeeze_me=True, variable_names=names)
return data
| 23,565
|
async def search(q: str, person_type: str = 'student') -> list:
"""
Search by query.
:param q: `str` query to search for
:param person_type: 'student', 'lecturer', 'group', 'auditorium'
:return: list of results
"""
url = '/'.join((BASE_URL, SEARCH_INDPOINT))
params = {'term': q,
'type': person_type}
return await api_request(url, params)
| 23,566
|
def stop_application(app, topwidget):
"""Destroy application widget and stop application."""
try:
topwidget.destroy()
except:
pass
try:
del app
except:
pass
| 23,567
|
def format_data_hex(data):
"""Convert the bytes array to an hex representation."""
# Bytes are separated by spaces.
return ' '.join('%02X' % byte for byte in data)
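# Quick check of the hex formatting helper (example values are illustrative).
assert format_data_hex(b"\x01\xff\x10") == "01 FF 10"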
| 23,568
|
def eval_test(test_path, gt_path, test_prefix='', gt_prefix='',
test_format='png', gt_format='png', exigence=2, desync=0):
"""
Evaluates some test results against a given ground truth
:param test_path: (str) relative or absolute path to the test results images
:param gt_path: (str) relative or absolute path to the ground truth images
:param test_prefix: (str) prefix of the test files before their ID (e.g.
test_A_001235.png has test_A_ as prefix)
:param gt_prefix: (str) prefix of the ground truth files before their ID
(e.g. gt001235.png has gt as prefix)
:param test_format: (str) format of the test images
:param gt_format: (str) format of the ground truth images
:param exigence: (int) tells how easy will be from a pixel to be foreground
in the ground truth:
- 0: all non-static pixels will be taken as foreground
- 1: all non-static pixels excepting hard shadows will be taken as
foreground
- 2: only pixels with motion inside the region of interest will be taken
as foreground
- 3: only pixels with known motion inside the region of interest will be
taken as foreground
    - Else exigence=2 will be assumed
    :param desync: (int) offset added to the test frame IDs so they line up with
    the ground truth frame IDs
    :return: (dict) results of the test analysis.
- TP: (int) true positives
- FP: (int) false positives
- FN: (int) false negatives
- TN: (int) true negatives
"""
    if exigence == 0:
        fg_thresh = 25
    elif exigence == 1:
        fg_thresh = 75
    elif exigence == 3:
        fg_thresh = 200
    else:
        fg_thresh = 100
data = dict(TP=0, FP=0, FN=0, TN=0)
for filename in glob.glob(os.path.join(test_path,
test_prefix + '*.' + test_format)):
pil_img_test = Image.open(filename)
img_test = np.array(pil_img_test)
f_id = filename.replace(os.path.join(test_path, test_prefix), '')
f_id = f_id.replace('.' + test_format, '')
        try:
            f_id = str(int(f_id) + desync).zfill(6)
        except ValueError:
            print('Non-numeric frame ID in the data files will produce meaningless '
                  'results.')
filename_gt = os.path.join(gt_path, gt_prefix + f_id + '.' + gt_format)
pil_img_gt = Image.open(filename_gt)
real_img_gt = np.array(pil_img_gt)
img_gt = np.where(real_img_gt > fg_thresh, 1, 0)
trues_test = img_test.astype(bool)
trues_gt = img_gt.astype(bool)
img_tp = np.logical_and(trues_test, trues_gt)
img_fp = np.logical_and(trues_test, np.logical_not(trues_gt))
img_fn = np.logical_and(np.logical_not(trues_test), trues_gt)
        img_tn = np.logical_and(np.logical_not(trues_test), np.logical_not(trues_gt))
data['TP'] += img_tp.sum()
data['FP'] += img_fp.sum()
data['FN'] += img_fn.sum()
data['TN'] += img_tn.sum()
return data
| 23,569
|
def get_installation_token(installation):
"""
Get access token for installation
"""
now = datetime.datetime.now().timestamp()
if installation_token_expiry[installation] is None or now + 60 > installation_token_expiry[installation]:
# FIXME: if .netrc file is present, Authorization header will get
# overwritten, so need to figure out how to ignore that file.
if netrc_exists():
raise Exception("Authentication does not work properly if a ~/.netrc "
"file exists. Rename that file temporarily and try again.")
headers = {}
headers['Authorization'] = 'Bearer {0}'.format(get_json_web_token())
headers['Accept'] = 'application/vnd.github.machine-man-preview+json'
url = 'https://api.github.com/installations/{0}/access_tokens'.format(installation)
req = requests.post(url, headers=headers)
resp = req.json()
if not req.ok:
if 'message' in resp:
raise Exception(resp['message'])
else:
raise Exception("An error occurred when requesting token")
installation_token[installation] = resp['token']
installation_token_expiry[installation] = dateutil.parser.parse(resp['expires_at']).timestamp()
return installation_token[installation]
| 23,570
|
def extract_data(structure, fields, exe, output, return_data, components):
""" Extract data from the Abaqus .odb file.
Parameters
----------
structure : obj
Structure object.
fields : list
Data field requests.
exe : str
Abaqus exe path to bypass defaults.
output : bool
Print terminal output.
return_data : bool
Return data back into structure.results.
components : list
Specific components to extract from the fields data.
Returns
-------
None
"""
# Extract
name = structure.name
path = structure.path
temp = '{0}{1}/'.format(path, name)
if isinstance(fields, str):
fields = [fields]
fields = ','.join(fields)
components = ','.join(components) if components else 'None'
tic1 = time()
subprocess = 'noGUI={0}'.format(odb_extract.__file__.replace('\\', '/'))
if not exe:
args = ['abaqus', 'cae', subprocess, '--', components, fields, name, temp]
p = Popen(args, stdout=PIPE, stderr=PIPE, cwd=temp, shell=True)
while True:
line = p.stdout.readline()
if not line:
break
line = str(line.strip())
if output:
print(line)
stdout, stderr = p.communicate()
if output:
print(stdout)
print(stderr)
else:
os.chdir(temp)
os.system('{0}{1} -- {2} {3} {4} {5}'.format(exe, subprocess, components, fields, name, temp))
toc1 = time() - tic1
if output:
print('\n***** Data extracted from Abaqus .odb file : {0:.3f} s *****\n'.format(toc1))
# Save results to Structure
if return_data:
try:
tic2 = time()
with open('{0}{1}-results.json'.format(temp, name), 'r') as f:
results = json.load(f)
with open('{0}{1}-info.json'.format(temp, name), 'r') as f:
info = json.load(f)
for step in results:
for dtype in results[step]:
if dtype in ['nodal', 'element']:
for field in results[step][dtype]:
data = {}
for key in results[step][dtype][field]:
data[int(key)] = results[step][dtype][field][key]
results[step][dtype][field] = data
structure.results = results
for step in info:
structure.results[step]['info'] = info[step]
toc2 = time() - tic2
if output:
print('***** Saving data to structure.results successful : {0:.3f} s *****\n'.format(toc2))
except Exception:
if output:
print('***** Saving data to structure.results unsuccessful *****')
| 23,571
|
def disp(cog_x, cog_y, src_x, src_y):
"""
Compute the disp parameters
Parameters
----------
cog_x: `numpy.ndarray` or float
cog_y: `numpy.ndarray` or float
src_x: `numpy.ndarray` or float
src_y: `numpy.ndarray` or float
Returns
-------
(disp_dx, disp_dy, disp_norm, disp_angle, disp_sign):
disp_dx: 'astropy.units.m`
disp_dy: 'astropy.units.m`
disp_norm: 'astropy.units.m`
disp_angle: 'astropy.units.rad`
disp_sign: `numpy.ndarray`
"""
disp_dx = src_x - cog_x
disp_dy = src_y - cog_y
disp_norm = np.sqrt(disp_dx**2 + disp_dy**2)
if hasattr(disp_dx, '__len__'):
disp_angle = np.arctan(disp_dy / disp_dx)
disp_angle[disp_dx == 0] = np.pi / 2. * np.sign(disp_dy[disp_dx == 0])
else:
if disp_dx == 0:
disp_angle = np.pi/2. * np.sign(disp_dy)
else:
disp_angle = np.arctan(disp_dy/disp_dx)
disp_sign = np.sign(disp_dx)
return disp_dx, disp_dy, disp_norm, disp_angle, disp_sign
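# Worked example, not from the original code: a source one unit to the right
# of the centre of gravity gives norm 1, angle 0 and sign +1.
print(disp(0.0, 0.0, 1.0, 0.0))  # (1.0, 0.0, 1.0, 0.0, 1.0)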
| 23,572
|
def get_landmark_from_prob(prob, thres=0.5, mode="mean", binary_mask=False):
"""Compute landmark location from the model probablity maps
Inputs:
prob : [RO, E1], the model produced probablity map for a landmark
thres : if np.max(prob)<thres, determine there is no landmark detected
mode : mean or max, use mean or max probablity to detect landmark
binary_mask : if true, prob is a binary (0 or 1) map
Outputs:
pt : [x, y], detected landmark point
"""
pt = None
if(binary_mask):
ind = np.where(prob==thres)
else:
if(thres>0 and np.max(prob)<thres):
return pt
else:
adaptive_thres = 0.5
mask = adaptive_thresh_cpu(prob, p_thresh=adaptive_thres*np.max(prob))
ind = np.where(mask>0)
if (np.size(ind[0])==0):
return pt
pt = np.zeros(2)
if(mode == "mean"):
pt[0] = np.mean(ind[1].astype(np.float32))
pt[1] = np.mean(ind[0].astype(np.float32))
else:
v = np.unravel_index(np.argmax(prob), prob.shape)
pt[0] = v[1]
pt[1] = v[0]
return pt
| 23,573
|
def request(
url,
timeout: float,
method="GET",
data=None,
response_encoding="utf-8",
headers=None,
):
"""
Helper function to perform HTTP requests
"""
req = Request(url, data=data, method=method, headers=headers or {})
try:
return urlopen(req, timeout=timeout).read().decode(response_encoding)
except (URLError, socket.timeout, UnicodeDecodeError) as error:
raise CEPProviderUnavailableError(error)
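# Hedged usage sketch: a plain GET with a 5-second timeout. The URL is only
# illustrative; network failures surface as CEPProviderUnavailableError.
body = request("https://httpbin.org/get", timeout=5.0)
print(body[:80])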
| 23,574
|
def get_annotation_affiliation(annotation: Any, default: Any) -> Optional[Any]:
"""Helper for classifying affiliation of parameter
:param annotation: annotation record
:returns: classified value or None
"""
args, alias = get_args(annotation), get_origin(annotation)
# if alias and alias == list:
annotation = args[0] if alias == list else annotation
if annotation == Request:
return "request"
elif isinstance(default, (Form, File)):
return "form"
return None
| 23,575
|
def find_config_files(
path=['~/.vcspull'], match=['*'], filetype=['json', 'yaml'], include_home=False
):
"""Return repos from a directory and match. Not recursive.
Parameters
----------
path : list
list of paths to search
match : list
list of globs to search against
    filetype : list
        list of filetypes to search against
include_home : bool
Include home configuration files
Raises
------
LoadConfigRepoConflict :
There are two configs that have same path and name with different repo urls.
Returns
-------
list :
list of absolute paths to config files.
"""
configs = []
if include_home is True:
configs.extend(find_home_config_files())
if isinstance(path, list):
for p in path:
configs.extend(find_config_files(p, match, filetype))
return configs
else:
path = os.path.expanduser(path)
if isinstance(match, list):
for m in match:
configs.extend(find_config_files(path, m, filetype))
else:
if isinstance(filetype, list):
for f in filetype:
configs.extend(find_config_files(path, match, f))
else:
match = os.path.join(path, match)
match += ".{filetype}".format(filetype=filetype)
configs = glob.glob(match)
return configs
| 23,576
|
def main():
"""
Benchmark your model in your local pc.
"""
model = MobileUNet(input_shape=(img_size, img_size, 3))
inputs = np.random.randn(batch_num, img_size, img_size, 3)
time_per_batch = []
for i in range(10):
start = time.time()
model.predict(inputs, batch_size=batch_num)
elapsed = time.time() - start
time_per_batch.append(elapsed)
time_per_batch = np.array(time_per_batch)
# exclude 1st measure
print(time_per_batch[1:].mean())
print(time_per_batch[1:].std())
| 23,577
|
def sort_dict(original):
"""Recursively sorts dictionary keys and dictionary values in alphabetical order"""
if isinstance(original, dict):
res = (
dict()
) # Make a new "ordered" dictionary. No need for Collections in Python 3.7+
for k, v in sorted(original.items()):
res[k] = v
d = res
else:
d = original
for k in d:
if isinstance(d[k], str):
continue
if isinstance(d[k], list) and len(d[k]) > 1 and isinstance(d[k][0], str):
d[k] = sorted(d[k])
if isinstance(d[k], dict):
d[k] = sort_dict(d[k])
if isinstance(d[k], list) and len(d[k]) >= 1 and isinstance(d[k][0], dict):
for i in range(len(d[k])):
d[k][i] = sort_dict(d[k][i])
return d
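# Hedged example: keys and lists of strings are sorted recursively,
# scalar values are left untouched.
print(sort_dict({"b": ["z", "a"], "a": {"y": 1, "x": 2}}))
# {'a': {'x': 2, 'y': 1}, 'b': ['a', 'z']}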
| 23,578
|
def playonyt(topic):
"""Will play video on following topic, takes about 10 to 15 seconds to load"""
url = 'https://www.youtube.com/results?q=' + topic
count = 0
cont = requests.get(url)
data = str(cont.content)
lst = data.split('"')
for i in lst:
count+=1
if i == 'WEB_PAGE_TYPE_WATCH':
break
if lst[count-5] == "/results":
raise Exception("No video found.")
#print("Videos found, opening most recent video")
web.open("https://www.youtube.com"+lst[count-5])
return "https://www.youtube.com"+lst[count-5]
| 23,579
|
def train_classifier(classifier, features, labels):
"""This function must concern itself with training the classifier
on the specified data."""
return classifier.fit(features, labels)
| 23,580
|
def work_on_disk(dev, root_mb, swap_mb, image_path):
"""Creates partitions and write an image to the root partition."""
root_part = "%s-part1" % dev
swap_part = "%s-part2" % dev
if not is_block_device(dev):
LOG.warn(_("parent device '%s' not found"), dev)
return
make_partitions(dev, root_mb, swap_mb)
if not is_block_device(root_part):
LOG.warn(_("root device '%s' not found"), root_part)
return
if not is_block_device(swap_part):
LOG.warn(_("swap device '%s' not found"), swap_part)
return
dd(image_path, root_part)
mkswap(swap_part)
try:
root_uuid = block_uuid(root_part)
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed to detect root device UUID."))
return root_uuid
| 23,581
|
def get_annotation_names(viewer):
"""Detect the names of nodes and edges layers"""
layer_nodes_name = None
layer_edges_name = None
for layer in viewer.layers:
if isinstance(layer, napari.layers.points.points.Points):
layer_nodes_name = layer.name
elif isinstance(layer, napari.layers.shapes.shapes.Shapes):
layer_edges_name = layer.name
if layer_nodes_name is not None and layer_edges_name is not None:
break
return layer_nodes_name, layer_edges_name
| 23,582
|
def remap(kx,ky,lx,ly,qomt,datai):
"""
    Remap the k-space variable back to the shearing-periodic frame to
    reflect the time-dependent Eulerian wavenumber.
"""
ndim = datai.ndim
dim = np.array(datai.shape)# datai[nz,ny,nx]
sh_data = np.empty([dim[0],dim[1],dim[2]])
tp_data = np.empty([dim[0],dim[2]])
sh_kx = -qomt*ky*lx/ly
#nquist= np.max(np.fabs(kx))
for j in np.arange(0,dim[1]):
quot = int(np.floor(sh_kx[j]))
res = sh_kx[j]-float(quot)
#kx_new = kx[:] + sh_kx[j]
tp_data[:,:]= datai[:,j,:]
sh_data[:,j,:] = (1.0-res)*np.roll(tp_data,quot, axis=1) \
+ res*np.roll(tp_data,quot+1,axis=1)
#sh_data[:,j,kx_new[:]>nquist] = 0.0
return sh_data
| 23,583
|
def fips_disable():
"""
Disables FIPS on RH/CentOS system. Note that you must reboot the
system in order for FIPS to be disabled. This routine prepares
the system to disable FIPS.
CLI Example:
.. code-block:: bash
salt '*' ash.fips_disable
"""
installed_fips_pkgs = _get_installed_dracutfips_pkgs()
ret = { 'result': True }
old = {}
new = {}
try:
# Remove dracut-fips installations.
installed_fips_pkgs = _get_installed_dracutfips_pkgs()
if 'dracut-fips' in installed_fips_pkgs:
__salt__['pkg.remove']('dracut-fips')
old['Packages'] = installed_fips_pkgs
# If fips is in kernel, create a new boot-kernel.
if _is_fips_in_kernel():
_move_boot_kernel(False)
__salt__['cmd.run']("dracut -f", python_shell=False)
# Update grub.cfg file to remove the fips argument.
grub_args = _get_grub_args()
if 'fips=1' in grub_args:
cmd = 'grubby --update-kernel=ALL --remove-args=fips=1'
__salt__['cmd.run'](cmd, python_shell=False)
new['grubby'] = cmd
# Update GRUB command line entry to remove fips.
diff = _modify_grub_file(True)
if diff:
new['/etc/default/grub'] = diff
except Exception:
_rollback_fips_disable(installed_fips_pkgs)
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Unable to change state of system to FIPS-disabled.'
else:
if old:
ret['changes'] = {'old': old}
ret['comment'] = 'FIPS has been toggled to off.'
if new:
if 'changes' in ret:
ret['changes'].update({'new': new})
else:
ret['changes'] = {'new': new}
ret['comment'] = 'FIPS has been toggled to off.'
if fips_status() == 'enabled':
msg = ' Reboot system to place into FIPS-disabled state.'
if 'comment' in ret:
ret['comment'] = ret['comment'] + msg
else:
ret['comment'] = msg[1:]
if 'changes' not in ret and 'comment' not in ret:
ret['comment'] = 'FIPS mode is already disabled. No changes.'
finally:
return ret
| 23,584
|
def get_os_platform():
"""return platform name, but for Jython it uses os.name Java property"""
ver = sys.platform.lower()
if ver.startswith('java'):
import java.lang
ver = java.lang.System.getProperty("os.name").lower()
print('platform: %s' % (ver))
return ver
| 23,585
|
def buy_sell_fun_mp_org(datam, S1=1.0, S2=0.8):
"""
    Slope-indicator trading strategy using standardized scores.
"""
start_t = datetime.datetime.now()
print("begin-buy_sell_fun_mp:", start_t)
dataR = pd.DataFrame()
for code in datam.index.levels[1]:
# data = price.copy()
# price = datam.query("code=='%s'" % code)
# data = price.copy()
data = buy_sell_fun(datam, code)
# if code == '000732':
# print(data.tail(22))
if len(dataR) == 0:
dataR = data
else:
dataR = dataR.append(data)
end_t = datetime.datetime.now()
print(end_t, 'buy_sell_fun_mp spent:{}'.format((end_t - start_t)))
result01 = dataR['nav'].groupby(level=['date']).sum()
result02 = dataR['nav'].groupby(level=['date']).count()
num = dataR.flag.abs().sum()
dataR2 = pd.DataFrame({'nav':result01 - result02 + 1,'flag':0})
# dataR2['flag'] = 0
dataR2.iat[-1,1] = num
# result['nav'] = result['nav'] - len(datam.index.levels[1]) + 1
return dataR2
| 23,586
|
def marshall_namedtuple(obj):
"""
    This method takes any atomic value, list, dictionary or namedtuple, and
    recursively translates any namedtuples it contains into dictionaries
"""
recurse = lambda x: map(marshall_namedtuple, x)
obj_is = partial(isinstance, obj)
if hasattr(obj, '_marshall'):
return marshall_namedtuple(obj._marshall())
elif obj_is(tuple) and hasattr(obj, '_fields'): # namedtuple
fields = zip(obj._fields, recurse(obj))
class_name = obj.__class__.__name__
return dict(fields, **{'_type': class_name})
elif obj_is((collections.abc.Mapping,dict)):
return type(obj)(zip(obj.keys(), recurse(obj.values())))
elif obj_is(collections.abc.Iterable) and not obj_is(str):
return type(obj)(recurse(obj))
elif obj_is(abc.ABC):
return {
'_instance_of': obj.__class__.__name__
}
elif obj_is(abc.ABCMeta):
return {
'_class': obj.__name__
}
else:
return obj
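# Hedged example (assumes the snippet's own module-level imports such as
# functools.partial): a namedtuple becomes a dict tagged with its class name.
from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])
print(marshall_namedtuple(Point(1, 2)))  # {'x': 1, 'y': 2, '_type': 'Point'}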
| 23,587
|
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
Returns a Variable
"""
# print("prepping images")
img = cv2.resize(img, (inp_dim, inp_dim))
img = img[:,:,::-1].transpose((2,0,1)).copy()
img = torch.from_numpy(img).float().div(255.0)
# print("prepped images")
return img
| 23,588
|
def best_broaders(supers_for_all_entities: Dict,
per_candidate_links_and_supers: List[Dict],
num_best: int = 5,
super_counts_field: str = "broader_counts",
doprint=False,
representativeness_threshold=0.1):
"""
Returns the best matching super for a candidate class, according to a list of supers for entities in the class
and entities in the whole corpus. If comparing to a taxonomy, a super is a broader.
@param super_counts_field:
@param super_counts: a dictionary that has, for every possible entity, the supers it belongs to
@param per_candidate_links_and_supers: a list of dictionaries, one per candidate. Fro each, at least
two fields are expected "entities" containing the list of entities, and that given by super_counts_field
which is, in turn, a dictionary whose keys are supers and whose values are the number of entities in that
candidate having this broad
@param num_best: maximum number of best matching supers to be returned
@return: for every candidate class, the num_best best matching supers and their log odds ratio
"""
result = []
global_counts = dict()
for ent, bros in supers_for_all_entities.items():
for bro in bros:
global_counts[bro] = global_counts.get(bro, 0) + 1
onlytopmost = []
for can in per_candidate_links_and_supers:
# For this entity, the following dictionaries have an element for every possible super
# Using notation from the paper
        # T_cc : the number of entities narrower to a candidate which are tagged with NER type T
T_cc = {x: y for x, y in can[super_counts_field].items()
if y > representativeness_threshold * len(can["entities"])}
if len(T_cc) == 0:
T_cc = {x: y for x, y in can[super_counts_field].items()}
        # T_w : the number of entities in the whole corpus tagged with T
T_w = {y: global_counts[y] for y in T_cc.keys()}
# w : the total number of entities in the whole corpus
w = float(len(supers_for_all_entities))
# cc : the total number of entities in this candidate
cc = float(len(can["entities"]))
# dict of the form super : log_odds
log_odds_per_super = {x: math.log((T_cc[x] / cc) / (T_w[x] / w))
for x in T_cc.keys()}
logslist = list(log_odds_per_super.items())
logslist.sort(key=lambda x: x[1])
logslist.reverse()
maxbroads = min(len(logslist), num_best)
logodds = []
for bi in range(maxbroads):
logodds.append({"candidatesbroader": logslist[bi][0],
"loggods": logslist[bi][1]})
can["log_odds"] = logodds
if doprint:
print("\t\t---", ", ".join([str(x[1]) for x in logslist[:maxbroads]]))
if len(logslist) > 0:
onlytopmost.append(logslist[0][1])
can["best_match_broader"] = logslist[0][0]
else:
onlytopmost.append(None)
can["best_match_broader"] = None
return onlytopmost
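# Illustration of the log-odds score used above (numbers are made up): a super
# covering 3 of 10 candidate entities but only 30 of 10000 corpus entities
# scores log((3/10) / (30/10000)) = log(100) ~ 4.61.
import math
print(math.log((3 / 10) / (30 / 10000)))  # 4.605...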
| 23,589
|
def clump_list_sort(clump_list):
"""Returns a copy of clump_list, sorted by ascending minimum density. This
eliminates overlap when passing to
yt.visualization.plot_modification.ClumpContourCallback"""
minDensity = [c['Density'].min() for c in clump_list]
args = np.argsort(minDensity)
list = nar(clump_list)[args]
reverse = range(list.size-1,-1,-1)
return list[reverse]
| 23,590
|
def _prepare_artifact(
metadata_handler: metadata.Metadata,
uri: Text,
properties: Dict[Text, Any],
custom_properties: Dict[Text, Any],
reimport: bool, output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
"""Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
"""
absl.logging.info(
'Processing source uri: %s, properties: %s, custom_properties: %s' %
(uri, properties, custom_properties))
# Check types of custom properties.
for key, value in custom_properties.items():
if not isinstance(value, (int, Text, bytes)):
raise ValueError(
('Custom property value for key %r must be a string or integer '
'(got %r instead)') % (key, value))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(
uri)
# Only consider previous artifacts as candidates to reuse, if the properties
# of the imported artifact match those of the existing artifact.
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for key, value in properties.items():
if getattr(candidate_artifact, key) != value:
is_candidate = False
break
for key, value in custom_properties.items():
if isinstance(value, int):
if candidate_artifact.get_int_custom_property(key) != value:
is_candidate = False
break
elif isinstance(value, (Text, bytes)):
if candidate_artifact.get_string_custom_property(key) != value:
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
for key, value in properties.items():
setattr(result, key, value)
for key, value in custom_properties.items():
if isinstance(value, int):
result.set_int_custom_property(key, value)
elif isinstance(value, (Text, bytes)):
result.set_string_custom_property(key, value)
# If a registered artifact has the same uri and properties and the user does
# not explicitly ask for reimport, reuse that artifact.
if bool(previous_artifacts) and not reimport:
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=lambda m: m.id))
return result
| 23,591
|
def ParseCLILines(lines, skipStartLines=0, lastSkipLineRe=None, skipEndLines=0):
    """Delete first few and last few lines in an array"""
    if skipStartLines > 0:
        if lastSkipLineRe is not None:
            # sanity check: make sure the last line to skip matches the given regexp
            if re.match(lastSkipLineRe, lines[skipStartLines - 1]) is None:
                raise exceptions.MalformedIO("Expected '%s' at line %d of result, but found '%s'." % (lastSkipLineRe, skipStartLines, lines[skipStartLines - 1].strip()))
        if len(lines) < skipStartLines:
            raise exceptions.MalformedIO("Can't skip first %d lines of result %s. It only contains %d lines." % (skipStartLines, repr(lines), len(lines)))
        del lines[0:skipStartLines]
    if skipEndLines > 0:
        if len(lines) < skipEndLines:
            raise exceptions.MalformedIO("Can't skip last %d lines of result %s. It only contains %d lines." % (skipEndLines, repr(lines), len(lines)))
        del lines[-skipEndLines:]
    return lines
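# Worked example, not from the original code: drop a two-line banner whose
# last line must match "^ok", plus a one-line trailer.
cli_output = ["banner", "ok", "data1", "data2", "prompt>"]
print(ParseCLILines(cli_output, skipStartLines=2, lastSkipLineRe=r"^ok", skipEndLines=1))
# ['data1', 'data2']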
| 23,592
|
def test_screen_size(stdscr, height, width):
"""
Test that the current screen is larger than height x width.
    If it's not, print a warning and give the user a chance to enlarge the screen.
    Wait until Y/y is provided by the user.
"""
h, w = stdscr.getmaxyx()
    curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
color = curses.color_pair(1) + curses.A_BOLD
while h < height or w < width:
stdscr.addstr(0, 0, " ")
stdscr.addstr(1, 0, " " )
stdscr.addstr(2, 0, " ")
stdscr.refresh()
sleep(.3)
stdscr.addstr(0, 0, "The screen window is too small", color)
stdscr.addstr(1, 0, "Must be at least {} high and {} wide currently is high: {} wide: {}".format(height, width, h, w), color)
stdscr.addstr(2, 0, "To quit hit Cntrl C/Z otherwise enlarge the screen and hit y/Y+Return", color)
stdscr.refresh()
ch = " "
while not(ch == ord('y') or ch == ord('Y')):
ch = stdscr.getch()
char = chr(ch)
h, w = stdscr.getmaxyx()
| 23,593
|
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
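# Quick shape check (assumes the snippet's module-level numpy import as np).
params = initialize_parameters(n_x=2, n_h=4, n_y=1)
print(params["W1"].shape, params["b1"].shape)  # (4, 2) (4, 1)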
| 23,594
|
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
    source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
| 23,595
|
def get_df_from_sampled_trips(step_trip_list, show_service_data=False, earliest_datetime=None):
"""Get dataframe from sampled trip list.
Parameters
----------
step_trip_list : list of lists
        List of trip lists occurring in the same step.
    show_service_data : bool
        Show trip pickup and dropoff results.
    earliest_datetime : datetime
        Trip start time minus the rebalance offset.
Returns
-------
DataFrame
Dataframe with trip data info.
"""
d = defaultdict(list)
for step, trips in enumerate(step_trip_list):
for t in trips:
d["placement_datetime"].append(t.placement)
d["step"].append(step + 1)
d["pk_id"].append(t.o.id)
d["dp_id"].append(t.d.id)
d["sq_class"].append(t.sq_class)
d["max_delay"].append(t.max_delay)
d["elapsed_sec"].append(t.elapsed_sec)
d["max_delay_from_placement"].append(t.max_delay_from_placement)
d["delay_close_step"].append(t.delay_close_step)
d["tolerance"].append(t.tolerance)
lon_o, lat_o = nw.tenv.lonlat(t.o.id)
lon_d, lat_d = nw.tenv.lonlat(t.d.id)
d["passenger_count"].append(1)
d["pickup_latitude"].append(lat_o)
d["pickup_longitude"].append(lon_o)
d["dropoff_latitude"].append(lat_d)
d["dropoff_longitude"].append(lon_d)
if show_service_data:
if t.pk_delay is not None:
pickup_datetime = t.placement + timedelta(
minutes=t.pk_delay
)
pickup_datetime_str = datetime.strftime(
pickup_datetime, "%Y-%m-%d %H:%M:%S"
)
if t.dropoff_time is not None:
dropoff_datetime = earliest_datetime + timedelta(
minutes=t.dropoff_time
)
dropoff_datetime_str = datetime.strftime(
dropoff_datetime, "%Y-%m-%d %H:%M:%S"
)
d["times_backlogged"].append(t.times_backlogged)
d["pickup_step"].append(
t.pk_step if t.pk_step is not None else "-"
)
d["dropoff_step"].append(
t.dp_step if t.dp_step is not None else "-"
)
d["pickup_delay"].append(
t.pk_delay if t.pk_delay is not None else "-"
)
d["pickup_duration"].append(
t.pk_duration if t.pk_duration is not None else "-"
)
d["pickup_datetime"].append(
pickup_datetime_str if t.pk_delay is not None else "-"
)
d["dropoff_time"].append(
t.dropoff_time if t.dropoff_time is not None else "-"
)
d["dropoff_datetime"].append(
dropoff_datetime_str if t.dropoff_time is not None else "-"
)
d["picked_by"].append(t.picked_by)
df = pd.DataFrame.from_dict(dict(d))
df.sort_values(by=["placement_datetime", "sq_class"], inplace=True)
return df
| 23,596
|
def compute_v_y(transporter, particles):
"""
Compute values of V y on grid specified in bunch configuration
:param transporter: transport function
:param particles: BunchConfiguration object, specification of grid
:return: matrix with columns: x, theta_x, y, theta_y, pt, V y
"""
return __compute_optical_function(transporter, particles, Parameters.V_Y)
| 23,597
|
def normal_scaler(s3_bucket_input, s3_bucket_output, objects=(), dry_run=False):
""" Scale the values in a dataset to fit a unit normal distribution. """
if not objects:
# Allow user to specify objects, or otherwise get all objects.
objects = get_all_keys(s3_bucket_input)
# Calculate bounds for each chunk.
timer = Timer("NORMAL_SCALING").set_step("LocalRange")
creds = get_redis_creds()
launch_threads(LocalRange, objects, MAX_LAMBDAS, s3_bucket_input, creds)
timer.timestamp().set_step("Creating the global map")
client = boto3.client("s3")
f_ranges = get_global_map(s3_bucket_input, objects, client)
timer.timestamp().set_step("Putting local maps")
s3_resource = boto3.resource("s3")
update_local_maps(s3_bucket_input, objects, f_ranges, client,
s3_resource)
timer.timestamp().set_step("Local scaling")
if not dry_run:
# Scale the chunks and put them in the output bucket.
launch_threads(LocalScale, objects, MAX_LAMBDAS,
s3_bucket_input, s3_bucket_output, creds)
timer.timestamp().set_step("Deleting local maps")
# Delete any intermediary keys.
for i in objects:
s3_resource.Object(s3_bucket_input, str(i) + "_final_bounds").delete()
timer.timestamp()
| 23,598
|
def gabor_kernel_nodc(frequency, theta=0, bandwidth=1, gamma=1,
n_stds=3, offset=0):
"""
Return complex 2D Gabor filter kernel with no DC offset.
This function is a modification of the gabor_kernel function of scikit-image
Gabor kernel is a Gaussian kernel modulated by a complex harmonic function.
Harmonic function consists of an imaginary sine function and a real
cosine function. Spatial frequency is inversely proportional to the
wavelength of the harmonic and to the standard deviation of a Gaussian
kernel. The bandwidth is also inversely proportional to the standard
deviation.
Parameters
----------
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float, optional
        The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
        and `sigma_y` will decrease with increasing frequency.
    gamma : float, optional
        gamma changes the aspect ratio (ellipsoidal) of the gabor filter.
        By default, gamma=1 which means no aspect ratio (circle)
        if gamma>1, the filter is larger (x-dir)
        if gamma<1, the filter is higher (y-dir)
    sigma_x, sigma_y : float
        Standard deviations in the x- and y-directions, derived internally from
        `bandwidth`, `gamma` and `frequency` (they are not arguments of this
        function). These directions apply to the kernel *before* rotation. If
        `theta = pi/2`, then the kernel is rotated 90 degrees so that `sigma_x`
        controls the *vertical* direction.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g_nodc : complex 2d array
A single gabor kernel (complex) with no DC offset
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filters import gabor_kernel
>>> from skimage import io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> gk = gabor_kernel(frequency=0.2)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # more ripples (equivalent to increasing the size of the
>>> # Gaussian spread)
>>> gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
# set gaussian parameters
b = bandwidth
sigma_pref = 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * (2.0 ** b + 1) / (2.0 ** b - 1)
sigma_y = sigma_pref / frequency
sigma_x = sigma_y/gamma
# meshgrid
x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
np.abs(n_stds * sigma_y * np.sin(theta)), 1))
y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
np.abs(n_stds * sigma_x * np.sin(theta)), 1))
y, x = np.mgrid[-y0:y0 + 1, -x0:x0 + 1]
# rotation matrix
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
    # combine the gaussian envelope and the complex harmonic function
    g = np.zeros(y.shape, dtype=complex)
g[:] = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
g /= 2 * np.pi * sigma_x * sigma_y # gaussian envelope
oscil = np.exp(1j * (2 * np.pi * frequency * rotx + offset)) # harmonic / oscilatory function
g_dc = g*oscil
# remove dc component by subtracting the envelope weighted by K
K = np.sum(g_dc)/np.sum(g)
g_nodc = g_dc - K*g
return g_nodc
| 23,599
|