def __process_opss_args(optional_arg_map):
"""
Determine if the user is using opss wallet and if so, get the passphrase.
:param optional_arg_map: the optional arguments map
:raises CLAException: if getting the passphrase from the user fails
"""
_method_name = '__process_opss_args'
if CommandLineArgUtil.OPSS_WALLET_SWITCH in optional_arg_map and \
CommandLineArgUtil.OPSS_WALLET_PASSPHRASE not in optional_arg_map:
try:
passphrase = getcreds.getpass('WLSDPLY-20027')
except IOException, ioe:
ex = exception_helper.create_cla_exception('WLSDPLY-20028', ioe.getLocalizedMessage(),
error=ioe)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
optional_arg_map[CommandLineArgUtil.OPSS_WALLET_PASSPHRASE] = String(passphrase)
    return

def processing(log: EventLog, causal: Tuple[str, str], follows: Tuple[str, str]):
"""
Applying the Alpha Miner with the new relations
Parameters
-------------
log
Filtered log
causal
Pairs that have a causal relation (->)
follows
Pairs that have a follow relation (>)
Returns
-------------
net
Petri net
im
Initial marking
fm
Final marking
"""
# create list of all events
labels = set()
start_activities = set()
end_activities = set()
    for trace in log:
        start_activities.add(trace[0])
        end_activities.add(trace[len(trace) - 1])
        for event in trace:
            labels.add(event)
labels = list(labels)
pairs = []
for key, element in causal.items():
for item in element:
if get_sharp_relation(follows, key, key):
if get_sharp_relation(follows, item, item):
pairs.append(({key}, {item}))
# combining pairs
for i in range(0, len(pairs)):
t1 = pairs[i]
for j in range(i, len(pairs)):
t2 = pairs[j]
if t1 != t2:
if t1[0].issubset(t2[0]) or t1[1].issubset(t2[1]):
if get_sharp_relations_for_sets(follows, t1[0], t2[0]) and get_sharp_relations_for_sets(follows,
t1[1],
t2[1]):
new_alpha_pair = (t1[0] | t2[0], t1[1] | t2[1])
if new_alpha_pair not in pairs:
pairs.append((t1[0] | t2[0], t1[1] | t2[1]))
# maximize pairs
cleaned_pairs = list(filter(lambda p: __pair_maximizer(pairs, p), pairs))
# create transitions
net = PetriNet('alpha_plus_net_' + str(time.time()))
label_transition_dict = {}
for label in labels:
if label != 'artificial_start' and label != 'artificial_end':
label_transition_dict[label] = PetriNet.Transition(label, label)
net.transitions.add(label_transition_dict[label])
else:
label_transition_dict[label] = PetriNet.Transition(label, None)
net.transitions.add(label_transition_dict[label])
# and source and sink
src = add_source(net, start_activities, label_transition_dict)
sink = add_sink(net, end_activities, label_transition_dict)
# create places
for pair in cleaned_pairs:
place = PetriNet.Place(str(pair))
net.places.add(place)
for in_arc in pair[0]:
add_arc_from_to(label_transition_dict[in_arc], place, net)
for out_arc in pair[1]:
add_arc_from_to(place, label_transition_dict[out_arc], net)
    return net, Marking({src: 1}), Marking({sink: 1}), cleaned_pairs

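# The maximization step above relies on a `__pair_maximizer` helper that is not
# shown here. A minimal sketch of what it presumably does, under the usual
# alpha-miner rule that a candidate pair survives only if no other candidate
# subsumes it on both sides:
def __pair_maximizer(alpha_pairs, pair):
    """Return True if `pair` is not strictly contained in another candidate pair."""
    for alt in alpha_pairs:
        if pair != alt and pair[0].issubset(alt[0]) and pair[1].issubset(alt[1]):
            return False
    return True
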
def word_tokenize(string: str, language: str = "english") -> List[str]:
"""tokenizes a given string into a list of substrings.
:param string: String to tokenize.
:param language: Language. Either one of ``english'' or ``german''.
"""
if language not in ["english", "german"]:
raise ValueError("language argument has to be either ``english'' or ``german''")
# excessive whitespaces
string = re.sub(r"\s+", " ", string)
# some unicode characters
string = string.replace("’", "'")
string = string.replace("”", '"')
string = string.replace("“", '"')
    # floating point: guard the decimal dot with '_' during tokenization (e.g., 1.3 => 1._3)
string = re.sub(r"(\d+)\.(\d+)", r"\g<1>._\g<2>", string)
# percentage (e.g., below.500 => below .500)
string = re.sub(r"(\w+)\.(\d+)", r"\g<1> ._\g<2>", string)
# end of quote
string = string.replace(".``", ". ``")
# number with apostrophe (e.g. '90)
string = re.sub(r"\s'(\d+)", r"' \g<1>", string)
# names with Initials (e.g. C. J. Miles)
string = re.sub(r"(^|\s)(\w)\. (\w)\.", r"\g<1>\g<2>._ \g<3>._", string)
# some dots
string = string.replace("..", " ..")
# names with apostrophe => expands temporarily
string = re.sub(r"\w+'(?!d|s|ll|t|re|ve|\s)", r"\g<0>_", string)
# win-loss scores (German notation seems to be XX:YY, but this is also the time format,
# and the times are not tokenized in the original RotoWire. So we manually handle XX:YY
# expression.
string = re.sub(r"(\d+)-(\d+)", r"\g<1> - \g<2>", string)
string = re.sub(r"(\d+)-of-(\d+)", r"\g<1> - of - \g<2>", string)
# actual tokenization
tokenized = nltk.word_tokenize(string, language=language)
joined = " ".join(tokenized)
# shrink expanded name-with-apostrophe expressions
joined = joined.replace("'_", "'")
# shrink expanded name-with-initial expressions
joined = joined.replace("._", ".")
tokenized = joined.split(" ")
    return tokenized

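# Illustrative call (assumes nltk and its 'punkt' data are installed; the exact
# token boundaries are ultimately up to nltk's tokenizer):
tokens = word_tokenize("Miles went 4-of-7 and scored 1.3 points per minute.")
print(tokens)
# expected along the lines of:
# ['Miles', 'went', '4', '-', 'of', '-', '7', 'and', 'scored', '1.3', 'points', 'per', 'minute', '.']
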
def get_project_id(file_name):
""" Extracts project ID from intput BAM filename.
:param file_name: string e.g. "/PITT_0452_AHG2THBBXY_A1___P10344_C___13_cf_IGO_10344_C_20___hg19___MD.bam"
:return: string e.g. "P10344_C"
"""
regex = "(?<=___)P[0-9]{5}[_A-Z,a-z]*(?=___)" # Valid project ID is "P" + 5 numbers + (optional) [ "_" + 2 letters]
matches = re.findall(regex, file_name)
if len(matches) == 0:
print("ERROR: Could not find IGO ID in filename: %s with regex: \"%s\"" % (file_name, regex))
sys.exit(1)
if len(matches) > 1:
print("WARNING: More than one match: %s" % str(matches))
    return matches[0]

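# Quick check against the docstring's own example:
bam = "/PITT_0452_AHG2THBBXY_A1___P10344_C___13_cf_IGO_10344_C_20___hg19___MD.bam"
print(get_project_id(bam))  # P10344_C
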
def forecasting_barplot(
dict_algo,
metric='rmsle',
plot_name='forecasting',
x_labels=['ALDI', 'ALDI++'],
figsize=(16,32),
ylim=(2,3),
fontsize=40
):
"""Plot the chosen forecasting RMSE based on different discord detectors"""
fig, ax = plt.subplots(figsize=figsize)
sns.barplot(
x=list(dict_algo.keys()),
y=list(dict_algo.values()),
orient='v', ax=ax
)
ax.set_ylabel("RMSLE", fontsize=fontsize)
ax.set_xlabel("Discord detectors", fontsize=fontsize)
ax.set_xticklabels(x_labels)
ax.tick_params(length=20, direction="inout", labelsize=fontsize)
plt.ylim(ylim)
plt.hlines(
xmin=0 - 0.5,
xmax=len(list(dict_algo.keys()))-0.5,
y=list(dict_algo.values())[0],
colors='r',
        linewidth=3  # horizontal reference line at the first detector's value
)
plt.tight_layout()
    fig.savefig(f'img/barplot_comparison-{plot_name}.png', format='PNG')

def modify_scaffolds_with_coords(scaffolds, coords):
""" Gets scaffolds and fills in the right data.
Inputs:
* scaffolds: dict. as returned by `build_scaffolds_from_scn_angles`
* coords: (L, 14, 3). sidechainnet tensor. same device as scaffolds
Outputs: corrected scaffolds
"""
# calculate distances and update:
# N, CA, C
scaffolds["bond_mask"][1:, 0] = torch.norm(coords[1:, 0] - coords[:-1, 2], dim=-1) # N
scaffolds["bond_mask"][ :, 1] = torch.norm(coords[ :, 1] - coords[: , 0], dim=-1) # CA
scaffolds["bond_mask"][ :, 2] = torch.norm(coords[ :, 2] - coords[: , 1], dim=-1) # C
# O, CB, side chain
selector = np.arange(len(coords))
for i in range(3, 14):
# get indexes
idx_a, idx_b, idx_c = scaffolds["point_ref_mask"][:, :, i-3] # (3, L, 11) -> 3 * (L, 11)
# correct distances
scaffolds["bond_mask"][:, i] = torch.norm(coords[:, i] - coords[selector, idx_c], dim=-1)
# get angles
scaffolds["angles_mask"][0, :, i] = get_angle(coords[selector, idx_b],
coords[selector, idx_c],
coords[:, i])
# handle C-beta, where the C requested is from the previous aa
if i == 4:
# for 1st residue, use position of the second residue's N
first_next_n = coords[1, :1] # 1, 3
# the c requested is from the previous residue
main_c_prev_idxs = coords[selector[:-1], idx_a[1:]]# (L-1), 3
# concat
coords_a = torch.cat([first_next_n, main_c_prev_idxs])
else:
coords_a = coords[selector, idx_a]
# get dihedrals
scaffolds["angles_mask"][1, :, i] = get_dihedral(coords_a,
coords[selector, idx_b],
coords[selector, idx_c],
coords[:, i])
# correct angles and dihedrals for backbone
scaffolds["angles_mask"][0, :-1, 0] = get_angle(coords[:-1, 1], coords[:-1, 2], coords[1: , 0]) # ca_c_n
scaffolds["angles_mask"][0, 1:, 1] = get_angle(coords[:-1, 2], coords[1:, 0], coords[1: , 1]) # c_n_ca
scaffolds["angles_mask"][0, :, 2] = get_angle(coords[:, 0], coords[ :, 1], coords[ : , 2]) # n_ca_c
# N determined by previous psi = f(n, ca, c, n+1)
scaffolds["angles_mask"][1, :-1, 0] = get_dihedral(coords[:-1, 0], coords[:-1, 1], coords[:-1, 2], coords[1:, 0])
# CA determined by omega = f(ca, c, n+1, ca+1)
scaffolds["angles_mask"][1, 1:, 1] = get_dihedral(coords[:-1, 1], coords[:-1, 2], coords[1:, 0], coords[1:, 1])
# C determined by phi = f(c-1, n, ca, c)
scaffolds["angles_mask"][1, 1:, 2] = get_dihedral(coords[:-1, 2], coords[1:, 0], coords[1:, 1], coords[1:, 2])
    return scaffolds

def multinomial(**kwargs):
"""
Load multinomial toxicity model.
Parameters
----------
validate: bool, optional (default=True)
if True, malaya will check model availability and download if not available.
Returns
-------
BAYES : malaya._models._sklearn_model.MULTILABEL_BAYES class
"""
import pickle
check_file(
PATH_TOXIC['multinomial'], S3_PATH_TOXIC['multinomial'], **kwargs
)
try:
with open(PATH_TOXIC['multinomial']['model'], 'rb') as fopen:
multinomial = pickle.load(fopen)
with open(PATH_TOXIC['multinomial']['vector'], 'rb') as fopen:
vectorize = pickle.load(fopen)
    except Exception:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('toxic/multinomial') and try again"
)
from .stem import _classification_textcleaning_stemmer
return MULTILABEL_BAYES(
models = multinomial,
vectors = vectorize,
cleaning = _classification_textcleaning_stemmer,
    )

def create_bucket(bucket_name, region="us-west-2"):
"""Create an S3 bucket in a specified region
:param bucket_name: Bucket to create
:param region: String region to create bucket in, e.g., 'us-west-2'
    :return: the S3 client if the bucket exists or was created, else None
"""
# Create bucket
try:
# get list of existing buckets
s3_client = boto3.client('s3', region_name=region)
list_buckets = s3_client.list_buckets()
for bucket in list_buckets['Buckets']:
if bucket["Name"] == bucket_name:
print("------- Bucket already exists")
return s3_client
location = {'LocationConstraint': region}
s3_client.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration=location)
return s3_client
except ClientError as e:
logging.error(e)
        return

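# Hedged usage sketch; assumes AWS credentials are already configured for boto3
# and that the bucket name below is illustrative:
client = create_bucket("my-example-bucket-1234", region="us-west-2")
if client is not None:
    print("bucket ready")
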
def print_tf_graph(graph):
"""Prints tensorflow graph in dictionary form."""
for node in graph:
for child in graph[node]:
print("%s -> %s" % (node.name, child.name)) | 29,508 |
def create_client():
"""Return a client socket that may be connected to a remote address."""
    return _new_sock()

def test_pyramid_app_with_additional_fixtures(
pyramid_app_with_additional_fixtures: TestApp, request: FixtureRequest
) -> None:
"""
Test that pyramid_app factory works with additional_fixtures.
It checks if additional_fixtures are loaded for the test.
"""
assert set(request.fixturenames) == set(
[
"pyramid_app_with_additional_fixtures",
"request",
"pyramid_config_path",
"dummy_fixture",
]
    )

def derivative_surface(obj):
""" Computes the hodograph (first derivative) surface of the input surface.
This function constructs the hodograph (first derivative) surface from the input surface by computing the degrees,
knot vectors and the control points of the derivative surface.
The return value of this function is a tuple containing the following derivative surfaces in the given order:
* U-derivative surface (derivative taken only on the u-direction)
* V-derivative surface (derivative taken only on the v-direction)
* UV-derivative surface (derivative taken on both the u- and the v-direction)
:param obj: input surface
:type obj: abstract.Surface
:return: derivative surfaces w.r.t. u, v and both u-v
:rtype: tuple
"""
if not isinstance(obj, abstract.Surface):
raise TypeError("Input shape must be an instance of abstract.Surface class")
if obj.rational:
warnings.warn("Cannot compute hodograph surface for a rational surface")
return obj
# Find the control points of the derivative surface
d = 2 # 0 <= k + l <= d, see pg. 114 of The NURBS Book, 2nd Ed.
pkl = evaluators.SurfaceEvaluator2.derivatives_ctrlpts(r1=0, r2=obj.ctrlpts_size_u - 1,
s1=0, s2=obj.ctrlpts_size_v - 1,
degree_u=obj.degree_u, degree_v=obj.degree_v,
ctrlpts_size_u=obj.ctrlpts_size_u,
ctrlpts_size_v=obj.ctrlpts_size_v,
knotvector_u=obj.knotvector_u, knotvector_v=obj.knotvector_v,
ctrlpts=obj.ctrlpts2d,
dimension=obj.dimension,
deriv_order=d)
ctrlpts2d_u = []
for i in range(0, len(pkl[1][0]) - 1):
ctrlpts2d_u.append(pkl[1][0][i])
surf_u = copy.deepcopy(obj)
surf_u.degree_u = obj.degree_u - 1
surf_u.ctrlpts2d = ctrlpts2d_u
surf_u.knotvector_u = obj.knotvector_u[1:-1]
surf_u.delta = obj.delta
ctrlpts2d_v = []
for i in range(0, len(pkl[0][1])):
ctrlpts2d_v.append(pkl[0][1][i][0:-1])
surf_v = copy.deepcopy(obj)
surf_v.degree_v = obj.degree_v - 1
surf_v.ctrlpts2d = ctrlpts2d_v
surf_v.knotvector_v = obj.knotvector_v[1:-1]
surf_v.delta = obj.delta
ctrlpts2d_uv = []
for i in range(0, len(pkl[1][1]) - 1):
ctrlpts2d_uv.append(pkl[1][1][i][0:-1])
# Generate the derivative curve
surf_uv = obj.__class__()
surf_uv.degree_u = obj.degree_u - 1
surf_uv.degree_v = obj.degree_v - 1
surf_uv.ctrlpts2d = ctrlpts2d_uv
surf_uv.knotvector_u = obj.knotvector_u[1:-1]
surf_uv.knotvector_v = obj.knotvector_v[1:-1]
surf_uv.delta = obj.delta
    return surf_u, surf_v, surf_uv

def configure(config):
"""
| [bing ] | example | purpose |
| -------- | ------- | ------- |
| api_key | VBsdaiY23sdcxuNG1gP+YBsCwJxzjfHgdsXJG5 | Bing Primary Account Key |
"""
chunk = ''
if config.option('Configuring bing search module', False):
config.interactive_add('bing', 'api_key', 'Bing Primary Account Key', '')
    return chunk

def CVRMSE(ip1,ip2):
""" The normalized RMSE (= Root Mean Square Error) is defined as CVRMSE(X,Y) = sqrt[ sum_i(Yi-Xi)^2 / N ] / mean(Yi) ) """
stats = ip1.getStatistics()
    return RMSE(ip1,ip2) / stats.mean

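# RMSE is assumed to be defined elsewhere in the same script. A minimal sketch
# of what it presumably computes for two ImageJ ImageProcessor objects of equal
# size (same Jython environment as CVRMSE above):
from math import sqrt

def RMSE(ip1, ip2):
    # Root mean square of the pixel-wise differences.
    n = ip1.getWidth() * ip1.getHeight()
    total = 0.0
    for i in range(n):
        d = ip1.getf(i) - ip2.getf(i)
        total += d * d
    return sqrt(total / n)
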
def get_verified_aid_pairs(ibs):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from wbia_cnn._plugin import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb('NNP_Master3', allow_newdir=True)
>>> verified_aid1_list, verified_aid2_list = get_verified_aid_pairs(ibs)
"""
# Grab marked hard cases
am_rowids = ibs._get_all_annotmatch_rowids()
remove_photobombs = True
if remove_photobombs:
flags = ibs.get_annotmatch_is_photobomb(am_rowids)
am_rowids = ut.filterfalse_items(am_rowids, flags)
verified_aid1_list = ibs.get_annotmatch_aid1(am_rowids)
verified_aid2_list = ibs.get_annotmatch_aid2(am_rowids)
    return verified_aid1_list, verified_aid2_list

def post_scan_handler(active_scans, results):
"""
Process the scanned files and yield the modified results.
Parameters:
- `active_scans`: a list of scanners names requested in the current run.
- `results`: an iterable of scan results for each file or directory.
"""
    pass

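# The body above is a stub. A minimal sketch of one possible implementation
# under the documented contract (the 'scans_run' field is hypothetical, purely
# for illustration):
def post_scan_handler(active_scans, results):
    for scanned in results:
        # annotate each result with the scanners that were requested
        scanned['scans_run'] = list(active_scans)
        yield scanned
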
def audio_sort_key(ex):
"""Sort using duration time of the sound spectrogram."""
    return ex.src.size(1)

def decorateEosWorkFlowWithPrintOutputsEveryNSteps(inpObj,printInterval=5):
""" For backwards compatability only, just calls wflowCoord.decorateWorkFlowWithPrintOutputsEveryNSteps
"""
wflowCoord.decorateWorkFlowWithPrintOutputsEveryNSteps(inpObj,printInterval=5) | 29,517 |
def print_dot(graph, options):
"""Print a dependency graph to standard output as a dot input file.
graph: a tl.eggdeps.graph.Graph instance
options: an object that provides formatting options as attributes
cluster: bool, cluster direct dependencies of each root distribution?
version_numbers: bool, print version numbers of active distributions?
comment: str, optional, will be included at the top of the dot file
"""
if hasattr(options, 'comment'):
for line in options.comment.splitlines():
print('// ' + line)
direct_deps = set()
for name in graph.roots:
direct_deps.update(graph[name])
print("digraph {")
for node in graph.values():
node_options = {}
if options.version_numbers and node.dist:
node_options["label"] = "%s %s" % (node.name, node.dist.version)
else:
node_options["label"] = node.name
def fill(color):
node_options["style"] = "filled"
node_options["fillcolor"] = color
if node.name in direct_deps:
fill("yellow")
if node.name in graph.roots:
fill("green")
if not node.follow:
fill("lightgrey")
if not node.compatible:
fill("red")
print('"%s"%s' % (node.name, format_options(node_options)))
if options.cluster:
for i, cluster in enumerate(yield_clusters(graph)):
print("subgraph cluster_%s {" % i)
for name in sorted(cluster):
print('"%s"' % name)
print("}")
for node in graph.values():
for dep, extras in node.iter_deps():
edge_options = {}
if extras:
edge_options["color"] = "lightgrey"
print('"%s" -> "%s"%s' % (node.name,
dep, format_options(edge_options)))
print("}") | 29,518 |
def data_restore(directory, model):
"""Reverse the effect of `_dump_weights` by loading individual tensors
into the model."""
log.info("Replacing model state with data from", directory)
leaves = [module for module in model.modules()
if len(list(module.children())) == 0]
for i, module in enumerate(leaves):
layer_name = format(i, "03") + "_" + type(module).__name__
for key, value in module.state_dict().items():
name = layer_name + "_" + key
            checkpoint.load_tensor(directory, name, value)

def saveplot(name, dpi=None, figure=None):
"""
Saves current figure as a png in the home directory
    :param name: filename, including or excluding directory and/or extension
:param dpi: image resolution, higher means larger image size, default=matplotlib default
:param figure: figure number, default = plt.gcf()
:return: None
E.G.
---select figure to save by clicking on it---
saveplot('test')
E.G.
saveplot('c:\somedir\apicture.jpg', dpi=600, figure=3)
"""
if type(name) is int:
name = str(name)
if figure is None:
gcf = plt.gcf()
else:
gcf = plt.figure(figure)
dir = os.path.dirname(name)
    file, ext = os.path.splitext(os.path.basename(name))
if len(dir) == 0:
dir = os.path.expanduser('~')
if len(ext) == 0:
ext = '.png'
savefile = os.path.join(dir, file+ext)
gcf.savefig(savefile, dpi=dpi)
    print('Saved Figure {} as {}'.format(gcf.number, savefile))

def _filename_to_title(filename, split_char="_"):
"""Convert a file path into a more readable title."""
filename = Path(filename).with_suffix("").name
filename_parts = filename.split(split_char)
try:
# If first part of the filename is a number for ordering, remove it
int(filename_parts[0])
if len(filename_parts) > 1:
filename_parts = filename_parts[1:]
except Exception:
pass
title = " ".join(ii.capitalize() for ii in filename_parts)
    return title

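# For example:
print(_filename_to_title("01_getting_started.md"))  # Getting Started
print(_filename_to_title("changelog.md"))           # Changelog
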
def basis_function_contributions(universe, mo, mocoefs='coef',
tol=0.01, ao=None, frame=0):
"""
Provided a universe with momatrix and basis_set_order attributes,
return the major basis function contributions of a particular
molecular orbital.
.. code-block:: python
# display the 16th orbital coefficients > abs(0.15)
basis_function_contributions(uni, 15, tol=0.15) # 0-based indexing!
Args:
universe (class:`exatomic.core.universe.Universe`): a universe
mo (int): molecular orbital index
mocoefs (str): column of interest in universe.momatrix
tol (float): minimum value of coefficient by which to filter
frame (int): frame of the universe (default is zero)
Returns:
joined (pd.DataFrame): a join of momatrix and basis_set_order
"""
small = universe.momatrix.contributions(mo, tol=tol, mocoefs=mocoefs, frame=frame)
chis = small['chi'].values
coefs = small[mocoefs]
coefs.index = chis
joined = pd.concat([universe.basis_set_order.ix[chis], coefs], axis=1)
if ao is None:
return joined
else:
        raise NotImplementedError("not clever enough for that.")

def main(args):
"""
    Deploys a ModeredditCore based on the arguments that have been given through the
command line interface.
Raises:
InvalidColumnException: When a column provided in the options is invalid
PathNotValidException: When path provided does not exist or not valid
"""
modereddit = ModeredditCore(
model=args.model
)
    modereddit.start_streaming_server()

def test_get_confusion_matrix():
"""
Tests :func:`fatf.utils.metrics.tools.get_confusion_matrix` function.
"""
# [[1, 1, 1],
# [1, 2, 1],
# [1, 1, 1]]
ground_truth = np.array(['a', 'b', 'b', 'b', 'a', 'a', 'b', 'c', 'c', 'c'])
predictions = np.array(['b', 'a', 'b', 'c', 'a', 'c', 'b', 'a', 'c', 'b'])
# [[3, 11],
# [7, 5 ]]
ground_truth_bin = np.array([
'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'a', 'a', 'a', 'a', 'a', 'a',
'a', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'
])
predictions_bin = np.array([
'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b',
'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'
])
cmx = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
cmx_bin = np.array([[3, 11], [7, 5]])
cmx_bb = np.array([[1, 1, 0, 1], [1, 2, 0, 1], [0, 0, 0, 0], [1, 1, 0, 1]])
# Default labeling
cma = fumt.get_confusion_matrix(ground_truth, predictions)
assert np.array_equal(cmx, cma)
cma = fumt.get_confusion_matrix(ground_truth_bin, predictions_bin)
assert np.array_equal(cmx_bin, cma)
# Custom non-existing labeling
with pytest.warns(UserWarning) as w:
cma = fumt.get_confusion_matrix(ground_truth, predictions,
['a', 'b', 'bb', 'c'])
assert len(w) == 1
assert str(w[0].message) == USER_WARNING.format("{'bb'}")
    assert np.array_equal(cmx_bb, cma)

def test_salesforce_switch_from_query_to_subscription(sdc_builder, sdc_executor, salesforce, subscription_type, api):
"""Start pipeline, write data using Salesforce client, read existing data via query,
check if Salesforce origin reads data via wiretap, write more data, check that Salesforce
origin reads it via Push Topic/CDC.
The pipeline looks like:
salesforce_origin >> wiretap
Args:
sdc_builder (:py:class:`streamsets.testframework.Platform`): Platform instance
sdc_executor (:py:class:`streamsets.sdk.DataCollector`): Data Collector executor instance
salesforce (:py:class:`testframework.environments.SalesforceInstance`): Salesforce environment
subscription_type (:obj:`str`): Type of subscription: 'PUSH_TOPIC' or 'CDC'
api (:obj:`str`): API to test: 'soap' or 'bulk'
"""
client = salesforce.client
pipeline = None
subscription_id = None
test_data = TEST_DATA['DATA_TO_INSERT']
inserted_ids = get_ids(client.bulk.Contact.insert(test_data), 'id')
logger.info('Created Contacts using Salesforce client with id(s) as %s', inserted_ids)
try:
pipeline_builder = sdc_builder.get_pipeline_builder()
if subscription_type == PUSH_TOPIC:
# note test_data 'LastName' is random unique data same across all records
subscription_id, push_topic_name = create_push_topic(client, test_data[0]['LastName'])
else:
if Version(sdc_builder.version) < Version('3.7.0'):
pytest.skip('CDC Feature requires minimum SDC version 3.7.0')
query = ("SELECT Id, FirstName, LastName, Email, LeadSource FROM Contact "
"WHERE Id > '000000000000000' AND "
f"Email LIKE \'xtest%\' and LastName = '{TEST_DATA['STR_15_RANDOM']}'"
" ORDER BY Id")
salesforce_origin = pipeline_builder.add_stage('Salesforce', type='origin')
salesforce_origin.set_attributes(query_existing_data=True,
subscribe_for_notifications=True,
use_bulk_api=(api == 'bulk'),
soql_query=query,
subscription_type=subscription_type)
if subscription_type == PUSH_TOPIC:
salesforce_origin.set_attributes(push_topic=push_topic_name)
else:
salesforce_origin.set_attributes(change_data_capture_object=CONTACT)
wiretap = pipeline_builder.add_wiretap()
salesforce_origin >> wiretap.destination
pipeline = pipeline_builder.build().configure_for_environment(salesforce)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_status('RUNNING')
time.sleep(10) # Give the pipeline time to connect to the Streaming API
sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', len(test_data))
if subscription_type == PUSH_TOPIC:
verify_wiretap_data(wiretap, test_data)
else:
# cannot verify CDC at this point as we not replaying all events prior to pipeline start
pass
# Note, from Salesforce docs: "Updates performed by the Bulk API won’t generate notifications, since such
# updates could flood a channel."
# REST API in Simple Salesforce can only create one record at a time, so just update one Contact
test_data[0]['FirstName'] = 'Updated FirstName'
contact_id = inserted_ids[0]['Id']
client.Contact.update(contact_id, test_data[0])
logger.info('Updated a Contact using Salesforce client with id as %s', contact_id)
logger.info('Capturing second batch of data ...')
wiretap.reset()
sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', len(test_data) + 1)
if subscription_type == PUSH_TOPIC:
verify_wiretap_data(wiretap, [test_data[0]])
else:
change_records = get_cdc_wiretap_records(wiretap, [contact_id])
assert change_records
change_record = change_records[0]
assert change_record.header.values['salesforce.cdc.changeType'] == 'UPDATE'
assert change_record.field['Name']['FirstName'] == test_data[0]['FirstName']
finally:
clean_up(sdc_executor, pipeline, client, inserted_ids)
if subscription_id and subscription_type == PUSH_TOPIC:
logger.info('Deleting PushTopic with id %s ...', subscription_id)
            client.PushTopic.delete(subscription_id)

def bspline_basis(d, knots, n, x, close=True):
"""The `n`-th B-spline at `x` of degree `d` with knots.
B-Splines are piecewise polynomials of degree `d` [1]_. They are defined on
a set of knots, which is a sequence of integers or floats.
The 0th degree splines have a value of one on a single interval:
>>> from sympy import bspline_basis
>>> from sympy.abc import x
>>> d = 0
>>> knots = range(5)
>>> bspline_basis(d, knots, 0, x)
Piecewise((1, And(x <= 1, x >= 0)), (0, True))
For a given ``(d, knots)`` there are ``len(knots)-d-1`` B-splines defined, that
are indexed by ``n`` (starting at 0).
Here is an example of a cubic B-spline:
>>> bspline_basis(3, range(5), 0, x)
Piecewise((x**3/6, And(x < 1, x >= 0)),
(-x**3/2 + 2*x**2 - 2*x + 2/3, And(x < 2, x >= 1)),
(x**3/2 - 4*x**2 + 10*x - 22/3, And(x < 3, x >= 2)),
(-x**3/6 + 2*x**2 - 8*x + 32/3, And(x <= 4, x >= 3)),
(0, True))
By repeating knot points, you can introduce discontinuities in the
B-splines and their derivatives:
>>> d = 1
>>> knots = [0,0,2,3,4]
>>> bspline_basis(d, knots, 0, x)
Piecewise((-x/2 + 1, And(x <= 2, x >= 0)), (0, True))
It is quite time consuming to construct and evaluate B-splines. If you
need to evaluate a B-splines many times, it is best to lambdify them
first:
>>> from sympy import lambdify
>>> d = 3
>>> knots = range(10)
>>> b0 = bspline_basis(d, knots, 0, x)
>>> f = lambdify(x, b0)
>>> y = f(0.5)
See Also
========
bsplines_basis_set
References
==========
.. [1] http://en.wikipedia.org/wiki/B-spline
"""
knots = [sympify(k) for k in knots]
d = int(d)
n = int(n)
n_knots = len(knots)
n_intervals = n_knots - 1
if n + d + 1 > n_intervals:
raise ValueError('n + d + 1 must not exceed len(knots) - 1')
if d == 0:
result = Piecewise(
(S.One, Interval(knots[n], knots[n + 1], False,
not close).contains(x)),
(0, True)
)
elif d > 0:
denom = knots[n + d + 1] - knots[n + 1]
if denom != S.Zero:
B = (knots[n + d + 1] - x)/denom
b2 = bspline_basis(d - 1, knots, n + 1, x, close)
else:
b2 = B = S.Zero
denom = knots[n + d] - knots[n]
if denom != S.Zero:
A = (x - knots[n])/denom
b1 = bspline_basis(
d - 1, knots, n, x, close and (B == S.Zero or b2 == S.Zero))
else:
b1 = A = S.Zero
result = _add_splines(A, b1, B, b2)
else:
        raise ValueError('degree must be non-negative: %r' % d)
    return result

def is_versioned(obj):
"""
Check if a given object is versioned by inspecting some of its attributes.
"""
# before any heuristic, newer versions of RGW will tell if an obj is
# versioned so try that first
if hasattr(obj, 'versioned'):
return obj.versioned
if not hasattr(obj, 'VersionedEpoch'):
# overly paranoid here, an object that is not versioned should *never*
# have a `VersionedEpoch` attribute
if getattr(obj, 'version_id', None):
if obj.version_id is None:
return False
return True # probably will never get here
return False
    return True

def main():
"""Entry point for deployment"""
main_args = _parse_args()
# Load repo default config
default_demo_ini = f"{REPO_DIR}/demo-config.ini"
    ini_dict = _load_configuration(default_demo_ini)
# Load developer overrides
override_dev_ini = f"{REPO_DIR}/.dev-config.ini"
override_ini_dict = _load_configuration(override_dev_ini)
for key, val in override_ini_dict.get('deployment', {}).items():
if val:
ini_dict['deployment'][key] = val
# Main args overridden by deployment conf dict
for key, val in ini_dict['deployment'].items():
if hasattr(main_args, key):
if val:
setattr(main_args, key, val)
else:
setattr(main_args, key, val)
assembled_template, save_path, template_name = _get_cloud_config_yaml(main_args)
if main_args.diff_configs:
sys.exit(0)
if not assembled_template or not template_name:
print('# Failure: Could not determine configuration type')
sys.exit(1)
if save_path:
_write_config_to_file(assembled_template, save_path, template_name)
sys.exit(0)
# Deploy Frontend, Demo, es elect cluster, or es wait data nodes
indexing = False if main_args.no_indexing or template_name == 'app' else True
print(f'\nDeploying {template_name} with indexing={indexing}')
print("$ {}".format(' '.join(sys.argv)))
instances_tag_data, is_tag, is_branch = _get_instances_tag_data(main_args, template_name)
if instances_tag_data is None:
print('Failure: No instances_tag_data')
sys.exit(1)
if not is_tag and not is_branch:
print('Failure: Not a tag or branch')
sys.exit(1)
base_branch_info = _get_base_branch_info()
print("Base branch:", base_branch_info)
run_args = _get_run_args(main_args, instances_tag_data, assembled_template, is_tag=is_tag)
    # run_args has the assembled_template filled with run variables in 'user_data' key
bdm = _get_bdm(main_args)
if main_args.dry_run:
print(f'\nDry Run')
print(f'run_args dict keys: {run_args.keys()}')
print(f'\nRun Variables. In /etc/environment on instance')
for line in run_args['user_data'].split('\n'):
line = line.strip()
if line[:5] == 'ENCD_':
print(line)
print('\ninstances_tag_data', instances_tag_data)
print('\nis_tag:', is_tag, ', is_branch:', is_branch)
print('\nInstance Tags:')
tags, tags_dict = _tag_ec2_instance(
None, instances_tag_data,
(main_args.es_wait or main_args.es_elect),
main_args.cluster_name,
role=main_args.role,
profile_name=main_args.profile_name,
dry_run=True,
arm_arch=main_args.arm_image_id,
image_id=main_args.image_id,
)
for key, val in tags_dict.items():
print(f"{key:28}:'{val}'")
print('Dry Run')
sys.exit(0)
# AWS - Below
print('Create instance and wait for running state')
ec2_client = _get_ec2_client(main_args, instances_tag_data)
if ec2_client is None:
sys.exit(20)
# Create aws demo instance or frontend instance
# OR instances for es_wait nodes, es_elect nodes depending on count
shut_down_behavior = 'terminate'
if main_args.cluster_name and template_name == 'app':
shut_down_behavior = 'stop'
instances = ec2_client.create_instances(
ImageId=main_args.image_id,
MinCount=run_args['count'],
MaxCount=run_args['count'],
InstanceType=main_args.instance_type,
SecurityGroups=run_args['security_groups'],
UserData=run_args['user_data'],
BlockDeviceMappings=bdm,
InstanceInitiatedShutdownBehavior=shut_down_behavior,
IamInstanceProfile={
"Name": run_args['iam_role'],
},
Placement={
'AvailabilityZone': main_args.availability_zone,
},
KeyName=run_args['key-pair-name'],
)
instances_info = _wait_and_tag_instances(
main_args,
run_args,
instances_tag_data,
instances,
main_args.image_id,
)
# Create aws es_wait frontend instance
if main_args.es_wait and run_args.get('master_user_data'):
instances = ec2_client.create_instances(
ImageId=main_args.image_id,
MinCount=1,
MaxCount=1,
InstanceType=main_args.eshead_instance_type,
SecurityGroups=['ssh-http-https'],
UserData=run_args['master_user_data'],
BlockDeviceMappings=bdm,
InstanceInitiatedShutdownBehavior='terminate',
IamInstanceProfile={
"Name": main_args.iam_role,
},
Placement={
'AvailabilityZone': main_args.availability_zone,
},
KeyName=run_args['key-pair-name'],
)
instances_info.update(
_wait_and_tag_instances(
main_args,
run_args,
instances_tag_data,
instances,
main_args.image_id,
cluster_master=True,
)
)
# Displays deployment output
print('')
tail_cmd = " 'tail -f /var/log/cloud-init-output.log'"
helper_vars = []
if 'demo' in instances_info:
instance_info = instances_info['demo']
print('Deploying Demo({}): {}'.format(
instance_info['private_ip'],
instance_info['url']
))
print(" ssh ubuntu@{}".format(instance_info['instance_id_domain']))
print("ssh and tail:\n ssh ubuntu@{}{}".format(instance_info['public_dns'], tail_cmd))
elif 'cluster_master' in instances_info and main_args.es_wait:
instance_info = instances_info['cluster_master']
print('Deploying Head ES Node({}): {}'.format(
instance_info['private_ip'],
instance_info['name']
))
print(" ssh ubuntu@{}".format(instance_info['instance_id_domain']))
print('\nRun the following command to view es head deployment log.')
print("ssh ubuntu@{}{}".format(instance_info['public_dns'], tail_cmd))
print('')
helper_vars.append("datam='{}'".format(instance_info['instance_id']))
for index in range(main_args.cluster_size):
key_name = 'cluster_node_{}'.format(index)
node_info = instances_info[key_name]
helper_vars.append("data{}='{}' # {}".format(index, node_info['instance_id'], key_name))
if index == 0:
if main_args.build_ami and main_args.es_wait:
print(
'After it builds, create the ami: '
"python ./cloud-config/create-ami.py {} es-wait-node {} --profile-name {}".format(
instances_tag_data['username'],
node_info['instance_id'],
main_args.profile_name,
)
)
print('Run the following command to view this es node deployment log.')
print("ssh ubuntu@{}{}".format(node_info['public_dns'], tail_cmd))
else:
print("ES node{} ssh:\n ssh ubuntu@{}".format(index, node_info['public_dns']))
elif 'frontend' in instances_info:
instance_info = instances_info['frontend']
print('Deploying Frontend({}): {}'.format(
instance_info['private_ip'],
instance_info['url'],
))
print(" ssh ubuntu@{}".format(instance_info['instance_id_domain']))
print('\n\nRun the following command to view the deployment log.')
print("ssh ubuntu@{}{}".format(instance_info['public_dns'], tail_cmd))
helper_vars.append("frontend='{}'".format(instance_info['instance_id']))
else:
print('Warning: Unknown instance info')
print(instances_info)
if main_args.role == 'candidate':
print('')
# helps vars for release and building amis
for helper_var in helper_vars:
print(helper_var)
    print('Done')

def default_error_reporter(title, message):
"""By default, error messages are just logged"""
log.error("error: %s" % title)
log.error("details:\n%s" % message) | 29,529 |
def preprocess_and_suggest_hyperparams(
task,
X,
y,
estimator_or_predictor,
location=None,
):
"""Preprocess the data and suggest hyperparameters.
Example:
```python
hyperparams, estimator_class, X, y, feature_transformer, label_transformer = \
preprocess_and_suggest_hyperparams("classification", X_train, y_train, "xgb_limitdepth")
model = estimator_class(**hyperparams) # estimator_class is XGBClassifier
model.fit(X, y)
X_test = feature_transformer.transform(X_test)
y_pred = label_transformer.inverse_transform(pd.Series(model.predict(X_test).astype(int)))
```
Args:
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast', 'rank',
'seq-classification', 'seq-regression'.
X: A dataframe of training data in shape n*m.
For 'ts_forecast' task, the first column of X_train
must be the timestamp column (datetime type). Other
columns in the dataframe are assumed to be exogenous
variables (categorical or numeric).
y: A series of labels in shape n*1.
estimator_or_predictor: A str of the learner name or a dict of the learned config predictor.
"choose_xgb" means choosing between xgb_limitdepth and xgboost.
If a dict, it contains:
- "version": a str of the version number.
- "preprocessing": a dictionary containing:
* "center": a list of meta feature value offsets for normalization.
* "scale": a list of meta feature scales to normalize each dimension.
- "neighbors": a list of dictionaries. Each dictionary contains:
* "features": a list of the normalized meta features for a neighbor.
* "choice": a integer of the configuration id in the portfolio.
- "portfolio": a list of dictionaries, each corresponding to a configuration:
* "class": a str of the learner name.
* "hyperparameters": a dict of the config. They key "FLAML_sample_size" will be ignored.
location: (Optional) A str of the location containing mined portfolio file.
Only valid when the portfolio is a str, by default the location is flaml/default.
Returns:
hyperparams: A dict of the hyperparameter configurations.
        estimator_class: A class of the underlying estimator, e.g., lightgbm.LGBMClassifier.
X: the preprocessed X.
y: the preprocessed y.
feature_transformer: a data transformer that can be applied to X_test.
label_transformer: a label transformer that can be applied to y_test.
"""
dt = DataTransformer()
X, y = dt.fit_transform(X, y, task)
if "choose_xgb" == estimator_or_predictor:
# choose between xgb_limitdepth and xgboost
estimator_or_predictor = suggest_learner(
task,
X,
y,
estimator_list=["xgb_limitdepth", "xgboost"],
location=location,
)
    config = suggest_config(task, X, y, estimator_or_predictor, location=location, k=1)[0]
estimator = config["class"]
model_class = get_estimator_class(task, estimator)
hyperparams = config["hyperparameters"]
model = model_class(task=task, **hyperparams)
if model.estimator_class is None:
return hyperparams, model_class, X, y, None, None
else:
estimator_class = model.estimator_class
X = model._preprocess(X)
hyperparams = hyperparams and model.params
class AutoMLTransformer:
def transform(self, X):
return model._preprocess(dt.transform(X))
transformer = AutoMLTransformer()
    return hyperparams, estimator_class, X, y, transformer, dt.label_transformer

def passphrase_from_private_key(private_key):
"""Return passphrase from provided private key."""
    return mnemonic.from_private_key(private_key)

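# Hedged usage sketch, assuming `mnemonic` here comes from the Algorand algosdk
# package:
from algosdk import account

private_key, address = account.generate_account()
print(passphrase_from_private_key(private_key))  # a 25-word mnemonic
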
def merge_on_empty_fields(base, tomerge):
"""Utility to quickly fill empty or falsy field of $base with fields
of $tomerge
"""
has_merged_anything = False
for key in tomerge:
if not base.get(key):
base[key] = tomerge.get(key)
has_merged_anything = True
    return has_merged_anything

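# For example, only falsy fields of `base` are overwritten:
base = {"title": "", "year": 1999}
merge_on_empty_fields(base, {"title": "Metadata", "year": 2001})  # returns True
print(base)  # {'title': 'Metadata', 'year': 1999}
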
def clear_rows(grid, locked):
"""Deletes the row, if that row is filled."""
increment = 0
for i in range(len(grid) - 1, -1, -1):
row = grid[i]
if (0, 0, 0) not in row:
increment += 1
index = i
for j in range(len(row)):
try:
del locked[(j, i)]
                except KeyError:
continue
if increment > 0:
for key in sorted(list(locked), key=lambda x: x[1])[::-1]:
x, y = key
if y < index:
newKey = (x, y + increment)
locked[newKey] = locked.pop(key)
    return increment * 1.5

def test_photoslibrary_folder_exception_1(photoslib):
""" test exceptions in folder() """
import photoscript
with pytest.raises(ValueError):
        folder = photoslib.folder()

def get_topo(topo_fname, remote_directory, force=None):
"""
Download a topo file from the web, provided the file does not
already exist locally.
remote_directory should be a URL. For GeoClaw data it may be a
subdirectory of http://kingkong.amath.washington.edu/topo/
See that website for a list of archived topo datasets.
If force==False then prompt the user to make sure it's ok to download,
with option to first get small file of metadata.
If force==None then check for environment variable CLAW_TOPO_DOWNLOAD
and if this exists use its value. This is useful for the script
python/run_examples.py that runs all examples so it won't stop to prompt.
"""
import urllib
if force is None:
CTD = os.environ.get('CLAW_TOPO_DOWNLOAD', None)
force = (CTD in [True, 'True'])
print 'force = ',force
if os.path.exists(topo_fname):
print "*** Not downloading topo file (already exists): %s " % topo_fname
else:
remote_fname = topo_fname
local_fname = topo_fname
remote_fname_txt = remote_fname + '.txt'
local_fname_txt = local_fname + '.txt'
print "Require remote file ", remote_fname
print " from ", remote_directory
if not force:
ans=raw_input(" Ok to download topo file? \n" +\
" Type y[es], n[o] or ? to first retrieve and print metadata ")
if ans.lower() not in ['y','yes','?']:
print "*** Aborting! Missing: ", local_fname
return
if ans=="?":
try:
print "Retrieving remote file ", remote_fname_txt
print " from ", remote_directory
url = os.path.join(remote_directory, remote_fname_txt)
urllib.urlretrieve(url, local_fname_txt)
os.system("cat %s" % local_fname_txt)
except:
print "*** Error retrieving metadata file!"
ans=raw_input(" Ok to download topo file? ")
if ans.lower() not in ['y','yes','?']:
print "*** Aborting! Missing: ", local_fname
return
if not os.path.exists(local_fname_txt):
try:
print "Retrieving metadata file ", remote_fname_txt
print " from ", remote_directory
url = os.path.join(remote_directory, remote_fname_txt)
urllib.urlretrieve(url, local_fname_txt)
except:
print "*** Error retrieving metadata file!"
try:
print "Retrieving topo file ", remote_fname
print " from ", remote_directory
url = os.path.join(remote_directory, remote_fname)
urllib.urlretrieve(url, local_fname)
except:
print "*** Error retrieving file! Missing: ", local_fname
raise Exception("Error from urllib.urlretrieve")
try:
firstline = open(local_fname,'r').readline()
if firstline.find('DOC') > -1:
print "*** Possible error -- check the file ", local_fname
else:
print "Saved to ", local_fname
except:
raise Exception("Error opening file %s" % local_fname) | 29,535 |
def init():
"""
App initialisation.
"""
global APP
global config_parser
global oasis_lookup
global logger
global SERVICE_BASE_URL
# Enable utf8 encoding
reload(sys)
sys.setdefaultencoding('utf-8')
# Get the Flask app
APP = Flask(__name__)
# Create config_parser.parser and load with keys server INI file
config_parser = ConfigParser()
cwd = os.path.dirname(__file__)
keys_server_ini_fp = os.path.join(cwd, 'KeysServer.ini')
if not os.path.exists(keys_server_ini_fp):
raise OasisException('No `KeysServer.ini` file found in app directory')
config_parser.read(keys_server_ini_fp)
# Check that the keys data directory exists
keys_data_path = config_parser.get('Lookup', 'KEYS_DATA_PATH') or '/var/oasis/keys_data'
if not os.path.exists(keys_data_path):
raise OasisException("Keys data directory not found: {}.".format(keys_data_path))
log_dir = config_parser.get('Default', 'LOG_DIRECTORY') or keys_data_path
log_fname = (config_parser.get('Default', 'LOG_FILE_PATH') or 'keys_server.log').split(os.path.sep)[-1]
log_fp = os.path.join(log_dir, log_fname)
config_parser.set('Default', 'LOG_FILE_PATH', log_fp)
log_level = config_parser.get('Default', 'LOG_LEVEL') or logging.INFO
max_file_size = config_parser.getint('Default', 'LOG_MAX_SIZE_IN_BYTES') or 10**7
max_backups = config_parser.getint('Default', 'LOG_BACKUP_COUNT') or 5
# Logging configuration
set_rotating_logger(log_fp, log_level, max_file_size, max_backups)
    logger = logging.getLogger(__name__)
    logger.info('\nStarting rotating log.')
logger.info("\nStarting keys service.")
logger.info('\nKeys data path: {}'.format(keys_data_path))
# Check the model version file exists
model_version_fp = os.path.join(keys_data_path, 'ModelVersion.csv')
if not os.path.exists(model_version_fp):
raise OasisException("No model version file: {}.".format(model_version_fp))
with io.open(model_version_fp, 'r', encoding='utf-8') as f:
supplier_id, model_id, model_version = f.read().strip().split(',')
logger.info("\nSupplier: {}.".format(supplier_id))
logger.info("Model ID: {}.".format(model_id))
logger.info("Model version: {}.".format(model_version))
# Set the web service base URL
SERVICE_BASE_URL = '/{}/{}/{}'.format(supplier_id, model_id, model_version)
# Check the lookup config JSON file exists in the keys data
lookup_config_fp = config_parser.get('Lookup', 'CONFIG_FILE_PATH').replace('%KEYS_DATA_PATH%', keys_data_path)
if not os.path.exists(lookup_config_fp):
raise OasisException('No lookup config file {} found in the keys data directory'.format(lookup_config_fp))
config_parser.set('Lookup', 'CONFIG_FILE_PATH', lookup_config_fp)
logger.info('\nLoading lookup config from file {}'.format(lookup_config_fp))
with io.open(lookup_config_fp, 'r+', encoding='utf-8') as f:
lookup_config = json.load(f)
lookup_config['keys_data_path'] = keys_data_path
    lookup_config['peril']['file_path'] = os.path.abspath(
        lookup_config['peril']['file_path'].replace('%%KEYS_DATA_PATH%%', keys_data_path)
    )
lookup_config['vulnerability']['file_path'] = os.path.abspath(
lookup_config['vulnerability']['file_path'].replace('%%KEYS_DATA_PATH%%', keys_data_path)
)
lookup_config['peril']['rtree_index']['filename'] = os.path.abspath(
lookup_config['peril']['rtree_index']['filename'].replace('%%KEYS_DATA_PATH%%', keys_data_path)
)
logger.info('\nLoaded lookup config: {}'.format(lookup_config))
# Instantiate the keys lookup class
    oasis_lookup = OasisLookup(config=lookup_config)

def call(cmd_args, suppress_output=False):
""" Call an arbitary command and return the exit value, stdout, and stderr as a tuple
Command can be passed in as either a string or iterable
>>> result = call('hatchery', suppress_output=True)
>>> result.exitval
0
>>> result = call(['hatchery', 'notreal'])
>>> result.exitval
1
"""
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
logger.info('executing `{}`'.format(' '.join(cmd_args)))
call_request = CallRequest(cmd_args, suppress_output=suppress_output)
call_result = call_request.run()
if call_result.exitval:
logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))
    return call_result

def provides(interface):
"""
A validator that raises a :exc:`TypeError` if the initializer is called
with an object that does not provide the requested *interface* (checks are
performed using ``interface.providedBy(value)`` (see `zope.interface
<http://docs.zope.org/zope.interface/>`_).
:param interface: The interface to check for.
:type interface: zope.interface.Interface
The :exc:`TypeError` is raised with a human readable error message, the
attribute (of type :class:`attr.Attribute`), the expected interface, and
the value it got.
"""
    return _ProvidesValidator(interface)

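# Usage sketch with attrs and zope.interface (the interface and class names are
# made up for illustration):
import attr
from zope.interface import Interface, implementer

class IEngine(Interface):
    pass

@implementer(IEngine)
class Engine(object):
    pass

@attr.s
class Car(object):
    engine = attr.ib(validator=provides(IEngine))

Car(engine=Engine())  # passes validation
Car(engine=object())  # raises TypeError
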
def v_t(r):
"""
Mean thermal velocity
"""
    return (8/np.pi)**0.5*c(r)

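# For reference: this is the Maxwell-Boltzmann mean speed written through the
# isothermal sound speed, assuming c(r) = sqrt(k_B * T(r) / (mu * m_H)) is
# defined elsewhere in the module:
#   v_t(r) = sqrt(8 * k_B * T(r) / (pi * mu * m_H)) = sqrt(8 / pi) * c(r)
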
def _state_stateful_alarm_controller(
select_state: Callable[[str], OverkizStateType]
) -> str:
"""Return the state of the device."""
if state := cast(str, select_state(OverkizState.CORE_ACTIVE_ZONES)):
# The Stateful Alarm Controller has 3 zones with the following options:
# (A, B, C, A,B, B,C, A,C, A,B,C). Since it is not possible to map this to AlarmControlPanel entity,
# only the most important zones are mapped, other zones can only be disarmed.
if state in MAP_CORE_ACTIVE_ZONES:
return MAP_CORE_ACTIVE_ZONES[state]
return STATE_ALARM_ARMED_CUSTOM_BYPASS
    return STATE_ALARM_DISARMED

def _connect_new_volume(module, array, answer=False):
"""Connect volume to host"""
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version and module.params['lun']:
try:
array.connect_host(module.params['host'],
module.params['volume'],
lun=module.params['lun'])
answer = True
except Exception:
module.fail_json(msg='LUN ID {0} invalid. Check for duplicate LUN IDs.'.format(module.params['lun']))
else:
array.connect_host(module.params['host'], module.params['volume'])
answer = True
    return answer

def discover_inductive(log_file, noise_thresholds):
"""
Discovers a petri net model from log using the Inductive Miner with specified noise threshold. The resulting model
is written into file
"""
log = xes_importer.apply(log_file)
for i in tqdm(range(len(noise_thresholds)), desc=" > Discovering Models", disable=False):
current = noise_thresholds[i]
model, im, fm = pm4py.discover_petri_net_inductive(log, current)
pnml_exporter.apply(model,
im,
str(os.path.splitext(log_file)[0])+"_n" + str(current) + ".pnml",
                            final_marking=fm)

def is_mongo_configured(accessor):
"""
works out if mongodb is configured to run with trackerdash
i.e. first time running
"""
    return accessor.verify_essential_collections_present()

def create_default_children_plugins(request, placeholder, lang, parent_plugin, children_conf):
"""
Create all default children plugins in the given ``placeholder``.
If a child have children, this function recurse.
Return all children and grandchildren (etc.) created
"""
from cms.api import add_plugin
children = list()
grandchildren = list()
for conf in children_conf:
if not permissions.has_plugin_permission(request.user, conf['plugin_type'], "add"):
continue
plugin = add_plugin(placeholder, conf['plugin_type'], lang, **conf['values'])
plugin.parent = parent_plugin
plugin.save()
if 'children' in conf:
            grandchildren += create_default_children_plugins(request, placeholder, lang, plugin, conf['children'])
plugin.notify_on_autoadd(request, conf)
children.append(plugin)
parent_plugin.notify_on_autoadd_children(request, conf, children)
    return children + grandchildren

def _split_value_equally(delta, count):
"""Splits an integer or rational into roughly equal parts."""
numer = sympy.numer(delta)
denom = sympy.denom(delta)
    return [int(math.floor((numer + i) / count)) / denom for i in range(count)]

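# Worked examples: the parts differ by at most one and sum back to `delta`.
import sympy

print(_split_value_equally(sympy.Integer(10), 3))     # [3, 3, 4]
print(_split_value_equally(sympy.Rational(1, 2), 2))  # [0, 1/2]
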
def maybe_get_docstring(node: ast.AST):
"""Get docstring from a constant expression, or return None."""
if (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.Constant)
and isinstance(node.value.value, str)
):
return node.value.value
elif (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.Str)
):
        return node.value.s

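# For example, against a parsed module:
import ast

tree = ast.parse('"""Module docstring."""\nx = 1\n')
print(maybe_get_docstring(tree.body[0]))  # Module docstring.
print(maybe_get_docstring(tree.body[1]))  # None
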
def part_work(NPCorpsList):
"""获取军团LP兑换物品及其信息
Args:
NPCorpList: NPC军团id列表
Returns:
NPCorpList: 可以兑换物品的NPC军团id列表
[123,124,125...244,245,246]
NPCorps: 可以兑换物品的NPC军团信息字典,key为id
[
'物品id': {
'info': {
"ceo_id": 3004049,
"corporation_description": "",
"corporation_name": "CBD社团",
"creator_id": 1,
"member_count": 0,
"tax_rate": 0,
"ticker": "CBDC",
"url": "None"
},
'lp_store': {
"isk_cost": 2400000,
"lp_cost": 2400,
"offer_id": 3584,
"quantity": 5000,
"required_items": [
{
"quantity": 5000,
"type_id": 234
}
],
"type_id": 23047
},
}
]
        names: dict of item info, keyed by item id
{
"23047": {
"category": "inventory_type",
"id": 23047,
"name": "加达里海军铅质轨道弹 L",
"jita": {
"all": {
"max": 30000000,
"min": 0.01,
"volume": 8102161635
},
"buy": {
"max": 14.86,
"min": 0.01,
"volume": 2893652791
},
"sell": {
"max": 30000000,
"min": 15.23,
"volume": 5208508844
}
}
}
}
"""
NPCorpsList, NPCorps, ids_list = getCorp(NPCorpsList)
if 0 == len(ids_list) or 0 == len(NPCorpsList):
return None, None, None
try:
Names = []
for i in range(0, len(ids_list), 255):
Names += getCHName(ids_list[i: min(i + 255, len(ids_list))])
except Exception as E:
logging.error(E)
return None, None, None
logging.info("get Chinese Name Successed!")
names = {}
for name in Names:
try:
name["jita"] = getValue(name["id"])
except Exception as E:
logging.error(E)
else:
names["{}".format(name["id"])] = name
logging.info("get Jita Market Successed!")
    return NPCorpsList, NPCorps, names

def ShowSchedHistory(cmd_args=None, cmd_options=None):
""" Routine to print out thread scheduling history, optionally sorted by a
column.
Usage: showschedhistory [-S on-core|off-core|last-duration] [<thread-ptr> ...]
"""
sort_column = None
if '-S' in cmd_options:
sort_column = cmd_options['-S']
if cmd_args:
most_recent_dispatch = GetSchedMostRecentDispatch(False)
print ShowThreadSchedHistory.header
if sort_column:
threads = []
for thread_ptr in cmd_args:
threads.append(kern.GetValueFromAddress(ArgumentStringToInt(thread_ptr), 'thread *'))
SortThreads(threads, sort_column)
for thread in threads:
ShowThreadSchedHistory(thread, most_recent_dispatch)
else:
for thread_ptr in cmd_args:
thread = kern.GetValueFromAddress(ArgumentStringToInt(thread_ptr), 'thread *')
ShowThreadSchedHistory(thread, most_recent_dispatch)
return
run_buckets = kern.globals.sched_run_buckets
run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')]
fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')]
share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')]
share_df_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')]
share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')]
share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')]
    sched_pri_shifts = kern.globals.sched_pri_shifts
share_fg_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')]
share_df_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')]
share_ut_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')]
share_bg_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')]
print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals)
print "FG Timeshare threads: {:d} DF Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_df_count, share_ut_count, share_bg_count)
print "Mach factor: {g.sched_mach_factor:d} Load factor: {g.sched_load_average:d} Sched tick: {g.sched_tick:d} timestamp: {g.sched_tick_last_abstime:d} interval:{g.sched_tick_interval:d}\n".format(g=kern.globals)
print "Fixed shift: {g.sched_fixed_shift:d} FG shift: {:d} DF shift: {:d} UT shift: {:d} BG shift: {:d}\n".format(share_fg_shift, share_df_shift, share_ut_shift, share_bg_shift, g=kern.globals)
print "sched_pri_decay_band_limit: {g.sched_pri_decay_band_limit:d} sched_decay_usage_age_factor: {g.sched_decay_usage_age_factor:d}\n".format(g=kern.globals)
if kern.arch == 'x86_64':
print "debugger_entry_time: {g.debugger_entry_time:d}\n".format(g=kern.globals)
most_recent_dispatch = GetSchedMostRecentDispatch(True)
print "Most recent dispatch: " + str(most_recent_dispatch)
print ShowThreadSchedHistory.header
if sort_column:
threads = [t for t in IterateQueue(kern.globals.threads, 'thread *', 'threads')]
SortThreads(threads, sort_column)
for thread in threads:
ShowThreadSchedHistory(thread, most_recent_dispatch)
else:
for thread in IterateQueue(kern.globals.threads, 'thread *', 'threads'):
            ShowThreadSchedHistory(thread, most_recent_dispatch)

def read_metadata(image_dir_path):
"""Read image metadata from an image directory."""
return jsons.load_dataobject(
ImageMetadata, _get_metadata_path(image_dir_path)
    )

def subtract(list_1, list_2):
"""Subtracts list_2 from list_1 even if they are different lengths.
Length of the returned list will be the length of the shortest list supplied.
Index 0 is treated as the oldest, and the older list items are truncated.
Args:
list_1 (list of float): List to be subtracted from
list_2 (list of float): List to subtract
Returns:
list of float: result of list_1 - list_2
"""
offset = len(list_1) - len(list_2)
    return list(np.array(list_1[offset:]) - np.array(list_2))

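# For example, with lists of different lengths the older leading entry of
# list_1 is dropped before subtracting:
print(subtract([10.0, 2.0, 3.0, 4.0], [1.0, 1.0, 1.0]))  # [1.0, 2.0, 3.0]
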
def get_available_time_slots() -> list:
"""
An application is ready for scheduling when all the payment rules are satisfied plus:
- the application has been paid
- the window to schedule the review has not elapsed
"""
return [
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.clean_prohibition_number, "fail": []},
{"try": middleware.validate_prohibition_number, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.user_submitted_last_name_matches_vips, "fail": []},
{"try": middleware.application_has_been_saved_to_vips, "fail": []},
{"try": middleware.get_payment_status, "fail": []},
{"try": middleware.received_valid_payment_status, "fail": []},
{"try": middleware.paid_not_more_than_24hrs_ago, "fail": []},
{"try": middleware.application_has_been_paid, "fail": []},
{"try": middleware.review_has_not_been_scheduled, "fail": []},
{"try": middleware.get_application_details, "fail": []},
{"try": middleware.valid_application_received_from_vips, "fail": []},
{"try": middleware.get_invoice_details, "fail": []},
{"try": middleware.calculate_schedule_window, "fail": []},
{"try": middleware.query_review_times_available, "fail": []},
{"try": middleware.does_applicant_have_enough_review_options, "fail": [
{"try": middleware.query_for_additional_review_times, "fail": []},
{"try": middleware.does_applicant_have_enough_review_options, "fail": [
{"try": rsi_email.insufficient_reviews_available, "fail": []},
]}
]},
    ]

def test_stupid_shaped_hole(sink_grid4):
"""Tests inclined fill into a surface with a deliberately awkward shape."""
fr = FlowAccumulator(sink_grid4, flow_director="D8")
hf = SinkFiller(sink_grid4, apply_slope=True)
hf.fill_pits()
hole1 = np.array(
[
4.00007692,
4.00015385,
4.00023077,
4.00030769,
4.00038462,
4.00046154,
4.00053846,
4.00061538,
4.00069231,
4.00076923,
4.00084615,
]
)
hole2 = np.array([7.4, 7.2, 7.6])
assert_array_almost_equal(
sink_grid4.at_node["topographic__elevation"][sink_grid4.lake1], hole1
)
assert_array_almost_equal(
sink_grid4.at_node["topographic__elevation"][sink_grid4.lake2], hole2
)
fr.run_one_step()
assert sink_grid4.at_node["flow__sink_flag"][sink_grid4.core_nodes].sum() == 0 | 29,552 |
def configure_bgp_neighbor(
device,
bgp_as,
neighbor_as,
neighbor_address,
source_interface=None,
ebgp=None,
):
""" Configures bgp neighbor on bgp router
Args:
device('obj'): device to configure on
bgp_as('str'): bgp_as to configure
neighbor_as('str'): neighbor_as to configure
neighbor_address('str'): address of neighbor
        source_interface('str'): used to configure update-source on neighbor
        ebgp('int'): used to configure ebgp-multihop on neighbor
Returns:
N/A
Raises:
SubCommandFailure: Failed executing command
"""
log_msg = (
"Configuring BGP on {hostname}\n"
" -local AS number: {bgp_as}\n"
" -remote AS number: {neighbor_as}\n"
" -neighbor: {neighbor_address}".format(
hostname=device.hostname,
bgp_as=bgp_as,
neighbor_as=neighbor_as,
neighbor_address=neighbor_address,
)
)
cmd = (
"router bgp {bgp_as}\n"
"neighbor {neighbor_address} remote-as {neighbor_as}".format(
bgp_as=bgp_as,
neighbor_as=neighbor_as,
neighbor_address=neighbor_address,
)
)
if source_interface:
log_msg += "\n -update-source: {}".format(source_interface)
cmd += "\nneighbor {neighbor_address} update-source {source_interface}".format(
neighbor_address=neighbor_address,
source_interface=source_interface,
)
if ebgp:
log_msg += "\n -ebgp-multihop: {}".format(ebgp)
cmd += "\nneighbor {neighbor_address} ebgp-multihop {ebgp}".format(
neighbor_address=neighbor_address, ebgp=ebgp
)
log.info(log_msg)
try:
device.configure(cmd)
except SubCommandFailure:
raise SubCommandFailure(
"Coult not configure bgp neighbor {neighbor_as} "
"on router {bgp_as}".format(neighbor_as=neighbor_as, bgp_as=bgp_as)
) | 29,553 |
def library_detail(request, lib_id):
"""
Display information about all the flowcells a library has been run on.
"""
lib = get_object_or_404(Library, id=lib_id)
flowcell_list = []
flowcell_run_results = {} # aka flowcells we're looking at
for lane in lib.lane_set.all():
fc = lane.flowcell
flowcell_id, id = parse_flowcell_id(fc.flowcell_id)
if flowcell_id not in flowcell_run_results:
flowcell_run_results[flowcell_id] = get_flowcell_result_dict(flowcell_id)
flowcell_list.append((fc.flowcell_id, lane.lane_number))
flowcell_list.sort()
lane_summary_list = []
eland_results = []
for fc, lane_number in flowcell_list:
lane_summary, err_list = _summary_stats(fc, lane_number, lib_id)
lane_summary_list.extend(lane_summary)
eland_results.extend(_make_eland_results(fc, lane_number, flowcell_run_results))
context = {
'page_name': 'Library Details',
'lib': lib,
'eland_results': eland_results,
'lane_summary_list': lane_summary_list,
}
context.update(SAMPLES_CONTEXT_DEFAULTS)
return render(request, 'samples/library_detail.html', context) | 29,554 |
def _unlink_device(device, nbd):
"""Unlink image from device using loopback or nbd"""
if nbd:
utils.execute('qemu-nbd', '-d', device, run_as_root=True)
_free_device(device)
else:
utils.execute('losetup', '--detach', device, run_as_root=True) | 29,555 |
def test_subst_multi_0_2_0_default_val_unknown_val():
""" Test that substitutions are correct """
# 0 albums, 2 keywords, 0 persons, default vals provided, unknown val in template
import osxphotos
photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB_15_7)
    # zero albums, two keywords, zero persons
photo = photosdb.photos(uuid=[UUID_DICT["0_2_0"]])[0]
template = (
"{created.year}/{album,NOALBUM}/{keyword,NOKEYWORD}/{person}/{foo}/{{baz}}"
)
expected = [
"2019/NOALBUM/wedding/_/{foo}/{baz}",
"2019/NOALBUM/flowers/_/{foo}/{baz}",
]
rendered, unknown = photo.render_template(template)
assert sorted(rendered) == sorted(expected)
assert unknown == ["foo"] | 29,556 |
def parse_directory(path, rgb_prefix='img_', flow_x_prefix='flow_x_', flow_y_prefix='flow_y_'):
"""
Parse directories holding extracted frames from standard benchmarks
"""
print('parse frames under folder {}'.format(path))
frame_folders = glob.glob(os.path.join(path, '*'))
def count_files(directory, prefix_list):
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x+'*')) for x in prefix_list]
return cnt_list
# check RGB
rgb_counts = {}
flow_counts = {}
dir_dict = {}
for i,f in enumerate(frame_folders):
all_cnt = count_files(f, (rgb_prefix, flow_x_prefix, flow_y_prefix))
k = f.split('/')[-1]
rgb_counts[k] = all_cnt[0]
dir_dict[k] = f
x_cnt = all_cnt[1]
y_cnt = all_cnt[2]
if x_cnt != y_cnt:
raise ValueError('x and y direction have different number of flow images. video: '+f)
flow_counts[k] = x_cnt
if i % 200 == 0:
print('{} videos parsed'.format(i))
print('frame folder analysis done')
return dir_dict, rgb_counts, flow_counts | 29,557 |
def InjectIntoProcess(pid, env, capturefile, opts, waitForExit): # real signature unknown; restored from __doc__
"""
InjectIntoProcess(pid, env, capturefile, opts, waitForExit)
Where supported by operating system and permissions, inject into a running process.
:param int pid: The Process ID (PID) to inject into.
:param list env: Any :class:`EnvironmentModification` that should be made when running the program.
:param str capturefile: The capture file path template, or blank to use a default location.
:param CaptureOptions opts: The capture options to use when injecting into the program.
:param bool waitForExit: If ``True`` this function will block until the process exits.
:return: The :class:`ExecuteResult` indicating both the status of the operation (success or failure)
and any reason for failure, or else the ident where the new application is listening for target
control if everything succeeded.
:rtype: ExecuteResult
"""
pass | 29,558 |
def traverse_bw(output: List[Variable]) -> Generator[Variable, None, None]:
"""Returns a generator implementing a :term:`backward traversal` of `var`'s transitive dependencies.
This generator guarantees that a variable is yielded after all its usages.
Note:
        If `output` lies entirely in the boundary, the generator exits without yielding any variable.
Arguments:
output: The variables whose transitive dependencies should be explored.
Yields:
All dependencies of the output variables (stopping at the boundary), each variable is yielded after all variables depending thereupon.
"""
usage_counts = _count_usages(output)
# At this stage, skip output variables also present in the dependency path of another output variable
queue = deque(filterfalse(lambda x: usage_counts[x] > 0, output))
while len(queue) > 0:
cur = queue.popleft()
for dep in cur.dependencies.values():
usage_counts[dep] -= 1
if usage_counts[dep] == 0:
queue.append(dep)
yield cur | 29,559 |
def create_effect(
effect_id: CardEffect.EffectId = CardEffect.EffectId.DMG,
target: CardLevelEffects.Target = CardLevelEffects.Target.OPPONENT,
power: int = 10,
range_: float = 5
) -> Effect:
"""
Creates effect with given data, or creates default effect dealing dmg to opponent if no was data provided.
:param effect_id:
:param target:
:param power:
:param range_:
:return: Created effect.
"""
effect_factory = EffectFactory.get_instance()
card = CardFactory()
effect_model = CardLevelEffectsFactory(
card=card,
card_effect=CardEffect.objects.get(pk=effect_id),
target=target,
power=power,
range=range_
)
return effect_factory.create(effect_model) | 29,560 |
def solver_log(logger, level=logging.ERROR):
"""Context manager to send solver output to a logger. This uses a separate
thread to log solver output while the solver is running"""
# wait 3 seconds to join thread. Should be plenty of time. In case
# something goes horribly wrong though don't want to hang. The logging
# thread is daemonic, so it will shut down with the main process even if it
# stays around for some mysterious reason while the model is running.
join_timeout = 3
tee = logger.isEnabledFor(level)
if not solver_capture():
yield SolverLogInfo(tee=tee)
else:
with capture_output() as s:
lt = IOToLogTread(s, logger=logger, level=level)
lt.start()
try:
yield SolverLogInfo(tee=tee, thread=lt)
except:
lt.stop.set()
lt.join(timeout=join_timeout)
raise
# thread should end when s is closed, but setting stop makes sure
# the last of the output gets logged before closing s
lt.stop.set()
lt.join(timeout=join_timeout) | 29,561 |
def part_two(stream: Stream, violation: int) -> int:
"""Find the sum of min & max in the sequence that sums to `violation`."""
for start in range(len(stream) - 1):
for end in range(start + 2, len(stream) + 1):
seq = stream[start:end]
seq_sum = sum(seq)
if seq_sum == violation:
return min(seq) + max(seq)
if seq_sum > violation:
break # No point in going further, since the sum can only grow
raise Exception("Solution not found!") | 29,562 |
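A short illustrative call (hypothetical values, assuming `Stream` is a list-like of ints): the contiguous slice [2, 3, 4] sums to the violation 9, so the function returns min + max.
part_two([1, 2, 3, 4, 5], 9)    # -> 6  (2 + 4)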
def getDoubleArray(plug):
"""
    Gets the double array from the supplied plug.
:type plug: om.MPlug
:rtype: om.MDoubleArray
"""
return om.MFnDoubleArrayData(plug.asMObject()).array() | 29,563 |
def test_main_stdlib_module():
"""This test makes sure that the module is really searched within the
sunpy package.
"""
with pytest.raises(ValueError):
sunpy.self_test(package='random') | 29,564 |
def main():
"""This script reads a CSV file with self-created publisher IDs and looks up the name of the publisher in a separate CSV to identify the "real" identifier."""
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option('-i', '--input-file', action='store', help='The name of the file with self-created publisher IDs')
parser.add_option('-o', '--output-file', action='store', help='The name of the file in which the self-created publisher IDs are replaced with found "real" IDs')
    parser.add_option('-l', '--lookup-file', action='store', help='The name of the file in which the actual IDs are stored')
parser.add_option('', '--warning-log', action='store', help='A log where publishers in the lookup file with several IDs are listed')
parser.add_option('', '--delimiter-input', action='store', help='The delimiter of the input file, default ","')
parser.add_option('', '--delimiter-lookup', action='store', help='The delimiter of the lookup file, default ","')
(options, args) = parser.parse_args()
#
# Check if we got all required arguments
#
if ( (not options.input_file) or (not options.output_file) or (not options.lookup_file) ):
parser.print_help()
exit(1)
delimiterInput = options.delimiter_input if options.delimiter_input else ','
delimiterLookup = options.delimiter_lookup if options.delimiter_lookup else ','
with open(options.lookup_file, 'r') as fLookup, \
open(options.input_file, 'r') as fIn, \
open(options.output_file, 'w') as fOut:
lookupReader = csv.DictReader(fLookup, delimiter=delimiterLookup)
nolangNames = dict()
dutchNames = dict()
frenchNames = dict()
for row in lookupReader:
authorityID = row['KBRID']
nolangName = utils.getNormalizedString(row['name-without-lang'])
dutchName = utils.getNormalizedString(row['name-dutch'])
frenchName = utils.getNormalizedString(row['name-french'])
addLookupRecord(authorityID, nolangName, nolangNames)
addLookupRecord(authorityID, dutchName, dutchNames)
addLookupRecord(authorityID, frenchName, frenchNames)
#warningWriter = csv.writer(warningOut, delimiter=delimiterInput)
#warningWriter.writerow(['name', 'identifiers'])
#duplicateCheck(nolangNames, warningWriter)
#duplicateCheck(dutchNames, warningWriter)
#duplicateCheck(frenchNames, warningWriter)
inputReader = csv.reader(fIn, delimiter=delimiterInput)
outputWriter = csv.writer(fOut, delimiter=delimiterInput)
toBeReplaced = set()
couldBeReplaced = set()
replaced = set()
numberRecords = 0
numberNolangReplacements = 0
numberDutchReplacements = 0
numberFrenchReplacements = 0
outputWriter.writerow(next(inputReader))
for row in inputReader:
manifestationID = row[0]
contributorID = row[1]
contributorName = row[2]
contributorRole = row[3]
uncertainty = row[4]
newContributorID = contributorID
newContributorName = contributorName
contributorNameNorm = utils.getNormalizedString(contributorName)
if len(contributorID) > 8:
# it is a self-created identifier
toBeReplaced.add(contributorID)
# print(f'### check "{contributorName}"')
if contributorNameNorm in frenchNames:
couldBeReplaced.add(contributorID)
if len(frenchNames[contributorNameNorm]) == 1:
newContributorID = next(iter(frenchNames[contributorNameNorm]))
replaced.add(contributorID)
# print(f'found in frenchNames, will add {newContributorID} instead of {contributorID}')
elif contributorNameNorm in dutchNames:
couldBeReplaced.add(contributorID)
if len(dutchNames[contributorNameNorm]) == 1:
newContributorID = next(iter(dutchNames[contributorNameNorm]))
replaced.add(contributorID)
# print(f'found in dutchNames, will add {newContributorID} instead of {contributorID}')
elif contributorNameNorm in nolangNames:
couldBeReplaced.add(contributorID)
if len(nolangNames[contributorNameNorm]) == 1:
newContributorID = next(iter(nolangNames[contributorNameNorm]))
replaced.add(contributorID)
# print(f'found in nolangNames, will add {newContributorID} instead of {contributorID}')
outputWriter.writerow([manifestationID, newContributorID, newContributorName, contributorRole, uncertainty])
numberRecords += 1
numberToBeReplaced = len(toBeReplaced)
numberReplaced = len(replaced)
numberCouldBeReplaced = len(couldBeReplaced)
numberNotReplaced = numberCouldBeReplaced - numberReplaced
print(f'Successfully replaced {numberReplaced} from {numberToBeReplaced} self-created identifiers (However, we could have replaced {numberCouldBeReplaced} but for {numberNotReplaced} several replacement candidates were possible)') | 29,565 |
def get_fb(file_name):
"""#{{{
load feature file and transform to dict
return:
dict
key_list_feat
"""
ff = open(file_name, 'r')
fb = []
delta = []
fb_matrix = numpy.zeros([1, 24])
delta_matrix = numpy.zeros([1, 24])
fbanks = {}
deltas = {}
fb_keylist = []
while(1):
line = ff.readline()
if not line:
# print 'end of file'
break
end_line = line.strip().split()[-1]
if end_line == '[':
key = line.strip().split()[0]
elif end_line == ']':
for i in range(24):
fb.append(float(line.strip().split()[i]))
for i in range(24, 48):
delta.append(float(line.strip().split()[i]))
fb_keylist.append(key)
fb_matrix = numpy.vstack((fb_matrix, fb))
fbanks[key] = fb_matrix[1:, :]
delta_matrix = numpy.vstack((delta_matrix, delta))
deltas[key] = delta_matrix[1:, :]
fb = []
delta = []
fb_matrix = numpy.zeros([1, 24])
delta_matrix = numpy.zeros([1, 24])
else:
for i in range(24):
# value.append(line.strip().split()[i])
fb.append(float(line.strip().split()[i]))
for i in range(24, 48):
delta.append(float(line.strip().split()[i]))
fb_matrix = numpy.vstack((fb_matrix, fb))
delta_matrix = numpy.vstack((delta_matrix, delta))
fb = []
delta = []
print('number of utterances in fbank: %d' % len(fbanks))
ff.close()
return fbanks, deltas, fb_keylist | 29,566 |
def make_train_input_fn(
feature_spec, labels, file_pattern, batch_size, shuffle=True):
"""Makes an input_fn for training."""
return _make_train_or_eval_input_fn(
feature_spec,
labels,
file_pattern,
batch_size,
tf.estimator.ModeKeys.TRAIN,
shuffle) | 29,567 |
def _find(xs, predicate):
"""Locate an item in a list based on a predicate function.
Args:
xs (list) : List of data
predicate (function) : Function taking a data item and returning bool
Returns:
(object|None) : The first list item that predicate returns True for or None
"""
for x in xs:
if predicate(x):
return x
return None | 29,568 |
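Two illustrative calls (hypothetical data):
_find([1, 4, 7], lambda x: x > 3)     # -> 4
_find([1, 2], lambda x: x > 10)       # -> None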
def remove_dataparallel_prefix(state_dict):
"""Removes dataparallel prefix of layer names in a checkpoint state dictionary."""
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] if k[:7] == "module." else k
new_state_dict[name] = v
return new_state_dict | 29,569 |
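A quick sketch of the expected behaviour (toy values, assuming OrderedDict is imported from collections as in the original module):
state = OrderedDict([("module.conv.weight", 1), ("fc.bias", 2)])
remove_dataparallel_prefix(state)
# -> OrderedDict([("conv.weight", 1), ("fc.bias", 2)])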
def user_upload_widget(node, on_complete=''):
"""Returns a Valum Uploader widget that uploads files based on the user's
home directory.
:param node: storage type (public or private) and path indicator, e.g.
"public:foo/bar" to have the uploaded file go in
MEDIA_ROOT/$USERNAME/foo/bar.
:param on_complete: name of Javascript function to call when an upload has
        completed, will be called with signature:
function(String id, String fileName, Object responseJSON)
"""
return _valum_widget('/yacon/browser/user_upload_file/', node,
on_complete=on_complete) | 29,570 |
def run_lsa(model, lsa_options):
"""Implements local sensitivity analysis using LSI, RSI, and parameter subset reduction.
Parameters
----------
model : Model
Object of class Model holding run information.
    lsa_options : Options
Object of class Options holding run settings.
Returns
-------
LsaResults
Object of class LsaResults holding all run results.
"""
# LSA implements the following local sensitivity analysis methods on system specified by "model" object
# 1) Jacobian
# 2) Scaled Jacobian for Relative Sensitivity Index (RSI)
# 3) Fisher Information matrix
# Required Inputs: object of class "model" and object of class "options"
# Outputs: Object of class lsa with Jacobian, RSI, and Fisher information matrix
# Calculate Jacobian
jac_raw=get_jacobian(model.eval_fcn, model.base_poi, lsa_options.x_delta,\
lsa_options.method, scale=False, y_base=model.base_qoi)
# Calculate relative sensitivity index (RSI)
jac_rsi=get_jacobian(model.eval_fcn, model.base_poi, lsa_options.x_delta,\
lsa_options.method, scale=True, y_base=model.base_qoi)
# Calculate Fisher Information Matrix from jacobian
fisher_mat=np.dot(np.transpose(jac_raw), jac_raw)
#Active Subspace Analysis
if lsa_options.run_param_subset:
reduced_model, active_set, inactive_set = get_active_subset(model, lsa_options)
#Collect Outputs and return as an lsa object
return LsaResults(jacobian=jac_raw, rsi=jac_rsi, fisher=fisher_mat,\
reduced_model=reduced_model, active_set=active_set,\
inactive_set=inactive_set)
else:
return LsaResults(jacobian=jac_raw, rsi=jac_rsi, fisher=fisher_mat) | 29,571 |
def main():
"""
For entry point
"""
parser = argparse.ArgumentParser("Convert temperature in fahrenheit to kelvin or celsius")
parser.add_argument("temperature", help="Temperature in fahrenheit")
parser.add_argument("-c", "--celsius", action="store_true", help="Convert to celsius scale")
args = parser.parse_args()
    f = float(args.temperature)
    if args.celsius:
c = fahr_to_cels(f)
print("Temperature %5.3f fahrenheit equals %5.3f celsius" % (f, c))
else:
k = fahr_to_kelv(f)
print("Temperature %5.3f fahrenheit equals %5.3f kelvin" % (f, k)) | 29,572 |
def condense_simple_conv3x3(in_channels,
out_channels,
groups):
"""
3x3 version of the CondenseNet specific simple convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
groups : int
Number of groups.
"""
return CondenseSimpleConv(
in_channels=in_channels,
out_channels=out_channels,
ksize=3,
stride=1,
pad=1,
groups=groups) | 29,573 |
def read_data_from(file_: str) -> list:
"""Read bitmasks and values from file."""
return open(file_, "r").read().splitlines() | 29,574 |
def version_for(plugin):
# (Plugin) -> Optional[str]
"""Determine the version of a plugin by its module.
:param plugin:
The loaded plugin
:type plugin:
Plugin
:returns:
version string for the module
:rtype:
str
"""
module_name = plugin.plugin.__module__
try:
module = __import__(module_name)
except ImportError:
return None
return getattr(module, "__version__", None) | 29,575 |
def delete_tasks_repeat(start, end, user=None, for_reminder=False, filters=None):
"""Delete all schedule within the Date range and specified filters"""
self = frappe.get_doc("Tasks", user)
rescheduled = []
reschedule_errors = []
schedules = frappe.get_list("Tasks",
fields=["name", "task_date", "repeat_on"],
filters=[
["task_title", "=", self.task_title],
["status", "=", "New"],
["create_recurrence_task", "=", "0"],
["task_date", "<=", self.repeat_till]
]
)
if schedules:
for d in schedules:
frappe.db.sql("""delete from `tabChild Tasks` where parent = %s and task_date = %s """, (d.name, d.task_date))
frappe.db.sql("""delete from `tabTasks` where name = %s""", (d.name))
else:
get_tasks(start, end, user=None, for_reminder=False, filters=None)
del_duplicate(start) | 29,576 |
def choose_organization():
"""Allow user to input organization id.
Returns:
str: Access target id
"""
target_id = None
while not target_id:
orgs = None
return_code, out, err = utils.run_command([
'gcloud', 'organizations', 'list', '--format=json'])
if return_code:
print(err)
else:
try:
orgs = json.loads(out)
except ValueError as verr:
print(verr)
if not orgs:
print('\nYou don\'t have access to any organizations. '
'Choose another option to enable Forseti access.')
return None
print('\nHere are the organizations you have access to:')
valid_org_ids = set()
for org in orgs:
org_id = utils.id_from_name(org['name'])
valid_org_ids.add(org_id)
print('ID=%s (description="%s")' %
(org_id, org['displayName']))
choice = raw_input('Enter the organization id where '
'you want Forseti to crawl for data: ').strip()
try:
# make sure that the choice is a valid organization id
if choice not in valid_org_ids:
print('Invalid organization id %s, try again' % choice)
return None
target_id = str(int(choice))
except ValueError:
print('Unable to parse organization id %s' % choice)
return target_id | 29,577 |
def start_server(function):
"""
Decorator.
Tries to call function, if it fails, try to (re)start inotify server.
Raise QueryFailed if something went wrong
"""
def decorated_function(self, *args):
result = None
try:
return function(self, *args)
except (OSError, socket.error), err:
autostart = self.ui.configbool('inotify', 'autostart', True)
if err[0] == errno.ECONNREFUSED:
self.ui.warn(_('inotify-client: found dead inotify server '
'socket; removing it\n'))
os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
if err[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
self.ui.debug('(starting inotify server)\n')
try:
try:
server.start(self.ui, self.dirstate, self.root,
dict(daemon=True, daemon_pipefds=''))
except server.AlreadyStartedException, inst:
# another process may have started its own
# inotify server while this one was starting.
self.ui.debug(str(inst))
except Exception, inst:
self.ui.warn(_('inotify-client: could not start inotify '
'server: %s\n') % inst)
else:
try:
return function(self, *args)
except socket.error, err:
self.ui.warn(_('inotify-client: could not talk to new '
'inotify server: %s\n') % err[-1])
elif err[0] in (errno.ECONNREFUSED, errno.ENOENT):
# silently ignore normal errors if autostart is False
self.ui.debug('(inotify server not running)\n')
else:
self.ui.warn(_('inotify-client: failed to contact inotify '
'server: %s\n') % err[-1])
self.ui.traceback()
raise QueryFailed('inotify query failed')
return decorated_function | 29,578 |
def get_subscriber_groups(publication_id, subscription_id='', full_uri=False):
"""This function identifies the subscriber groups for one or more subscriptions within a publication.
.. versionchanged:: 3.1.0
Refactored the function to be more efficient.
:param publication_id: The ID of the publication
:type publication_id: int, str
:param subscription_id: The specific subscription ID for which to return subscriber groups (Optional)
:type subscription_id: int, str
:param full_uri: Determines whether or not to return the full URI or just the Group ID (``False`` by default)
:type full_uri: bool
:returns: A dictionary mapping the subscription IDs to the respective subscriber groups
:raises: :py:exc:`khorosjx.errors.exceptions.SubscriptionNotFoundError`
"""
# Verify that the core connection has been established
verify_core_connection()
# Capture the subscriber groups for each subscription
subscriptions = get_subscription_data(publication_id)
# Filter for a specific subscription if an ID is provided
if subscription_id:
subscriptions = filter_subscriptions_by_id(subscription_id, subscriptions)
# Capture the subscriber groups
subscriber_groups = {}
for subscription in subscriptions:
if full_uri:
subscriber_groups[subscription['id']] = subscription.get('subscribers')
else:
subscribers = []
for subscriber in subscription.get('subscribers'):
subscribers.append(subscriber.split('securityGroups/')[1])
subscriber_groups[subscription['id']] = subscribers
return subscriber_groups | 29,579 |
def get_salutation_from_title(title):
"""
Described here: https://github.com/VNG-Realisatie/Haal-Centraal-BRP-bevragen/blob/v1.0.0/features/aanhef.feature#L4-L38
"""
if title in [BARON, HERTOG, JONKHEER, MARKIES, RIDDER]:
return HOOGWELGEBOREN_HEER
if title in [BARONES, HERTOGIN, JONKVROUW, MARKIEZIN]:
return HOOGWELGEBOREN_VROUWE
if title in [PRINS, PRINSES]:
return HOOGHEID
if title == GRAAF:
return HOOGGEBOREN_HEER
if title == GRAVIN:
return HOOGGEBOREN_VROUWE | 29,580 |
def hourOfDayNy(dateTime):
"""
Returns an int value of the hour of the day for a DBDateTime in the New York time zone.
The hour is on a 24 hour clock (0 - 23).
:param dateTime: (io.deephaven.db.tables.utils.DBDateTime) - The DBDateTime for which to find the hour of the day.
:return: (int) A QueryConstants.NULL_INT if the input is null, otherwise, an int value
of the hour of the day represented by the DBDateTime when interpreted in the New York
time zone.
"""
return _java_type_.hourOfDayNy(dateTime) | 29,581 |
def _extract_action_num_and_node_id(m):
"""Helper method: Extract *action_num* and *node_id* from the given regex
match. Convert *action_num* to a 0-indexed integer."""
return dict(
action_num=(int(m.group('action_num')) - 1),
node_id=m.group('node_id'),
) | 29,582 |
def get_uid_cidx(img_name):
"""
:param img_name: format output_path / f'{uid} cam{cidx} rgb.png'
"""
img_name = img_name.split("/")[-1]
assert img_name[-8:] == " rgb.png"
img_name = img_name[:-8]
import re
m = re.search(r'\d+$', img_name)
    assert m is not None
cidx = int(m.group())
img_name = img_name[:-len(str(cidx))]
assert img_name[-4:] == " cam"
uid = img_name[0:-4]
return uid, cidx | 29,583 |
def main():
"""
Configures the parser by parsing command line arguments and calling the core code.
It might also profile the run if the user chose to do so.
"""
args = configure()
# Prepare output directories
outdir, _ = make_dirs(args)
logging.info('Writing files to: %s', outdir)
print('Writing files to: %s' % outdir)
# read input
jobs = [(i, input_str.strip()) for i, input_str in enumerate(sys.stdin)]
if args.profile:
import cProfile
pr = cProfile.Profile()
pr.enable()
for job in jobs:
core(job, args, outdir)
pr.disable()
pr.dump_stats(args.profile)
else:
pool = Pool(args.cpus if args.cpus > 0 else None)
# TODO: load grammars only once
pool.map(partial(traced_core,
args=args,
outdir=outdir), jobs)
print('Check output files in: %s' % outdir) | 29,584 |
def parse_color(hex_color):
"""Parse color values"""
cval = int(hex_color, 16)
x = lambda b: ((cval >> b) & 0xff) / 255.0
return {k: x(v) for k, v in dict(r=16, g=8, b=0).iteritems()} | 29,585 |
def ls(
verbose: int,
username: str,
password: str,
label: Optional[str]
) -> None:
"""List ssh key authorized to the specified user account."""
obj = setup_shared_cmd_options(verbose, username, password)
client_builder = obj.client_builder
client = client_builder.build_client()
for v in client.get_ssh_user_keys(label):
click.echo("label: {}, uuid: {}".format(v.label, v.uuid)) | 29,586 |
def apply_4x4(RT, XYZ):
"""
RT: B x 4 x 4
XYZ: B x N x 3
"""
#RT = RT.to(XYZ.device)
B, N, _ = list(XYZ.shape)
ones = np.ones([B, N, 1])
XYZ1 = np.concatenate([XYZ, ones], 2)
    XYZ1_t = np.transpose(XYZ1, (0, 2, 1))
# this is B x 4 x N
XYZ2_t = np.matmul(RT, XYZ1_t)
    XYZ2 = np.transpose(XYZ2_t, (0, 2, 1))
XYZ2 = XYZ2[:,:,:3]
return XYZ2 | 29,587 |
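A small shape check (illustrative only, assuming numpy as np): applying the identity transform should return the input points unchanged.
RT = np.eye(4)[None]                               # (1, 4, 4) identity pose
XYZ = np.array([[[1., 2., 3.], [4., 5., 6.]]])     # (1, 2, 3)
apply_4x4(RT, XYZ)                                 # (1, 2, 3), identical to XYZ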
def smoothmax(value1, value2, hardness):
"""
A smooth maximum between two functions. Also referred to as the logsumexp() function.
Useful because it's differentiable and preserves convexity!
Great writeup by John D Cook here:
https://www.johndcook.com/soft_maximum.pdf
:param value1: Value of function 1.
:param value2: Value of function 2.
:param hardness: Hardness parameter. Higher values make this closer to max(x1, x2).
:return: Soft maximum of the two supplied values.
"""
value1 = value1 * hardness
value2 = value2 * hardness
max = np.fmax(value1, value2)
min = np.fmin(value1, value2)
out = max + np.log(1 + np.exp(min - max))
out /= hardness
return out | 29,588 |
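A worked example (assuming numpy as np): with a high hardness the soft maximum sits only marginally above the true maximum, while a low hardness blends the two values more.
smoothmax(1.0, 2.0, hardness=10)    # ~ 2.0000045, just above max(1.0, 2.0)
smoothmax(1.0, 2.0, hardness=1)     # ~ 2.31, a much softer blend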
def base_app(instance_path):
"""Flask application fixture."""
app_ = Flask('testapp', instance_path=instance_path)
app_.config.update(
SECRET_KEY='SECRET_KEY',
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite:///test.db'),
SQLALCHEMY_TRACK_MODIFICATIONS=True,
TESTING=True,
)
InvenioPIDStore(app_)
InvenioDB(app_)
InvenioPIDRelations(app_)
InvenioRecords(app_)
InvenioIndexer(app_)
InvenioSearch(app_)
Babel(app_)
return app_ | 29,589 |
def udf_con(udf_backend):
"""
Instance of Client, already connected to the db (if applies).
"""
return udf_backend.connection | 29,590 |
def InMenu(gumpid: int, text: str):
"""
Returns True if the menu title or entry titles contains the given text.
:param gumpid: ItemID / Graphic such as 0x3db.
:param text: String value - See description for usage.
"""
pass | 29,591 |
def resolve_config(*, config: Union[Path, str]) -> Optional[Path]:
"""Resolves a config to an absolute Path."""
path = config if isinstance(config, Path) else Path(config)
# Is it absolute, or relative to the CWD?
if path.exists():
return path
# Is it relative to a configuration directory?
for config_dir in get_config_dirs():
lpath = config_dir.joinpath(path)
if lpath.exists():
return lpath
for extension in EXTENSIONS:
lpath = config_dir.joinpath(f"{str(path)}.{extension}")
if lpath.exists():
return lpath
return None | 29,592 |
def test_persistent_cache_linux(mock_extensions):
"""The credential should use an unencrypted cache when encryption is unavailable and the user explicitly opts in.
This test was written when Linux was the only platform on which encryption may not be available.
"""
required_arguments = ("tenant-id", "client-id", "secret")
# the credential should prefer an encrypted cache even when the user allows an unencrypted one
ClientSecretCredential(*required_arguments, enable_persistent_cache=True, allow_unencrypted_cache=True)
assert mock_extensions.PersistedTokenCache.called_with(mock_extensions.LibsecretPersistence)
mock_extensions.PersistedTokenCache.reset_mock()
# (when LibsecretPersistence's dependencies aren't available, constructing it raises ImportError)
mock_extensions.LibsecretPersistence = Mock(side_effect=ImportError)
# encryption unavailable, no opt in to unencrypted cache -> credential should raise
with pytest.raises(ValueError):
ClientSecretCredential(*required_arguments, enable_persistent_cache=True)
ClientSecretCredential(*required_arguments, enable_persistent_cache=True, allow_unencrypted_cache=True)
assert mock_extensions.PersistedTokenCache.called_with(mock_extensions.FilePersistence) | 29,593 |
def get_default_config() -> DefaultConfig:
"""
Get the default config.
Returns:
A dict with the default config.
"""
images = assets.get_images()
return {
"static_url": "/static",
"favicon_ico": images.favicon_ico.name,
"favicon_png": images.favicon_png.name,
"favicon_svg": images.favicon_svg.name,
"preview_png": images.preview_png.name,
"google_tag_manager": "GTM-*******",
"language": "en",
"territory": "US",
"domain": "sample.com",
"text_dir": "ltr",
"title": "Sample",
"description": "We do things",
"subject": "Home Page",
"main_color": "#ff0000",
"background_color": "#ffffff",
"author_name": info.AUTHOR,
"author_email": info.EMAIL,
"facebook_app_id": "123456",
"twitter_username": "sample",
"twitter_user_id": "123456",
"itunes_app_id": "123456",
"itunes_affiliate_data": "123456",
} | 29,594 |
def normalize_elt(elt, alphanum=True):
"""
Normalize string by removing newlines, punctuation, spaces,
and optionally filtering for alphanumeric chars
Args:
elt (string):
string to normalize
alphanum (bool, optional, default True):
if True, only return elt if it contains at least
one alphanumeric char, return None otherwise
Returns:
norm_elt (string):
normalized string or None
"""
norm_elt = elt.replace('\n', '') # remove new lines
translator = str.maketrans('', '', string.punctuation)
norm_elt = norm_elt.lower().translate(translator) # lowercase then remove punctuation
norm_elt = norm_elt.strip().replace(' ', '_') # replace spaces with underscores
if alphanum:
alphanum_check = re.search('[a-zA-Z0-9]', norm_elt)
if alphanum_check:
return norm_elt
else:
return None
else:
return norm_elt | 29,595 |
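Two illustrative calls (hypothetical inputs):
normalize_elt("Hello, World!\n")    # -> "hello_world"
normalize_elt("!!!")                # -> None (no alphanumeric characters survive)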
def check_importability(code: str, func_name: str) -> Tuple[bool, Optional[Exception]]:
"""Very simple check just to see whether the code is at least importable"""
try:
import_func_from_code(
code,
func_name,
raise_if_not_found=False,
register_module=False,
)
return True, None
except Exception as e: # pylint: disable=broad-except
return False, e | 29,596 |
def compute_q(u, v, omega, k_hat, m_hat, N=100, map_est=False):
"""
Inputs:
u, v - (B,L*2)
omega - (L,n)
k_hat, m_hat - (B,J)
"""
B, L = u.size()[0], int(u.size()[1]/2)
unique_omega, inverse_idx = torch.unique(omega, dim=0, return_inverse=True) # (J,n), (L)
c, s = utils.circular_moment_numint_multi(k_hat, m_hat, unique_omega, unique_omega, N=N, map_est=map_est) # (B,J), (B,J) (0.0013s)
c, s = c[:,inverse_idx], s[:,inverse_idx] # (B,L), (B,L)
qc, qs = torch.empty(B,L*2,device=device), torch.empty(B,L*2,device=device)
qc[:,::2], qc[:,1::2] = c.clone(), c.clone()
qs[:,::2], qs[:,1::2] = s.clone(), s.clone()
return qc, qs | 29,597 |
def stationarity(sequence):
"""
Compute the stationarity of a sequence.
A stationary transition is one whose source and destination symbols
are the same. The stationarity measures the percentage of transitions
to the same location.
Parameters
----------
sequence : list
A list of symbols.
Returns
-------
float
Percentage of the sequence that is stationary.
"""
if len(sequence) <= 1:
return 100.0
if len(sequence) == len(set(sequence)):
return .0
stationary_transitions = 0
for i in range(1, len(sequence)):
if sequence[i - 1] == sequence[i]:
stationary_transitions += 1
return round(stationary_transitions * 100 / (len(sequence) - 1), 2) | 29,598 |
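A small worked example: in the first sequence two of the three transitions stay at the same symbol, so roughly 67% of it is stationary.
stationarity(["a", "a", "b", "b"])    # -> 66.67
stationarity(["a", "b", "c"])         # -> 0.0  (all symbols distinct)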
def build_nmt_model(Vs, Vt, demb=128, h=128, drop_p=0.5, tied=True, mask=True, attn=True, l2_ratio=1e-4,
training=None, rnn_fn='lstm'):
"""
Builds the target machine translation model.
:param demb: Embedding dimension.
:param h: Number of hidden units.
:param drop_p: Dropout percentage.
:param attn: Flag to include attention units.
:param rnn_fn: Can be 'lstm' or 'gru'.
"""
if rnn_fn == 'lstm':
rnn = LSTM
elif rnn_fn == 'gru':
        rnn = GRU
else:
raise ValueError(rnn_fn)
# Build encoder
encoder_input = Input((None,), dtype='float32', name='encoder_input')
if mask:
encoder_emb_layer = Embedding(Vs + 1, demb, mask_zero=True, embeddings_regularizer=l2(l2_ratio),
name='encoder_emb')
else:
encoder_emb_layer = Embedding(Vs, demb, mask_zero=False, embeddings_regularizer=l2(l2_ratio),
name='encoder_emb')
encoder_emb = encoder_emb_layer(encoder_input)
# Dropout for encoder
if drop_p > 0.:
encoder_emb = Dropout(drop_p)(encoder_emb, training=training)
encoder_rnn = rnn(h, return_sequences=True, return_state=True, kernel_regularizer=l2(l2_ratio), name='encoder_rnn')
encoder_rtn = encoder_rnn(encoder_emb)
encoder_outputs = encoder_rtn[0]
encoder_states = encoder_rtn[1:]
# Build decoder
decoder_input = Input((None,), dtype='float32', name='decoder_input')
if mask:
decoder_emb_layer = Embedding(Vt + 1, demb, mask_zero=True, embeddings_regularizer=l2(l2_ratio),
name='decoder_emb')
else:
decoder_emb_layer = Embedding(Vt, demb, mask_zero=False, embeddings_regularizer=l2(l2_ratio),
name='decoder_emb')
decoder_emb = decoder_emb_layer(decoder_input)
# Dropout for decoder
if drop_p > 0.:
decoder_emb = Dropout(drop_p)(decoder_emb, training=training)
decoder_rnn = rnn(h, return_sequences=True, kernel_regularizer=l2(l2_ratio), name='decoder_rnn')
decoder_outputs = decoder_rnn(decoder_emb, initial_state=encoder_states)
if drop_p > 0.:
decoder_outputs = Dropout(drop_p)(decoder_outputs, training=training)
# Taken from https://arxiv.org/pdf/1805.01817.pdf for training with user annotations
if tied:
final_outputs = DenseTransposeTied(Vt, kernel_regularizer=l2(l2_ratio), name='outputs',
tied_to=decoder_emb_layer, activation='linear')(decoder_outputs)
else:
final_outputs = Dense(Vt, activation='linear', kernel_regularizer=l2(l2_ratio), name='outputs')(decoder_outputs)
# Add attention units
if attn:
contexts = Attention(units=h, kernel_regularizer=l2(l2_ratio), name='attention',
use_bias=False)([encoder_outputs, decoder_outputs])
if drop_p > 0.:
contexts = Dropout(drop_p)(contexts, training=training)
contexts_outputs = Dense(Vt, activation='linear', use_bias=False, name='context_outputs',
kernel_regularizer=l2(l2_ratio))(contexts)
final_outputs = Add(name='final_outputs')([final_outputs, contexts_outputs])
model = Model(inputs=[encoder_input, decoder_input], outputs=[final_outputs])
return model | 29,599 |