def sample_targets_and_primes(targets, primes, n_rounds,
already_sampled_targets=None, already_sampled_primes=None):
"""
    Sample targets `targets` and primes `primes` for `n_rounds` rounds. Omit already sampled targets
    or primes, which can be passed as sets `already_sampled_targets` and `already_sampled_primes`.
    `targets` and `primes` must be dicts with a class -> [files] mapping (as delivered from `get_amp_images`).
Will return a sample of size "num. of target classes * number of targets per class // `n_rounds`" as list of
4-tuples with:
- target class
- target file
- prime class
- prime file
This function makes sure that you present N targets in random order split into R rounds, where each prime of each
prime class is matched with a random target *per target class* by calling it as such:
```
# round 1:
sample_round_1 = sample_targets_and_primes(targets, primes, Constants.num_rounds)
# round 2:
sample_round_2 = sample_targets_and_primes(targets, primes, Constants.num_rounds,
already_sampled_targets=<targets from sample_round_1>)
...
```
"""
# set defaults
if not already_sampled_targets:
already_sampled_targets = set()
if not already_sampled_primes:
already_sampled_primes = set()
# make sure we have sets
if not isinstance(already_sampled_targets, set):
raise ValueError('`already_sampled_targets` must be a set.')
if not isinstance(already_sampled_primes, set):
raise ValueError('`already_sampled_primes` must be a set.')
# get number of classes
n_prime_classes = len(primes)
n_target_classes = len(targets)
    if not n_prime_classes:
        raise ValueError('No prime images found.')
    if not n_target_classes:
        raise ValueError('No target images found.')
# create a list of primes with 2-tuples: (class, file)
# order of primes is random inside prime class
primes_list = []
for primes_classname, class_primes in primes.items():
class_primes = list(set(class_primes) - already_sampled_primes) # omit already sampled primes
# random order of primes inside class
random.shuffle(class_primes)
primes_list.extend(zip([primes_classname] * len(class_primes), class_primes))
n_primes = len(primes_list)
targets_round = [] # holds the output list with 4-tuples
# construct a sample of targets per target class
for target_classname, class_targets in targets.items():
n_targets = len(class_targets)
if n_targets % n_rounds != 0:
raise ValueError('Number of targets in class (%d in "%s") must be a multiple of'
' number of rounds (%d)'
% (n_targets, target_classname, n_rounds))
# omit already sampled targets
class_targets = set(class_targets) - already_sampled_targets
# get a sample of class targets as random sample without replacement of size "number of targets divided
# by number of rounds" so that you can split targets into several rounds
        # `random.sample` requires a sequence on newer Python versions, so convert the set
        targets_sample = random.sample(list(class_targets), n_targets // n_rounds)
n_targets_sample = len(targets_sample)
if n_targets_sample % n_primes != 0:
raise ValueError('Number of sampled targets in class (%d in "%s") must be a multiple of'
' number of primes (%d)'
% (n_targets_sample, target_classname, n_primes))
# primes sample is the primes list repeated so that it matches the length of targets in this target class
# this makes sure that for each target class all primes will be shown
primes_sample = primes_list * (n_targets_sample // n_primes)
primes_sample_classes, primes_sample_prime = list(zip(*primes_sample))
assert len(primes_sample) == n_targets_sample
# add targets-primes combinations for this round
targets_round.extend(zip([target_classname] * n_targets_sample, targets_sample,
primes_sample_classes, primes_sample_prime))
# random order of targets-primes combinations
random.shuffle(targets_round)
return targets_round
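# Usage sketch: splitting targets across rounds, with hypothetical file names.
# Assumes `import random` at module level (required by the function above) and that
# the documented constraints hold: targets per class divisible by `n_rounds`, and the
# per-round sample size divisible by the total number of primes.
import random

targets = {
    'animals': ['a1.png', 'a2.png', 'a3.png', 'a4.png'],
    'tools': ['t1.png', 't2.png', 't3.png', 't4.png'],
}
primes = {
    'positive': ['p1.png'],
    'negative': ['n1.png'],
}
n_rounds = 2
# Round 1: half of each target class, every prime matched once per target class.
round_1 = sample_targets_and_primes(targets, primes, n_rounds)
# Round 2: exclude targets already shown in round 1.
seen_targets = {target_file for _, target_file, _, _ in round_1}
round_2 = sample_targets_and_primes(targets, primes, n_rounds,
                                    already_sampled_targets=seen_targets)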
| 21,100
|
def test_recurse():
"""
Test to recurse through a subdirectory on the master
and copy said subdirectory over to the specified path.
"""
name = "/opt/code/flask"
source = "salt://code/flask"
user = "salt"
group = "saltstack"
if salt.utils.platform.is_windows():
name = name.replace("/", "\\")
ret = {"name": name, "result": False, "comment": "", "changes": {}}
comt = (
"'mode' is not allowed in 'file.recurse'."
" Please use 'file_mode' and 'dir_mode'."
)
ret.update({"comment": comt})
assert filestate.recurse(name, source, mode="W") == ret
mock_t = MagicMock(return_value=True)
mock_f = MagicMock(return_value=False)
mock_uid = MagicMock(return_value="")
mock_gid = MagicMock(return_value="")
mock_l = MagicMock(return_value=[])
mock_emt = MagicMock(side_effect=[[], ["code/flask"], ["code/flask"]])
mock_lst = MagicMock(
side_effect=[CommandExecutionError, (source, ""), (source, ""), (source, "")]
)
with patch.dict(
filestate.__salt__,
{
"config.manage_mode": mock_t,
"file.user_to_uid": mock_uid,
"file.group_to_gid": mock_gid,
"file.source_list": mock_lst,
"cp.list_master_dirs": mock_emt,
"cp.list_master": mock_l,
},
):
# Group argument is ignored on Windows systems. Group is set to user
if salt.utils.platform.is_windows():
comt = "User salt is not available Group salt is not available"
else:
comt = "User salt is not available Group saltstack is not available"
ret.update({"comment": comt})
assert filestate.recurse(name, source, user=user, group=group) == ret
with patch.object(os.path, "isabs", mock_f):
comt = "Specified file {} is not an absolute path".format(name)
ret.update({"comment": comt})
assert filestate.recurse(name, source) == ret
with patch.object(os.path, "isabs", mock_t):
comt = "Invalid source '1' (must be a salt:// URI)"
ret.update({"comment": comt})
assert filestate.recurse(name, 1) == ret
comt = "Invalid source '//code/flask' (must be a salt:// URI)"
ret.update({"comment": comt})
assert filestate.recurse(name, "//code/flask") == ret
comt = "Recurse failed: "
ret.update({"comment": comt})
assert filestate.recurse(name, source) == ret
comt = (
"The directory 'code/flask' does not exist"
" on the salt fileserver in saltenv 'base'"
)
ret.update({"comment": comt})
assert filestate.recurse(name, source) == ret
with patch.object(os.path, "isdir", mock_f):
with patch.object(os.path, "exists", mock_t):
comt = "The path {} exists and is not a directory".format(name)
ret.update({"comment": comt})
assert filestate.recurse(name, source) == ret
with patch.object(os.path, "isdir", mock_t):
comt = "The directory {} is in the correct state".format(name)
ret.update({"comment": comt, "result": True})
assert filestate.recurse(name, source) == ret
| 21,101
|
def getsign(num):
"""input the raw num string, return a tuple (sign_num, num_abs).
"""
sign_num = ''
if num.startswith('±'):
sign_num = plus_minus
num_abs = num.lstrip('±+-')
if not islegal(num_abs):
return sign_num, ''
else:
try:
temp = float(num)
if (temp < 0) and (sign_num == ''):
sign_num = sign_negative
elif (temp > 0) and (sign_num == ''):
if ('+' in num):
sign_num = sign_positive
else:
if num.startswith('-'):
sign_num = sign_negative
if num.startswith('+'):
sign_num = sign_positive
num_abs = num.lstrip('+-')
except ValueError:
raise
return sign_num, num_abs
| 21,102
|
def _convert_actions_to_commands(
subvol: Subvol,
build_appliance: Subvol,
action_to_names_or_rpms: Mapping[RpmAction, Union[str, _LocalRpm]],
) -> Mapping[YumDnfCommand, Union[str, _LocalRpm]]:
"""
Go through the list of RPMs to install and change the action to
downgrade if it is a local RPM with a lower version than what is
installed.
Also use `local_install` and `local_remove` for _LocalRpm.
See the docs in `YumDnfCommand` for the rationale.
"""
cmd_to_names_or_rpms = {}
for action, names_or_rpms in action_to_names_or_rpms.items():
for nor in names_or_rpms:
cmd, new_nor = _action_to_command(
subvol, build_appliance, action, nor
)
if cmd == YumDnfCommand.noop:
continue
if cmd is None: # pragma: no cover
raise AssertionError(f"Unsupported {action}, {nor}")
cmd_to_names_or_rpms.setdefault(cmd, set()).add(new_nor)
return cmd_to_names_or_rpms
| 21,103
|
def fit_DBscan (image_X,
eps,
eps_grain_boundary,
min_sample,
min_sample_grain_boundary,
filter_boundary,
remove_large_clusters,
remove_small_clusters,
binarize_bdr_coord,
binarize_grain_coord,
):
""" Function to measure counts and average sizes of instances within an image
args:
image_X: np array containing a preporcessed image
eps: float, parameter for the DBscan algorithm from sklearn
eps_grain_boundary:float, parameter for the DBscan algorithm from sklearn
min_sample: int, parameter for the DBscan algorithm from sklearn
min_sample_grain_boundary: int float, parameter for the DBscan algorithm from sklearn
filter_boundary:int, threshold to apply while finding the grain boundaries
remove_large_clusters: int indicating how many of the largest clusters
shall be removed
remove_small_clusters:int indicating how many of the smallest clusters
shall be removed
binarize_bdr_coord: int for the binarization of the grain boundaries
binarize_grain_coord:: int for the binarization of the grain interiors
returns:
m_CL: float, log10(mean (predicted cluster radius in pixels))
s_CL: float, log10(predicted cluster count)
"""
print('Finding grain boundaries')
bdr_coord=np.array(binarize_array_high(image_X, binarize_bdr_coord))
bdr_coord=find_grain_boundaries(bdr_coord, eps=eps_grain_boundary, min_sample=min_sample_grain_boundary, filter_boundary=filter_boundary)
bdr_coord_df=pd.DataFrame(bdr_coord)
bdr_coord_df.columns=['X','Y']
bdr_coord_df['Z']=255
df_grain=pd.pivot_table(bdr_coord_df, index='X', columns='Y', values='Z', fill_value=0)
df_grain=df_grain.to_numpy()
print('Measuring grains')
grain_coord = np.array(binarize_array(df_grain, binarize_grain_coord))
(m_CL, s_CL, clusters)=find_grains(grain_coord, eps, min_sample, remove_large_clusters, remove_small_clusters)
return (m_CL, s_CL, clusters)
| 21,104
|
def get_today_timestring():
"""Docen."""
return pd.Timestamp.today().strftime('%Y-%m-%d')
| 21,105
|
def load_json(filepath):
"""
Load a json file
:param filepath: path to json file
"""
fp = Path(filepath)
if not fp.exists():
raise ValueError("Unrecognized file path: {}".format(filepath))
with open(filepath) as f:
data = json.load(f)
return data
| 21,106
|
def handle(req):
"""handle a request to the function
Args:
req (str): request body
"""
r = requests.get("http://api.open-notify.org/astros.json")
result = r.json()
index = random.randint(0, len(result["people"]) - 1)
name = result["people"][index]["name"]
return "{} is in space".format(name)
| 21,107
|
def get_file_hashsum(file_name: str):
"""Generate a SHA-256 hashsum of the given file."""
hash_sha256 = hashlib.sha256()
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
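# Usage sketch: hash a small temporary file (assumes `hashlib` is imported at module
# level, as the function above requires).
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")
    tmp_path = tmp.name
# SHA-256 of b"hello" is
# 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
print(get_file_hashsum(tmp_path))
os.remove(tmp_path)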
| 21,108
|
def predict_ch3(net, test_iter, n=6):
"""预测标签(定义见第3章)。"""
for X, y in test_iter:
break
trues = d2l.get_fashion_mnist_labels(y)
preds = d2l.get_fashion_mnist_labels(d2l.argmax(net(X), axis=1))
titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
d2l.show_images(d2l.reshape(X[0:n], (n, 28, 28)), 1, n,
titles=titles[0:n])
| 21,109
|
def getReviews(appid, savePath):
"""
    This function fetches recent user reviews for `appid` from the Steamworks API,
    converts them into a Pandas dataframe, and exports them as a .csv file to `savePath`.
"""
# Detailed reference for parameters setting, please visit:
# https://partner.steamgames.com/doc/store/getreviews
parameters = {"filter":"recent","language":"english", "num_per_page": 100}
# the target url from which retrieve user reviews
url = "http://store.steampowered.com/appreviews/" + appid + "?json=1"
# make a request on api page given by Steamwork
r = requests.get(url = url, params=parameters)
# the list of reviews store in json format
data = r.json()
# retrieve the list of reviews
reviews = data["reviews"]
# convert the reviews from json to dataframe
df = pd.DataFrame(reviews)
# export df to .csv file
df.to_csv(savePath)
| 21,110
|
def generate_primes(n):
"""Generates a list of prime numbers up to `n`
"""
global PRIMES
k = PRIMES[-1] + 2
while k <= n:
primes_so_far = PRIMES[:]
divisible = False
for p in primes_so_far:
if k % p == 0:
divisible = True
break
if not divisible:
PRIMES.append(k)
k += 2
return PRIMES
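# Usage sketch: the function relies on a module-level PRIMES list. Seeding it with
# [2, 3] is an assumption here, but some odd last element is required because `k`
# starts at PRIMES[-1] + 2 and only visits odd numbers.
PRIMES = [2, 3]
print(generate_primes(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]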
| 21,111
|
def Convex(loss, L2_reg):
"""
    loss: per-source losses, [loss_1, loss_2, ... loss_src_number]
    Returns simplex weights lam (non-negative, summing to 1) that minimize
    lam @ loss + L2_reg * ||lam||_2.
"""
src_number = len(loss)
lam = cp.Variable(src_number)
prob = cp.Problem(
cp.Minimize(lam @ loss + L2_reg * cp.norm(lam, 2)), [cp.sum(lam) == 1, lam >= 0]
)
# prob.solve()
prob.solve(solver="SCS")
lam_optimal = lam.value
return lam_optimal
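# Minimal sketch with made-up loss values. Assumes cvxpy is installed and imported
# as `cp`, as the function above requires.
import numpy as np

src_losses = np.array([0.8, 0.3, 0.5])
weights = Convex(src_losses, L2_reg=0.1)
print(weights, weights.sum())  # non-negative weights summing to ~1, favouring the lowest loss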
| 21,112
|
def state(new_state):
"""State decorator.
Specify True (turn on) or False (turn off).
"""
def decorator(function):
"""Decorator function."""
# pylint: disable=no-member,protected-access
def wrapper(self, **kwargs):
"""Wrap a group state change."""
from limitlessled.pipeline import Pipeline
pipeline = Pipeline()
transition_time = DEFAULT_TRANSITION
# Stop any repeating pipeline.
if self.repeating:
self.repeating = False
self.group.stop()
# Not on and should be? Turn on.
if not self.is_on and new_state is True:
pipeline.on()
# Set transition time.
if ATTR_TRANSITION in kwargs:
transition_time = kwargs[ATTR_TRANSITION]
# Do group type-specific work.
function(self, transition_time, pipeline, **kwargs)
# Update state.
self._is_on = new_state
self.group.enqueue(pipeline)
self.schedule_update_ha_state()
return wrapper
return decorator
| 21,113
|
def rotTransMatrixNOAD(axis, s, c, t):
"""
build a rotate * translate matrix - MUCH faster for derivatives
since we know there are a ton of zeros and can act accordingly
:param axis: x y or z as a character
:param s: sin of theta
:param c: cos of theta
:param t: translation (a 3 tuple)
:return:
"""
if axis == "Z" or axis == "z":
return N.array([[c, -s, 0, c * t[0] - s * t[1]],
[s, c, 0, s * t[0] + c * t[1]],
[0, 0, 1, t[2]],
[0, 0, 0, 1]])
elif axis == "Y" or axis == "y":
return N.array([[c, 0, s, c * t[0] + s * t[2]],
[0, 1, 0, t[1]],
                        [-s, 0, c, -s * t[0] + c * t[2]],
[0, 0, 0, 1]])
elif axis == "X" or axis == "x":
return N.array([[1, 0, 0, t[0]],
[0, c, -s, c * t[1] - s * t[2]],
[0, s, c, s * t[1] + c * t[2]],
[0, 0, 0, 1]])
else:
print "Unsupported Axis:", axis
raise NotImplementedError
| 21,114
|
def angular_diameter_distance(z, cosmo=None):
""" Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object at
redshift `z`.
Parameters
----------
    z : array_like
      Input redshifts.
    cosmo : Cosmology instance, optional
      The cosmology to use. If None, the current cosmology is used.
Returns
-------
angdist : astropy.units.Quantity
Angular diameter distance at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.angular_diameter_distance(z)
| 21,115
|
def main(stdin):
"""
Take sorted standard in from Hadoop and return lines.
Value is just a place holder.
"""
for line_num in stdin:
# Remove trailing newlines.
line_num = line_num.rstrip()
        # Skip lines that cannot be split into (line, count), e.g. empty lines.
try:
(line, num) = line_num.rsplit('\t', 1)
print(("{line}\t{num}").format(line=line, num=num))
except ValueError:
pass
return None
| 21,116
|
def _ignore_module_import_frames(file_name, name, line_number, line):
"""
Ignores import frames of extension loading.
Parameters
----------
file_name : `str`
The frame's respective file's name.
name : `str`
The frame's respective function's name.
line_number : `int`
The line's index where the exception occurred.
line : `str`
The frame's respective stripped line.
Returns
-------
should_show_frame : `bool`
Whether the frame should be shown.
"""
should_show_frame = True
if file_name.startswith('<') and file_name.endswith('>'):
should_show_frame = False
elif file_name == EXTENSION_LOADER_EXTENSION_FILE_PATH:
if name == '_load':
if line == 'loaded = self._load_module()':
should_show_frame = False
elif name == '_load_module':
if line == 'spec.loader.exec_module(module)':
should_show_frame = False
elif file_name == EXTENSION_LOADER_EXTENSION_LOADER_FILE_PATH:
if name == '_extension_loader_task':
if line in (
'module = await KOKORO.run_in_executor(extension._load)',
'await entry_point(module)',
'entry_point(module)',
):
should_show_frame = False
elif name == '_extension_unloader_task':
if line in (
'await exit_point(module)',
'exit_point(module)',
):
should_show_frame = False
return should_show_frame
| 21,117
|
def test_i(name, expectation, limit_name, limit_value, result):
""" Check for behaviour of I under different pipeline configurations.
name
Name of I, defines its output.
expectation
        Test is expected to raise an error when the name requires calculation of total iterations (e.g. for 'm')
        and this number is not defined in the pipeline (limit_value is None).
limit_name
'n_epochs' or 'n_iters'
limit_value
        Total number of epochs or iterations to run.
result
Expected output of I. If None, I is expected to raise an error.
"""
kwargs = {'batch_size': 2, limit_name: limit_value, 'lazy': True}
pipeline = (Dataset(10).pipeline()
.init_variable('var', -1)
.update(V('var', mode='w'), I(name))
.run(**kwargs)
)
with expectation:
_ = pipeline.next_batch()
assert pipeline.get_variable('var') == result
| 21,118
|
def get_tf_tensor_data(tensor):
"""Get data from tensor."""
assert isinstance(tensor, tensor_pb2.TensorProto)
is_raw = False
if tensor.tensor_content:
data = tensor.tensor_content
is_raw = True
elif tensor.float_val:
data = tensor.float_val
elif tensor.dcomplex_val:
data = tensor.dcomplex_val
elif tensor.int_val:
data = tensor.int_val
elif tensor.int64_val:
data = tensor.int64_val
elif tensor.bool_val:
data = tensor.bool_val
elif tensor.dtype == tf.int32:
data = [0]
elif tensor.dtype == tf.int64:
data = [0]
elif tensor.dtype == tf.float32:
data = [0.]
elif tensor.dtype == tf.float16:
data = [0]
elif tensor.string_val:
data = tensor.string_val
else:
raise ValueError('tensor data not supported')
return [is_raw, data]
| 21,119
|
def _createPhraseMatchList(tree1, tree2, matchList, doEquivNodes=False):
"""
Create the list of linked phrases between tree1 and tree2
"""
phraseListTxt1 = tree1.getPhrases()
phraseListHi1 = tree2.getPhrases()
if PRINT_DEBUG or PRINT_DEBUG_SPLIT:
print "\nPhrase 1 nodes:"
printAllPhraseInfo(phraseListTxt1)
print "\nPhrase 2 nodes:"
printAllPhraseInfo(phraseListHi1)
# Match phrases based on word content
# match exact phrases first
matchList.extend( _phraseMatchListExactText(phraseListTxt1, phraseListHi1) )
if PRINT_MATCH_LIST:
print matchList
print "Exact phrase matching:"
printAllMatchListInfo(matchList)
# match based on headwords
matchList.extend(_phraseMatchListHeadwords(phraseListTxt1, phraseListHi1,stoplist=True))
matchList = refineML_TopLevelMatch(matchList)
matchList = refineML_RemoveDuplicates(matchList)
bestMatchList = matchList[:] # copy of matches we believe
if PRINT_MATCH_LIST:
print "*** raw match list, after identical phrases matched"
printAllMatchListInfo(matchList)
print "----------"
# relatively safe matches are completed
# now build up tree
continueMatching = True
watchdogLoopCounter = 0
while (continueMatching)>0:
watchdogLoopCounter += 1
if watchdogLoopCounter > 10: raise ValueError,"watchdog for match list creation"
_oldMLLength = len(matchList) # only for debugging, can compare to newML length
newML = []
# Link parent nodes together as well
# including stop words
newML.extend(linkParentNodes(matchList, matchList))
if PRINT_MATCH_LIST:
print "*** match list, after parent phrases matched"
printAllMatchListInfo(newML)
# Link equivalent higher nodes
# generally this is no longer needed, now that we can contract trees
# It is still needed if we are describing links to more than one target
if doEquivNodes:
mequiv = linkHigherEquivalentNodes(matchList)
if PRINT_MATCH_LIST:
print "*** equivalent nodes"
printAllMatchListInfo(mequiv)
# raise SystemExit
newML.extend(mequiv)
if PRINT_MATCH_LIST: printAllMatchListInfo(newML)
newML.extend(linkParentNodesOfSingleChildren(matchList, phraseListTxt1, phraseListHi1 ))
newML.extend(linkParentNodesOfSingleTargetChild(matchList, phraseListTxt1, phraseListHi1 ))
newML.extend(linkParentNodesOfSingleLinkedChildren(matchList, phraseListTxt1, phraseListHi1 ))
# Link child nodes that may not be independent phrases
# but which do have identical word content
# Working with highlights rather than sentences
# as it's more important to match all the phrases of the highlight
# nodesAlreadyMatched = [ n2 for (n1,n2) in matchList ]
for (ph1, ph2) in matchList:
if ph1.isLeaf() or ph2.isLeaf(): continue
newML.extend(linkIdenticalNodes(ph1,ph2,matchList))
newML.extend(linkIdenticalWords(ph1,ph2,matchList))
if PRINT_MATCH_LIST:
print "*** After further linking nodes"
printAllMatchListInfo(newML)
# Remove any rules that involve a change to top level phrase type
# We think that the only rules worth learning keep the
# top level phrase element the same
newML = refineML_TopLevelMatch(newML)
newML = refineML_RemoveDuplicates(newML)
newML = refineML_RemoveKnownMatches(newML, matchList)
# newML = refineML_RemoveMissedProperNouns(newML, matchList)
matchList.extend(newML)
matchList = topDownConsistency(tree1, tree2, matchList, bestMatchList)
# check to see what this iteration has done
newMLAcceptedLinks = [ m for m in newML if m in matchList ]
if len(newMLAcceptedLinks)==0: continueMatching=False
if PRINT_MATCH_LIST:
print
print "After refining matchList so top levels match"
print "New matches:"
printAllMatchListInfo(newML)
print "New matches that were accepted:"
printAllMatchListInfo(newMLAcceptedLinks)
print "Full set of matches:"
printAllMatchListInfo(matchList)
# TODO: make a consistent tree
# raise SystemExit,"End of while loop"
matchListRefined = topDownConsistency(tree1, tree2, matchList, bestMatchList)
matchList = matchListRefined
if PRINT_MATCH_LIST:
print
print "After refining matchList after making consistent top down"
printAllMatchListInfo(matchList)
# raise SystemExit,"topDownConsistency"
return matchList
| 21,120
|
def coth(x):
"""
Return the hyperbolic cotangent of x.
"""
return 1.0/tanh(x)
| 21,121
|
def rmtree_fixed(path):
"""Like :func:`shutil.rmtree` but doesn't choke on annoying permissions.
If a directory with -w or -x is encountered, it gets fixed and deletion
continues.
"""
if path.is_link():
raise OSError("Cannot call rmtree on a symbolic link")
uid = os.getuid()
st = path.lstat()
if st.st_uid == uid and st.st_mode & 0o700 != 0o700:
path.chmod(st.st_mode | 0o700)
for entry in path.listdir():
if stat.S_ISDIR(entry.lstat().st_mode):
rmtree_fixed(entry)
else:
entry.remove()
path.rmdir()
| 21,122
|
def param_is_numeric(p):
"""
Test whether any parameter is numeric; functionally, determines if any
parameter is convertible to a float.
:param p: An input parameter
:return:
"""
try:
float(p)
return True
    except (ValueError, TypeError):
return False
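# Quick usage check:
for candidate in ('3.14', 'abc', 7):
    print(candidate, param_is_numeric(candidate))  # 3.14 True / abc False / 7 True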
| 21,123
|
def convert_graph_to_angular_abstract_graph(graph: Graph, simple_graph=True, return_tripel_edges=False) -> Graph:
"""Converts a graph into an abstract angular graph
Can be used to calculate a path tsp
Arguments:
graph {Graph} -- Graph to be converted
simple_graph {bool} -- Indicates if graph is simple
return_tripel_edges {bool} -- Also return translation for original edges to abstract
Returns:
Graph -- Converted abstract graph
"""
# create a vertex for every edge in the original graph
# For geometric instances, only one direction of edges is needed
vertices = np.array([[u, v] for u, v in graph.edges if u < v])
edges = {}
tripel_edges = {}
for i, vertex in enumerate(vertices):
ran = range(i+1, len(vertices)) if simple_graph else range(len(vertices))
for j in ran:
if j == i:
continue
other = vertices[j]
if np.intersect1d(vertex, other).size > 0:
shared_vertex = np.intersect1d(vertex, other)
non_shared = np.setdiff1d(np.hstack([vertex, other]), shared_vertex)
edges[(i, j)] = get_angle(
graph.vertices[shared_vertex],
graph.vertices[non_shared[0]],
graph.vertices[non_shared[1]]
)
if return_tripel_edges:
from_vertex = np.intersect1d(vertex, non_shared)
to_vertex = np.intersect1d(other, non_shared)
edge = (*from_vertex, *to_vertex)
tripel_edges[(*shared_vertex, *edge)] = (i, j)
graph = Graph(vertices, edges.keys(), c=edges)
if return_tripel_edges:
return (tripel_edges, graph)
return graph
| 21,124
|
def login_to_site(url, username, password, user_tag, pass_tag):
"""
:param url:
:param username:
:param password:
:param user_tag:
:param pass_tag:
:return: :raise:
"""
browser = mechanize.Browser(factory=mechanize.RobustFactory())
browser.set_handle_robots(False)
browser.set_handle_referer(True)
browser.set_handle_refresh(True)
browser.set_handle_robots(False)
browser.open(url)
# noinspection PyCallingNonCallable,PyCallingNonCallable,PyCallingNonCallable,PyCallingNonCallable
browser.select_form(nr=0)
browser["USER"] = username
browser["password"] = password
# noinspection PyCallingNonCallable
browser.submit()
# noinspection PyCallingNonCallable
if "Case Search Login Error" in browser.response().get_data():
raise ValueError("Could not login to PACER Case Search. Check your "
"username and password")
print ("You are logged on to the Public Access to Court Electronic "
"Records (PACER) Case Search website as " + username + ". All costs "
"will be billed to this account.")
return browser
| 21,125
|
async def test_db(
service: Service = Depends(Service)
) -> HTTPSuccess:
"""Test the API to determine if the database is connected."""
    if service.test() is not None:
return { "message": "Database connected." }
else:
return {"message": "Database not connected." }
| 21,126
|
def main(ctx):
"""Show ids of accounts defined in `my_accounts` config var.
The format is suitable for BITSHARESD_TRACK_ACCOUNTS= env variable to use in docker-compose.yml for running docker
image bitshares/bitshares-core:latest
"""
if not ctx.config['my_accounts']:
ctx.log.critical('You need to list your accounts in "my_accounts" config variable')
sys.exit(1)
ids = ''
for account_name in ctx.config['my_accounts']:
account = Account(account_name, bitshares_instance=ctx.bitshares)
ids += '"{}" '.format((account['id']))
print(ids)
| 21,127
|
def model_fit_predict():
"""
Training example was implemented according to machine-learning-mastery forum
The function takes data from the dictionary returned from splitWindows.create_windows function
https://machinelearningmastery.com/stateful-stateless-lstm-time-series-forecasting-python/
:return: np.array of predictions
"""
X, y, test_input = windows_dict['X'], windows_dict['y'], windows_dict['X_test']
# Predictions are stored in a list
predictions = []
with tqdm(total=X.shape[0], desc="Training the model, saving predictions") as progress_bar:
# Save model History in order to check error data
history = History()
# build model framework
current_model = model_builder(X)
# Make predictions for each window
for i in range(X.shape[0]):
# TRAIN (FIT) model for each epoch
# history = current_model.fit(
# input_X[i], target_X[i],
# epochs=_epochs, batch_size=batch,
# verbose=0, shuffle=False, validation_split=0.1,
# callbacks=[history]
# )
# print(X[i].shape, X[i].dtype, y[i].shape, y[i].dtype)
for e in range(epochs):
current_model.fit(
X[i], y[i],
epochs=1, batch_size=batch,
verbose=0, shuffle=False,
callbacks=[history]
)
current_model.reset_states()
# PREDICT and save results
predictions.append(
current_model.predict(test_input[i], batch_size=batch_test, verbose=0)
)
progress_bar.update(1)
return np.asarray(predictions)
| 21,128
|
def mp_variant_annotations(df_mp, df_split_cols='', df_sampleid='all',
drop_hom_ref=True, n_cores=1):
"""
Multiprocessing variant annotations
see variantAnnotations.process_variant_annotations for description of annotations
This function coordinates the annotation of variants using the
multiprocessing library.
Parameters
---------------
df_mp: pandas df, required
VCF DataFrame
df_split_cols: dict, optional
key:FORMAT id value:#fields expected
e.g. {'AD':2} indicates Allelic Depth should be
split into 2 columns.
df_sampleid: list, required
list of sample_ids, can be 'all'
drop_hom_ref: bool, optional
specifies whether to drop all homozygous reference
variants from dataframe.
FALSE REQUIRES LARGE MEMORY FOOTPRINT
n_cores: int, optional
Number of multiprocessing jobs to start.
Be careful as memory is copied to each process, RAM intensive
"""
from functools import partial
import multiprocessing as mp
import gc
print('starting multiprocessing')
pool = mp.Pool(int(n_cores))
# tasks = np.array_split(df_mp.copy(), int(n_cores)) #breaks with older
# pandas/numpy
dfs = df_split(df_mp.copy(), int(n_cores))
mp_process = partial(process_variant_annotations, sample_id=df_sampleid,
split_columns=df_split_cols, drop_hom_ref=drop_hom_ref)
results = []
del df_mp
gc.collect()
r = pool.map_async(mp_process, \
dfs, callback=results.append)
r.wait()
pool.close()
pool.join()
pool.terminate()
print('multiprocessing complete')
res_df = pd.concat([df for df in results[0] if len(df) > 0])
cat_cols = ['vartype1', 'vartype2', 'a1', 'a2', \
'GT1', 'GT2', 'GT','sample_ids', 'zygosity']
res_df.loc[:, cat_cols] = res_df[cat_cols].astype('category')
return res_df
| 21,129
|
async def wiki(wiki_q):
""" For .wiki command, fetch content from Wikipedia. """
match = wiki_q.pattern_match.group(1)
try:
summary(match)
except DisambiguationError as error:
await wiki_q.edit(f"Disambiguated page found.\n\n{error}")
return
except PageError as pageerror:
await wiki_q.edit(f"Page not found.\n\n{pageerror}")
return
result = summary(match)
if len(result) >= 4096:
file = open("output.txt", "w+")
file.write(result)
file.close()
await wiki_q.client.send_file(
wiki_q.chat_id,
"output.txt",
reply_to=wiki_q.id,
caption="`Output too large, sending as file`",
)
if os.path.exists("output.txt"):
os.remove("output.txt")
return
await wiki_q.edit("**Search:**\n`" + match + "`\n\n**Result:**\n" + result)
if BOTLOG:
await wiki_q.client.send_message(
BOTLOG_CHATID, f"Wiki query `{match}` was executed successfully")
| 21,130
|
def find_attachments(pattern, cursor):
"""Return a list of attachments that match the specified pattern.
Args:
pattern: The path to the attachment, as a SQLite pattern (to be
passed to a LIKE clause).
cursor: The Cursor object through which the SQLite queries are
sent to the Zotero database.
Returns:
A list of (parentItemID, path) pairs that match the specified
pattern. The returned list is empty if no matches are found.
"""
query = 'SELECT parentItemID, path FROM itemAttachments WHERE path LIKE ?'
cursor.execute(query, (pattern,))
return list(cursor)
| 21,131
|
def generate_symmetric_matrix(n_unique_action: int, random_state: int) -> np.ndarray:
"""Generate symmetric matrix
Parameters
-----------
n_unique_action: int (>= len_list)
Number of actions.
random_state: int
Controls the random seed in sampling elements of matrix.
Returns
---------
symmetric_matrix: array-like, shape (n_unique_action, n_unique_action)
"""
random_ = check_random_state(random_state)
base_matrix = random_.normal(scale=5, size=(n_unique_action, n_unique_action))
symmetric_matrix = (
np.tril(base_matrix) + np.tril(base_matrix).T - np.diag(base_matrix.diagonal())
)
return symmetric_matrix
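# Quick symmetry check. Assumes numpy as `np` and sklearn's `check_random_state`
# are imported at module level, as the function above requires.
m = generate_symmetric_matrix(n_unique_action=4, random_state=0)
assert np.allclose(m, m.T)
print(m.shape)  # (4, 4)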
| 21,132
|
def sanitizer_update(instrument_name: str | None):
"""Update sanitizers with new data"""
from .service.sanitize import update_tablesanitizer
if instrument_name:
items = (instrument_name,)
else:
listing = app.get_local_source_listing(
defaults.source_dir,
defaults.blob_from_instrument_name
)
items = tuple(l.name for l in listing)
click.secho()
for name in items:
table = app.load_unsanitizedtable(
name,
defaults.blob_from_instrument_name,
)
table_sanitizer = app.load_table_sanitizer(
name,
defaults.sanitizer_dir_from_instrument_name,
)
updates = update_tablesanitizer(table, table_sanitizer)
if updates:
click.secho("Updating sanitizers for {}".format(click.style(name, fg="bright_cyan")))
for u in updates:
if u.new:
click.secho(" Creating new sanitizer {} ({}) items".format(u.name, len(u.rows)))
else:
click.secho(" Updating sanitizer {} ({}) items".format(u.name, len(u.rows)))
app.update_sanitizer(
name,
updates,
defaults.sanitizer_dir_from_instrument_name
)
click.secho()
| 21,133
|
def model_fields_map(model, fields=None, exclude=None, prefix='', prefixm='', attname=True, rename=None):
"""
    Based on the given model, returns a list of tuples containing the ORM path to each field
    and the name under which it should appear in the result.
    Only regular fields are handled; m2m and generic fields are not included.
    ARGUMENTS:
        :param model: model or model instance from which the list of fields is built
        :param None | collections.Container fields: fields to take from the model
        :param None | collections.Container exclude: fields to be skipped
        :param str prefix: ORM path prefix under which the model sits in the query
        :param str prefixm: prefix added to the field name in the result
        :param bool attname: use attname (model_id) rather than name (model); these differ for foreign keys
        :param dict rename: mapping used to rename fields
:rtype: list[tuple[str]]
"""
data = []
rename = rename or {}
attribute = 'attname' if attname else 'name'
for f in model._meta.concrete_fields:
if fields and f.attname not in fields and f.name not in fields:
continue
        if exclude and (f.attname in exclude or f.name in exclude):
continue
param_name = getattr(f, attribute)
new_param_name = rename[param_name] if param_name in rename else param_name
data.append(('{}{}'.format(prefix, param_name), '{}{}'.format(prefixm, new_param_name)))
return data
| 21,134
|
def test_setext_headings_extra_90x():
"""
Test case extra 90: SetExt heading with full image with backslash in label
"""
# Arrange
source_markdown = """a![foo\\#bar][bar]a
---
[bar]: /url 'title'"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo#bar:::bar:foo\\#bar:::::]",
"[text(1,18):a:]",
"[end-setext::]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<h2>a<img src="/url" alt="foo#bar" title="title" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 21,135
|
def scaffold_to_smiles(mols: Union[List[str], List[Chem.Mol]],
use_indices: bool = False) -> Dict[str, Union[Set[str], Set[int]]]:
""" Computes the scaffold for each SMILES and returns a mapping from scaffolds to sets of smiles (or indices).
Parameters
----------
mols: A list of SMILES strings or RDKit molecules.
use_indices:
Whether to map to the SMILES's index in :code:`mols` rather than mapping to the smiles string itself.
This is necessary if there are duplicate smiles.
Returns
-------
A dictionary mapping each unique scaffold to all SMILES (or indices) which have that scaffold.
"""
scaffolds = defaultdict(set)
for i, mol in tqdm(enumerate(mols), total=len(mols)):
scaffold = generate_scaffold(mol)
if use_indices:
scaffolds[scaffold].add(i)
else:
scaffolds[scaffold].add(mol)
return scaffolds
| 21,136
|
def encontrar_passwords():
"""
    Try every 6-letter combination, hashing each one to see whether it matches the
    hashes stored in the /etc/shadow files.
    For the teams wordlist, the approach was basically to copy and paste the plain-text
    content of different Wikipedia pages into equipos.txt until the NBA one worked.
"""
hashes = [
('ox', 'ox45K6RsEUfmQ', generar_palabras()), # fido
('$1$42dJ1xYh', '$1$42dJ1xYh$MfrRke8/Ej3h5.vMtNEhC.', leer_palabras('./colores.txt')), # white
('$6$SZGpKoPi', '$6$SZGpKoPi$GGGqHYKy6PO/H5nvV0AmaGB/5krnxVuz2k2uX81O.CF5nYctE5RlR/rzJQCL3ZsF8yratCRbSR2ZuwKzvve.D0', leer_palabras('./equipos.txt')), # knicks
]
encontradas = []
for algo_y_salt, hash_resultado, origen_passwords in hashes:
for password in origen_passwords:
if crypt(password, algo_y_salt) == hash_resultado:
encontradas.append(password)
break
return encontradas
| 21,137
|
def save(fileref, data=None, metadata=None, compression_level=None, order='C'):
"""saves the data/metadata to a file represented by `fileref`."""
if isinstance(fileref, (str, bytes, _PathLike)):
fileref = check_suffix(fileref)
with open(fileref, 'wb') as dest:
            save(dest, data=data, metadata=metadata, compression_level=compression_level, order=order)
return
# otherwise: assume fileref to be a IOBase
if not isinstance(data, _np.ndarray):
data = _np.array(data)
metadict = metadata_dict.from_data(data=data, metadata=metadata, arrayorder=order)
metabin = codec.encode_metadata_dict(metadict)
metasiz = calc_metadata_size(metabin)
databin = codec.encode_data(data, order, compression_level=compression_level)
fileref.write(databin)
fileref.write(metabin)
fileref.write(codec.encode_metadata_size(metasiz))
| 21,138
|
def queue_merlin_study(study, adapter):
"""
Launch a chain of tasks based off of a MerlinStudy.
"""
samples = study.samples
sample_labels = study.sample_labels
egraph = study.dag
LOG.info("Calculating task groupings from DAG.")
groups_of_chains = egraph.group_tasks("_source")
# magic to turn graph into celery tasks
LOG.info("Converting graph to tasks.")
celery_dag = chain(
chord(
group(
[
expand_tasks_with_samples.s(
egraph,
gchain,
samples,
sample_labels,
merlin_step,
adapter,
study.level_max_dirs,
).set(queue=egraph.step(chain_group[0][0]).get_task_queue())
for gchain in chain_group
]
),
chordfinisher.s().set(
queue=egraph.step(chain_group[0][0]).get_task_queue()
),
)
for chain_group in groups_of_chains[1:]
)
LOG.info("Launching tasks.")
return celery_dag.delay(None)
| 21,139
|
def sphere_mass(density,radius):
"""Usage: Find the mass of a sphere using density and radius"""
return density*((4/3)*(math.pi)*radius**3)
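# Worked example (assumes `math` is imported, as the function above requires):
# a sphere of radius 0.1 m and density 1000 kg/m^3 has mass
# 1000 * (4/3) * pi * 0.001 ~= 4.19 kg.
print(sphere_mass(1000, 0.1))  # ~4.18879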
| 21,140
|
def generate_stats_table(buildings_clust_df):
"""
Generate statistical analysis table of building types in the area
Args:
buildings_clust_df: building footprints dataframe after performed building blocks assignment (HDBSCAN)
Return:
        stat_table: statistical analysis results, which contain the count, means and standard deviation values for every building type in the area
"""
# Count
count_table = buildings_clust_df.groupby('building_types')[['building_types']].size().to_frame('count').reset_index()
# Mean
mean_table = buildings_clust_df.groupby('building_types')[['building_types','surface_area','rectangularity']].mean().reset_index()
mean_table.columns = ['building_types','mean_surface_area','mean_rectangularity']
# Standard deviation
sd_table=buildings_clust_df.groupby('building_types')[['surface_area','rectangularity']].agg(np.std, ddof=0).reset_index()
# Rename columns
sd_table.columns = ['building_types','sd_surface_area','sd_rectangularity']
stat_table = count_table.merge(mean_table).merge(sd_table)
# Reorder columns
stat_table = stat_table[stat_table.columns[[0,1,3,2,4,5]]]
return stat_table
| 21,141
|
def make_multisat(nucsat_tuples):
"""Creates a rst.sty Latex string representation of a multi-satellite RST subtree
(i.e. merge a set of nucleus-satellite relations that share the same nucleus
into one subtree).
"""
nucsat_tuples = [tup for tup in nucsat_tuples] # unpack the iterable, so we can check its length
assert len(nucsat_tuples) > 1, \
"A multisat relation bundle must contain more than one relation"
result = "\dirrel\n\t"
first_relation, remaining_relations = nucsat_tuples[0], nucsat_tuples[1:]
relname, nuc_types, elements = first_relation
first_nucleus_pos = current_nucleus_pos = nuc_types.index('N')
result_segments = []
# add elements (nucleus and satellite) from first relation to resulting (sub)tree
for i, nuc_type in enumerate(nuc_types):
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
if nuc_type == 'N':
result_segments.append(NUC_TEMPLATE.substitute(nucleus=element))
else:
result_segments.append(SAT_TEMPLATE.substitute(satellite=element, relation=relname))
# reorder elements of the remaining relation and add them to the resulting (sub)tree
for (relname, nuc_types, elements) in remaining_relations:
for i, nuc_type in enumerate(nuc_types):
if nuc_type == 'N': # all relations share the same nucleus, so we don't need to reprocess it.
continue
else:
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
result_segment = SAT_TEMPLATE.substitute(satellite=element, relation=relname)
if i < first_nucleus_pos: # satellite comes before the nucleus
result_segments.insert(current_nucleus_pos, result_segment)
current_nucleus_pos += 1
else:
result_segments.append(result_segment)
return result + '\n\t'.join(result_segments)
| 21,142
|
def plot_to_image(figure):
"""Converts the matplotlib figure to a PNG image."""
# The function is adapted from
# github.com/tensorflow/tensorboard/blob/master/docs/image_summaries.ipynb
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format="png")
# Closing the figure prevents it from being displayed directly.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
    # tf.summary.image requires 4-D inputs: [num_samples, height, width, channels].
image = tf.expand_dims(image, 0)
return image
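# Usage sketch. Assumes `io`, matplotlib.pyplot as `plt` and tensorflow as `tf`
# are imported at module level, as the function above requires.
fig = plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])
img = plot_to_image(fig)  # 4-D tensor of shape (1, height, width, 4)
# Typical use inside a summary writer context:
# tf.summary.image("example_plot", img, step=0)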
| 21,143
|
def create_connection(db_config: Box = None) -> None:
"""Register a database connection
Args:
db_config: Yapconf-generated configuration object
Returns:
None
"""
global motor_db
motor_conn = MotorClient(
host=db_config.connection.host,
port=db_config.connection.port,
)
motor_db = motor_conn[db_config.name]
| 21,144
|
async def getDiscordTwitchAlerts(cls:"PhaazebotDiscord", guild_id:str, alert_id:int=None, limit:int=0, offset:int=0) -> list:
"""
	Get a server's discord alerts. If alert_id is None, get all of them;
	otherwise get only the one associated with alert_id.
Returns a list of DiscordTwitchAlert().
"""
sql:str = """
SELECT
`discord_twitch_alert`.*,
`twitch_user_name`.`user_name` AS `twitch_channel_name`
FROM `discord_twitch_alert`
LEFT JOIN `twitch_user_name`
ON `discord_twitch_alert`.`twitch_channel_id` = `twitch_user_name`.`user_id`
WHERE `discord_twitch_alert`.`discord_guild_id` = %s"""
values:tuple = ( str(guild_id), )
if alert_id:
sql += " AND `discord_twitch_alert`.`id` = %s"
values += (alert_id,)
if limit:
sql += f" LIMIT {limit}"
if offset:
sql += f" OFFSET {offset}"
res:list = cls.BASE.PhaazeDB.selectQuery(sql, values)
if res:
return [DiscordTwitchAlert(x) for x in res]
else:
return []
| 21,145
|
def is_visible_dir(file_info):
"""Checks to see if the file is a visible directory.
@param file_info: The file to check
@type file_info: a gnomevfs.FileInfo
"""
return is_dir(file_info) and not is_hidden(file_info)
| 21,146
|
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
:param m:
:param d:
:return:
"""
# Import needed availability type modules
required_availability_modules = get_required_subtype_modules_from_projects_file(
scenario_directory=scenario_directory,
subproblem=subproblem,
stage=stage,
which_type="availability_type",
)
imported_availability_modules = load_availability_type_modules(
required_availability_modules
)
# First, add any components specific to the availability type modules
for op_m in required_availability_modules:
imp_op_m = imported_availability_modules[op_m]
if hasattr(imp_op_m, "add_model_components"):
imp_op_m.add_model_components(m, d, scenario_directory, subproblem, stage)
def availability_derate_rule(mod, g, tmp):
"""
:param mod:
:param g:
:param tmp:
:return:
"""
# TODO: make the no_availability type module, which will be the
# default for the availability type param (it will just return 1 as
# the derate)
availability_type = mod.availability_type[g]
return imported_availability_modules[
availability_type
].availability_derate_rule(mod, g, tmp)
m.Availability_Derate = Expression(m.PRJ_OPR_TMPS, rule=availability_derate_rule)
| 21,147
|
def read_file(item):
"""Read file in key path into key image
"""
item['image'] = tf.read_file(item['path'])
return item
| 21,148
|
def make_joint(withdraw, old_password, new_password):
"""Return a password-protected withdraw function that has joint access to
the balance of withdraw.
>>> w = make_withdraw(100, 'hax0r')
>>> w(25, 'hax0r')
75
>>> make_joint(w, 'my', 'secret')
'Incorrect password'
>>> j = make_joint(w, 'hax0r', 'secret')
>>> w(25, 'secret')
'Incorrect password'
>>> j(25, 'secret')
50
>>> j(25, 'hax0r')
25
>>> j(100, 'secret')
'Insufficient funds'
>>> j2 = make_joint(j, 'secret', 'code')
>>> j2(5, 'code')
20
>>> j2(5, 'secret')
15
>>> j2(5, 'hax0r')
10
>>> j2(25, 'password')
'Incorrect password'
>>> j2(5, 'secret')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> j(5, 'secret')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> w(5, 'hax0r')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> make_joint(w, 'hax0r', 'hello')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
"""
"*** YOUR CODE HERE ***"
x = withdraw(0, old_password)
if type(x) == str:
return x
else:
def withdraw_r(amount, code):
if code == new_password:
# print('password is new')
return withdraw(amount, old_password)
elif code != new_password:
return withdraw(amount, code)
return withdraw_r
| 21,149
|
def should_parse(config, file):
"""Check if file extension is in list of supported file types (can be configured from cli)"""
return file.suffix and file.suffix.lower() in config.filetypes
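# Usage sketch; the `config` object here is a hypothetical stand-in for whatever
# the CLI layer actually provides.
from pathlib import Path
from types import SimpleNamespace

cfg = SimpleNamespace(filetypes={'.py', '.md'})
print(should_parse(cfg, Path('notes.MD')))     # True (suffix match is case-insensitive)
print(should_parse(cfg, Path('archive.zip')))  # False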
| 21,150
|
def gui_main():
"""Главное окно программы"""
layout_range = 20
layout_distance = 20
    sg.theme(GUI_THEME)  # Apply the interface theme
layout_main_window = [
[sg.Text('Производитель принтера:',
size=(20, 1),
pad=((0, 0), (10, layout_range))),
sg.DropDown(print_programs,
size=(max([len(i) for i in print_programs]) + 3, 1),
key='#ProgramSelection',
enable_events=True,
pad=((0, layout_distance), (10, layout_range))),
sg.Button('Установить пакет поддержки',
size=(26, 1),
key='#InstallPack',
pad=((0, layout_distance + 151), (10, layout_range))),
sg.Button('Шрифт InconsolataCyr.ttf',
size=(24, 1),
key='#InstallFont',
pad=((0, 0), (10, layout_range)))
],
[sg.Text('Файл данных:',
size=(20, 1),
pad=((0, 0), (0, layout_range))),
sg.InputText('',
size=(93, 1),
key='#FilePath',
pad=((0, layout_distance + 2), (0, layout_range))),
sg.FileBrowse('Открыть',
size=(8, 1),
key='#OpenFile',
target='#FilePath',
pad=((2, 0), (0, layout_range)))
],
[sg.Button('Обработать',
size=(15, 2),
key='#ProcFile',
pad=((0, 33), (0, 10))),
sg.Button('Импорт',
size=(9, 2),
key='#Import',
pad=((0, 473), (0, 10))),
sg.Button('Справка',
size=(9, 2),
key='#Man',
pad=((0, 33), (0, 10))),
sg.Button('Выход',
size=(10, 2),
key='#Exit',
pad=((0, 0), (0, 10)))
]
]
window = sg.Window('{0}, версия {1}'.format(__progName__,
__version__),
layout_main_window)
while True:
event, values = window.read()
print(event, values)
if event in (None, '#Exit'):
break
if event == '#ProgramSelection':
if not prog_installed_check(values['#ProgramSelection']):
prog_installed_flag = False
sg.PopupError(
"""Программное обеспечение для печати не установлено""",
title='Ошибка!',
keep_on_top=True)
else:
prog_installed_flag = True
import_file_path = os.path.join(
gpd('Программы',
'KEY',
values['#ProgramSelection']
)['data_path'],
gpd('Программы',
'KEY',
values['#ProgramSelection']
)['transfer_file_name'])
if not pack_installed_check(values['#ProgramSelection']):
pack_installed_flag = False
sg.PopupError(
"""Пакет поддержки программы печати не установлен или повреждён.
Для установки пакета нажмите кнопку \"Установить пакет поддержки\"""",
title='Ошибка!',
keep_on_top=True)
else:
pack_installed_flag = True
elif event == '#InstallPack':
if values['#ProgramSelection'] == '':
sg.Popup(
"""Выберите производителя принтера""",
title='Ошибка!',
keep_on_top=True)
elif not prog_installed_flag:
sg.PopupError(
"""Программное обеспечение для печати не установлено""",
title='Ошибка!',
keep_on_top=True)
else:
if install_pack(values['#ProgramSelection']):
sg.Popup(
"""Пакет поддержки программы печати успешно установлен.""",
title='Успех!',
keep_on_top=True)
pack_installed_flag = True
else:
sg.PopupError(
"""Не удалось установить пакет поддержки программы печати""",
title='Ошибка!',
keep_on_top=True)
elif event == '#InstallFont':
fontfile = gpd('Пути', 'KEY', 'VALUE')['font']
if os.path.exists(fontfile):
os.startfile(fontfile)
else:
sg.PopupError(
"""Не найден файл шрифта. Переустановите программу MarkV""",
title='Ошибка!',
keep_on_top=True)
elif event == '#ProcFile':
if values['#ProgramSelection'] == '':
sg.Popup(
"""Выберите производителя принтера""",
title='Ошибка!',
keep_on_top=True)
elif not prog_installed_flag:
sg.PopupError(
"""Программное обеспечение для печати не установлено""",
title='Ошибка!',
keep_on_top=True)
elif values['#FilePath'] == '':
sg.Popup(
"""Выберите файл данных маркировки""",
title='Ошибка!',
keep_on_top=True)
elif not os.path.exists(values['#FilePath']):
sg.PopupError(
"""Указанный файл данных не найден""",
title='Ошибка!',
keep_on_top=True)
else:
# try:
ret = proc_mark_file(values['#FilePath'],
import_file_path)
# except:
# ret = 'Неизвестная ошибка формата данных'
if ret == 0:
if sg.PopupYesNo(
"""Файл данных успешно обработан.\n
Сформирован и сохранён трансферный файл.\n\n
Открыть трансферный файл для просмотра?\n""",
title='Успех!',
keep_on_top=True) == 'Yes':
                        # Open the transfer file for review
os.startfile(import_file_path)
else:
sg.Popup(ret, title='Ошибка!', keep_on_top=True)
elif event == '#Import':
if values['#ProgramSelection'] == '':
sg.Popup(
"""Выберите производителя принтера""",
title='Ошибка!',
keep_on_top=True)
elif not prog_installed_flag:
sg.PopupError(
"""Программное обеспечение для печати не установлено""",
title='Ошибка!',
keep_on_top=True)
elif not pack_installed_flag:
sg.PopupError(
"""Пакет поддержки программы печати не установлен или повреждён.
Для установки нажмите кнопку \"Установить пакет поддержки\"""",
title='Ошибка!',
keep_on_top=True)
elif not os.path.exists(import_file_path):
sg.PopupError(
"""Не найден трансферный файл.
Чтобы его сформировать выберите файл данных и
нажмите кнопку \'Обработать\"""",
title='Ошибка!',
keep_on_top=True)
else:
prog_string = '"{0}"'.format(gpd('Программы',
'KEY',
values['#ProgramSelection']
)['program_path'])
param_string = '"{0}"'.format(gpd('Программы',
'KEY',
values['#ProgramSelection']
)['param_path'])
call_string = ' '.join([prog_string, param_string])
print(call_string)
subprocess.Popen(call_string,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
elif event == '#Man':
manfile = gpd('Пути', 'KEY', 'VALUE')['man']
if os.path.exists(manfile):
os.startfile(manfile)
else:
sg.PopupError(
"""Не найден файл справки. Переустановите программу MarkV""",
title='Ошибка!',
keep_on_top=True)
# subprocess.call('Acrobat.exe /A page=3 {0}'.format(manfile))
window.close()
| 21,151
|
def _get_pipeline_per_subband(subband_name: str):
"""
Constructs a pipeline to extract the specified subband related features.
Output:
sklearn.pipeline.Pipeline object containing all steps to calculate time-domain feature on the specified subband.
"""
freq_range = FREQ_BANDS_RANGE[subband_name]
order = FREQ_BANDS_ORDERS[subband_name]
assert len(
freq_range) == 2, "Frequency range must only have 2 elements: [lower bound frequency, upper bound frequency]"
bounds = [freq / NYQUIST_FREQ for freq in freq_range]
b, a = butter(order, bounds, btype='bandpass')
def filter_epochs_in_specified_subband(epochs):
return epochs.copy().filter(
l_freq=bounds[0],
h_freq=bounds[1],
method='iir',
n_jobs=1,
iir_params={
'a': a,
'b': b
}, verbose=False)
return Pipeline([
('filter', FunctionTransformer(filter_epochs_in_specified_subband, validate=False)),
('get-values', FunctionTransformer(get_data_from_epochs, validate=False)),
('mean-energy', FunctionTransformer(
get_transformer(_get_signal_mean_energy), validate=True
)),
])
| 21,152
|
def add_default_area(apps, schema_editor):
"""Use a dummy area for progress with no areas
"""
Area = apps.get_model('goals', 'Area')
Progress = apps.get_model('goals', 'Progress')
prgs = Progress.objects.filter(area=None)
if len(prgs):
area = Area.objects.first()
for progress in prgs:
progress.area = area
progress.save()
| 21,153
|
def get_credentials_interactively() -> Credentials:
""" Gets credentials for the bl interactively
"""
return ("placeholder-user", "placeholder-pass")
| 21,154
|
def reynolds(find="Re", printEqs=True, **kwargs):
"""
Reynolds Number = Inertia / Viscosity
"""
eq = list()
eq.append("Eq(Re, rho * U * L / mu)")
return solveEqs(eq, find=find, printEq=printEqs, **kwargs)
| 21,155
|
def data_app():
""" Data Processer and Visualizer """
st.title("Data Cake")
st.subheader("A to Z Data Analysis")
file = ['./dataset/Ac1',[0,1]]
def file_selector():
filename = st.file_uploader("Upload Excel File", type=['xls','xlsx'])
if filename is not None:
sheetnames = pd.ExcelFile(filename).sheet_names
sheet = st.selectbox("Sheet Sheet", sheetnames)
return [filename, sheet]
file = file_selector()
# Read Data
try :
df = pd.read_excel(file[0], sheet_name = file[1])
except Exception as e:
st.info("Please upload Excel file")
# Show Datas
try:
if st.checkbox("Show Dataset"):
number = st.number_input("Number of Rows to View",5,10)
st.dataframe(df.head(number))
except Exception as e:
st.info("Please upload Excel file")
# Show Columns
try:
if st.button("Column Names"):
st.write(df.columns)
except Exception as e:
st.info("Please upload Excel file")
# Show Shape
try:
if st.checkbox("Shape of Dataset"):
st.write(df.shape)
except Exception as e:
st.info("Please upload Excel file")
# Select Columns
try:
if st.checkbox("Select Columns To Show"):
all_columns = df.columns.tolist()
selected_columns = st.multiselect("Select",all_columns)
new_df = df[selected_columns]
st.dataframe(new_df)
except Exception as e:
st.info("Please upload Excel file")
# Show Datatypes
try:
if st.button("Data Types"):
st.write(df.dtypes)
except Exception as e:
st.info("Please upload Excel file")
# Show Summary
try:
if st.checkbox("Summary"):
st.write(df.describe().T)
except Exception as e:
st.info("Please upload Excel file")
## Plot and Visualization
st.subheader("Data Visualization")
# Correlation
# Seaborn Plot
if st.checkbox("Correlation Plot[Seaborn]"):
st.write(sns.heatmap(df.corr(),annot=True))
st.pyplot()
# Pie Chart
if st.checkbox("Pie Plot"):
all_columns_names = df.columns.tolist()
if st.button("Generate Pie Plot"):
st.success("Generating A Pie Plot")
st.write(df.iloc[:,-1].value_counts().plot.pie(autopct="%1.1f%%"))
st.pyplot()
# Count Plot
if st.checkbox("Plot of Value Counts"):
st.text("Value Counts By Target")
all_columns_names = df.columns.tolist()
primary_col = st.selectbox("Primary Columm to GroupBy",all_columns_names)
selected_columns_names = st.multiselect("Select Columns",all_columns_names)
if st.button("Plot"):
st.text("Generate Plot")
if selected_columns_names:
vc_plot = df.groupby(primary_col)[selected_columns_names].count()
else:
vc_plot = df.iloc[:,-1].value_counts()
st.write(vc_plot.plot(kind="bar"))
st.pyplot()
#Contour Plot
if st.checkbox("Contour Plot "):
st.text("3D Contour Plot")
all_columns_names = df.columns.tolist()
X = st.selectbox("Select X axis",all_columns_names)
Y = st.selectbox("Select Y axis",all_columns_names,index = 1)
VS = st.selectbox("Select Z axis",all_columns_names,index =2)
Z_F = df.pivot_table(index=X, columns=Y, values=VS).T.values
X_unique = np.sort(df[X].unique())
Y_unique = np.sort(df[Y].unique())
X_F, Y_F = np.meshgrid(X_unique, Y_unique)
pd.DataFrame(Z_F).round(3)
pd.DataFrame(X_F).round(3)
pd.DataFrame(Y_F).round(3)
fig,ax=plt.subplots(1,1)
cp = ax.contourf(X_F, Y_F, Z_F)
fig.colorbar(cp) # Add a colorbar to a plot
st.pyplot(fig=fig)
# Customizable Plot
try:
st.subheader("Customizable Plot")
all_columns_names = df.columns.tolist()
type_of_plot = st.selectbox("Select Type of Plot",["area","bar","line","hist","box","kde"])
selected_columns_names = st.multiselect("Select Columns To Plot",all_columns_names)
if st.button("Generate Plot"):
st.success("Generating Customizable Plot of {} for {}".format(type_of_plot,selected_columns_names))
# Plot By Streamlit
if type_of_plot == 'area':
cust_data = df[selected_columns_names]
st.area_chart(cust_data)
elif type_of_plot == 'bar':
cust_data = df[selected_columns_names]
st.bar_chart(cust_data)
elif type_of_plot == 'line':
cust_data = df[selected_columns_names]
st.line_chart(cust_data)
# Custom Plot
elif type_of_plot:
cust_plot= df[selected_columns_names].plot(kind=type_of_plot)
st.write(cust_plot)
st.pyplot()
if st.button("Ready to ML !"):
st.balloons()
except:
st.info("Please upload Excel file")
st.sidebar.header("Data Cake")
st.sidebar.info("Built by Veera Ragavan")
| 21,156
|
def merge_clusters_for_nodes(nodes_to_merge: List[NNCFNode], clusterization: Clusterization):
"""
Merges clusters to which nodes from nodes_to_merge belongs.
    :param nodes_to_merge: Nodes whose containing clusters should be merged.
:param clusterization: Clusterization of nodes to work with.
"""
if len(nodes_to_merge) <= 1:
return
# Will merge cluster with highest importance with others pairwise
max_importance_node_id = None
max_importance = 0
for node in nodes_to_merge:
importance = clusterization.get_cluster_containing_element(node.node_id).importance
if importance > max_importance:
max_importance_node_id = node.node_id
max_importance = importance
max_importance_cluster_id = clusterization.get_cluster_containing_element(max_importance_node_id).id
for node in nodes_to_merge:
if node.node_id != max_importance_node_id:
current_node_cluster_id = clusterization.get_cluster_containing_element(node.node_id).id
if current_node_cluster_id != max_importance_cluster_id:
clusterization.merge_clusters(max_importance_cluster_id, current_node_cluster_id)
| 21,157
|
def derivative(x, y, order=1):
"""Returns the derivative of y-coordinates as a function of x-coodinates.
Args:
x (list or array): 1D array x-coordinates.
y (list or array): 1D array y-coordinates.
order (number, optional): derivative order.
Returns:
x and y arrays.
"""
    if order < 1:
        raise ValueError('order must be a positive integer.')
x = np.array(x)
y = np.array(y)
x_diff = np.diff(x)
y_diff = np.diff(y)/x_diff
for i in range(order-1):
y_diff = np.diff(y_diff)/x_diff[:len(x_diff)-(i+1)]
for i in range(order):
x = moving_average(x, n=2)
return x, y_diff
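
# A minimal usage sketch, assuming numpy is imported as np and the moving_average
# helper referenced above is defined in this module.
x_demo = np.linspace(0.0, 2.0 * np.pi, 200)
y_demo = np.sin(x_demo)
x_mid, dy_dx = derivative(x_demo, y_demo, order=1)
print(np.allclose(dy_dx, np.cos(x_mid), atol=1e-3))  # the derivative of sin is cos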
| 21,158
|
def get_ff_parameters(wc_params, molecule=None, components=None):
"""Get the parameters for ff_builder."""
ff_params = {
'ff_framework': wc_params['ff_framework'],
'ff_molecules': {},
'shifted': wc_params['ff_shifted'],
'tail_corrections': wc_params['ff_tail_corrections'],
'mixing_rule': wc_params['ff_mixing_rule'],
'separate_interactions': wc_params['ff_separate_interactions']
}
if molecule is not None:
ff_params['ff_molecules'] = {molecule['name']: molecule['forcefield']}
if components is not None:
for value in components.get_dict().values():
ff = value['forcefield'] #pylint: disable=invalid-name
ff_params['ff_molecules'][value['name']] = ff
return Dict(dict=ff_params)
| 21,159
|
def healpix_header_odict(nside,nest=False,ordering='RING',coord=None, partial=True):
"""Mimic the healpy header keywords."""
hdr = odict([])
hdr['PIXTYPE']=odict([('name','PIXTYPE'),
('value','HEALPIX'),
('comment','HEALPIX pixelisation')])
    ordering = 'NEST' if nest else 'RING'  # note: the `nest` flag overrides the `ordering` argument
hdr['ORDERING']=odict([('name','ORDERING'),
('value',ordering),
('comment','Pixel ordering scheme, either RING or NESTED')])
hdr['NSIDE']=odict([('name','NSIDE'),
('value',nside),
('comment','Resolution parameter of HEALPIX')])
if coord:
hdr['COORDSYS']=odict([('name','COORDSYS'),
('value',coord),
('comment','Ecliptic, Galactic or Celestial (equatorial)')])
if not partial:
hdr['FIRSTPIX']=odict([('name','FIRSTPIX'),
('value',0),
('comment','First pixel # (0 based)')])
hdr['LASTPIX']=odict([('name','LASTPIX'),
('value',hp.nside2npix(nside)-1),
('comment','Last pixel # (0 based)')])
hdr['INDXSCHM']=odict([('name','INDXSCHM'),
('value','EXPLICIT' if partial else 'IMPLICIT'),
('comment','Indexing: IMPLICIT or EXPLICIT')])
hdr['OBJECT']=odict([('name','OBJECT'),
('value','PARTIAL' if partial else 'FULLSKY'),
('comment','Sky coverage, either FULLSKY or PARTIAL')])
return hdr
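
# A minimal usage sketch, assuming odict is the OrderedDict alias used above;
# healpy (hp) is only required when partial=False.
hdr = healpix_header_odict(nside=64, coord='C')
for card in hdr.values():
    print(card['name'], card['value'], card['comment'])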
| 21,160
|
def hello():
"""Test endpoint"""
return {'hello': 'world'}
| 21,161
|
def get_config_data(func_name: str, config_file_name: str = "config.json")\
-> tuple[str, str, str, str] | str | tuple[str, str]:
"""Extracts the data pertaining to the covid_news_handling module from the
provided config file.
    A try/except is used to get the encoding style and to check that a valid
    config file is present. If it isn't, the event is logged and the dashboard
    is shut down. Otherwise, the encoding style is extracted (the data is
    loaded as JSON and the value of the 'encoding' key is read). The config
    file is then reopened with that encoding, loaded as JSON, and the data
    under the 'covid_news_handling' key is extracted with the required values.
    A completeness check ensures all values are present in the config file;
    if they aren't, the event is logged and the dashboard is shut down.
    Otherwise each value is returned to the respective calling function.
Args:
func_name (str): The name of the function data is being returned to,
given as a string. This allows for certain values to be returned
to certain functions (no wasted variables).
config_file_name (str): The name of the config file data is being taken
from, given as a string. This allows for data in the config file
to be used throughout the module and to customise the program.
Returns:
tuple[str, str, str, str]: (queries, language, sort_by, news_api_key).
The parameters to be used in the news API call, returned as a
tuple of strings. This allows the user to change the parameters
used within the news API call (further customise the dashboard).
str: displayed_content. The data from the article to be displayed in
the content section of the news article widgets on the dashboard.
This again lets the user customise the news section of the
dashboard.
tuple[str, str]: (num_displayed_articles, no_articles_message). The
number of news articles to display on each page and the message
that is displayed when there are no unremoved articles remaining,
returned as a tuple of strings, allowing the user to change the
number of displayed articles and the no articles message via the
config file.
"""
logging.debug("Entering the get_config_data function.")
# Get the encoding style to be used throughout the module.
    try:
        get_encoding = open(config_file_name, 'r')
    except FileNotFoundError:
        logging.critical("Config file missing or cannot be located.")
        # Without a config file the dashboard cannot continue, so stop here.
        raise SystemExit("Config file missing or cannot be located.")
# Loads the json data and gets the value of the 'encoding' key.
data = json.load(get_encoding)
program_encoding = data['encoding']
get_encoding.close()
# Opens the file with the given encoding to get the rest of the data.
with open(config_file_name, 'r', encoding=program_encoding) as\
configuration_file:
data = json.load(configuration_file)
json_news_data = data['covid_news_handling']
queries = json_news_data['queries']
language = json_news_data['language']
sort_by = json_news_data['sort_by']
displayed_content = json_news_data['displayed_content']
num_displayed_articles = json_news_data['num_displayed_articles']
no_articles_message = json_news_data['no_articles_message']
news_api_key = json_news_data['news_api_key']
    # Ensures a complete config file is provided before progressing.
    # (Check every value explicitly; the previous chained `and ... is not None`
    # only tested the final value in the chain.)
    if all(value is not None for value in (
            queries, language, sort_by, displayed_content,
            num_displayed_articles, no_articles_message, news_api_key)):
# Returns different values depending on the function calling it.
if func_name == 'news_API_request':
logging.info("Exiting get_config_data function as intended")
return (queries, language, sort_by, news_api_key)
if func_name == 'news_processor':
logging.info("Exiting get_config_data function as intended")
return displayed_content
if func_name == 'remove_and_limit_news_articles':
logging.info("Exiting get_config_data function as intended")
return (num_displayed_articles, no_articles_message)
logging.error("Incomplete config file provided, dashboard stopped.")
| 21,162
|
def test_get_sorted_filenames():
"""
Checking sorted filenames returned by util function
"""
with TempDirectory() as tempdir:
tempdir.write((tempdir.path + "/a.txt"), (bytes(1)))
tempdir.write((tempdir.path + "/b.txt"), (bytes(1)))
tempdir.write((tempdir.path + "/c.txt"), (bytes(1)))
expected = [
tempdir.path + "/a.txt",
tempdir.path + "/b.txt",
tempdir.path + "/c.txt",
]
actual = util.get_sorted_filenames_in_dir(tempdir.path, "txt")
assert expected == actual
| 21,163
|
def compare_gaussian_classifiers():
"""
Fit both Gaussian Naive Bayes and LDA classifiers on both
gaussians1 and gaussians2 datasets
"""
for f in ["gaussian1.npy", "gaussian2.npy"]:
# Load dataset
X, y = load_dataset(f"../datasets/{f}")
# Fit models and predict over training set
naive = GaussianNaiveBayes()
lda = LDA()
lda.fit(X, y)
naive.fit(X, y)
naive_pred = naive.predict(X)
lda_pred = lda.predict(X)
# Plot a figure with two subplots, showing the Gaussian Naive
# Bayes predictions on the left and LDA predictions
# on the right. Plot title should specify dataset used and
# subplot titles should specify algorithm and accuracy
# Create subplots
from IMLearn.metrics import accuracy
fig = make_subplots(1, 2,
subplot_titles=[f"Gaussian Naive Bayes, accuracy "
f"{accuracy(y, naive_pred)}",
f"LDA, accuracy "
f"{accuracy(y, lda_pred)}"])
fig.update_layout(title={"text": f})
# naive
fig.add_scatter(x=X[:, 0], y=X[:, 1], mode="markers",
marker=dict(color=naive_pred, symbol=y),
text=f"Gaussian Naive Bayes, accuracy "
f"{accuracy(y, naive_pred)}", row=1,
col=1)
# LDA
fig.add_scatter(x=X[:, 0], y=X[:, 1], mode="markers",
marker=dict(color=lda_pred, symbol=y), xaxis="x",
row=1,
col=2)
fig.update_xaxes(title_text="Feature 1", row=1, col=1)
fig.update_xaxes(title_text="Feature 1", row=1, col=2)
fig.update_yaxes(title_text="Feature 2", row=1, col=1)
fig.update_yaxes(title_text="Feature 2", row=1, col=2)
        # Add `X` dots specifying fitted Gaussians' means
        # (naive means on the left/col=1 panel, LDA means on the right/col=2 panel,
        # consistent with the predictions and ellipses above)
        fig.add_scatter(x=naive.mu_[:, 0], y=naive.mu_[:, 1], mode="markers",
                        marker=dict(color="black", symbol="x"),
                        row=1, col=1)
        fig.add_scatter(x=lda.mu_[:, 0], y=lda.mu_[:, 1], mode="markers",
                        marker=dict(color="black", symbol="x"),
                        row=1, col=2)
# Add ellipses depicting the covariances of the fitted Gaussians
fig.add_trace(get_ellipse(lda.mu_[0], lda.cov_), col=2, row=1)
fig.add_trace(get_ellipse(lda.mu_[1], lda.cov_), col=2, row=1)
fig.add_trace(get_ellipse(lda.mu_[2], lda.cov_), col=2, row=1)
fig.add_trace(get_ellipse(naive.mu_[0], np.diag(naive.vars_[0])),
col=1, row=1)
fig.add_trace(get_ellipse(naive.mu_[1], np.diag(naive.vars_[1])),
col=1, row=1)
fig.add_trace(get_ellipse(naive.mu_[2], np.diag(naive.vars_[2])),
col=1, row=1)
fig.show()
| 21,164
|
def permute_channels(n_channels, keep_nbr_order=True):
"""Permute the order of neighbor channels
Args:
n_channels: the total number of channels
keep_nbr_order: whether to keep the relative order of neighbors
if true, only do random rotation and flip
if false, random permutation
"""
ch_idx = np.arange(1, n_channels)
if keep_nbr_order:
# rotate and flip
ch_idx = np.roll(ch_idx, np.random.randint(n_channels-1))
if np.random.randint(2) == 1:
ch_idx = ch_idx[::-1]
else:
# random permute
np.random.shuffle(ch_idx)
ch_idx = np.concatenate([[0], ch_idx])
return ch_idx
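
# A minimal usage sketch, assuming numpy is imported as np. Channel 0 always stays first.
np.random.seed(0)
print(permute_channels(6))                        # rotated (and possibly flipped) neighbor order
print(permute_channels(6, keep_nbr_order=False))  # fully shuffled neighbor order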
| 21,165
|
def checkHdf5CorrelationNan(frags_dict, fragList, atmList, jsonfile):
"""Check cor energies are not error. Cor energies that were not calculated are 0.0 and nan if error."""
with open('correlation-nan.txt', 'w') as w:
for key, dict_ in frags_dict.items():
if np.isnan(dict_['os']):
w.write(f"{key}\n")
geometryFromListIds(key.split('-'), fragList, atmList, jsonfile, newDir="nan-cor")
| 21,166
|
def test_merging_recipes():
"""Test recipes can be merged and merging results in a valid minimal DAG."""
A, B = generate_recipes()
# Merging empty recipes returns original recipe
C = A.merge(None)
assert A == C
C = workflows.recipe.Recipe().merge(A)
assert A == C
C = A.merge(workflows.recipe.Recipe())
assert A == C
C = A.merge(B)
# Merge function should not modify original recipes
    assert (A, B) == generate_recipes()
# Result will have 6 nodes: start, error, A1, A2, B1, B2
assert len(C.recipe) == 6
# Start node contains two different pointers to 'A service'
assert "start" in C.recipe
assert len(C.recipe["start"]) == 2
assert C.recipe["start"][0] == (1, {})
assert C.recipe["start"][1] == (mock.ANY, {})
assert C.recipe["start"][0][0] != C.recipe["start"][1][0]
assert C.recipe[C.recipe["start"][0][0]]["service"] == "A service"
assert C.recipe[C.recipe["start"][1][0]]["service"] == "A service"
# Error node points to 'B service'
assert "error" in C.recipe
assert len(C.recipe["error"]) == 1
assert C.recipe[C.recipe["error"][0]]["service"] == "B service"
# There is a 'C service'
assert any(
map(
lambda x: (isinstance(x, dict) and x.get("service") == "C service"),
C.recipe.values(),
)
)
| 21,167
|
def from_file(file,typcls):
"""Parse an instance of the given typeclass from the given file."""
s = Stream(file)
return s.read_value(typcls._ep_typedesc)
| 21,168
|
def test_supersmoother_multiband(N=100, period=1):
"""Test that results are the same with/without filter labels"""
t, y, dy = _generate_data(N, period)
periods = np.linspace(period / 2, period * 2, 100)
model = SuperSmoother()
P_singleband = model.fit(t, y, dy).score(periods)
filts = np.ones(N)
model_mb = SuperSmootherMultiband()
P_multiband = model_mb.fit(t, y, dy, filts).score(periods)
assert_allclose(P_multiband, P_singleband)
tfit = [1.5, 2, 2.5]
assert_allclose(model.predict(tfit, period=period),
model_mb.predict(tfit, 1, period=period))
assert_raises(ValueError, model_mb.predict, tfit, 2)
| 21,169
|
def read_test_ids():
"""
Read sample submission file, list and return all test image ids.
"""
df_test = pd.read_csv(SAMPLE_SUBMISSION_PATH)
ids_test = df_test['img'].map(lambda s: s.split('.')[0])
return ids_test
| 21,170
|
def chuseok(year=None):
"""
    :param year: int
:return: Thanksgiving Day of Korea
"""
year = year if year else _year
return LunarDate(year, 8, 15).toSolarDate()
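
# A minimal usage sketch, assuming the LunarDate import used above: Chuseok falls on the
# 15th day of the 8th lunar month; for 2021 this resolves to a solar date in late September.
print(chuseok(2021))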
| 21,171
|
def _run():
"""Makes event-attribution schematics for 2019 tornado-prediction paper.
This is effectively the main method.
"""
file_system_utils.mkdir_recursive_if_necessary(
directory_name=OUTPUT_DIR_NAME)
# Interpolation with merger.
figure_object, axes_object = _plot_interp_two_times(
storm_object_table=_get_data_for_interp_with_merger()[0],
tornado_table=_get_data_for_interp_with_merger()[1],
legend_font_size=SMALL_LEGEND_FONT_SIZE, legend_position_string='upper right'
)
axes_object.set_title('Interpolation with merger')
this_file_name = '{0:s}/interp_with_merger_standalone.jpg'.format(
OUTPUT_DIR_NAME)
print('Saving figure to: "{0:s}"...'.format(this_file_name))
figure_object.savefig(
this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
plotting_utils.label_axes(axes_object=axes_object, label_string='(a)')
panel_file_names = ['{0:s}/interp_with_merger.jpg'.format(OUTPUT_DIR_NAME)]
print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
figure_object.savefig(
panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
pyplot.close(figure_object)
# Interpolation with split.
figure_object, axes_object = _plot_interp_two_times(
storm_object_table=_get_data_for_interp_with_split()[0],
tornado_table=_get_data_for_interp_with_split()[1],
legend_font_size=DEFAULT_FONT_SIZE,
legend_position_string='upper left'
)
axes_object.set_title('Interpolation with split')
this_file_name = '{0:s}/interp_with_split_standalone.jpg'.format(
OUTPUT_DIR_NAME)
print('Saving figure to: "{0:s}"...'.format(this_file_name))
figure_object.savefig(
this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
plotting_utils.label_axes(axes_object=axes_object, label_string='(b)')
panel_file_names.append(
'{0:s}/interp_with_split.jpg'.format(OUTPUT_DIR_NAME)
)
print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
figure_object.savefig(
panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
pyplot.close(figure_object)
# Simple successors.
figure_object, axes_object = _plot_attribution_one_track(
storm_object_table=_get_track_for_simple_succ(),
plot_legend=True, plot_x_ticks=True,
legend_font_size=SMALL_LEGEND_FONT_SIZE, legend_location='lower right'
)
this_file_name = '{0:s}/simple_successors_standalone.jpg'.format(
OUTPUT_DIR_NAME)
print('Saving figure to: "{0:s}"...'.format(this_file_name))
figure_object.savefig(
this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
plotting_utils.label_axes(axes_object=axes_object, label_string='(c)')
axes_object.set_title('Linking to simple successors')
panel_file_names.append(
'{0:s}/simple_successors.jpg'.format(OUTPUT_DIR_NAME)
)
print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
figure_object.savefig(
panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
pyplot.close(figure_object)
# Simple predecessors, example 1.
figure_object, axes_object = _plot_attribution_one_track(
storm_object_table=_get_track1_for_simple_pred(),
plot_legend=True, plot_x_ticks=False,
legend_font_size=DEFAULT_FONT_SIZE, legend_location=(0.28, 0.1)
)
axes_object.set_title('Simple predecessors, example 1')
this_file_name = '{0:s}/simple_predecessors_track1_standalone.jpg'.format(
OUTPUT_DIR_NAME)
print('Saving figure to: "{0:s}"...'.format(this_file_name))
figure_object.savefig(
this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
plotting_utils.label_axes(axes_object=axes_object, label_string='(d)')
axes_object.set_title('Linking to simple predecessors, example 1')
panel_file_names.append(
'{0:s}/simple_predecessors_track1.jpg'.format(OUTPUT_DIR_NAME)
)
print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
figure_object.savefig(
panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
pyplot.close(figure_object)
# Simple predecessors, example 2.
figure_object, axes_object = _plot_attribution_one_track(
storm_object_table=_get_track2_for_simple_pred(),
plot_legend=False, plot_x_ticks=False
)
axes_object.set_title('Simple predecessors, example 2')
this_file_name = '{0:s}/simple_predecessors_track2_standalone.jpg'.format(
OUTPUT_DIR_NAME)
print('Saving figure to: "{0:s}"...'.format(this_file_name))
figure_object.savefig(
this_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
plotting_utils.label_axes(axes_object=axes_object, label_string='(e)')
axes_object.set_title('Linking to simple predecessors, example 2')
panel_file_names.append(
'{0:s}/simple_predecessors_track2.jpg'.format(OUTPUT_DIR_NAME)
)
print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1]))
figure_object.savefig(
panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight'
)
pyplot.close(figure_object)
# Concatenate all panels into one figure.
concat_file_name = '{0:s}/attribution_schemas.jpg'.format(OUTPUT_DIR_NAME)
print('Concatenating panels to: "{0:s}"...'.format(concat_file_name))
imagemagick_utils.concatenate_images(
input_file_names=panel_file_names, output_file_name=concat_file_name,
num_panel_rows=2, num_panel_columns=3)
imagemagick_utils.resize_image(
input_file_name=concat_file_name, output_file_name=concat_file_name,
output_size_pixels=CONCAT_FIGURE_SIZE_PX)
| 21,172
|
def plot_order(generation_idx, obs, out_path=None):
"""Plot generation coordinate list. A star on the curve
denotes the pixel generated last. obs is a three-tuple of input image dimensions,
(input-channels-unused, num_rows, num_cols)"""
plt.figure(figsize=(3, 3))
plt.hlines(np.arange(-1, obs[1])+0.5, xmin=-0.5, xmax=obs[2]-0.5, alpha=0.5)
plt.vlines(np.arange(-1, obs[2])+0.5, ymin=-0.5, ymax=obs[1]-0.5, alpha=0.5)
rows, cols = zip(*generation_idx)
plt.plot(cols, rows, color="r")
plt.scatter([cols[-1]], [rows[-1]], marker="*", s=100, c="k")
    plt.xticks(np.arange(obs[2]))  # x axis indexes columns
plt.axis("equal")
plt.gca().invert_yaxis()
if out_path:
plt.savefig(out_path)
else:
plt.show()
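
# A minimal usage sketch: a hypothetical raster-scan generation order over a
# 1-channel 4x6 image, assuming matplotlib/numpy are imported as above.
obs_demo = (1, 4, 6)  # (channels, num_rows, num_cols)
raster_order = [(r, c) for r in range(obs_demo[1]) for c in range(obs_demo[2])]
plot_order(raster_order, obs_demo)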
| 21,173
|
def distribute(iterable, layout: ModelLayout):
"""
Of each group of layout.n_replicas successive items from the iterable, pick the one with index
`layout.replica_idx`.
Makes sure that the underlying iterator is advanced at the same pace no matter what replica_idx is.
"""
it = iter(iterable)
try:
while True:
for i in range(layout.replica_idx):
next(it)
ret = next(it)
for i in range(layout.n_replicas - layout.replica_idx - 1):
next(it)
yield ret
except StopIteration:
return
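
# A minimal usage sketch: ModelLayout is stood in for by a simple namespace exposing
# the two attributes this generator actually uses (a hypothetical stand-in).
from types import SimpleNamespace

layout_demo = SimpleNamespace(n_replicas=3, replica_idx=1)
print(list(distribute(range(10), layout_demo)))  # -> [1, 4, 7]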
| 21,174
|
def DELETE(request):
"""Delete a user's authorization level over a simulation."""
# Make sure required parameters are there
try:
request.check_required_parameters(
path={
'simulationId': 'int',
'userId': 'int'
}
)
except exceptions.ParameterError as e:
return Response(400, e.message)
# Instantiate an Authorization
authorization = Authorization.from_primary_key((
request.params_path['userId'],
request.params_path['simulationId']
))
# Make sure this Authorization exists in the database
if not authorization.exists():
return Response(404, '{} not found.'.format(authorization))
# Make sure this User is allowed to delete this Authorization
if not authorization.google_id_has_at_least(request.google_id, 'OWN'):
return Response(403, 'Forbidden from deleting {}.'.format(authorization))
# Delete this Authorization
authorization.delete()
return Response(
200,
'Successfully deleted {}.'.format(authorization),
authorization.to_JSON()
)
| 21,175
|
def process_query(data):
"""
Concat query, question, and narrative then 'preprocess'
    :param data: a dataframe with queries in rows; query, question, and narrative in columns
:return: 2d list of tokens (rows: queries, columns: tokens)
"""
lst_index = []
lst_words = []
for index, row in data.iterrows():
tmp = preprocess(row["query"] +" "+ row["question"]+ " "+row["narrative"])
lst_words.append(tmp)
lst_index.append(row["number"])
return lst_words
| 21,176
|
def test_ssl_cert_search_command_success(mocker_http_request, client):
"""
When "ssl-cert-search" command executes successfully then context output and response should match.
"""
from PassiveTotal_v2 import ssl_cert_search_command
# Fetching expected raw response from file
with open('test_data/SSL/ssl_cert_resp.json', encoding='utf-8') as f:
json_file = json.load(f)
expected_res = json_file.get('success')
mocker_http_request.return_value = expected_res
# Fetching expected entry context details from file
with open("test_data/SSL/ssl_cert_ec.json", encoding='utf-8') as f:
expected_ec = json.load(f)
# Fetching expected entry context details from file
with open("test_data/SSL/ssl_cert_hr.md") as f:
expected_hr = f.read()
result = ssl_cert_search_command(client, SSL_ARGS)
assert result.raw_response == expected_res
assert result.outputs == expected_ec
assert result.readable_output == expected_hr
assert result.outputs_key_field == 'sha1'
assert result.outputs_prefix == 'PassiveTotal.SSL'
| 21,177
|
def parameter_parser():
"""
A method to parse up command line parameters.
The default hyperparameters give a high performance model without grid search.
"""
parser = argparse.ArgumentParser(description="Run SimGNN.")
parser.add_argument("--dataset",
nargs="?",
default="AIDS700nef", # AIDS700nef LINUX IMDBMulti
help="Dataset name. Default is AIDS700nef")
parser.add_argument("--gnn-operator",
nargs="?",
default="gin", # gcn gin gat
help="Type of GNN-Operator. Default is gcn")
parser.add_argument("--epochs",
type=int,
default=350,
help="Number of training epochs. Default is 350.")
parser.add_argument("--filters-1",
type=int,
default=64,
help="Filters (neurons) in 1st convolution. Default is 64.")
parser.add_argument("--filters-2",
type=int,
default=32,
help="Filters (neurons) in 2nd convolution. Default is 32.")
parser.add_argument("--filters-3",
type=int,
                        default=32,
help="Filters (neurons) in 3rd convolution. Default is 32.")
parser.add_argument("--tensor-neurons",
type=int,
default=16,
help="Neurons in tensor network layer. Default is 16.")
parser.add_argument("--bottle-neck-neurons",
type=int,
default=16,
help="Bottle neck layer neurons. Default is 16.")
parser.add_argument("--batch-size",
type=int,
default=128,
help="Number of graph pairs per batch. Default is 128.")
parser.add_argument("--bins",
type=int,
default=16,
help="Histogram Similarity score bins. Default is 16.")
parser.add_argument("--dropout",
type=float,
default=0,
help="Dropout probability. Default is 0.")
parser.add_argument("--learning-rate",
type=float,
default=0.001,
help="Learning rate. Default is 0.001.")
parser.add_argument("--weight-decay",
type=float,
default=5 * 10 ** -4,
help="Adam weight decay. Default is 5*10^-4.")
parser.add_argument("--histogram",
dest="histogram",
action="store_true")
parser.add_argument("--diffpool",
dest="diffpool",
action="store_true",
help="Enable differentiable pooling.")
parser.add_argument("--plot-loss",
dest="plot_loss",
action="store_true")
parser.add_argument("--notify",
dest="notify",
action="store_true",
help="Send notification message when the code is finished (only Linux & Mac OS support).")
# TODO device selection
#parser.add_argument("--device",
# nargs="?",
# default='cpu', # torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
# help="Select to run with gpu or cpu. Default depends on existing CUDA installation.")
parser.add_argument("--use-lsh",
dest="use_lsh",
action="store_true",
help="Specify if LSH will be utilized. Default choice is to train WITH LSH.")
parser.set_defaults(histogram=False) # True False
parser.set_defaults(use_lsh=False) # True False
parser.set_defaults(diffpool=False) # True False
parser.set_defaults(plot_loss=False) # True False
parser.set_defaults(notify=False)
# TODO add lsh variables as arguments conditional on --use-lsh
return parser.parse_args()
| 21,178
|
def nb_view_patches(Yr, A, C, S, b, f, d1, d2, YrA=None, image_neurons=None, thr=0.99, denoised_color=None, cmap='jet'):
"""
Interactive plotting utility for ipython notebook
Args:
Yr: np.ndarray
movie
        A,C,b,f: np.ndarrays
            outputs of matrix factorization algorithm
        S: np.ndarray
            deconvolved neural activity (spikes) for each component
d1,d2: floats
dimensions of movie (x and y)
YrA: np.ndarray
ROI filtered residual as it is given from update_temporal_components
If not given, then it is computed (K x T)
image_neurons: np.ndarray
image to be overlaid to neurons (for instance the average)
thr: double
threshold regulating the extent of the displayed patches
denoised_color: string or None
color name (e.g. 'red') or hex color code (e.g. '#F0027F')
cmap: string
name of colormap (e.g. 'viridis') used to plot image_neurons
"""
# PREPROCESSING
nr, T = C.shape
nA2 = np.ravel(np.power(A, 2).sum(0)) if type(
A) == np.ndarray else np.ravel(A.power(2).sum(0))
b = np.squeeze(b)
f = np.squeeze(f)
if YrA is None:
Y_r = np.array(spdiags(old_div(1, nA2), 0, nr, nr) *
(A.T * np.matrix(Yr) -
(A.T * np.matrix(b[:, np.newaxis])) * np.matrix(f[np.newaxis]) -
A.T.dot(A) * np.matrix(C)) + C)
else:
Y_r = C + YrA
x = np.arange(T)
if image_neurons is None:
image_neurons = A.mean(1).reshape((d1, d2), order='F')
coors = get_contours(A, (d1, d2), thr)
cc1 = [cor['coordinates'][:, 0] for cor in coors]
cc2 = [cor['coordinates'][:, 1] for cor in coors]
c1 = cc1[0]
c2 = cc2[0]
# PLOTTING
fig, axes = plt.subplots(2)
axes[0].imshow(image_neurons, cmap = 'gray')
axes[0].set_title('Neural map')
axes[1].plot(C[0], label = 'C: raw traces', c = 'blue')
axes[1].plot(Y_r[0], label = 'Y_r: residuals', c = 'red')
axes[1].plot(S[0], label = 'S: deconvolved activity', c = 'green')
plt.legend()
axes[1].set_xlabel('t [frames]')
# WIDGETS
neuron_nr_slider = IntSlider(description = 'Neuron Number', value = 0, min = 0, max = len(C) - 1)
def neuron_nr_handler(*args):
i = neuron_nr_slider.value
axes[1].clear()
axes[1].plot(C[i], label = 'C: raw traces', c = 'blue')
axes[1].plot(Y_r[i], label = 'Y_r: residuals', c = 'red')
axes[1].plot(S[i], label = 'S: deconvolved activity', c = 'green')
plt.legend()
axes[1].set_xlabel('t [frames]')
neuron_nr_slider.observe(neuron_nr_handler, 'value')
widgets = [neuron_nr_slider]
return fig, widgets
| 21,179
|
def menu():
"""Manda el Menú \n
Opciones:
1: Añadir a un donante
2: Añadir a un donatario
3: Revisar la lista de donantes
4: Revisar la lista de donatarios
5: Realizar una transfusion
6: Estadisticas
7: Salir
Returns:
opc(num):Opcion del menu """
print("\nBienvenido a el sistema de Donacion de Sangre. Elige la accion que deseas realizar.\n1.Añadir Donante de Sangre\n2.Añadir Donatario de Sangre\n3.Revisar lista de Donantes\n4.Revisar Lista de Donatarios\n5.Realizar una transfusion\n6.Estadisticas\n7.Salir")
opc=int(input("Seleccionar: "))
return opc
| 21,180
|
def get_engine(hass, config):
"""Set up Pico speech component."""
if shutil.which("pico2wave") is None:
_LOGGER.error("'pico2wave' was not found")
return False
return PicoProvider(config[CONF_LANG])
| 21,181
|
def test_optional_fields(data, fld):
"""Verify that the FN028 item is created without error if an optional field is omitted
Arguments:
- `data`:
"""
data[fld] = None
item = FN028(**data)
assert item.project_id == data["project_id"]
| 21,182
|
def test_robot_maps(robot):
"""
Test robot maps.
Test robot maps
"""
assert(robot.maps() == [{
'active_pmapv_details': {
'active_pmapv': {
'child_seg_xfer_seq_err': 0,
'create_time': 1570282549,
'creator': 'user',
'last_user_pmapv_id': '134043T209849',
'last_user_ts': 1570282549,
'pmap_id': 'en12a9_lTglkpPqazxDWED',
'pmapv_id': '134043T209849',
'proc_state': 'OK_Processed'
},
'map_header': {
'create_time': 1570282549,
'id': 'en12a9_lTglkpPqazxDWED',
'learning_percentage': 100,
'name': 'Appartement',
'resolution': 0.10500000417232513,
'user_orientation_rad': 0.0,
'version': '134043T209849'
},
'regions': [
{
'id': '1',
'name': 'Living Room',
'region_type': 'living_room'
},
{
'id': '2',
'name': 'Kitchen',
'region_type': 'kitchen'
},
{
'id': '3',
'name': 'Foyer 1',
'region_type': 'foyer'
},
{
'id': '4',
'name': 'BedRoom 1',
'region_type': 'bedroom'
},
{
'id': '5',
'name': 'BedRoom 2',
'region_type': 'bedroom'
},
{
'id': '6',
'name': 'BathRoom',
'region_type': 'bathroom'
},
{
'id': '7',
'name': 'Custom',
'region_type': 'custom'
}
]
},
'active_pmapv_id': '134043T209849',
'create_time': 1570276098,
'last_pmapv_ts': 1571613652,
'merged_pmap_ids': [],
'pmap_id': 'en12a9_lTglkpPqazxDWED',
'robot_pmapv_id': '191020T232046',
'state': 'active',
'user_pmapv_id': '134043T209849',
'visible': True
}])
| 21,183
|
def load_cp() -> Tuple[List[str], List[List[float]]]:
"""
Loads cloud point data; target values given in Celsius
Returns:
Tuple[List[str], List[List[float]]]: (smiles strings, target values);
target values have shape (n_samples, 1)
"""
return _load_set('cp')
| 21,184
|
async def test_browse_media_single_source_no_identifier(
hass: HomeAssistant, dms_device_mock: Mock, device_source_mock: None
) -> None:
"""Test browse_media without a source_id, with a single device registered."""
# Fast bail-out, mock will be checked after
dms_device_mock.async_browse_metadata.side_effect = UpnpError
# No source_id nor media_id
with pytest.raises(BrowseError):
await media_source.async_browse_media(hass, f"media-source://{DOMAIN}")
# Mock device should've been browsed for the root directory
dms_device_mock.async_browse_metadata.assert_awaited_once_with(
"0", metadata_filter=ANY
)
# No source_id but a media_id
# media_source.URI_SCHEME_REGEX won't let the ID through to dlna_dms
dms_device_mock.async_browse_metadata.reset_mock()
with pytest.raises(BrowseError, match="Invalid media source URI"):
await media_source.async_browse_media(
hass, f"media-source://{DOMAIN}//:media-item-id"
)
assert dms_device_mock.async_browse_metadata.await_count == 0
| 21,185
|
def walk_up(directory_path):
"""
Implementation by Michele Pasin
https://gist.github.com/zdavkeos/1098474#gistcomment-2943865
Mimic os.walk, but walk 'up' instead of down the directory tree.
"""
directory_path = path.realpath(directory_path)
# get files in current dir
try:
names = os.listdir(directory_path)
except Exception as e:
print(e)
return
dirs, non_dirs = [], []
for name in names:
if path.isdir(path.join(directory_path, name)):
dirs.append(name)
else:
non_dirs.append(name)
yield directory_path, dirs, non_dirs
new_path = path.realpath(path.join(directory_path, '..'))
# see if we are at the top
if new_path == directory_path:
return
for x in walk_up(new_path):
yield x
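
# A minimal usage sketch: walk from the current directory up to the filesystem root,
# printing each directory visited (assumes this module's os / os.path imports).
for dirpath, dirnames, filenames in walk_up("."):
    print(dirpath, len(dirnames), "dirs,", len(filenames), "files")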
| 21,186
|
def look_up(f, *args, **kwargs):
"""
:param f:
:type f:
:param args:
:type args:
:param kwargs:
:type kwargs:
:return:
:rtype:"""
ag_hash = hash(args) + make_hash(kwargs)
if f in global_table:
if ag_hash in global_table[f]:
return global_table[f][ag_hash]
res = global_table[f][ag_hash] = f(*args, **kwargs)
return res
global_table[f] = {}
res = global_table[f][ag_hash] = f(*args, **kwargs)
return res
| 21,187
|
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
"""
return environ.get(key, default)
| 21,188
|
def test_list_nmtokens_min_length_nistxml_sv_iv_list_nmtokens_min_length_1_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-1-4.xml",
class_name="NistschemaSvIvListNmtokensMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 21,189
|
def read_tag(request, tid, *args, **kwargs):
"""read_tag(tid) returns ..."""
s = api.read_tag(request, tid, *args, **kwargs)
return render_to_response('read/tag.html', s)
| 21,190
|
def ber_img(original_img_bin, decoded_img_bin):
"""Compute Bit-Error-Rate (BER) by comparing 2 binary images."""
if not original_img_bin.shape == decoded_img_bin.shape:
raise ValueError('Original and decoded images\' shapes don\'t match !')
height, width, k = original_img_bin.shape
    # Count differing bits; comparing avoids the unsigned-integer underflow that
    # abs(a - b) would cause on uint8 inputs.
    errors_bits = np.count_nonzero(original_img_bin != decoded_img_bin)
    total_bits = np.prod(original_img_bin.shape)
    ber = errors_bits / total_bits
    return ber
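
# A minimal usage sketch, assuming numpy is imported as np.
img = np.random.randint(0, 2, size=(8, 8, 1), dtype=np.uint8)
corrupted = img.copy()
corrupted[0, 0, 0] ^= 1  # flip a single bit
print(ber_img(img, corrupted))  # -> 1/64 = 0.015625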
| 21,191
|
def get_fractal_patterns_WtoE_NtoS(fractal_portrait, width, height):
""" get all fractal patterns from fractal portrait, from West to East, from North to South """
fractal_patterns = []
for x in range(width):
# single fractal pattern
f_p = get_fractal_patterns_zero_amounts()
for y in range(height):
if fractal_portrait[x][y] != EMPTY_PLACE:
f_p[fractal_portrait[x][y]] += 1
if any(v > 0 for v in f_p.values()):
fractal_patterns.append(f_p)
return fractal_patterns
| 21,192
|
def test_process_entry_array_user(jobs):
"""Providing a user shorts the checks for existing jobs."""
jobs.process_entry(
{
"AllocCPUS": "1",
"Elapsed": "00:00:00",
"JobID": "14729857_[737-999]",
"JobIDRaw": "14729857",
"MaxRSS": "",
"NNodes": "1",
"REQMEM": "50Gn",
"State": "PENDING",
"TotalCPU": "00:00:00",
},
user_provided=True,
)
expected_job = Job("14729857", "14729857_[737-999]", None)
expected_job.state = "PENDING"
assert jobs.jobs == {"14729857_[737-999]": expected_job}
| 21,193
|
def rating(date=None):
"""P2peye comprehensive rating and display results.
from https://www.p2peye.com
Args:
        date: if None, download the latest data; if a string like '201812', download that month's data.
Returns:
DataFrame
"""
start = time.time()
if date is None:
date = str(pd.to_datetime(datetime.datetime.now())-pd.DateOffset(months=1))[:7].replace('-', '')
    assert (isinstance(date, str) and len(date)==6), "`date` should be a string like '201812' or None"
url_txt = 'https://raw.githubusercontent.com/Hourout/datasets/master/report/p2peye/rating/p2peye_rating'+date+'.txt'
s = requests.get(url_txt).content
data = pd.read_csv(io.StringIO(s.decode('utf-8')))
print('p2peye rating dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return data
| 21,194
|
def crop_image(src, box, expand=0):
"""Read sensor data and crop a bounding box
Args:
src: a rasterio opened path
box: geopandas geometry polygon object
expand: add padding in percent to the edge of the crop
Returns:
masked_image: a crop of sensor data at specified bounds
"""
#Read data and mask
try:
left, bottom, right, top = box.bounds
expand_width = (right - left) * expand /2
expand_height = (top - bottom) * expand / 2
        # If expand is non-negative, grow the box on all sides
if expand >= 0:
expanded_left = left - expand_width
expanded_bottom = bottom - expand_height
expanded_right = right + expand_width
expanded_top = top+expand_height
else:
#Make sure of no negative boxes
expanded_left = left+expand_width
            expanded_bottom = bottom+expand_height
expanded_right = right-expand_width
expanded_top = top-expand_height
window = rasterio.windows.from_bounds(expanded_left, expanded_bottom, expanded_right, expanded_top, transform=src.transform)
masked_image = src.read(window=window)
except Exception as e:
raise ValueError("sensor path: {} failed at reading window {} with error {}".format(src, box.bounds,e))
#Roll depth to channel last
masked_image = np.rollaxis(masked_image, 0, 3)
#Skip empty frames
if masked_image.size ==0:
raise ValueError("Empty frame crop for box {} in sensor path {}".format(box, src))
return masked_image
| 21,195
|
def add_msgpack_support(cls, ext, add_cls_methods=True):
"""Adds serialization support,
Enables packing and unpacking with msgpack with 'pack.packb' and
'pack.unpackb' methods.
If add_method then enables equality, reading and writing for the classs.
Specificly, adds methods:
bytes <- obj.to_binary()
obj <- cls.from_binary(bytes)
boolean <- obj1 == obj2
Args:
cls: class
ext: an unique code for the msgpack's Ext hook
"""
def enc(obj):
return packb(obj.__dict__)
def dec(data):
obj = cls.__new__(cls)
obj.__dict__.update(unpackb(data))
return obj
def eq(a, b):
if type(a) != type(b):
return NotImplemented
return a.__dict__ == b.__dict__
if add_cls_methods:
if cls.__eq__ is object.__eq__:
cls.__eq__ = eq
cls.to_bytes = enc
cls.from_bytes = staticmethod(dec)
_pack_reg[cls] = (ext, enc)
petlib.pack.register_coders(cls, ext, enc, dec)
| 21,196
|
def main():
"""Run the models with each preprocessor using multiple_experiments."""
for preproc in preprocs:
for model in spaces:
multiple_experiments(model, 'full', spaces[model], 5, preproc)
| 21,197
|
def test_plot_glass_brain(testdata_3d, tmpdir): # noqa:F811
"""Smoke tests for plot_glass_brain with colorbar and negative values."""
img = testdata_3d['img']
plot_glass_brain(img, colorbar=True, resampling_interpolation='nearest')
# test plot_glass_brain with negative values
plot_glass_brain(img, colorbar=True, plot_abs=False,
resampling_interpolation='nearest')
| 21,198
|
def parse_conf():
"""(Dictionary) Function that parses the config file and/or environment variables and returns dictionary."""
# Following tuple holds the configfile/env var versions of each config
ALL_CONFIGS = (
# Name of the variable in code, Path to config in the config file(section + value), name of environment variable, default value)
("data_store", "default", "path_to_result", "WEATHER_PATH_TO_RESULT", "data"),
("geocoder", "default", "geocoder", "WEATHER_GEOCODER", "YES"),
("lat", "default", "lat", "WEATHER_LAT", "0"),
("lon", "default", "lon", "WEATHER_LON", "0"),
("api_key", "credentials", "openweather_api", "")
)
# Initialize return dictionary
ret = {}
# Attempt to read config file
path_to_config = os.getenv("WEATHER_CONFIG", "weather.conf")
config = configparser.ConfigParser()
config.read(path_to_config)
debug("Config sections loaded: " + str(config.sections()))
for t in ALL_CONFIGS:
tmp_env = os.getenv(t[3])
        if tmp_env is not None:
ret[t[0]] = tmp_env
debug("Environment variable loaded for " + t[0] + " is " + str(tmp_env))
elif t[1] in config and t[2] in config[t[1]]:
debug("Config file value loaded for " + t[0] + " is " + config[t[1]][t[2]])
ret[t[0]] = config[t[1]][t[2]]
else:
debug("Couldn't not find a config file value nor Environment variable for " + t[0])
debug("Default value for " + t[0] + " is " + t[4])
ret[t[0]] = t[4]
return ret
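
# A minimal usage sketch: environment variables override the config file, so exporting
# a value before calling parse_conf is enough (assumes this module's os import and
# debug helper).
os.environ["WEATHER_LAT"] = "52.52"
os.environ["WEATHER_LON"] = "13.40"
conf = parse_conf()
print(conf["lat"], conf["lon"], conf["data_store"])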
| 21,199
|