# content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
# |---|---|---|
import time
def wait_for_job(res, ping_time=0.5):
    """
    Blocks execution and waits for an async Forest Job to complete.

    :param JobResult res: The JobResult object to wait for.
    :param ping_time: The interval (in seconds) at which to ping the server.
    :return: The completed JobResult
    """
    finished = res.is_done()
    while not finished:
        res.get()              # refresh job state from the server
        time.sleep(ping_time)  # back off before the next poll
        finished = res.is_done()
    return res
def _retr():
    """Retrieve a list of all connected spectrometers for all backends.

    Returns a dict with ``params`` and ``ids`` suitable for parametrizing
    pytest fixtures.  When no spectrometer is attached (or when running on
    CI) skip-marked placeholder params are returned instead.
    """
    params, ids = [], []
    if not _running_on_ci():
        csb_serials, psb_serials = set(), set()
        for serials, backend in [(csb_serials, csb), (psb_serials, psb)]:
            if backend is None:
                continue
            api = backend.SeaBreezeAPI()
            try:
                serials.update((d.serial_number, d.model) for d in api.list_devices())
            except Exception:
                # Device enumeration is best effort: a failing backend simply
                # contributes no serials.  (Was a bare `except:`, which would
                # also swallow KeyboardInterrupt/SystemExit.)
                pass
            finally:
                api.shutdown()
        for ser_mod in csb_serials.union(psb_serials):
            params.extend(
                [
                    (csb, ser_mod[0])
                    if ser_mod in csb_serials
                    else pytest.param((csb, None), marks=pytest.mark.skip),
                    (psb, ser_mod[0])
                    if ser_mod in psb_serials
                    else pytest.param((psb, None), marks=pytest.mark.skip),
                ]
            )
            ids.extend(
                [
                    ("cseabreeze:{}:{}".format(ser_mod[1], ser_mod[0])),
                    ("pyseabreeze:{}:{}".format(ser_mod[1], ser_mod[0])),
                ]
            )
    if not params:
        params = [
            pytest.param((csb, None), marks=pytest.mark.skip),
            pytest.param((psb, None), marks=pytest.mark.skip),
        ]
        ids = [("cseabreeze:no-spectrometer"), ("pyseabreeze:no-spectrometer")]
    return dict(params=params, ids=ids)
def get_individual_annotations(self, all=False, imported=True):
    """Return a dict mapping preferred labels to this individual's
    non-empty annotation values.

    If `all` is true, annotations with no value are included as well.
    If `imported` is true, annotations defined in imported ontologies
    are also considered.
    """
    ontology = self.namespace.ontology
    annotations = {}
    for prop in ontology.annotation_properties(imported=imported):
        annotations[get_preferred_label(prop)] = prop._get_values_for_individual(self)
    if all:
        return annotations
    return {label: values for label, values in annotations.items() if values}
def numeric_type(param):
    """Check a parameter's type.

    Returns True when *param* is a float, an int, or None; False otherwise.
    Note: booleans deliberately do NOT count (the original used exact
    ``type()`` comparison, which excludes ``bool``), and that behavior is
    preserved here.

    :param param: input param to check
    :return: bool
    """
    # `type(...) in (...)` (not isinstance) keeps bool excluded; `is None`
    # replaces the non-idiomatic `param == None`.
    return type(param) in (float, int) or param is None
def _pre_aggregate_df(df,
                      dims,
                      aggregate_dimensions,
                      show_control,
                      ctrl_id,
                      sort_by=None,
                      auto_decide_control_vals=False,
                      auto_add_description=True):
    """Process a long-format df to an appropriate format for display.

    Args:
        df: A dataframe similar to the one returned by metrics_types.as_dataframe().
        dims: The column name of slicing dimesions, can be a list or a string.
        aggregate_dimensions: If True, all dimension columns are collected into a
            'Dimensions' column, and original dimension columns are dropped.
        show_control: If False, only ratio values in non-control rows are shown.
        ctrl_id: The control experiment id(s). For single control case, it can be
            basically any type that can be used as an experiment key except dict. For
            multiple controls, it should be a dict, with keys being control ids,
            values being list of corresponding experiment id(s).
        sort_by: In the form of [{'column': 'CI-lower', 'ascending': False}},
            {'column': 'Dim_2': 'order': ['Logged-in', 'Logged-out']}]. The 'column'
            is the column to sort by, 'order' is optional and for categorical column,
            and 'ascending' is optional and default True. The result will be displayed
            in the order specified by sort_by from top to bottom.
        auto_decide_control_vals: By default, if users want to see control
            experiments, df needs to have rows for control, but the 'Value' there
            is supposed to be equal to the 'Control_Value' column in experiment rows.
            So if control rows are missing, we can use 'Control_Value' column to fill
            them. The problem is when there are multiple experiments their
            Control_Values might be different (though they shouldn't be). In that case
            we raise a warning and skip. Also if user arleady provide control rows for
            certain slices, we won't fill those slices.
        auto_add_description: If add Control/Not Control as descriptions.
    Returns:
        A pandas dataframe with stylized content for display. The display effect is
        similar to tge_estimation.display().
    Raises:
        ValueError: If metrics is not an instance of MetricsTablesByExperiments,
            MetricsTable, or MetricsPerSlice.
    """
    # Derive CI bounds from Ratio +/- half the CI range when the caller did
    # not supply them explicitly.
    if 'CI_Upper' not in df or 'CI_Lower' not in df:
        df['CI_Upper'] = df['Ratio'] + (df['CI_Range'] / 2)
        df['CI_Lower'] = df['Ratio'] - (df['CI_Range'] / 2)
    df = _add_is_control_and_control_id(df, ctrl_id)
    if auto_add_description:
        is_ctrl = df['Is_Control'] if 'Is_Control' in df else None
        if is_ctrl is not None:  # ctrl id could be 0 or ''.
            is_ctrl = ['Control' if x else 'Not Control' for x in is_ctrl]
            if 'Description' not in df:
                df['Description'] = is_ctrl
            else:
                df['Description'] = df['Description'].where(
                    df['Description'].astype(bool), is_ctrl)  # Only fills empty cells
    if show_control:
        if 'Is_Control' in df:
            # When Ratio is None, CI won't be displayed. This is intended for control.
            df.loc[df['Is_Control'], 'Ratio'] = None
    else:
        # Hide absolute values entirely and drop the control rows.
        df['Value'] = None
        if 'Is_Control' in df:
            # Make a copy to avoid "A value is trying to be set on a copy of a slice
            # from a DataFrame." warning.
            df = df[~df['Is_Control']].copy()
    if auto_decide_control_vals:
        df = add_control_rows(df, dims)
    pre_agg_df = _sorted_long_to_wide(df, dims, sort_by)
    if aggregate_dimensions:
        pre_agg_df = _merge_dimensions(pre_agg_df, dims)
    return pre_agg_df
import os
import subprocess
def RunDsymUtil(dsym_path_prefix, full_args):
    """Linker driver action for -Wcrl,dsym,<dsym-path-prefix>. Invokes dsymutil
    on the linker's output and produces a dsym file at |dsym_file| path.

    Args:
        dsym_path_prefix: string, The path at which the dsymutil output should be
            located.
        full_args: list of string, Full argument list for the linker driver.

    Returns:
        list of string, Build step outputs.

    Raises:
        ValueError: If |dsym_path_prefix| is empty.
    """
    # Idiomatic emptiness test (was `if not len(dsym_path_prefix)`).
    if not dsym_path_prefix:
        raise ValueError('Unspecified dSYM output file')
    linker_out = _FindLinkerOutput(full_args)
    base = os.path.basename(linker_out)
    dsym_out = os.path.join(dsym_path_prefix, base + '.dSYM')
    # Remove old dSYMs before invoking dsymutil.
    _RemovePath(dsym_out)
    subprocess.check_call(DSYMUTIL_INVOKE + ['-o', dsym_out, linker_out])
    return [dsym_out]
def search_data_start(file, identifier, encoding):
    """
    Return the 1-based line number of the first line containing *identifier*.

    Returns 0 when *identifier* is None, and None when the identifier is not
    found (same as the original implementation's implicit fall-through).

    :param file: path of the file to scan
    :param identifier: substring marking the start of the data, or None
    :param encoding: text encoding used to open the file
    """
    if identifier is None:
        return 0
    # `with` guarantees the handle is closed even when the identifier is
    # missing (the original leaked the handle in that case).
    with open(file, encoding=encoding) as handle:
        for lineno, line in enumerate(handle, start=1):
            if identifier in line:
                return lineno
    return None
def p_sha1(secret, seed, sizes=()):
    """
    Derive one or more keys from secret and seed.
    (See specs part 6, 6.7.5 and RFC 2246 - TLS v1.0)
    Lengths of keys will match sizes argument
    """
    needed = sum(sizes)
    keystream = b''
    a_value = seed
    # P_SHA1 expansion: A(i) = HMAC(secret, A(i-1)); emit HMAC(secret, A(i) + seed).
    while len(keystream) < needed:
        a_value = hmac_sha1(secret, a_value)
        keystream += hmac_sha1(secret, a_value + seed)
    keys = []
    for size in sizes:
        keys.append(keystream[:size])
        keystream = keystream[size:]
    return tuple(keys)
def get_workload(batch_size, num_classes=1000, image_shape=(3, 224, 224), dtype="float32"):
    """Get benchmark workload for mobilenet

    Parameters
    ----------
    batch_size : int
        The batch size used in the model
    num_classes : int, optional
        Number of classes
    image_shape : tuple, optional
        The input image shape
    dtype : str, optional
        The data type

    Returns
    -------
    net : nnvm.Symbol
        The computational graph
    params : dict of str to NDArray
        The parameters.
    """
    # Build the full-depth MobileNet symbol and wrap it into a workload.
    return create_workload(
        mobile_net(num_classes=num_classes, alpha=1.0, is_shallow=False),
        batch_size, image_shape, dtype)
def delete_review(review_id):
    """
    Delete a review from the page and from the database.

    Checks that the user deleting the review is the creator of the review,
    as only that user is allowed to delete their own content.

    Fixes from the original: the lookup queried collection ``review`` while
    the removal targeted ``reviews`` — both now use ``reviews`` (the
    collection the delete already targeted).  Also guards against a missing
    review, which previously raised a TypeError on subscripting None.
    """
    review = mongo.db.reviews.find_one(
        {"_id": ObjectId(review_id)})
    # Only the author may delete; silently redirect otherwise.
    if review and review['created_by'] == session["user"]:
        mongo.db.reviews.remove({"_id": ObjectId(review_id)})
        flash("Review Deleted Successfully")
    return redirect(url_for("get_reviews"))
def quotient_mealy(mealy, node_relation=None, relabel=False, outputs={'loc'}):
    """Returns the quotient graph of ``G`` under the specified equivalence
    relation on nodes.

    Parameters
    ----------
    mealy : NetworkX graph
        The graph for which to return the quotient graph with the specified node
        relation.
    node_relation : Boolean function with two arguments
        This function must represent an equivalence relation on the nodes of
        ``G``. It must take two arguments *u* and *v* and return ``True``
        exactly when *u* and *v* are in the same equivalence class. The
        equivalence classes form the nodes in the returned graph.
        unlike the original networkx.quotient_graph selfloops are maintained
    relabel : Boolean
        if true relabel nodes in the graph
    outputs : Tells which outputs are critical and should be kept. Given as a set of strings.

    NOTE(review): ``outputs`` uses a mutable default; it is only read here so
    this is safe, but a ``None`` sentinel would be more conventional.
    """
    if node_relation is None:
        # Default relation: states are equivalent when their post-sets agree.
        node_relation = lambda u, v: mealy.states.post(u) == mealy.states.post(v)
    q_mealy = transys.MealyMachine()
    q_mealy.add_inputs(mealy.inputs)
    q_mealy.add_outputs(mealy.outputs)
    # Compute the blocks of the partition on the nodes of G induced by the
    # equivalence relation R.
    if relabel:
        mapping = dict((n, i) for (i, n) in enumerate(equivalence_classes(mealy, node_relation)))
        for (n, i) in mapping.items():
            # Preserve the distinguished initial block's name across relabeling.
            if {'Sinit'} <= set(n):
                mapping[n] = 'Sinit'
        q_mealy.add_nodes_from({n for (i, n) in mapping.items()})
    else:
        q_mealy.add_nodes_from(equivalence_classes(mealy, node_relation))
    if relabel:
        block_pairs = it_product(mapping.keys(), mapping.keys())
        for (b, c) in block_pairs:
            # Distinct (input, critical-output) label combinations over all
            # transitions between the two blocks.
            labels = {frozenset([(key, label[key]) for key in mealy.inputs.keys()]
                                + [(output, label[output]) for output in outputs])
                      for (x, y, label) in mealy.transitions.find(b, c)}
            for q in labels:
                q_mealy.transitions.add(mapping[b], mapping[c], **dict(q))
    else:
        block_pairs = it_product(q_mealy, q_mealy)
        for (b, c) in block_pairs:
            labels = {frozenset([(key, label[key]) for key in mealy.inputs.keys()]
                                + [(output, label[output]) for output in outputs])
                      for (x, y, label) in mealy.transitions.find(b, c)}
            for q in labels:
                q_mealy.transitions.add(b, c, **dict(q))
    # Mark as initial every quotient block that contains an original initial state.
    if relabel:
        for node_eq in mapping.keys():
            if any(init in node_eq for init in mealy.states.initial):
                q_mealy.states.initial.add(mapping[node_eq])
    else:  # only initializing after relabel
        for node_eq in q_mealy.nodes():
            if any(init in node_eq for init in mealy.states.initial):
                q_mealy.states.initial.add(node_eq)
    return q_mealy
import os
import sys
def find_package_data(where='.', package='',
                      exclude=standard_exclude,
                      exclude_directories=standard_exclude_directories,
                      only_in_packages=True,
                      show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.

    The dictionary looks like::

        {'package': [files]}

    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.

    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).

    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.

    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).

    Note patterns use wildcards, or can be exact paths (including
    leading ``./``), and all searching is case-insensitive.

    This function is by Ian Bicking.
    """
    out = {}
    # Iterative DFS over (directory, path-prefix, package, only_in_packages).
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            # Converted from a Python 2 print statement.
                            print("Directory %s ignored by pattern %s"
                                  % (fn, pattern), file=sys.stderr)
                        break
                if bad_name:
                    continue
                if os.path.isfile(os.path.join(fn, '__init__.py')):
                    # A package directory: recurse with a fresh prefix.
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    # Plain subdirectory: keep accumulating the path prefix.
                    stack.append((fn,
                                  prefix + name + '/',
                                  package,
                                  only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print("File %s ignored by pattern %s"
                                  % (fn, pattern), file=sys.stderr)
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out
import requests
def perform_v1_search(khoros_object, endpoint, filter_field, filter_value, return_json=False, fail_on_no_results=False,
                      proxy_user_object=None):
    """Perform a search for a particular field value using a Community API v1 call.

    :param khoros_object: The core :py:class:`khoros.Khoros` object
    :type khoros_object: class[khoros.Khoros]
    :param endpoint: The API v1 endpoint against which to perform the search query
    :type endpoint: str
    :param filter_field: The name of the field being queried within the API v1 endpoint
    :type filter_field: str
    :param filter_value: The value associated with the field being queried
    :type filter_value: str, int
    :param return_json: Determines if the response should be returned in JSON format (``False`` by default)
    :type return_json: bool
    :param fail_on_no_results: Raises an exception if no results are returned (``False`` by default)
    :type fail_on_no_results: bool
    :param proxy_user_object: Instantiated :py:class:`khoros.objects.users.ImpersonatedUser` object to perform the
                              API request on behalf of a secondary user.
    :type proxy_user_object: class[khoros.objects.users.ImpersonatedUser], None
    :returns: The API response (optionally in JSON format)
    :raises: :py:exc:`khoros.errors.exceptions.GETRequestError`
    """
    # Honor the ssl_verify core setting when deciding on TLS verification.
    verify_tls = should_verify_tls(khoros_object)
    request_headers = define_headers(khoros_object, content_type='application/x-www-form-urlencoded',
                                     proxy_user_object=proxy_user_object)
    # String values must be URL-encoded before being embedded in the query.
    if isinstance(filter_value, str):
        filter_value = core_utils.url_encode(filter_value)
    uri = f"{khoros_object.core['v1_base']}/search/{endpoint}?q={filter_field}:{filter_value}"
    uri = f"{uri}{_get_json_query_string(return_json)}"
    # Perform the API call and optionally unwrap the JSON payload.
    response = requests.get(uri, headers=request_headers, verify=verify_tls)
    if return_json:
        response = response.json()
        if 'response' in response:
            response = response['response']
    errors.handlers.verify_v1_response(response, 'get', 'users', fail_on_no_results)
    return response
def get_page_generator(s, max_items=0):
    """Get the generator that returns the Page objects
    that we're interested in, from Site s.

    A positive ``max_items`` caps how many pages the generator yields.
    """
    generator = s.allpages()
    if max_items > 0:
        generator.set_maximum_items(max_items)
    return generator
def get_num_in(profile, key_path):
    """Return the value pointed by the key path in the JSON profile."""
    # Walk the nested structure one key at a time.
    node = __get_job_obj(profile)
    for key in key_path:
        node = node[key]
    return node
import os
def file_basename_no_extension(filename):
    """ Returns filename without extension

    >>> file_basename_no_extension('/home/me/file.txt')
    'file'
    >>> file_basename_no_extension('file')
    'file'
    """
    stem, _extension = os.path.splitext(os.path.basename(filename))
    return stem
def rl_modelrl_medium():
    """Small set for larger testing.

    Same as the base hparams but with half the true-env generator steps.
    """
    config = rl_modelrl_base()
    config.true_env_generator_num_steps //= 2
    return config
def check_flow_information(flow, search_name, pos):
    """ Try to find out the type of a variable just with the information that
    is given by the flows: e.g. It is also responsible for assert checks.::

        if isinstance(k, str):
            k.  # <- completion here

    ensures that `k` is a string.
    """
    # Feature-gated: bail out early when dynamic flow analysis is disabled.
    if not settings.dynamic_flow_information:
        return None
    result = []
    if isinstance(flow, (pr.Scope, fast_parser.Module)) and not result:
        # Scan asserts bottom-up; the closest one before `pos` wins.
        for ass in reversed(flow.asserts):
            if pos is None or ass.start_pos > pos:
                continue
            result = _check_isinstance_type(ass, search_name)
            if result:
                break
    if isinstance(flow, pr.Flow) and not result:
        # A single-input if/while condition can also narrow the type.
        if flow.command in ['if', 'while'] and len(flow.inputs) == 1:
            result = _check_isinstance_type(flow.inputs[0], search_name)
    return result
def accuracy_measures(predictions, trues):
    """Accuracy measures for the predictions of the method vs the groundtruth.

    Prints a confusion matrix, accuracy, misclassification rate, true positive
    rate, false positive rate, specificity, precision, and prevalence.
    Returns the accuracy score, precision score, and recall score.

    NOTE(review): assumes both classes occur in ``trues`` and that at least
    one positive prediction exists; otherwise the rate computations divide
    by zero — confirm with callers.

    (Converted from Python 2 print statements to Python 3 print calls; the
    printed output is unchanged.)
    """
    tn, fp, fn, tp = confusion_matrix(trues, predictions).ravel()
    print("\t(tn, fp, fn, tp) =", (tn, fp, fn, tp))
    # how often is classifier correct?
    print("\tAccuracy = {:.2%}".format(float(tp + tn) / len(trues)))
    # how often is it wrong?
    print("\tMisclassification Rate = {:.2%}".format(float(fp + fn) / len(trues)))
    # when actually yes, how often does it predict yes?
    print("\tTrue Positive Rate = {:.2%}".format(float(tp) / trues.count(True)))
    # when actually no, how often does it predict yes?
    print("\tFalse Positive Rate = {:.2%}".format(float(fp) / trues.count(False)))
    # when actually no, how often does it predict no?
    print("\tSpecificity = {:.2%}".format(float(tn) / trues.count(False)))
    # when it predicts yes, how often is it correct?
    print("\tPrecision = {:.2%}".format(float(tp) / predictions.count(True)))
    # how often does yes condition occur in our sample?
    print("\tPrevalence = {:.2%}\n".format(float(trues.count(True)) / len(trues)))
    # return accuracy, precision, and recall score
    return accuracy_score(trues, predictions), precision_score(trues, predictions, average='binary'), recall_score(
        trues, predictions, average='binary')
def segment_text_to_sentences(text_file, sentence_splitter):
    """ Segment text into sentences. Text is provided by BRAT in .txt
    file.

    Args:
        text_file (str): the full path to the BRAT .txt file.
        sentence_splitter (spacy LM): SpaCy EN language model.

    Returns:
        sentences (list((int, int, str))): list of sentence spans.
            Spans are triples of (start_offset, end_offset, text),
            where offset is relative to the text.
    """
    sentences = []
    # `with` ensures the file is closed even if the splitter raises
    # (the original left the handle open in that case).
    with open(text_file, "r") as ftext:
        for line in ftext:
            splits = sentence_splitter(line.strip())
            for sent in splits.sents:
                sentences.append((sent.start_char, sent.end_char, sent.text))
    return sentences
def get_folder_offset(folder_list, folder_name):
    """
    Check whether there is already a folder named 'folder_name' and if so,
    return the first integer suffix i such that 'folder_name_i' is free.

    Returns 0 when no folder with that exact name exists.

    The original implementation rescanned the whole folder list inside a
    while loop (accidentally quadratic and hard to follow); a set of titles
    gives the same result with one pass plus O(1) membership tests.
    """
    titles = {folder['title'] for folder in folder_list}
    # Check whether a folder with the same name already exists
    if folder_name not in titles:
        return 0
    print('Folder', folder_name, 'already exists. Adding an index...')
    # Increment a counter until finding a folder name that doesn't exist
    # (with pattern 'name_i')
    i = 1
    while f'{folder_name}_{i}' in titles:
        i += 1
    return i
def get_coords_for_radius(radius):
    """ Given a radius, will return x,y coordinates with x=y.
    This is useful for plotting the relation between the undistorted and
    distorted radius.

    :param radius: the radius to decompose
    :return: (coord, coord) where coord = radius / sqrt(2)
    """
    # Solve x^2 + y^2 = r^2 with x == y.  (Removed the stray C-style
    # semicolons from the original.)
    coord = np.sqrt(radius ** 2 / 2)
    return coord, coord
def _parse_array(values):
    """ parse a list of (string) values representing a fortran array
    and return a python list
    """
    assert type(values) is list
    result = []
    for token in values:
        if '*' in token:
            # Fortran repetition shorthand: 3* "a" === "a", "a", "a"
            count, raw = token.split('*')
            result.extend(int(count) * [_parse_value(raw.strip())])
        else:
            result.append(_parse_value(token))
    return result
def word_frequency(text: str, use_cases: bool = False) -> dict[str,int]:
    """
    Returns a dictionary of the frequency of all words in given string.
    All words turned to lowercase if use_cases left unspecified.

    Parameters
    ------------
    text: str
        The text to find the word frequency of.
    use_cases: bool, optional
        Should words be considered case-sensitive, and saved as keys with
        their cases? Defaults to False.

    Returns
    ------------
    dict[str,int]
        The frequency (int) of every string (str) in text.
    """
    frequencies: dict[str, int] = {}
    for token in words(text):
        key = token if use_cases else token.lower()
        frequencies[key] = frequencies.get(key, 0) + 1
    return frequencies
def param_to_string(metric) -> str:
    """Convert a list / tuple of parameters returned from IE to a string"""
    if isinstance(metric, (list, tuple)):
        return ', '.join(str(item) for item in metric)
    return str(metric)
import re
def fix_subtitle_hierarchy(ctx, text):
    """Fix subtitle hierarchy to be strict Language -> Etymology ->
    Part-of-Speech -> Translation/Linkage."""
    assert isinstance(ctx, Wtp)
    assert isinstance(text, str)
    # Known language names are in languages_by_name
    # Known lowercase PoS names are in part_of_speech_map
    # Known lowercase linkage section names are in linkage_map
    # Split into alternating [text, ==s, title, inner-group, ==s, text, ...]
    # chunks; each heading contributes `npar` capture groups.
    old = re.split(r"(?m)^(==+)[ \t]*([^= \t]([^=\n]|=[^=])*?)"
                   r"[ \t]*(==+)[ \t]*$",
                   text)
    parts = []
    npar = 4  # Number of parentheses in above expression
    parts.append(old[0])
    for i in range(1, len(old), npar + 1):
        left = old[i]
        right = old[i + npar - 1]
        title = old[i + 1]
        level = len(left)
        part = old[i + npar]
        if level != len(right):
            ctx.debug("subtitle has unbalanced levels: "
                      "{!r} has {} on the left and {} on the right"
                      .format(title, left, right))
        lc = title.lower()
        # Force each recognized section kind to its canonical depth.
        if title in languages_by_name:
            if level > 2:
                ctx.debug("subtitle has language name {} at level {}"
                          .format(title, level))
            level = 2
        elif lc.startswith("etymology"):
            if level > 3:
                ctx.debug("etymology section {} at level {}"
                          .format(title, level))
            level = 3
        elif lc.startswith("pronunciation"):
            level = 3
        elif lc in part_of_speech_map:
            level = 4
        elif lc == "translations":
            level = 5
        elif lc in linkage_map or lc == "compounds":
            level = 5
        elif title in inflection_section_titles:
            level = 5
        elif title in ignored_section_titles:
            level = 5
        else:
            # Unrecognized titles sink below all known section kinds.
            level = 6
        parts.append("{}{}{}".format("=" * level, title, "=" * level))
        parts.append(part)
        # print("=" * level, title)
        # if level != len(left):
        #     print("  FIXED LEVEL OF {} {} -> {}"
        #           .format(title, len(left), level))
    text = "".join(parts)
    # print(text)
    return text
import os
import json
def embed_dataset(id, source_folder, output_folder):
    """ Here we are at the stage where the three splits starting from the training set have been computed.
    We have then train.csv, valid.csv, test.csv files and the correspondent files with labels.
    [train.csv, valid.csv, test.csv]
    [train.npy, valid.npy, test.npy]
    Then we have three folders, each containing the files map
    [map_valid/map_n_*.csv, map_valid/map_r_*.csv] -- for all the examples
    At this stage we want to obtain the embedding for these files.
    """
    emb = Embedding()
    # Maps the map-file index to its feature-type tag.
    # NOTE(review): the meaning of 'n' vs 'r' is not visible here — confirm.
    dct_feat_type = {0: 'n', 1: 'r'}
    fine_phrases = pd.read_csv(join(source_folder, 'fine_phrases_train.csv'), index_col=0)
    for split_ in ['train', 'valid', 'test']:
        path_sentences_ = join(output_folder, split_ + '_sentences')
        path_phrases_ = join(output_folder, split_ + '_phrases')
        os.makedirs(path_sentences_, exist_ok=False)
        os.makedirs(path_phrases_, exist_ok=False)
        x_sentences = pd.read_csv(join(source_folder, split_ + '.csv'), index_col=0)
        y_sentences = np.load(join(source_folder, split_ + '.npy'))
        # Only the training split may extend the embedding dictionary.
        fill_dct = True if split_ == 'train' else False
        for k_ in range(x_sentences.shape[0]):
            out = emb.transform_sentence(x_sentences.loc[k_], fill_dct)
            # A scalar return presumably signals a skipped sentence — TODO confirm.
            if not np.isscalar(out):
                np.save(join(path_sentences_, 'sentence_%i.npy' % k_), out)
                np.save(join(path_sentences_, 'y_%i.npy' % k_), y_sentences[k_])
        fill_dct = False
        # Each example has two map files (n and r); hence the // 2.
        for l_ in range(len(os.listdir(join(source_folder, 'map_%s' % split_))) // 2):
            for id_file_, file_map_ in enumerate(['map_n_%i.csv' % l_,
                                                  'map_r_%i.csv' % l_]):
                path_feat_type = join(path_phrases_, 'phrases_%s' % dct_feat_type[id_file_])
                os.makedirs(path_feat_type, exist_ok=True)
                df_phr = pd.read_csv(join(source_folder, 'map_%s' % split_, file_map_), index_col=0)
                for k_ in df_phr.index:
                    # Drop NaN padding and embed each referenced fine phrase.
                    id_phrases = [int(id_) for id_ in df_phr.loc[k_] if not np.isnan(id_)]
                    for j_phr, id_phr in enumerate(id_phrases):
                        out = emb.transform_sentence([f_ for f_ in fine_phrases.loc[id_phr]], fill_dct)
                        if not np.isscalar(out):
                            np.save(join(path_feat_type, 'map_%i_%i.npy' % (k_, j_phr)), out)
def entire_word2vec_variance(self,
                             input_df,
                             fill_dct=True):
    """ Based on the variance a of the entire word2vec representation.
    we assign value to the randomly initialized
    words such that U[-a, a]

    :param input_df: pandas data frame containing the sentences
    :param dct_new_words: dictionary containing the new words, not in word2vec
    :param fill_dct: bool, if True we save the new elements
    :returns output_word2vec:

    NOTE(review): most of the per-word logic below is disabled (commented out
    or inside a dead string literal), so ``word2vec_sentence`` stays empty and
    ``dct_new_words`` is never defined before being returned — as written this
    raises NameError at the return.  Kept byte-identical pending clarification.
    """
    # dct_path = join(self.exp.output_path, 'dct')
    # os.makedirs(dct_path, exist_ok=True)
    eliminate_samples = False
    eliminated_idx = []
    # if 'dct_not_in_word2vec.json' not in os.listdir(dct_path):
    #     dct_new_words = {}
    # else:
    #     with open(join(dct_path, 'dct_not_in_word2vec.json')) as json_file:
    #         dct_new_words = json.load(json_file)
    output_word2vec = []  # save the representation for the entire set
    for in_sentence_idx in range(input_df.shape[0]):
        word2vec_sentence = []  # save the embedding for the entire sentence here
        out = self.embed_sentence(input_df.loc[in_sentence_idx], fill_dct)
        """
        stc = [f_ for f_ in input_df.loc[in_sentence_idx] if isinstance(f_, str)]
        if len(stc) < self.min_length_stc:
            eliminate_samples = True
            eliminated_idx.append(in_sentence_idx)
            continue
        for in_word_ in stc:  # if the sentence is longer than self.min_length_stc, for each word
            if in_word_ not in dct_new_words.keys():
                try:  # we try to save the representation of the word
                    word2vec_sentence.append(self.model[in_word_])
                except:  # otherwise we generate the embedding
                    if fill_dct:
                        rnd_embedding = np.array([np.random.uniform(low=-v_, high=v_) for v_ in self.var])
                        word2vec_sentence.append(rnd_embedding)
                        dct_new_words[in_word_] = list(rnd_embedding)  # save in the dictionary as a list
            else:
                word2vec_sentence.append(np.array(dct_new_words[in_word_]))  # save in the output embedding as arr
        """
        # Sentences shorter than 5 embedded words are dropped.
        if len(word2vec_sentence) >= 5:
            output_word2vec.append(np.array(word2vec_sentence))
        else:
            eliminated_idx.append(in_sentence_idx)
    # with open(join(dct_path, 'dct_not_in_word2vec.json'), 'w') as outfile:
    #     json.dump(dct_new_words, outfile)
    if eliminate_samples:
        return output_word2vec, eliminated_idx, dct_new_words
    return output_word2vec, -1, dct_new_words
def as_train_word2vec_variance(self,
                               input_df):
    """ Based on the variance a of the words in the word2vec embedding used in the training set only.
    We assign value to the randomly initialized word such that U[-a, a].

    :param input_df: input data frame
    :returns output_word2vec

    NOTE(review): the mean below is ``np.mean(...) / n_words_training`` —
    np.mean already averages, so the extra division looks like a bug; confirm
    against the intended statistics before relying on ``var``.
    """
    dct_path = join(self.exp.output_path, 'dct')
    os.makedirs(dct_path, exist_ok=True)
    n = input_df.shape[0]
    print(n)
    # First pass (only once): build the out-of-vocabulary dictionary and the
    # per-dimension variance from the training vocabulary, then persist both.
    if 'dct_not_in_word2vec.json' not in os.listdir(dct_path):  # if the dictionary does not exist
        dct_new_words = {}
        word2vec_in_training = []  # words used during training
        not_in_word2vec = []  # words of the training set which are not in the word2vec
        for in_sentence_idx in range(n):  # for all the phrases, we iterate on the words
            stc = [f_ for f_ in input_df.loc[in_sentence_idx] if isinstance(f_, str)]
            print('------------------------')
            print('sentence')
            print(stc)
            if len(stc) < self.min_length_stc:  # if it is too small for the c filter, we continue
                continue
            print('words in sentence')
            for in_word_ in stc:  # for each word in the sentence
                print(in_word_)
                if in_word_ not in not_in_word2vec:
                    # we have not yet stored this word in the dictionary of the unknowns
                    try:
                        # it may be in the word2vec
                        word2vec_in_training.append(self.model[in_word_])
                    except:
                        # if not we need to save it
                        not_in_word2vec.append(in_word_)
        # mean_ = np.zeros(embed_dct[self.embed_name]['dimension'])
        n_words_training = len(word2vec_in_training)  # words from the word2vec
        """for w_ in word2vec_in_training:  # we compute the mean
            mean_ += self.model[w_]"""
        mean_ = np.mean(np.array(word2vec_in_training), axis=0) / n_words_training
        sq_ = np.zeros(embed_dct[self.embed_name]['dimension'])
        for w_ in word2vec_in_training:
            sq_ += (w_ - mean_) ** 2
        var = sq_ / n_words_training  # and the variance
        for k_ in not_in_word2vec:
            dct_new_words[k_] = [np.random.uniform(low=-v_, high=v_) for v_ in var]
        with open(join(dct_path, 'dct_not_in_word2vec.json'), 'w') as outfile:
            json.dump(dct_new_words, outfile)
        np.save(join(dct_path, 'var.npy'), var)
    with open(join(dct_path, 'dct_not_in_word2vec.json')) as json_file:  # repetition for the training
        dct_new_words = json.load(json_file)  # otherwise we load it
    var = np.load(join(dct_path, 'var.npy'))
    # second part, here we generate the representation
    eliminate_samples = False
    eliminated_idx = []
    output_word2vec = []
    for in_sentence_idx in range(n):
        word2vec_sentence = []
        stc = [f_ for f_ in input_df.loc[in_sentence_idx] if isinstance(f_, str)]
        if len(stc) < self.min_length_stc:
            # Too short for the filter width: drop and remember the index.
            eliminate_samples = True
            eliminated_idx.append(in_sentence_idx)
            continue
        for in_word_ in stc:
            if in_word_ not in dct_new_words.keys():
                try:
                    word2vec_sentence.append(self.model[in_word_])
                except:
                    # Unknown word never seen in training: draw U[-var, var].
                    rnd_embedding = np.array([np.random.uniform(low=-v_, high=v_) for v_ in var])
                    word2vec_sentence.append(rnd_embedding)
            else:
                word2vec_sentence.append(np.array(dct_new_words[in_word_]))  # save in the output embedding as arr
        output_word2vec.append(np.array(word2vec_sentence))
    if eliminate_samples:
        return output_word2vec, eliminated_idx
    return output_word2vec, -1
def convert_word2vec(self, input_df):
    """ Here we pass the input data. The input data consists of list of lists.
    Each sample corresponds to a list of words. The preprocess is such that we have no space, neither capital letters.
    We give here the representation using the word2vec embedding.
    Each word has a representation of size 300.
    :param input_df: it is a list of lists. Each lists contains a sentence or a phrase.
    :returns: a list of numpy arrays, of size #n_words_in_sentence/phrase, 300
        and either the list of eliminated sample indices or -1 if none were dropped
    """
    dct_path = join(self.exp.output_path, 'dct')
    os.makedirs(dct_path, exist_ok=True)
    n = input_df.shape[0]
    eliminate_samples = False
    eliminated_idx = []
    # Load the cache of random embeddings generated for out-of-vocabulary words.
    if 'dct_not_in_word2vec.json' not in os.listdir(dct_path):
        dct_new_words = {}
    else:
        with open(join(dct_path, 'dct_not_in_word2vec.json')) as json_file:
            dct_new_words = json.load(json_file)
    output_word2vec = []
    for in_sentence_idx in range(n):
        word2vec_sentence = []
        stc = [f_ for f_ in input_df.loc[in_sentence_idx] if isinstance(f_, str)]
        if len(stc) < self.min_length_stc:
            # Sentence too short: drop the sample and remember its index.
            eliminate_samples = True
            eliminated_idx.append(in_sentence_idx)
            continue
        for in_word_ in stc:
            if in_word_ not in dct_new_words:
                try:
                    word2vec_sentence.append(self.model[in_word_])
                except KeyError:
                    # BUG FIX: was a bare `except`, which also swallowed
                    # KeyboardInterrupt/SystemExit. An out-of-vocabulary word
                    # raises KeyError, so catch only that.
                    rnd_embedding = np.random.randn(embed_dct[self.embed_name]['dimension'])
                    word2vec_sentence.append(rnd_embedding)
                    dct_new_words[in_word_] = list(rnd_embedding)  # save in the dictionary as a list
            else:
                word2vec_sentence.append(np.array(dct_new_words[in_word_]))  # save in the output embedding as arr
        output_word2vec.append(np.array(word2vec_sentence))
    # Persist the (possibly extended) out-of-vocabulary cache.
    with open(join(dct_path, 'dct_not_in_word2vec.json'), 'w') as outfile:
        json.dump(dct_new_words, outfile)
    if eliminate_samples:
        return output_word2vec, eliminated_idx
    return output_word2vec, -1
def apply(self, input_df, y):
    """Embed ``input_df`` with the strategy selected by ``self.init``.

    :param input_df: input data frame
    :param y: output label
    """
    dispatch = {'gaussian': self.convert_word2vec,
                'uniform_all': self.entire_word2vec_variance,
                'uniform_tr': self.as_train_word2vec_variance}
    embedded_lst, dropped = dispatch[self.init](input_df)
    # `dropped` is -1 when nothing was removed, otherwise a list of indices.
    if dropped != -1:
        y = np.delete(y, dropped)
    if self.exp.hyper.architecture == 'CNN':
        return embedded_lst, y
    # Otherwise collapse each sentence to the mean of its word vectors.
    return np.array([np.mean(emb, axis=0) for emb in embedded_lst]), y
def showStitchedModels_Redundant(mods, ax=None,
                                 cmin=None, cmax=None, **kwargs):
    """Show several 1d block models as (stitched) section.

    Parameters
    ----------
    mods : list of array-like
        Each model holds nlay-1 thicknesses followed by nlay resistivities.
    ax : matplotlib axes, optional
        Axes to draw into; a new figure/axes pair is created if omitted.
    cmin, cmax : float, optional
        Color scale limits; derived from the data when not given.

    Returns
    -------
    (fig, ax) if a new figure was created here, otherwise the color bar
    (or None when colorBar=False).
    """
    x = kwargs.pop('x', np.arange(len(mods)))
    topo = kwargs.pop('topo', x*0)
    nlay = int(np.floor((len(mods[0]) - 1) / 2.)) + 1
    if cmin is None or cmax is None:
        cmin = 1e9
        cmax = 1e-9
        for model in mods:
            res = np.asarray(model)[nlay - 1:nlay * 2 - 1]
            cmin = min(cmin, min(res))
            cmax = max(cmax, max(res))
    if kwargs.pop('sameSize', True):  # all having the same width
        dx = np.ones_like(x)*np.median(np.diff(x))
    else:
        dx = np.diff(x) * 1.05
        dx = np.hstack((dx, dx[-1]))
    x1 = x - dx / 2
    # BUG FIX: remember whether the axes were created here; `ax` is reassigned
    # below, so testing `ax is None` at the end could never be true.
    newAxes = ax is None
    if newAxes:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure
    zm = kwargs.pop('zm', None)
    maxz = 0.
    if zm is not None:
        maxz = zm
    recs = []
    RES = []
    for i, mod in enumerate(mods):
        mod1 = np.asarray(mod)
        res = mod1[nlay - 1:]
        RES.extend(res)
        thk = mod1[:nlay - 1]
        thk = np.hstack((thk, thk[-1]))
        z = np.hstack((0., np.cumsum(thk)))
        if zm is not None:
            # Clip/extend the bottom layer to the given maximum depth.
            thk[-1] = zm - z[-2]
            z[-1] = zm
        else:
            maxz = max(maxz, z[-1])
        for j, _ in enumerate(thk):
            recs.append(Rectangle((x1[i], topo[i]-z[j]), dx[i], -thk[j]))
    # BUG FIX: pop 'edgecolors' only once; the second pop always returned the
    # default 'none' and silently discarded a user-supplied edge color.
    edgecolors = kwargs.pop('edgecolors', 'none')
    pp = PatchCollection(recs, edgecolors=edgecolors)
    pp.set_edgecolor(edgecolors)
    pp.set_linewidths(0.0)
    ax.add_collection(pp)
    if 'cmap' in kwargs:
        pp.set_cmap(kwargs['cmap'])
    norm = colors.LogNorm(cmin, cmax)
    pp.set_norm(norm)
    pp.set_array(np.array(RES))
    ax.set_ylim((-maxz, max(topo)))
    ax.set_xlim((x1[0], x1[-1] + dx[-1]))
    cbar = None
    if kwargs.pop('colorBar', True):
        cbar = plt.colorbar(pp, ax=ax, norm=norm, orientation='horizontal',
                            aspect=60)
        if 'ticks' in kwargs:
            cbar.set_ticks(kwargs['ticks'])
    if newAxes:  # newly created fig+ax
        return fig, ax
    else:  # already given, better give back color bar
        return cbar
def export_16(text_col, processed_col, input_filepath,
              output_filepath, country):
    """Takes in a file with 8 different sheets containing extracted text.
    It will perform mulitple steps to clean the extracted text
    and split each sheet into two smaller sheets.
    Then export them individual to a local folder on computer
    Parameters
    ----------
    text_col : string
        Name of the text column that needs processing
    processed_col : string
        Name of the empty column where processed text will be stored
    input_filepath : string
        Location of the input file
    output_filepath : string
        Location of the output file
    country : string
        Which country's data is being processed
    Returns
    -------
    value : True
        Returns True if the process is successfully run
    """
    processed_list_8 = process_text(text_col, processed_col, input_filepath)
    # Split each of the 8 sheets in half, yielding 16 sheets.
    processed_list_16 = []
    for sheet in processed_list_8:
        # FIX: the original reused the loop variable and `_` as real names;
        # use descriptive names for both halves instead.
        first_half, second_half = split_half(sheet)
        processed_list_16.append(first_half)
        processed_list_16.append(second_half)
    # FIX: iterate with enumerate instead of range(len(...)); files are
    # numbered starting at 1, matching the original `str(i+1)` naming.
    for idx, sheet in enumerate(processed_list_16, start=1):
        sheet.to_excel(output_filepath +
                       country + '_processed_' +
                       str(idx) + '.xlsx',
                       index=False)
    return True
import random
def generate_signal(ders, n, sampling, initial_state=None, number_of_variables=1, number_of_perturbations=1, warmup_time=1000.0, tau=3.0, eps=0.5, dt=0.01):
    """generates signal for the oscillator driven by correlated noise from dynamical equations
    :param ders: a list of state variable derivatives
    :param n: length of time series
    :param sampling: sampling rate
    :param initial_state: initial state
    :param number_of_variables: number of variables returned, not including the input (default 1)
    :param number_of_perturbations: number of perturbations (default 1)
    :param warmup_time: time for relaxing to the stationary regime (default 1000)
    :param tau: noise correlation time (default 3.0)
    :param eps: noise strength (default 0.5)
    :param dt: time step (default 0.01)
    :return: time series of the signal and driving noise"""
    # initial conditions
    # BUG FIX: was `if(initial_state==None)`; identity comparison is the
    # correct idiom for None checks.
    if initial_state is None:
        state = [random.gauss(0, 1) for _ in range(len(ders))]
    else:
        state = initial_state
    res_s = [[] for _ in range(number_of_variables)]  # resulting signal
    res_p = [[] for _ in range(number_of_perturbations)]  # resulting perturbations
    ps = [0 for _ in range(number_of_perturbations)]
    # warmup: integrate without recording so transients die out
    for _ in range(round(warmup_time/dt)):
        for p in range(number_of_perturbations):
            # Euler-Maruyama update of the Ornstein-Uhlenbeck noise process.
            ps[p] = ps[p] - (ps[p]/tau - eps*sqrt(2/tau)*random.gauss(0, 1)/sqrt(dt))*dt
        state = one_step_integrator(state, ders, ps, dt)
    # real integration
    for _ in range(n*sampling):
        for p in range(number_of_perturbations):
            ps[p] = ps[p] - (ps[p]/tau - eps*sqrt(2/tau)*random.gauss(0, 1)/sqrt(dt))*dt
        state = one_step_integrator(state, ders, ps, dt)
        for c in range(number_of_variables):
            res_s[c].append(state[c])
        for p in range(number_of_perturbations):
            res_p[p].append(ps[p])
    # sampling: keep every `sampling`-th recorded point
    res_s = [res_s[i][::sampling] for i in range(number_of_variables)]
    res_p = [res_p[i][::sampling] for i in range(number_of_perturbations)]
    return res_s, res_p
from sys import stdout
import os
def get_first_last_line(filePath, encoding=stdout.encoding):
    """Return the first and the last lines of file

    The existence of filePath should be check beforehand.

    Args:
        filePath (str): the path of the file
        encoding (str): the encoding of the file. Default stdout.encoding

    Returns
        two strings (unstripped)
    """
    with open(filePath, "rb") as f:
        first = f.readline()  # Read the first line.
        try:
            f.seek(-2, os.SEEK_END)  # Jump to the second last byte.
            while f.read(1) != b"\n":  # Until EOL is found...
                # ...jump back the read byte plus one more.
                f.seek(-2, os.SEEK_CUR)
        except OSError:
            # BUG FIX: for a file with a single line (or fewer than two
            # bytes) the backwards seek runs past the start of the file and
            # raises OSError; fall back to reading from the beginning so the
            # first line is also the last line.
            f.seek(0)
        last = f.readline()  # Read last line.
    # decode the raw bytes with the requested encoding
    return str(first, encoding), str(last, encoding)
def getReviewRedirect(entity, params):
    """Returns the redirect to review the specified entity.
    """
    key_id = entity.key().id_or_name()
    return '/%s/review/%s' % (params['url_name'], key_id)
import imgaug.random
def current_random_state():
    """Get or create the current global RNG of imgaug.

    Note that the first call to this function will create a global RNG.

    Returns
    -------
    imgaug.random.RNG
        The global RNG to use.
    """
    global_rng = imgaug.random.get_global_rng()
    return global_rng
def get_remote_station(s3, bucket, dataset_id=None, station_id=None, stn_key=None, version=3):
    """Fetch and decode a station object from S3 (best effort).

    Parameters
    ----------
    s3 : S3 client
        Client used to fetch the object.
    bucket : str
        Bucket holding the station objects.
    dataset_id, station_id : str, optional
        When ``dataset_id`` is a string, the object key is built from these
        via ``key_patterns`` and overrides ``stn_key``.
    stn_key : str, optional
        Explicit object key to fetch.
    version : int
        Key-pattern schema version (default 3).

    Returns
    -------
    dict or None
        The decoded station record, or None if the object could not be
        fetched or decoded.
    """
    if isinstance(dataset_id, str):
        stn_key = key_patterns[version]['station'].format(dataset_id=dataset_id, station_id=station_id)
    try:
        obj1 = s3.get_object(Bucket=bucket, Key=stn_key)
        rem_stn_body = obj1['Body']
        jzstd = rem_stn_body.read()
        rem_stn = read_json_zstd(jzstd)
    except Exception:
        # Best-effort fetch: missing keys / network / decode errors yield None.
        # BUG FIX: was a bare `except`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        rem_stn = None
    return rem_stn
import os
def getURLitemBasename(url):
    """For a URL, absolute or relative, return the basename string.

    e.g. "http://foo/bar/path/foo.dmg" => "foo.dmg"
         "/path/foo.dmg" => "foo.dmg"
    """
    path = urlparse(url).path
    return os.path.basename(path)
def determine_smallest_atom_index_in_torsion(atom1: 'rdkit.Chem.rdchem.Atom',
                                             atom2: 'rdkit.Chem.rdchem.Atom',
                                             ) -> int:
    """
    Determine the smallest atom index in mol connected to ``atom1`` which is not ``atom2``.
    Returns a heavy atom if available, otherwise a hydrogen atom.
    Useful for deterministically determining the indices of four atom in a torsion.
    This function assumes there ARE additional atoms connected to ``atom1``, and that ``atom2`` is not a hydrogen atom.

    Args:
        atom1 (Atom): The atom who's neighbors will be searched.
        atom2 (Atom): An atom connected to ``atom1`` to exclude (a pivotal atom).

    Returns:
        int: The smallest atom index (1-indexed) connected to ``atom1`` which is not ``atom2``.
    """
    candidates = [nb for nb in atom1.GetNeighbors() if nb.GetIdx() != atom2.GetIdx()]
    atomic_numbers = sorted(nb.GetAtomicNum() for nb in candidates)
    lightest, heaviest = atomic_numbers[0], atomic_numbers[-1]
    # All neighbors are the same element, or none of them is hydrogen:
    # any candidate qualifies, so take the smallest index overall.
    if lightest == heaviest or lightest > 1:
        return min(nb.GetIdx() for nb in candidates)
    # Otherwise prefer heavy atoms over hydrogens.
    return min(nb.GetIdx() for nb in candidates if nb.GetAtomicNum() != 1)
import numpy
from typing import Optional
def structured_array_generic_full(
    request: Request,
    reader=Depends(reader),
    slice=Depends(slice_),
    expected_shape=Depends(expected_shape),
    format: Optional[str] = None,
    serialization_registry=Depends(get_serialization_registry),
):
    """
    Fetch a slice of array-like data.
    """
    if reader.structure_family != "structured_array_generic":
        raise HTTPException(
            status_code=404,
            detail=f"Cannot read {reader.structure_family} structure with /structured_array_generic/full route.",
        )
    # Deferred import because this is not a required dependency of the server
    # for some use cases.
    try:
        data = reader.read()
        if slice:
            data = data[slice]
        # Force dask or PIMS or ... to do I/O.
        data = numpy.asarray(data)
    except IndexError:
        raise HTTPException(status_code=400, detail="Block index out of range")
    if (expected_shape is not None) and (expected_shape != data.shape):
        raise HTTPException(
            status_code=400,
            detail=f"The expected_shape {expected_shape} does not match the actual shape {data.shape}",
        )
    try:
        response = construct_data_response(
            "structured_array_generic",
            serialization_registry,
            data,
            reader.metadata,
            request,
            format,
            expires=getattr(reader, "content_stale_at", None),
        )
    except UnsupportedMediaTypes as err:
        raise HTTPException(status_code=406, detail=err.args[0])
    return response
import requests
def _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:
    """Get an RGD API Key for the given user from the server, and save it if requested."""
    credentials = {'username': username, 'password': password}
    resp = requests.post(f'{api_url}/api-token-auth', credentials)
    resp.raise_for_status()
    token = resp.json()['token']
    if save:
        # Persist the token so later invocations can reuse it.
        API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)
        key_file = API_KEY_DIR_PATH / API_KEY_FILE_NAME
        with open(key_file, 'w') as fd:
            fd.write(token)
    return token
def fitness_func_large(vector):
    """Return a constant, very large fitness value (the input is ignored)."""
    huge_fitness = 9999999999999999999
    return huge_fitness
def create_implicit_binary_heap(key_type=float):
    """Create an implicit (array-based) binary heap.

    :param key_type: the key type
    :type key_type: float, int or object
    :returns: the heap
    :rtype: :py:class:`.Heap`
    """
    return _create_and_wrap_heap(
        _HeapType.HEAP_TYPE_BINARY_IMPLICIT,
        key_type,
        value_type=None,
        addressable=False,
        mergeable=False,
        double_ended=False,
    )
def Prob_APD_select_v2(
    fitness: list,
    uncertainty: list,
    vectors: "ReferenceVectors",
    penalty_factor: float,
    ideal: list = None,
):
    """Select individuals for mating on basis of Angle penalized distance.

    Args:
        fitness (list): Fitness of the current population.
        uncertainty (list) : Uncertainty of the predicted objective values
        vectors (ReferenceVectors): Class containing reference vectors.
        penalty_factor (float): Multiplier of angular deviation from Reference
            vectors. See RVEA paper for details.
        ideal (list): ideal point for the population.
            Uses the min fitness value if None.

    Returns:
        [type]: A list of indices of the selected individuals.
    """
    # NOTE: large blocks of commented-out sample data, debug prints and
    # matplotlib plotting code were removed from this function; see VCS
    # history if they are needed again.
    refV = vectors.neighbouring_angles_current
    # Translate the population so the estimated ideal point sits at the origin.
    # NOTE(review): the `ideal` argument is currently ignored; the minimum of
    # `fitness` is always used -- confirm whether that is intended.
    fmin = np.amin(fitness, axis=0)
    translated_fitness = fitness - fmin
    # Monte-Carlo sample objective vectors from their predicted uncertainty.
    pwrong = Probability_wrong(mean_values=translated_fitness, stddev_values=uncertainty, n_samples=1000)
    pwrong.vect_sample_f()
    # Normalize every sample so angles to reference vectors reduce to dot products.
    fitness_norm = np.linalg.norm(pwrong.f_samples, axis=1)
    fitness_norm = np.repeat(np.reshape(fitness_norm, (len(fitness), 1, pwrong.n_samples)), len(fitness[0, :]), axis=1)
    normalized_fitness = np.divide(pwrong.f_samples, fitness_norm)
    # Cosine of the angle between each sample and each reference vector.
    cosine = np.tensordot(normalized_fitness, np.transpose(vectors.values), axes=([1], [0]))
    cosine = np.transpose(cosine, (0, 2, 1))
    # Clamp numerical noise so arccos stays defined on [0, 1].
    if cosine[np.where(cosine > 1)].size:
        cosine[np.where(cosine > 1)] = 1
    if cosine[np.where(cosine < 0)].size:
        cosine[np.where(cosine < 0)] = 0
    # Calculation of angles between reference vectors and solutions
    theta = np.arccos(cosine)
    # Assign each individual to the reference vector with the largest mean cosine.
    rank_cosine = np.mean(cosine, axis=2)
    assigned_vectors = np.argmax(rank_cosine, axis=1)
    # APD-based selection over the sampled populations.
    apd_list, inidiv_index_list = select_apd_2(pwrong, assigned_vectors, theta, refV, penalty_factor, vectors)
    selection = pwrong.compute_rank_vectorized_apd(apd_list, inidiv_index_list)
    if selection.shape[0] == 1:
        # Mating needs at least two parents; add one chosen uniformly at random.
        print("Only one individual!!")
        rand_select = np.random.randint(len(fitness), size=1)
        selection = np.vstack((selection, np.transpose(rand_select[0])))
    return selection.squeeze()
import os
def get_initial_band_powers(bands_min, bands_max, idx_zbin1, idx_zbin2):
    """
    get_initial_band_powers(bands_min, bands_max, idx_zbin1, idx_zbin2)

    Function supplying the initial guess for the requested {'EE', 'BB', 'EB'} band
    powers for the current redshift bin correlation idx_zbin1 x idx_zbin2.

    Parameters
    ----------
    bands_min : 1D numpy.ndarray
        Array containing the lower limits of the band power multipole intervals.
    bands_max : 1D numpy.ndarray
        Array containing the upper limits of the band power multipole intervals.
    idx_zbin1 : int
        Index of the first z-bin to be used in the analysis.
    idx_zbin2 : int
        Index of the second z-bin to be used in the analysis.

    Returns
    -------
    initial_band_powers : 1D numpy.ndarray
        Array containing the initial guesses for all requested band powers
    indices_bands_to_use : 1D numpy.ndarray
        Array containing the indices (with respect to 'initial_band_powers') for the bands
        that will be checked for convergence.
    slicing_points_bands : dict
        Dictionary containing the sets of indices at which to split 'initial_band_powers' into
        the single EE, BB, EB components.
    """
    # use a read-in power spectrum with random but realistic amplitude scaling:
    # NOTE(review): the multipole limits here come from the module-level
    # `bands_EE_min` etc., not from the `bands_min`/`bands_max` parameters --
    # confirm the parameters are only meant for the file output below.
    initial_band_powers_EE = initialize_band_powers(bands_EE_min, bands_EE_max, band='EE', constant=False)
    # set BB and EB to constant zero for now!
    initial_band_powers_BB = initialize_band_powers(bands_BB_min, bands_BB_max, band='BB', constant=True)
    initial_band_powers_EB = initialize_band_powers(bands_EB_min, bands_EB_max, band='EB', constant=True)
    initial_band_powers_list = [initial_band_powers_EE, initial_band_powers_BB, initial_band_powers_EB]
    # More correct: initial_EE_z1z1, initial_EE_z1z2, initial_EE_z2z2; initial_BB_z1z1, initial_BB_z1z2, initial_BB_z2z2; since initial_BB_z1z2 = initial_BB_z2z1
    # BUG FIX: `xrange` does not exist on Python 3; use `range`.
    for idx_band in range(len(ini.bands)):
        if idx_band == 0:
            initial_band_powers = initial_band_powers_list[idx_band]
        else:
            initial_band_powers = np.concatenate((initial_band_powers, initial_band_powers_list[idx_band]))
    # slicing points for all EE bands
    slicing_points_bands = {'EE': (0, initial_band_powers_EE.size),
                            # slicing points for all BB bands
                            'BB': (initial_band_powers_EE.size, initial_band_powers_EE.size + initial_band_powers_BB.size),
                            # slicing points for all EB bands
                            'EB': (initial_band_powers_EE.size + initial_band_powers_BB.size, initial_band_powers.size)
                            }
    for i, band in enumerate(ini.bands):
        initial_band_powers_band = initial_band_powers[slicing_points_bands[band][0]:slicing_points_bands[band][1]]
        fname = 'initial_guess_{:}_z{:}xz{:}.dat'.format(band, idx_zbin2 + 1, idx_zbin1 + 1)
        if not os.path.isfile(fname):
            savedata = np.column_stack(((bands_min[i] + bands_max[i]) / 2., bands_min[i], bands_max[i], initial_band_powers_band))
            header = 'naive bin center, bin_min, bin_max, initial_guess_for_band_powers'
            np.savetxt(fname, savedata, header=header)
        # write also out multipoles in extra file for convenience:
        fname = 'multipoles_{:}.dat'.format(band)
        if not os.path.isfile(fname):
            savedata = np.column_stack(((bands_min[i] + bands_max[i]) / 2., bands_min[i], bands_max[i]))
            header = 'naive bin center, bin_min, bin_max'
            np.savetxt(fname, savedata, header=header)
        # create here some indices for the convergence criterion:
        if band == 'EE':
            bands_to_use_EE = np.ones_like(bands_EE_min)
            # don't use first EE band:
            bands_to_use_EE[0] = 0
            # don't use last EE band:
            bands_to_use_EE[-1] = 0
            all_bands_to_use_EE = bands_to_use_EE.tolist()
        if band == 'BB':
            bands_to_use_BB = np.ones_like(bands_BB_min)
            # don't use last BB band:
            bands_to_use_BB[-1] = 0
            all_bands_to_use_BB = bands_to_use_BB.tolist()
        if band == 'EB':
            bands_to_use_EB = np.ones_like(bands_EB_min)
            # don't use last EB band:
            bands_to_use_EB[-1] = 0
            all_bands_to_use_EB = bands_to_use_EB.tolist()
    # TODO: Expand this to possibly include also EB bands in convergence (but this is a very unlikely use-case as is already the inclusion of BB bands)
    # NOTE(review): this assumes 'EE' is always in ini.bands (and 'BB' when
    # include_BB_in_convergence is requested); otherwise the names below are
    # undefined -- confirm against the config validation.
    if ini.include_BB_in_convergence:
        all_bands_to_use = all_bands_to_use_EE + all_bands_to_use_BB
    elif not ini.include_BB_in_convergence and 'BB' in ini.bands:
        all_bands_to_use = all_bands_to_use_EE + np.zeros_like(all_bands_to_use_BB).tolist()
    else:
        all_bands_to_use = all_bands_to_use_EE
    # this boolean contains now all indices of bands that should be included in convergence criterion for Newton Raphson!
    indices_bands_to_use = np.where(np.asarray(all_bands_to_use) == 1)[0]
    return initial_band_powers, indices_bands_to_use, slicing_points_bands
def remove_prefix(s, pre):
    """
    Remove prefix from the beginning of the string

    Parameters:
    ----------
    s : str
    pre : str

    Returns:
    -------
    s : str
        string with "pre" removed from the beginning (if present)
    """
    if not pre:
        # An empty prefix leaves the string untouched.
        return s
    return s[len(pre):] if s.startswith(pre) else s
import logging
import os
import glob
import gc
def raw_screenshots_analysis(jobs, job_id):
    """
    Returns a report (dict) with:
    report["raw_screenshot_mse"] = 0. <- [0, 100], 0 meaning images are equal
    report["raw_screenshot_ssim"] = 0. <- [0, 100], 100 meaning images are equal
    report["raw_screenshot_diff_pixels"] = 0. <- [0, 100], 0 meaning images are equal
    report["raw_screenshot_divergence] = 0. <- [0, 100], 0 meaning images are equal, it is the combination of the indicators above
    * all of the above are mins()/maxes() of all pages. It basically reports the most diverging screenshot pair
    For each page:
    report["pages"][p]["raw_screenshot_analysis"][resolution]["mse"] = mse
    report["pages"][p]["raw_screenshot_analysis"][resolution]["ssim"] = ssim
    report["pages"][p]["raw_screenshot_analysis"][resolution]["diff_pixels"] = percent_different_pixels
    report["pages"][p]["raw_screenshot_analysis"][resolution]["baseline_file_path"] = "jobs//job_id//...//filename.png"
    report["pages"][p]["raw_screenshot_analysis"][resolution]["updated_file_path"] = ...
    report["pages"][p]["raw_screenshot_analysis"][resolution]["baseline_file_path_highlight"] = bf.replace(".png", "_highlight.png")
    report["pages"][p]["raw_screenshot_analysis"][resolution]["updated_file_path_highlight"] = uf.replace(".png", "_highlight.png")
    report["pages"][p]["raw_screenshot_analysis"][resolution]["updated_file_path_difference"] = uf.replace(".png", "_difference.png")
    report["pages"][p]["raw_screenshot_analysis"][resolution]["updated_file_path_threshold"] = uf.replace(".png", "_threshold.png")
    """
    # NOTE(review): the `jobs` parameter is unused here; kept for interface
    # compatibility with the other analysis entry points -- confirm.
    logging.info("Starting raw image analysis for job id {}".format(job_id))
    report = {}
    error = ""
    base_path = os.path.join("jobs", job_id)
    # Only pages present in BOTH crawls can be compared.
    baseline_pages = [x for x in os.listdir(os.path.join(base_path, "baseline")) if
                      os.path.isdir(os.path.join(base_path, "baseline", x))]
    updated_pages = [x for x in os.listdir(os.path.join(base_path, "updated")) if
                     os.path.isdir(os.path.join(base_path, "updated", x))]
    common_pages = set(baseline_pages) & set(updated_pages)
    # Worst-case defaults in case no screenshot pair can be compared.
    report["raw_screenshot_mse"] = 100.
    report["raw_screenshot_ssim"] = 0.
    report["raw_screenshot_diff_pixels"] = 100.
    report["raw_screenshot_divergence"] = 100.
    mse_scores = []
    ssim_scores = []
    diff_pixels = []
    avgs = []
    report["pages"] = {}
    for p in common_pages:
        report["pages"][p] = {}
        report["pages"][p]["raw_screenshot_analysis"] = {}
        # get resolutions; screenshots are named raw_<resolution>_<index>.png
        # BUG FIX: the original unpacked `dir, name = os.path.split(f)`,
        # shadowing the builtin `dir` with an unused variable (5 sites).
        base_files = []
        for f in glob.glob(os.path.join("jobs", job_id, "baseline", p, "raw_*.png")):
            name = os.path.basename(f)
            parts = name.replace(".png", "").split("_")
            if len(parts) == 3:
                base_files.append(f)
        upgr_files = []
        for f in glob.glob(os.path.join("jobs", job_id, "updated", p, "raw_*.png")):
            name = os.path.basename(f)
            parts = name.replace(".png", "").split("_")
            if len(parts) == 3:
                upgr_files.append(f)
        pairs = []  # this part should never throw and error but we need to check everytime
        for bf in base_files:
            name = os.path.basename(bf)
            counterpart = os.path.join(base_path, "updated", p, name)
            if not os.path.exists(counterpart):
                pass  # TODO throw error if resolution does not exist
            if [bf, counterpart] not in pairs:
                pairs.append([bf, counterpart])
        for uf in upgr_files:
            name = os.path.basename(uf)
            counterpart = os.path.join(base_path, "baseline", p, name)
            if not os.path.exists(counterpart):
                pass  # TODO throw error if resolution does not exist
            if [counterpart, uf] not in pairs:
                pairs.append([counterpart, uf])
        if len(pairs) == 0:
            error += " Page {} does not have any screenshots! ".format(p)
            continue
        for [bf, uf] in pairs:
            name = os.path.basename(uf)
            resolution = name.split("_")[1]  # e.g. raw_512_00.png
            logging.debug("Running raw analysis on {}, page {} resolution {} ...".format(job_id, p, resolution))
            report["pages"][p]["raw_screenshot_analysis"][resolution] = {}
            mse, ssim, percent_different_pixels, perror = raw_screenshot_analysis(bf, uf)
            mse_scores.append(mse)
            ssim_scores.append(ssim)
            diff_pixels.append(percent_different_pixels)
            # Divergence per pair: mean of MSE, inverted SSIM and pixel diff.
            avgs.append((mse + (100 - ssim) + percent_different_pixels) / 3)
            report["pages"][p]["raw_screenshot_analysis"][resolution]["baseline_file_path"] = bf
            report["pages"][p]["raw_screenshot_analysis"][resolution]["updated_file_path"] = uf
            report["pages"][p]["raw_screenshot_analysis"][resolution]["baseline_file_path_highlight"] = bf.replace(
                ".png", "_highlight.png")
            report["pages"][p]["raw_screenshot_analysis"][resolution]["updated_file_path_highlight"] = uf.replace(
                ".png", "_highlight.png")
            report["pages"][p]["raw_screenshot_analysis"][resolution]["updated_file_path_difference"] = uf.replace(
                ".png", "_difference.png")
            report["pages"][p]["raw_screenshot_analysis"][resolution]["updated_file_path_threshold"] = uf.replace(
                ".png", "_threshold.png")
            report["pages"][p]["raw_screenshot_analysis"][resolution]["mse"] = mse
            report["pages"][p]["raw_screenshot_analysis"][resolution]["ssim"] = ssim
            report["pages"][p]["raw_screenshot_analysis"][resolution]["diff_pixels"] = percent_different_pixels
            if perror:
                error += perror + " "
    if len(mse_scores) > 0:
        report["raw_screenshot_mse"] = max(mse_scores)
        report["raw_screenshot_ssim"] = min(ssim_scores)
        report["raw_screenshot_diff_pixels"] = max(diff_pixels)
        # HERE IS THE MAIN CALCULATION
        report["raw_screenshot_divergence"] = max(avgs)
    logging.info("Finished crawl analysis for job id {}".format(job_id))
    gc.collect()  # force RAM release
    return report, error
def parseString(string, namespaces=True):
    """Parse a document from a string, returning the resulting
    Document node.
    """
    builder = ExpatBuilderNS() if namespaces else ExpatBuilder()
    return builder.parseString(string)
import os
def vcs_dir_contents(path):
    """Return the versioned files under a path.
    :return: List of paths relative to path
    """
    # Walk upward from ``path`` until a git or bzr repository root is found.
    repo_root = path
    while repo_root != "/":
        git_dir = os.path.join(repo_root, ".git")
        bzr_dir = os.path.join(repo_root, ".bzr")
        if os.path.isdir(git_dir):
            list_cmd = [ 'git', 'ls-files', '--full-name',
                         os_path_relpath(path, repo_root) ]
            cwd = None
            env = dict(os.environ)
            env["GIT_DIR"] = git_dir
            break
        elif os.path.isdir(bzr_dir):
            list_cmd = [ 'bzr', 'ls', '--recursive', '--versioned',
                         os_path_relpath(path, repo_root)]
            cwd = repo_root
            env = None
            break
        repo_root = os.path.dirname(repo_root)
    if repo_root == "/":
        raise Exception("unsupported or no vcs for %s" % path)
    # Run the VCS listing command and split its output into one path per entry.
    return Utils.cmd_output(list_cmd, cwd=cwd, env=env).split()
def prompt_present(nbwidget):
    """Check if an In prompt is present in the notebook."""
    if WEBENGINE:
        # QtWebEngine's toHtml() is asynchronous: the page HTML is delivered
        # to a callback rather than returned, so it is stashed in a
        # module-level global for later inspection.
        def callback(data):
            global html
            html = data
        nbwidget.dom.toHtml(callback)
        try:
            return ' [ ]:' in html
        except NameError:
            # The callback has not fired yet, so the global ``html`` does not
            # exist -- report "no prompt" for now.
            # NOTE(review): on subsequent calls ``html`` may hold the page
            # content from a *previous* toHtml() round-trip; confirm that a
            # slightly stale snapshot is acceptable here.
            return False
    else:
        # QtWebKit's toHtml() is synchronous, so query it directly.
        return ' [ ]:' in nbwidget.dom.toHtml()
def _search_range(elem, session, query=None):
    """Perform a range search for DA, DT and TM elements with '-' in them.
    The element value is split on '-' into a lower and upper bound:
    ``lower-upper`` matches inclusively between the two, ``-upper`` matches
    everything up to and including ``upper``, and ``lower-`` matches
    everything from ``lower`` onwards.
    Parameters
    ----------
    elem : pydicom.dataelem.DataElement
        The attribute to perform the search with.
    session : sqlalchemy.orm.session.Session
        The session we are using to query the database.
    query : sqlalchemy.orm.query.Query, optional
        An existing query within which this search should be performed. If
        not used then all the Instances in the database will be searched
        (default).
    Returns
    -------
    sqlalchemy.orm.query.Query
        The resulting query.
    """
    lower, upper = elem.value.split("-")
    column = getattr(Instance, _TRANSLATION[elem.keyword])
    if not query:
        query = session.query(Instance)
    if lower and upper:
        return query.filter(column >= lower, column <= upper)
    if lower:
        return query.filter(column >= lower)
    if upper:
        return query.filter(column <= upper)
    # Both bounds empty (a bare "-") is not a valid range.
    raise ValueError("Invalid attribute value for range matching")
def is_repo_user(repo_obj, username=None):
    """ Return whether the user has some access in the provided repo. """
    # Resolve the user to check: an explicit name, or the logged-in user.
    if username:
        user = username
    else:
        if not authenticated():
            return False
        user = flask.g.fas_user.username
    # Admins always have access.
    if is_admin():
        return True
    # Owner, direct collaborator, or member of an associated group.
    if user == repo_obj.user.user:
        return True
    direct_users = [usr.user for usr in repo_obj.users]
    if user in direct_users:
        return True
    group_members = [usr.user for grp in repo_obj.groups for usr in grp.users]
    return user in group_members
def block_distance(p1, p2):
    """
    Returns the Block Distance of a particular point from rest of the points in dataset.
    """
    # Sum the absolute coordinate differences, deliberately stopping one
    # component short of the end (the last entry is presumably a class
    # label rather than a feature -- matches the original loop bound).
    return sum(abs(a - b) for a, b in zip(p1[:-1], p2))
def display_profiles():
    """
    Function that returns all save profiles
    """
    # Delegate straight to the Passwords model's lookup.
    saved_profiles = Passwords.display_profiles()
    return saved_profiles
from contextlib import suppress
def mutate(
    _data,
    *args,
    _keep="all",
    _before=None,
    _after=None,
    **kwargs,
):
    """Adds new variables and preserves existing ones
    The original API:
    https://dplyr.tidyverse.org/reference/mutate.html
    Args:
        _data: A data frame
        _keep: allows you to control which columns from _data are retained
            in the output:
            - "all", the default, retains all variables.
            - "used" keeps any variables used to make new variables;
              it's useful for checking your work as it displays inputs and
              outputs side-by-side.
            - "unused" keeps only existing variables not used to make new
              variables.
            - "none", only keeps grouping keys (like transmute()).
        _before: and
        _after: Optionally, control where new columns should appear
            (the default is to add to the right hand side).
            See relocate() for more details.
        *args: and
        **kwargs: Name-value pairs. The name gives the name of the column
            in the output. The value can be:
            - A vector of length 1, which will be recycled to the correct
              length.
            - A vector the same length as the current group (or the whole
              data frame if ungrouped).
            - None to remove the column
    Returns:
        An object of the same type as _data. The output has the following
        properties:
        - Rows are not affected.
        - Existing columns will be preserved according to the _keep
          argument. New columns will be placed according to the
          _before and _after arguments. If _keep = "none"
          (as in transmute()), the output order is determined only
          by ..., not the order of existing columns.
        - Columns given value None will be removed
        - Groups will be recomputed if a grouping variable is mutated.
        - Data frame attributes are preserved.
    """
    # Validate _keep early so an unsupported value fails fast.
    keep = arg_match(_keep, "_keep", ["all", "unused", "used", "none"])
    gvars = regcall(group_vars, _data)
    # Work on a copy so the caller's frame is never mutated.
    data = regcall(as_tibble, _data.copy())
    all_columns = data.columns
    mutated_cols = []
    # Tracks which existing columns are referenced while evaluating the
    # expressions; used below by the "used"/"unused" keep modes.
    context = ContextEvalRefCounts()
    # Positional args: a bare reference to an existing column is recorded as
    # mutated without re-evaluation; anything else is evaluated and added.
    for val in args:
        if (
            isinstance(val, (ReferenceItem, ReferenceAttr))
            and val._pipda_level == 1
            and val._pipda_ref in data
        ):
            mutated_cols.append(val._pipda_ref)
            continue
        bkup_name = name_of(val)
        val = evaluate_expr(val, data, context)
        if val is None:
            continue
        if isinstance(val, DataFrame):
            mutated_cols.extend(val.columns)
            data = add_to_tibble(data, None, val, broadcast_tbl=False)
        else:
            key = name_of(val) or bkup_name
            mutated_cols.append(key)
            data = add_to_tibble(data, key, val, broadcast_tbl=False)
    # Keyword args: a value of None drops the column; otherwise the
    # evaluated result is added (or overwrites) under the given key.
    for key, val in kwargs.items():
        val = evaluate_expr(val, data, context)
        if val is None:
            with suppress(KeyError):
                data.drop(columns=[key], inplace=True)
        else:
            data = add_to_tibble(data, key, val, broadcast_tbl=False)
            if isinstance(val, DataFrame):
                mutated_cols.extend({f"{key}${col}" for col in val.columns})
            else:
                mutated_cols.append(key)
    # names start with "_" are temporary names if they are used
    tmp_cols = [
        mcol
        for mcol in mutated_cols
        if mcol.startswith("_")
        and mcol in context.used_refs
        and mcol not in _data.columns
    ]
    # columns can be removed later
    # df >> mutate(Series(1, name="z"), z=None)
    mutated_cols = regcall(intersect, mutated_cols, data.columns)
    mutated_cols = regcall(setdiff, mutated_cols, tmp_cols)
    # new cols always at last
    # data.columns.difference() does not keep order
    data = data.loc[:, regcall(setdiff, data.columns, tmp_cols)]
    # Optionally reposition only the genuinely new columns.
    if _before is not None or _after is not None:
        new_cols = regcall(setdiff, mutated_cols, _data.columns)
        data = regcall(
            relocate,
            data,
            *new_cols,
            _before=_before,
            _after=_after,
        )
    # Resolve the _keep mode into the final column selection.
    if keep == "all":
        keep = data.columns
    elif keep == "unused":
        used = list(context.used_refs)
        unused = regcall(setdiff, all_columns, used)
        keep = regcall(intersect, data.columns, c(gvars, unused, mutated_cols))
    elif keep == "used":
        used = list(context.used_refs)
        keep = regcall(intersect, data.columns, c(gvars, used, mutated_cols))
    else:  # keep == 'none':
        keep = regcall(
            union,
            regcall(setdiff, gvars, mutated_cols),
            regcall(intersect, mutated_cols, data.columns),
        )
    data = data[keep]
    # redo grouping if original columns changed
    # so we don't have discrepancy on
    # df.x.obj when df is grouped
    if intersect(_data.columns, mutated_cols).size > 0:
        data = reconstruct_tibble(_data, data)
    # used for group_by
    data._datar["mutated_cols"] = mutated_cols
    return data
def test_triangular_mesh():
    """An example of a cone, ie a non-regular mesh defined by its
    triangles.
    """
    n = 8
    angles = np.linspace(-np.pi, np.pi, n)
    # Base rim of the cone: points of the unit circle in the z=0 plane,
    # generated via the complex exponential.
    rim = np.exp(1j * angles)
    # Prepend the apex vertex (0, 0, 1) as vertex 0.
    xs = np.r_[0, rim.real.copy()]
    ys = np.r_[0, rim.imag.copy()]
    zs = np.r_[1, np.zeros_like(rim.real)]
    scalars = np.r_[0, angles]
    # Triangle fan: every face shares the apex (vertex 0).
    triangles = [(0, i, i + 1) for i in range(1, n)]
    return triangular_mesh(xs, ys, zs, triangles, scalars=scalars)
def pos_tag(tokenized_text):
    """
    Averaged perceptron tagger from NLTK (originally from @honnibal)
    """
    # Tag the single sentence via the batch API and unwrap the result.
    tagged_sentences = pos_tag_sents([tokenized_text])
    return tagged_sentences[0]
def cabbeling(SA, CT, p):
    """
    Calculates the cabbeling coefficient of seawater with respect to
    Conservative Temperature. This function uses the computationally-
    efficient expression for specific volume in terms of SA, CT and p
    (Roquet et al., 2015).
    Parameters
    ----------
    SA : array-like
        Absolute Salinity, g/kg
    CT : array-like
        Conservative Temperature (ITS-90), degrees C
    p : array-like
        Sea pressure (absolute pressure minus 10.1325 dbar), dbar
    Returns
    -------
    cabbeling : array-like, 1/K^2
        cabbeling coefficient with respect to
        Conservative Temperature.
    """
    # Thin wrapper: the actual computation lives in the compiled GSW ufunc.
    return _gsw_ufuncs.cabbeling(SA, CT, p)
def _variant_to_dsl_helper(tokens) -> Variant:
    """Convert variant tokens to DSL objects.

    :type tokens: ParseResult
    :raises ValueError: if the tokens' kind is not one of the recognized
        variant kinds (HGVS, GMOD, PMOD, or FRAGMENT)
    """
    kind = tokens[KIND]
    if kind == HGVS:
        return Hgvs(tokens[HGVS])
    if kind == GMOD:
        concept = tokens[CONCEPT]
        return GeneModification(
            name=concept[NAME],
            namespace=concept[NAMESPACE],
            identifier=concept.get(IDENTIFIER),
            xrefs=tokens.get(XREFS),
        )
    if kind == PMOD:
        concept = tokens[CONCEPT]
        return ProteinModification(
            name=concept[NAME],
            namespace=concept[NAMESPACE],
            identifier=concept.get(IDENTIFIER),
            xrefs=tokens.get(XREFS),
            code=tokens.get(PMOD_CODE),
            position=tokens.get(PMOD_POSITION),
        )
    if kind == FRAGMENT:
        start, stop = tokens.get(FRAGMENT_START), tokens.get(FRAGMENT_STOP)
        return Fragment(
            start=start,
            stop=stop,
            description=tokens.get(FRAGMENT_DESCRIPTION),
        )
    # Reaching here means ``kind`` matched none of the four variant kinds
    # above, so report an unknown *variant* kind (the previous message
    # misleadingly said "fragment").
    raise ValueError('invalid variant kind: {}'.format(kind))
def remove_empty(df):
    """
    Drop all rows and columns that are completely null.
    Implementation is shamelessly copied from `StackOverflow`_.
    .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501
    Functional usage example:
    .. code-block:: python
        df = remove_empty(df)
    Method chaining example:
    .. code-block:: python
        df = pd.DataFrame(...)
        df = jn.DataFrame(df).remove_empty()
    :param df: The pandas DataFrame object.
    :returns: A pandas DataFrame.
    """
    # Drop all-null rows first, then all-null columns; both operations
    # mutate ``df`` in place and the same (modified) frame is returned.
    all_null_rows = df.isna().all(axis=1)
    df.drop(index=df.index[all_null_rows], inplace=True)
    all_null_cols = df.isna().all(axis=0)
    df.drop(columns=df.columns[all_null_cols], inplace=True)
    return df
from typing import Optional
def calculate_serving_size_weight(
    weight: Optional[float], number_of_servings: Optional[float]
) -> Optional[float]:
    """
    Compute the weight of a single serving of a component.

    ``weight`` is the total weight of the component in a recipe and
    ``number_of_servings`` is how many servings that weight represents.
    Returns ``None`` when either value is unknown.
    """
    if weight is None or number_of_servings is None:
        return None
    return weight / number_of_servings
def validate_positive(value):
    """Check if number is positive."""
    # None is treated as "no value" and passes through untouched.
    if value is None:
        return value
    if value <= 0:
        raise ValidationError(f'Expected a positive number, but got {value}')
    return value
import os
from datetime import datetime
import tempfile
import traceback
import subprocess
import sys
def main(args):
    """Process arguments and run validation.

    Runs the cBioPortal ``validateData.py`` script once per study directory,
    appending each study's validator output to a single timestamped log file
    and optionally writing one HTML report per study.

    :param args: parsed command-line arguments providing ``root_directory``,
        ``list_of_studies``, ``html_folder``, ``portal_info_dir``,
        ``no_portal_checks``, ``url_server``, ``portal_properties``,
        ``strict_maf_checks`` and ``max_reported_values``.
    :return: 0 when every study validated cleanly (possibly with warnings),
        1 when at least one study was invalid or errored.
    """
    # Set variables for directory and list of studies
    root_dir = args.root_directory
    studies = args.list_of_studies
    # If studies are filled in, create a list
    if studies is not None:
        list_studies = [str(study) for study in studies.split(',')]
    # Get all studies in root directory if no list of studies is defined
    if root_dir is not None and studies is None:
        list_studies = []
        for study_dir in os.listdir(root_dir):
            if os.path.isdir(os.path.join(root_dir, study_dir)):
                list_studies.append(os.path.join(root_dir, study_dir))
    # When both root directory and list of studies are given, create list of study files
    if root_dir is not None and studies is not None:
        list_studies = [os.path.join(root_dir, study) for study in list_studies]
    # Get current date and time to write to logfilename.
    # NOTE: with ``from datetime import datetime`` in scope, the previous
    # ``datetime.datetime.now()`` raised AttributeError; call now() directly.
    d_date = datetime.now()
    reg_format_date = d_date.strftime("%Y-%m-%d_%H:%M")
    only_logfilename = 'log-validate-studies-' + reg_format_date + '.txt'
    # When html folder is given as input, check if html folder exists, otherwise create folder
    # Also, when html folder is defined the logfile is written to this folder as well, otherwise to /tmp
    output_folder = args.html_folder
    if output_folder is not None:
        if not os.path.exists(output_folder):
            print("HTML output folder did not exist, so is created: %s" % output_folder)
            os.makedirs(output_folder)
        logfilename = os.path.join(output_folder, only_logfilename)
    else:
        # Get systems temp directory and write log file
        logfilename = os.path.join(tempfile.gettempdir(), only_logfilename)
    print('\nWriting validation logs to: {}'.format(logfilename))
    # Make dictionary of possible exit status from validateData and values that should be printed in the console
    # when these exit statuses occur
    possible_exit_status = {0: 'VALID', 1: 'INVALID', 2: 'INVALID (PROBLEMS OCCURRED)', 3: 'VALID (WITH WARNINGS)'}
    # This script can have two possible exit statuses 0 or 1. The validation exit status from this script
    # will become 1 when one of the studies fails validation. This will be when validateData has for at
    # least one of the studies validated exit status 1 or 2 or when another error occurred during the validation
    validation_exit_status = 0
    # Go through list of studies and run validation
    for study in list_studies:
        # Write to stdout and log file that we are validating this particular study
        print("\n=== Validating study %s" %study)
        # Check which portal info variable is given as input, and set correctly in the arguments for validateData
        if args.portal_info_dir is not None:
            validator_args = ['-v', '--study_directory', study, '-p', args.portal_info_dir]
        elif args.no_portal_checks:
            validator_args = ['-v', '--study_directory', study, '-n']
        else:
            validator_args = ['-v', '--study_directory', study, '-u', args.url_server]
        # Append argument for portal properties file if given at input
        if args.portal_properties is not None:
            validator_args.append('-P')
            validator_args.append(args.portal_properties)
        # Append argument for strict mode when supplied by user
        if args.strict_maf_checks is not False:
            validator_args.append('-m')
        # Append argument for maximum reported line numbers and encountered values in HTML
        if args.max_reported_values != 3:
            validator_args.append('-a %s' % args.max_reported_values)
        # When HTML file is required, create html file name and add to arguments for validateData
        if output_folder is not None:
            try:
                html = ""
                # Look in meta_study file for study name to add to name of validation report
                with open(os.path.join(study, "meta_study.txt"), 'r') as meta_study:
                    for line in meta_study:
                        if 'cancer_study_identifier' in line:
                            study_identifier = line.split(':')[1].strip()
                            html = study_identifier + "-validation.html"
                # If in the meta_study file no cancer_study_identifier could be found append study name from input
                if html == "":
                    only_study = study.split("/")[-1]
                    html = only_study + "-validation.html"
                # Add path to HTML file name and add to arguments for validateData
                html_file = os.path.join(output_folder, html)
                validator_args.append('--html')
                validator_args.append(html_file)
            # Exception can be thrown when the supplied study folder does not exist, when there is no meta_study
            # file or when there is no cancer_study_identifier and it will not create the HTML file at all.
            # (Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit still propagate.)
            except Exception:
                var_traceback = traceback.format_exc()
                print('\x1b[31m' + "Error occurred during creating html file name:")
                print(var_traceback)
                print("Validation from study " + study + " will not be written to HTML file" + '\x1b[0m')
        # Get the path to the validator , in the same directory as this script
        validation_script = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'validateData.py')
        # Run validateData on this study and get the exit status
        try:
            with open(logfilename, 'a') as log_file:
                log_file.write('=== Validation study: {}\n'.format(study))
                log_file.flush()
                exit_status_study = subprocess.call(
                    [sys.executable, '--', validation_script] + validator_args,
                    stdout=log_file)
        # If opening the log file or executing the script failed,
        except OSError:
            # Output the Python stack trace for the error
            traceback.print_exc(file=sys.stdout)
            # And mark this run as not validated
            exit_status_study = 2
        # Check exit status and print result
        exit_status_message = possible_exit_status.get(exit_status_study, 'Unknown status: {}'.format(exit_status_study))
        if exit_status_study == 0 or exit_status_study == 3:
            print('\x1b[0m' + "Result: %s" % exit_status_message)
        else:
            print('\x1b[0m' + "Result: " + '\x1b[31m' + exit_status_message + '\x1b[0m')
            validation_exit_status = 1  # When invalid check the exit status to one, for failing circleCI
    return validation_exit_status
def filter_invalid_matches_to_single_word_gibberish(
    matches,
    trace=TRACE_FILTER_SINGLE_WORD_GIBBERISH,
    reason=DiscardReason.INVALID_SINGLE_WORD_GIBBERISH,
):
    """
    Split ``matches`` (a list of LicenseMatch) into a (kept, discarded) pair
    of lists, discarding matches considered invalid single-word gibberish.
    A match is only a discard candidate when the scanned file is binary and
    the matched rule is a single-word "is_license_reference" rule. Such a
    candidate is discarded when its matched text fails the short-match
    validity check (stray leading/trailing punctuation or suspicious mixed
    case), allowing one character of slack for high-relevance rules (>= 75).
    """
    kept = []
    discarded = []
    for match in matches:
        rule = match.rule
        is_candidate = (
            rule.length == 1
            and rule.is_license_reference
            and match.query.is_binary
        )
        if is_candidate:
            matched_text = match.matched_text(
                whole_lines=False,
                highlight=False,
            ).strip()
            rule_text = rule.text().strip()
            if trace:
                logger_debug(
                    ' ==> POTENTIAL INVALID GIBBERISH:', match,
                    'matched_text:', repr(matched_text),
                    'rule_text:', repr(rule_text)
                )
            # High-relevance rules tolerate one character of difference.
            max_diff = 1 if rule.relevance >= 75 else 0
            if is_invalid_short_match(matched_text, rule_text, max_diff=max_diff):
                if trace:
                    logger_debug(' ==> DISCARDING INVALID GIBBERISH:', match)
                match.discard_reason = reason
                discarded.append(match)
                continue
        kept.append(match)
    return kept, discarded
import json
def start_game(player1, player2, symbols=("O", "X")):
    """Starts a command line tic tac toe game between 2 players (bot or human)

    ``player1`` and ``player2`` are callables invoked as
    ``player(game_state, game_tree)`` and must return the chosen cell index
    for players "1" and "2" respectively.  Returns the final ``game_status``
    of the finished game.
    """
    assert len(symbols) == 2, "`symbols` must have exactly 2 elements"
    # "_________" represents nine empty board cells.
    gstate = TicTacToe("_________", symbols=symbols)
    # Pre-computed move tree consulted by bot players.
    with open(GAME_TREE_FILE, "r") as gt_file:
        game_tree = json.load(gt_file)
    while True:
        status, player = gstate.game_status
        if status == "turn":
            print(f"\n=== Player {player}'s turn:\n\n")
            print(gstate.printable_board(legend_hint=True))
            print("\n")
            # Ask whichever player is on the move for their cell choice.
            if player == "1":
                p_move = player1(gstate, game_tree)
            else:
                p_move = player2(gstate, game_tree)
            print(f"\n>>> Player {player} has chosen: {p_move}")
            # Build the next immutable game state with the chosen cell filled.
            new_state = gstate.data.copy()
            new_state[int(p_move)] = player
            gstate = TicTacToe("".join(new_state), symbols=symbols)
        else:
            # Game over: show the final board and announce the outcome.
            print('\n')
            print(gstate.printable_board(legend_hint=True))
            if status == "win":
                print(f"\n~~~~~ Player {player} wins! ~~~~~\n")
            else:
                print("\n~~~~~ It's a Tie! ~~~~~\n")
            return gstate.game_status
def despike_l1b_array(data, dqf, filter_width=7):
    """
    Despike SUVI L1b data and return a despiked `numpy.ndarray`.
    Parameters
    ----------
    data : `numpy.ndarray`
        Array to despike.
    dqf : `numpy.ndarray`
        Data quality flags array.
    filter_width: `int`, optional.
        The filter width for the Gaussian filter, default is 7.
        If NaNs are still present in the despiked image, try increasing this value.
    Returns
    -------
    `numpy.ndarray`
        The despiked L1b image as a numpy array.
    """
    # Thin wrapper: the despiking algorithm itself lives in _despike().
    return _despike(data, dqf, filter_width)
def scrape(query: str,
           listing_age: int = None,
           relevance: str = None,
           job_type: list = None,
           experience: list = None,
           locations: list = ['Singapore'],
           limit: int = None,
           **kwargs):
    """
    Scrape LinkedIn job listings matching a search query.
    :param query: str
        Job search query
    :param listing_age: int
        Age of job listing.
        Available Options:
            1, 7, 30
    :param relevance: str
        Linkedin Relevance Filter
        Available Options:
            recent, relevant
    :param job_type: list
        Linkedin Job Type
        Available Options:
            contract, temporary, part_time, full_time
    :param experience: list
        Linkedin Experience Filter
        Available Options:
            internship, entry_level, associate, mid_senior, director
    :param locations: list
        List of Locations to Search
        Available Options:
            Singapore
    :param limit: integer
        Max Number of Jobs to Fetch
    :return: list of job dicts, each tagged with the originating ``query``
    """
    # ``jobs`` is a module-level collector; presumably it is appended to by
    # event handlers registered on ``scraper`` elsewhere in this module while
    # run() executes -- confirm against the scraper setup.
    global jobs
    # Map the user-friendly option strings/ints onto the scraper's enums.
    experience_filters = {
        'internship': ExperienceLevelFilters.INTERNSHIP,
        'entry_level': ExperienceLevelFilters.ENTRY_LEVEL,
        'associate': ExperienceLevelFilters.ASSOCIATE,
        'mid_senior': ExperienceLevelFilters.MID_SENIOR,
        'director': ExperienceLevelFilters.DIRECTOR,
    }
    time_filters = {
        1: TimeFilters.DAY,
        7: TimeFilters.WEEK,
        30: TimeFilters.MONTH,
        None: TimeFilters.ANY
    }
    relevance_filters = {
        'recent': RelevanceFilters.RECENT,
        'relevant': RelevanceFilters.RELEVANT,
    }
    type_filters = {
        'contract': TypeFilters.CONTRACT,
        'temporary': TypeFilters.TEMPORARY,
        'part_time': TypeFilters.PART_TIME,
        'full_time': TypeFilters.FULL_TIME
    }
    time_filter = time_filters.get(listing_age)
    relevance_filter = relevance_filters.get(relevance)
    # List-valued filters stay None when not supplied (meaning "no filter").
    type_filter = [type_filters[e] for e in job_type] if job_type else None
    experience_filter = [experience_filters[e] for e in experience] if experience else None
    # Reset the collector before each scrape so results don't accumulate.
    jobs = []
    queries = [
        Query(
            query=query,
            options=QueryOptions(
                locations=locations,
                optimize=True,
                limit=limit,
                filters=QueryFilters(
                    relevance=relevance_filter,
                    time=time_filter,
                    type=type_filter,
                    experience=experience_filter
                )
            )
        )
    ]
    scraper.run(queries)
    # Tag every collected job with the query that produced it.
    [e.update({'query': query}) for e in jobs]
    return jobs
from datetime import datetime
import json
import os
def info( request ):
    """ Returns basic info about the easyrequest_hay webapp.
        Triggered by root easyrequest_hay url.

        Renders an HTML info page by default, or serves the context as JSON
        when ``?format=json`` is passed. """
    log.debug( '\n\nstarting info(); request.__dict__, ```%s```' % request.__dict__ )
    # (Removed an unused ``start = datetime.datetime.now()`` line, which also
    # raised AttributeError under ``from datetime import datetime``.)
    context = {
        'pattern_header': common.grab_pattern_header(),
        'pattern_header_active': json.loads( os.environ['EZRQST__PATTERN_HEADER_ACTIVE_JSON'] )
    }
    if request.GET.get('format', '') == 'json':
        context_json = json.dumps(context, sort_keys=True, indent=2)
        resp = HttpResponse( context_json, content_type='application/javascript; charset=utf-8' )
    else:
        # Serve the newer header template only when the env flag is True.
        if context['pattern_header_active'] == True:
            template = 'easyrequest_app_templates/info_02.html'
        else:
            template = 'easyrequest_app_templates/info.html'
        resp = render( request, template, context )
    return resp
def run_command(arguments, parse_stdout=True):
    """Run a command.

    Args:
        arguments: A list with the arguments to execute. For example ['ls', 'foo']
        parse_stdout: When True (default), decode stdout as UTF-8 and split
            it into a list of lines; when False, return the raw bytes.
    Returns:
        stdout, return status. When the command cannot be launched, returns
        ('', -1).
    """
    try:
        cmd = sp.Popen(arguments, stdout=sp.PIPE, stderr=sp.STDOUT)
        stdout, _ = cmd.communicate()
    except Exception as e:
        m = "Warning: run_command failed with arguments {} and error {}".format(
            ' '.join(map(str, arguments)), e
        )
        # Previously this message was built but never emitted; print it so
        # launch failures are not silently swallowed.
        print(m)
        return '', -1
    if parse_stdout:
        stdout = stdout.decode("utf-8").split("\n")
    status = cmd.returncode
    return stdout, status
def format_key(key):
    """
    Format the key provided for consistency.
    """
    # None or an empty string normalizes to the root prefix.
    if not key:
        return "/"
    # Ensure exactly one trailing slash.
    return key if key.endswith("/") else key + "/"
def get_random_parent_asset(instance_id: int, instance_start_id, instance_end_id) -> int:
    """
    get random parent asset not linked to any child assets

    :param instance_id: asset instance_id
    :param instance_start_id: lower bound (inclusive) of the asset id range
    :param instance_end_id: upper bound (exclusive) of the asset id range
    :return: the instance_id of a randomly chosen asset in the range that is
        not ``instance_id`` itself nor one of its (recursive) children
    """
    # Collect instance_id and all of its descendants so they can be excluded
    # from the candidate parents (prevents cycles in the asset hierarchy).
    child_list = [instance_id]
    child_list = get_child_assets_id(child_list, instance_id)
    all_list = list(Asset.objects.filter(pk__gte=instance_start_id, pk__lt=instance_end_id).
                    values_list('instance_id', flat=True))
    # Use a set for O(1) membership tests instead of scanning the child list
    # once per asset (the previous list lookup was O(n*m)).
    excluded_ids = set(child_list)
    possible_parent_list = [a for a in all_list if a not in excluded_ids]
    return possible_parent_list[fake.pyint(max_value=len(possible_parent_list) - 1)]
def logout() -> Response:
    """API call: logout of the system."""
    config = CorporaAuthConfig()
    client = get_oauth_client(config)
    # Redirect the browser to the identity provider's logout endpoint,
    # telling it where to send the user afterwards.
    query_string = urlencode(
        {"returnTo": config.redirect_to_frontend, "client_id": config.client_id}
    )
    response = redirect(client.api_base_url + "/v2/logout?" + query_string)
    # Clear the auth cookie so the local session cannot be reused.
    remove_token(config.cookie_name)
    return response
import time
def elapsed_printer(func):
    """Decorator that prints the wrapped function's call arguments and how
    long the call took.

    (Docstring translated from the original Chinese: "This wrapper prints
    the function's input arguments as well as its running time.")
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def _wrapper(*args, **kwargs):
        print(">>> %s # Running ..." %
              _text_of_func_args_and_kwargs(func, args, kwargs))
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for measuring elapsed time.
        st = time.perf_counter()
        res = func(*args, **kwargs)
        elapsed = time.perf_counter() - st
        print(" Complete! Elapsed %.6f seconds." % elapsed)
        return res
    return _wrapper
def get_init_fn(checkpoint_path='E:/pythonProject/ckpt/classical/resnet_v1_101.ckpt',
                checkpoint_exclude_scopes=("resnet_v1_101/logits", "resnet_v1_101/fc", "deconv32")):
    """Returns a function run by the chief worker to warm-start the training.

    Args:
        checkpoint_path: path of the checkpoint to restore from. Previously
            hard-coded; kept as the default for backward compatibility.
        checkpoint_exclude_scopes: iterable of variable scope prefixes to
            skip when restoring (e.g. heads that are retrained from scratch).
    Returns:
        A callable usable as slim's ``init_fn`` that restores every model
        variable whose op name does not start with an excluded scope.
    """
    exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]
    variables_to_restore = []
    for var in slim.get_model_variables():
        # Skip any variable that falls under one of the excluded scopes.
        excluded = any(var.op.name.startswith(exclusion) for exclusion in exclusions)
        if not excluded:
            variables_to_restore.append(var)
    return slim.assign_from_checkpoint_fn(checkpoint_path,
                                          variables_to_restore)
def buildings_from_polygon(polygon, retain_invalid=False):
    """
    Get building footprints within some polygon.
    Parameters
    ----------
    polygon : Polygon
    retain_invalid : bool
        if False discard any building footprints with an invalid geometry
    Returns
    -------
    GeoDataFrame
    """
    # Thin wrapper: querying and assembly happen in create_buildings_gdf().
    return create_buildings_gdf(polygon=polygon, retain_invalid=retain_invalid)
from typing import Dict
def _define_problem_with_groups(problem: Dict) -> Dict:
"""
Checks if the user defined the 'groups' key in the problem dictionary.
If not, makes the 'groups' key equal to the variables names. In other
words, the number of groups will be equal to the number of variables, which
is equivalent to no groups.
Parameters
----------
problem : dict
The problem definition
Returns
-------
problem : dict
The problem definition with the 'groups' key, even if the user doesn't
define it
"""
# Checks if there isn't a key 'groups' or if it exists and is set to 'None'
if 'groups' not in problem or not problem['groups']:
problem['groups'] = problem['names']
elif len(problem['groups']) != problem['num_vars']:
raise ValueError("Number of entries in \'groups\' should be the same "
"as in \'names\'")
return problem | ab29954f3349509a9153219d040feb8fa3125ec7 | 3,630,372 |
def ljk(epsilon):
    """
    Calculates ecliptic triad vectors with respect to BCRS-frame.
    (Lindegren, SAG-LL-35, Eq.1)
    :param epsilon: obliquity of the equator.
    :return: np.array, np.array, np.array
    """
    cos_e = np.cos(epsilon)
    sin_e = np.sin(epsilon)
    # l points along the x-axis; j and k are the y/z axes rotated by
    # epsilon about x.
    l = np.array([1, 0, 0])
    j = np.array([0, cos_e, sin_e])
    k = np.array([0, -sin_e, cos_e])
    return l, j, k
from re import DEBUG
def send_mail(subject, message, recipient_list, request):
    """
    Wrapper for send_mail() with logging and error messaging
    :param subject: Message subject (string)
    :param message: Message body (string)
    :param recipient_list: Recipients of email (list)
    :param request: Request object
    :return: Amount of sent email (int)
    """
    try:
        ret = _send_mail(
            subject=subject,
            message=message,
            from_email=EMAIL_SENDER,
            recipient_list=recipient_list,
            fail_silently=False,
        )
        logger.debug(
            '{} email{} sent to {}'.format(
                ret, 's' if ret != 1 else '', ', '.join(recipient_list)
            )
        )
        return ret
    except Exception as ex:
        error_msg = 'Error sending email: {}'.format(str(ex))
        logger.error(error_msg)
        # NOTE(review): ``DEBUG`` here comes from ``from re import DEBUG``
        # at the top of this file -- a truthy regex flag -- so this re-raise
        # would ALWAYS fire.  It was presumably meant to be the application
        # settings' DEBUG flag; confirm the intended import.
        if DEBUG:
            raise ex
        # In non-debug operation, surface the error to the user and report
        # zero messages sent.
        messages.error(request, error_msg)
        return 0
def position_result_list(change_list):
    """
    Returns a template which iters through the models and appends a new
    position column.

    Takes a Django admin ChangeList, disables sorting on the existing
    columns, appends a "position" header sorted ascending, and adds an
    editable position ``<td>`` to every result row.
    """
    result = result_list(change_list)
    # Remove sortable attributes
    for x in range(0, len(result['result_headers'])):
        result['result_headers'][x]['sorted'] = False
        if result['result_headers'][x]['sortable']:
            result['result_headers'][x]['class_attrib'] = mark_safe(
                ' class="sortable"')
    # Append position <th> element
    result['result_headers'].append({
        'url_remove': '?o=',
        'sort_priority': 1,
        'sortable': True,
        'class_attrib': mark_safe(' class="sortable sorted ascending"'),
        'sorted': True,
        'text': 'position',
        'ascending': True,
        'url_primary': '?o=-1',
        'url_toggle': '?o=-1',
    })
    # Append the editable field to every result item
    for x in range(0, len(result['results'])):
        obj = change_list.result_list[x]
        # Get position object
        c_type = ContentType.objects.get_for_model(obj)
        try:
            object_position = ObjectPosition.objects.get(
                content_type__pk=c_type.id, object_id=obj.id)
        except ObjectPosition.DoesNotExist:
            # Lazily create a position record the first time a row is shown.
            object_position = ObjectPosition.objects.create(content_object=obj)
        # Add the <td>
        html = ('<td><input class="vTextField" id="id_position-{0}"'
                ' maxlength="10" name="position-{0}" type="text"'
                ' value="{1}" /></td>').format(object_position.id,
                                               object_position.position)
        result['results'][x].append(mark_safe(html))
    return result
import re
def _build_regex(pattern):
    """Compile regex pattern turn comma-separated list into | in regex."""
    if not pattern:
        return None
    # Strip a single quote character (' or ") at the beginning and end.
    pattern = re.sub("(^\"|^\'|\"$|\'$)", "", pattern)
    # Escape forward slashes.
    pattern = re.sub("/", r"\/", pattern)
    # Change "a,b,c" into "(a|b|c)".
    pattern = "(" + re.sub(",", "|", pattern) + ")"
    tf.logging.info("regex pattern %s" % pattern)
    return re.compile(pattern)
def fpSub(rm, a, b, ctx=None):
    """Create a Z3 floating-point subtraction expression.
    >>> s = FPSort(8, 24)
    >>> rm = RNE()
    >>> x = FP('x', s)
    >>> y = FP('y', s)
    >>> fpSub(rm, x, y)
    fpSub(RNE(), x, y)
    >>> fpSub(rm, x, y).sort()
    FPSort(8, 24)
    """
    # Delegate to the shared binary-op helper with Z3's native fpa_sub.
    return _mk_fp_bin(Z3_mk_fpa_sub, rm, a, b, ctx)
def reconstruct_contractility(simulation_folder, d_cyl, l_cyl, r_outer, scalef = 1000, scaleu = 1, scaleb = 1):
    """
    Reconstruct the contractility of a given cylindric simulation.

    Also calculates residuum forces of the inclusion surface and the matrix,
    which should be equal (use to certify the simulation and detect possible
    errors due to incorrect lengths as input). Saves the contractility
    calculated from absolute values, the contractility calculated only from
    x-components and the residuum forces to an .xlsx file, and writes several
    quiver/scatter plots as .png files into `simulation_folder`.

    Args:
        simulation_folder(str): File path to the simulation
        d_cyl(float): Diameter of the cylindric inclusion in the mesh model (in µm)
        l_cyl(float): Length of the cylindric inclusion in the mesh model (in µm)
        r_outer(float): Outer radius of the bulk mesh in the mesh model (in µm)
        scalef, scaleu, scaleb: To scale the arrows for force, deformation and
            boundary cond. in the quiver plots - only visually, no impact on values

    Returns:
        pandas.DataFrame with one row containing the contractility and
        residuum values (also written to Contractilities.xlsx).
    """
    # NOTE(review): r_outer is accepted but never used in this body — confirm
    # whether it is needed (e.g. for an outer-boundary mask) or can be dropped.
    # NOTE(review): output paths mix a single backslash ("\F", "\C") with
    # escaped "\\" separators; both are Windows-only — confirm intended.
    # Read in results (plain-text tables written by the simulation).
    coords = np.genfromtxt(simulation_folder+'/coords.dat')
    U = np.genfromtxt(simulation_folder+'/U.dat')
    force = np.genfromtxt(simulation_folder+'/F.dat')
    bcond = np.genfromtxt(simulation_folder+'/bcond.dat')
    # Mask Boundaries on which the tractions are summed up
    # (coords are in meters, d_cyl/l_cyl in µm, hence the 1e-6 factors).
    x, y, z = coords.T
    mask_inner = ((y**2. + z**2.) <= (d_cyl*1e-6/2)**2.) & (x**2. <= (l_cyl*1e-6/2)**2.)
    mask_outer = ~mask_inner
    # Left and right half of cell
    mask_inner_l = ((y**2. + z**2.) <= (d_cyl*1e-6/2)**2.) & (x**2. <= (l_cyl*1e-6/2)**2.) & (x<=0)
    mask_inner_r = ((y**2. + z**2.) <= (d_cyl*1e-6/2)**2.) & (x**2. <= (l_cyl*1e-6/2)**2.) & (x>=0)
    "Display Results in 3D Plot"
    # Plot 1: displacement and force fields as 3D quiver plots.
    fig1 = plt.figure()
    ax1 = Axes3D(fig1)
    ax1.set_xlabel('X [\u03bcm]', labelpad=6, fontsize = 13)
    ax1.set_ylabel('Y [\u03bcm]', labelpad=6, fontsize = 13)
    ax1.set_zlabel('Z [\u03bcm]', labelpad=6, fontsize = 13)
    # Display Deformation, Forces and selected Boundary
    u_x = np.array(U[:,0])
    u_y = np.array(U[:,1])
    u_z = np.array(U[:,2])
    f_x = np.array(force[:,0])
    f_y = np.array(force[:,1])
    f_z = np.array(force[:,2])
    bcond_x = np.array(bcond[:,0])
    bcond_y = np.array(bcond[:,1])
    bcond_z = np.array(bcond[:,2])
    #Scale quiver lengths
    scale_um = 1e6   # for axis in um
    # Deformationfield
    ax1.quiver(coords[:,0]*scale_um,coords[:,1]*scale_um,coords[:,2]*scale_um,
               u_x*scaleu*scale_um,u_y*scaleu*scale_um,u_z*scaleu*scale_um,
               lw=2,length = 1, colors = 'cadetblue' , normalize=False, label='Displacement')
    # Forcefield
    ax1.quiver(coords[:,0]*scale_um,coords[:,1]*scale_um,coords[:,2]*scale_um,
               f_x*scalef*scale_um,f_y*scalef*scale_um,f_z*scalef*scale_um,
               lw=2,length = 1, colors = 'orange' ,normalize=False,label='Force')
    # Cube-shaped axis limits slightly larger than the inclusion length.
    ax1.set_xlim(-(l_cyl/2)*1.1, (l_cyl/2)*1.1)
    ax1.set_ylim(-(l_cyl/2)*1.1, (l_cyl/2)*1.1)
    ax1.set_zlim(-(l_cyl/2)*1.1, (l_cyl/2)*1.1)
    ax1.xaxis.set_major_formatter(FormatStrFormatter('%3.0f'))
    ax1.yaxis.set_major_formatter(FormatStrFormatter('%3.0f'))
    ax1.zaxis.set_major_formatter(FormatStrFormatter('%3.0f'))
    plt.legend(fontsize = 15)
    plt.savefig(simulation_folder+"\Force_Displ_Field.png")
    plt.close()
    # Plot 2: Left / Right Forces and boundary conditions
    fig2 = plt.figure()
    ax2 = Axes3D(fig2)
    ax2.set_xlabel('X [\u03bcm]', labelpad=14, fontsize = 15)
    ax2.set_ylabel('Y [\u03bcm]', labelpad=14, fontsize = 15)
    ax2.set_zlabel('Z [\u03bcm]', labelpad=14, fontsize = 15)
    # Left and right forces
    ax2.quiver(coords[mask_inner_l][:,0]*scale_um,coords[mask_inner_l][:,1]*scale_um,coords[mask_inner_l][:,2]*scale_um,
               f_x[mask_inner_l]*scalef*scale_um,f_y[mask_inner_l]*scalef*scale_um,f_z[mask_inner_l]*scalef*scale_um,
               lw=2,length = 1, colors = 'r' ,normalize=False,label='Force (Left)')
    ax2.quiver(coords[mask_inner_r][:,0]*scale_um,coords[mask_inner_r][:,1]*scale_um,coords[mask_inner_r][:,2]*scale_um,
               f_x[mask_inner_r]*scalef*scale_um,f_y[mask_inner_r]*scalef*scale_um,f_z[mask_inner_r]*scalef*scale_um,
               lw=2,length = 1, colors = 'b' ,normalize=False,label='Force (Right)')
    # Boundary Cond.
    ax2.quiver(coords[:,0]*scale_um,coords[:,1]*scale_um,coords[:,2]*scale_um,
               bcond_x*scaleb*scale_um,bcond_y*scaleb*scale_um,bcond_z*scaleb*scale_um,
               lw=2,length = 1, colors = 'grey' ,normalize=False,label='Boundary Cond.' , alpha= 0.1 , zorder= -1)
    ax2.set_xlim(-(l_cyl/2)*1.1, (l_cyl/2)*1.1)
    ax2.set_ylim(-(l_cyl/2)*1.1, (l_cyl/2)*1.1)
    ax2.set_zlim(-(l_cyl/2)*1.1, (l_cyl/2)*1.1)
    ax2.xaxis.set_major_formatter(FormatStrFormatter('%3.0f'))
    ax2.yaxis.set_major_formatter(FormatStrFormatter('%3.0f'))
    ax2.zaxis.set_major_formatter(FormatStrFormatter('%3.0f'))
    plt.legend(fontsize = 15)
    plt.savefig(simulation_folder+"\\SplitForce_bcond.png")
    plt.close()
    # Plot 3: Distance Deformation Curve --linscale
    fig3 = plt.figure()
    # Radial distance and deformation magnitude per node, converted to µm.
    u_plot = np.sqrt(np.sum(coords ** 2., axis=1)) * 1e6
    v_plot = np.sqrt(np.sum(U ** 2., axis=1)) * 1e6
    plt.scatter(u_plot, v_plot, lw=0, alpha=1, s=10, c='C2')
    plt.xlabel('Distance [µm]')
    plt.ylabel('Deformation [µm]')
    plt.savefig(simulation_folder+"\\u_r_curve_lin.png", dpi=700)
    plt.close()
    # Plot 3: Distance Deformation Curve --logscale
    fig4 = plt.figure()
    #print (np.min(v_plot))
    plt.scatter(u_plot, v_plot, lw=0, alpha=1, s=10, c='C2')
    plt.xscale('log')
    plt.yscale('log')
    plt.ylim(10**-10, np.max(v_plot)+10)   # fixed, as autoscale does not work with log scale
    plt.xlabel('Distance [µm]')
    plt.ylabel('Deformation [µm]')
    plt.savefig(simulation_folder + "\\u_r_curve_log.png", dpi=700)
    plt.close()
    "Compute Contractilities"
    # initialize result dictionary
    results = {'Contractility Absolute (left)': [], 'Contractility Absolute (right)': [],
               'Contractility Absolute (mean)': [], 'Contractility x-components (left)': [],
               'Contractility x-components (right)': [], 'Contractility x-components (mean)': [],
               'Contractility Absolute (total length)': [], 'Residuum Forces': [],
               'Residuum Inner': [], 'Residuum Outer': []}
    # Residuum Forces: Outer and inner mask
    # (sum over all components; inner and outer should cancel to ~0).
    ResOut = np.sum(np.sum(force[mask_outer]))
    ResIn = np.sum(np.sum(force[mask_inner]))
    # Absolute Contractility computed for left and right site
    Contr_abs_l = np.sum(np.sqrt(np.sum(force[mask_inner_l]**2., axis=1)))
    Contr_abs_r = np.sum(np.sqrt(np.sum(force[mask_inner_r]**2., axis=1)))
    # Contractility computed by x-components for left and right site
    Contr_x_l = np.sum(np.abs(force[mask_inner_l][:,0]))
    Contr_x_r = np.sum(np.abs(force[mask_inner_r][:,0]))
    results['Contractility Absolute (left)'].append(Contr_abs_l)
    results['Contractility Absolute (right)'].append(Contr_abs_r)
    results['Contractility Absolute (mean)'].append(0.5*(Contr_abs_l+Contr_abs_r))
    results['Contractility x-components (left)'].append(Contr_x_l)
    results['Contractility x-components (right)'].append(Contr_x_r)
    results['Contractility x-components (mean)'].append(0.5*(Contr_x_l+Contr_x_r))
    results['Contractility Absolute (total length)'].append(np.sum(np.abs(force[mask_inner][:,0])))
    results['Residuum Inner'].append(ResIn)
    results['Residuum Outer'].append(ResOut)
    results['Residuum Forces'].append(ResOut+ResIn)
    df = pd.DataFrame.from_dict(results)
    df.columns = ['Contractility Absolute (left) [N]',
                  'Contractility Absolute (right) [N]',
                  'Contractility Absolute (mean) [N]',
                  'Contractility x-components (left) [N]',
                  'Contractility x-components (right) [N]',
                  'Contractility x-components (mean) [N]',
                  'Contractility Absolute (total length) [N]',
                  'Residuum Inner [N]',
                  'Residuum Outer [N]',
                  'Residuum Forces [N]' ]
    df.to_excel(simulation_folder+"\Contractilities.xlsx")
    return df
def categorical_sample(d):
    """Draw one key from *d* at random, weighted by its probability.

    Args:
        d: A dictionary mapping choices to probabilities (summing to 1).

    Returns:
        One of the keys of *d*.
    """
    choices = list(d.keys())
    weights = [d[c] for c in choices]
    # One multinomial draw yields a one-hot vector; argmax picks the winner.
    draw = np.random.multinomial(1, weights)
    return choices[int(np.argmax(draw))]
def gaspari_cohn_mid(z, c):
    """Gaspari-Cohn correlation function for middle distances (c <= z <= 2c).

    Arguments:
        - z: Points to be evaluated
        - c: Cutoff value
    """
    r = z / c
    # Fifth-order polynomial part of the mid-range Gaspari-Cohn branch.
    poly = (1. / 12 * r**5
            - 0.5 * r**4
            + 5. / 8 * r**3
            + 5. / 3 * r**2
            - 5 * r)
    return poly - 2. / 3 * c / z + 4
import click
import concurrent
def put(local_file, remote_file, outmode, template, recursive, process):
    """
    Upload a local file to every configured remote host over SCP.

    example:
    pypssh -t 192.168.31.1 put /etc/yum.conf /etc/yum.conf

    :param local_file: path of the file on the local machine
    :param remote_file: destination path on the remote host(s)
    :param outmode: output mode passed through to `echo` for result rendering
    :param template: output template passed through to `echo`
    :param recursive: if true, upload directories recursively
    :param process: if true, print per-file transfer progress
    """
    # Progress callback invoked by SCPClient for each chunk transferred.
    def _progress(filename, size, sent, peername):
        click.echo("(%s:%s) %s's progress: %.2f%% \r" % (
            peername[0], peername[1], filename, float(sent)/float(size)*100))
    # Upload to a single host; raises SSHException wrapping an SCPResult on failure.
    def _upload(host):
        try:
            ssh = get_ssh_conn_client(host)
            scp = SCPClient(ssh.get_transport(), progress4=_progress if process else None)
            scp.put(local_file, remote_file, recursive)
            get_ssh_logger(host).info(f"file localhost:{local_file} => {host.hostname}:{remote_file} successfully!")
            return SCPResult(host.hostname, src=local_file, dst=remote_file, completed=True)
        except Exception as ex:
            # get_ssh_logger(host).error(f"file localhost:{local_file} => {host.hostname}:{remote_file} faild! because {ex}")
            raise SSHException(SCPResult(host.hostname, src=local_file, dst=remote_file, completed=False, exception=repr(ex)))
    # NOTE(review): `concurrent` is called as a function here, but the visible
    # import binds the stdlib *module* `concurrent` — presumably a project
    # helper of the same name shadows it elsewhere; verify the real import.
    result = concurrent(_upload, [tuple([h]) for h in TARGET])
    echo(outmode, SSHResult, result, template)
def home_view(request):
    """Render the site home page.

    DESCRIPTION:
    The only FBV, delegating straight to the ``home.html`` template with an
    empty context.
    """
    context = {}
    return render(request, 'home.html', context)
import time
import six
def hparams_pb(hparams, trial_id=None, start_time_secs=None):
    # NOTE: Keep docs in sync with `hparams` above.
    """Create a summary encoding hyperparameter values for a single trial.

    Args:
      hparams: A `dict` mapping hyperparameters to the values used in this
        trial. Keys should be the names of `HParam` objects used in an
        experiment, or the `HParam` objects themselves. Values should be
        Python `bool`, `int`, `float`, or `string` values, depending on
        the type of the hyperparameter.
      trial_id: An optional `str` ID for the set of hyperparameter values
        used in this trial. Defaults to a hash of the hyperparameters.
      start_time_secs: The time that this trial started training, as
        seconds since epoch. Defaults to the current time.

    Returns:
      A TensorBoard `summary_pb2.Summary` message.
    """
    if start_time_secs is None:
        start_time_secs = time.time()
    hparams = _normalize_hparams(hparams)
    session_start_info = plugin_data_pb2.SessionStartInfo(
        group_name=_derive_session_group_name(trial_id, hparams),
        start_time_secs=start_time_secs,
    )
    # Populate the oneof value field for each hyperparameter, in sorted order
    # for determinism; `bool` must be checked before the numeric case since
    # Python bools are ints.
    for name in sorted(hparams):
        value = hparams[name]
        if isinstance(value, bool):
            session_start_info.hparams[name].bool_value = value
        elif isinstance(value, (float, int)):
            session_start_info.hparams[name].number_value = value
        elif isinstance(value, six.string_types):
            session_start_info.hparams[name].string_value = value
        else:
            raise TypeError(
                "hparams[%r] = %r, of unsupported type %r"
                % (name, value, type(value))
            )
    return _summary_pb(
        metadata.SESSION_START_INFO_TAG,
        plugin_data_pb2.HParamsPluginData(
            session_start_info=session_start_info
        ),
    )
def create_agents(nagi, age_all, nr_infec, nr_immucmpr, nr_freeroam, nr_social):
    """
    Creates the array of agents that will be used in the simulation.

    Parameters
    ----------
    nagi : int
        number of human agents that will be created.
    age_all : numpy array of ints
        distribution of the ages of the agents
    nr_infec : int
        number of initial infected people (chosen randomly)
    nr_immucmpr : int
        number of immunocompromised people (chosen randomly)
    nr_freeroam : int
        number of free-roaming agents (chosen randomly)
    nr_social : int
        number of social people who will go to social areas in first 8 hours of the day

    Returns
    -------
    humans
        list that contains all the human objects created

    Notes
    -----
    Also mutates the module-level ENV_DATA grids (population/infected/
    susceptible densities and the flat agent index) as a side effect.
    """
    humans = []
    max_size = ENV_DATA["size"]
    nr_of_grids = max_size * max_size #used for linear indices
    #create random positions (linear indexing) for the agents
    pos_all = np.random.randint(nr_of_grids, size=nagi)
    #create random indices (without repetition) of infected and immunocompromised individuals, also of freeroam and social agents
    initial_infected_pos = np.random.choice(nagi, nr_infec, replace=False)
    immunocomp_pos = np.random.choice(nagi, nr_immucmpr, replace=False)
    freeroam_pos = np.random.choice(nagi, nr_freeroam, replace = False)
    social_pos = np.random.choice(nagi, nr_social, replace = False)
    #create random distribution of timesteps until recuperation (only for infected)
    dts_until_healthy = (np.random.normal(PARAMS["sick_duration_mean"],48,nr_infec)).astype(int)
    dts_until_healthy = dts_until_healthy.clip(min=0) #avoid incredibly rare chances where value is negative
    #fill positions of immuno, freeroam and social
    immnuno_all = np.zeros(nagi)
    immnuno_all[immunocomp_pos] = 1
    freeroam_all = np.zeros(nagi)
    freeroam_all[freeroam_pos] = 1
    social_all = np.zeros(nagi)
    social_all[social_pos] = 1
    #create chance of death per timestep (only for infected)
    # NOTE(review): `map` truncates to the shortest input (nr_infec items), so
    # only the first nr_infec entries of immnuno_all are consumed — presumably
    # immnuno_all[initial_infected_pos] was intended; confirm.
    death_per_dt_chances = map(death_per_dt_aux, dts_until_healthy, age_all[initial_infected_pos], immnuno_all )
    #create our health, immunocompromised, time until healthy and dt chance death arrays (see human.py for futher reference)
    health_all = np.zeros(nagi)
    # -1 marks "not applicable" for non-infected agents.
    time_to_healthy_all = np.negative(np.ones(nagi))
    chance_death_dt_all = np.negative(np.ones(nagi))
    #here 1 means that they are infected (health) and that they are immunocompromised (immuno)
    health_all[initial_infected_pos] = 1
    time_to_healthy_all[initial_infected_pos] = dts_until_healthy
    chance_death_dt_all[initial_infected_pos] = list(death_per_dt_chances)
    #creation of the array of agents
    for i in range(nagi):
        age = age_all[i]
        # Convert the linear grid index back to a 2D (row, col) position.
        pos = tuple(np.unravel_index(pos_all[i],(max_size,max_size)))
        health = health_all[i]
        immunocompromised = immnuno_all[i]
        time_until_healthy = time_to_healthy_all[i]
        chance_death_dt = chance_death_dt_all[i]
        freeroam = freeroam_all[i]
        social = social_all[i]
        human = agent_class.Human(age, pos, 0, health, 0, immunocompromised, time_until_healthy, chance_death_dt, freeroam, social)
        humans.append(human)
        if (health == 1):
            ENV_DATA["infected_density"][pos[0],pos[1]] += 1 #density of infected individuals
        else:
            ENV_DATA["susceptible_density"][pos[0],pos[1]] += 1 #addint 1 to selected susceptible density (used for infect method)
        ENV_DATA["population_density"][pos[0],pos[1]] += 1 #adding 1 to selected density grid area
        #flat indexing of the human objects
        (ENV_DATA["agents_flat_positions"][np.ravel_multi_index((pos[0],pos[1]), (max_size,max_size))]).append(human)
    return humans
def get_top_level_data(end, start=20, get_fight_card_stats=False, both=True, avg_pause=.6):
    """
    Scrape event tables from the pages returned by `website(start, end)` and
    concatenate them into one DataFrame, pausing between requests.

    @param end: integer, last event index to fetch
    @param start: integer, first event index to fetch
    @param get_fight_card_stats: boolean, whether to read fight-card stats
    @param both: boolean, whether to also collect the first table per page
    @param avg_pause: float, mean (Poisson) pause in seconds between requests
    """
    # Prefix each scraped table with an "Event" column identifying its page.
    def add_d_set(df_use, name):
        df = df_use.copy()
        df.insert(0, "Event", f"UFC{name}")
        return df
    vals = website(start, end)
    rets_df = []
    if both:
        rets_df_2 = []
    # NOTE(review): rets_df_2 is filled below but never returned — looks like
    # a bug; returning it would change the function's interface, so confirm
    # the intended contract before fixing.
    for j, pause in zip(vals, np.random.poisson(avg_pause, len(vals))):
        print(j)
        # NOTE(review): the first and last branches are identical; only the
        # middle (get_fight_card_stats and both) branch differs.
        if get_fight_card_stats and not both:
            rets_df.append(add_d_set(pd.read_html(j)[2], j))
        elif get_fight_card_stats and both:
            d = pd.read_html(j)
            rets_df.append(add_d_set(d[2], j))
            rets_df_2.append(add_d_set(d[0], j))
        else:
            rets_df.append(add_d_set(pd.read_html(j)[2], j))
        # Randomized politeness delay between page fetches.
        sleep(pause)
    return pd.concat(rets_df, axis=0)
def compute_normalization_binary_search(activations,
                                        t,
                                        num_iters = 10):
  """Returns the normalization value for each example (t < 1.0).

  Runs a fixed-iteration binary search for the log-partition value at which
  the tempered probabilities sum to one.

  Args:
    activations: A multi-dimensional array with last dimension `num_classes`.
    t: Temperature 2 (< 1.0 for finite support).
    num_iters: Number of iterations to run the method.

  Returns:
    An array of same rank as activations with the last dimension being 1.
  """
  # Shift by the per-example max for numerical stability.
  mu = jnp.max(activations, -1, keepdims=True)
  normalized_activations = activations - mu
  shape_activations = activations.shape
  # Number of classes inside the support of the tempered exponential.
  effective_dim = jnp.float32(
      jnp.sum(
          jnp.int32(normalized_activations > -1.0 / (1.0 - t)),
          -1,
          keepdims=True))
  shape_partition = list(shape_activations[:-1]) + [1]

  def cond_fun(carry):
    _, _, iters = carry
    return iters < num_iters

  def body_fun(carry):
    lower, upper, iters = carry
    logt_partition = (upper + lower) / 2.0
    sum_probs = jnp.sum(
        exp_t(normalized_activations - logt_partition, t), -1, keepdims=True)
    update = jnp.float32(sum_probs < 1.0)
    # Move the bracket endpoint on the side that does not contain the root.
    lower = jnp.reshape(lower * update + (1.0 - update) * logt_partition,
                        shape_partition)
    upper = jnp.reshape(upper * (1.0 - update) + update * logt_partition,
                        shape_partition)
    return lower, upper, iters + 1

  # FIX: the lower/upper bracket was previously initialized twice with
  # identical values (dead code before the closure definitions); it is now
  # initialized exactly once, right before the search.
  lower = jnp.zeros(shape_partition)
  upper = -log_t(1.0 / effective_dim, t) * jnp.ones(shape_partition)
  lower, upper, _ = while_loop(cond_fun, body_fun, (lower, upper, 0))
  logt_partition = (upper + lower) / 2.0
  return logt_partition + mu
from copy import deepcopy
def _match_units(filter_params, fps, fname):
    """
    author: EM
    The filtering thresholds must match the timeseries units. If the right
    conversion is not possible, then check_ok is False, and the feature
    summaries will not be calculated for this file.

    :param filter_params: dict of threshold/unit settings, or None to skip filtering
    :param fps: frames per second of the recording, or -1 if unavailable
    :param fname: path of the results file (used to read microns-per-pixel
        and for error reporting)
    :return: (converted filter_params copy, check_ok flag)
    """
    # No filtering requested: nothing to convert, trivially ok.
    if filter_params is None:
        return filter_params, True
    all_units = filter_params['units']+[filter_params['time_units']]
    # Work on a deep copy so the caller's dict is never mutated.
    cfilter_params = deepcopy(filter_params)
    if fps==-1:
        # In this case, all time-related timeseries will be in frames.
        # If thresholds have been defined in seconds there is no way to convert.
        if 'seconds' in all_units:
            no_attr_flush('fps', fname)
            return cfilter_params, False
    else:
        # In this case, all time-related timeseries will be in seconds.
        # We always want the time_units for traj_length in frames
        if filter_params['time_units']=='seconds' and \
            filter_params['min_traj_length'] is not None:
            cfilter_params['min_traj_length'] = \
                filter_params['min_traj_length']*fps
        # If the timeseries therholds are defined in seconds, no conversion is
        # necessary
        # If the timeseries thresholds are defined in frames, we need to convert
        # to seconds
        if 'frame_numbers' in filter_params['units']:
            ids = [i for i,x in enumerate(filter_params['units']) if x=='frame_numbers']
            for i in ids:
                if filter_params['min_thresholds'][i] is not None:
                    cfilter_params['min_thresholds'][i]= \
                        filter_params['min_thresholds'][i]/fps
                if filter_params['max_thresholds'][i] is not None:
                    cfilter_params['max_thresholds'][i]= \
                        filter_params['max_thresholds'][i]/fps
    mpp = read_microns_per_pixel(fname)
    if mpp==-1:
        # In this case, all distance-related timeseries will be in pixels.
        # If thresholds have been defined in microns there is no way to convert.
        if 'microns' in all_units:
            no_attr_flush('mpp', fname)
            return cfilter_params, False
    else:
        # In this case, all distance-related timeseries will be in microns.
        # If the timeseries threholds are defined in micorns, no conversion is
        # necessary
        # If the timeseries thresholds are defined in pixels, we need to convert
        # to microns
        if filter_params['distance_units']=='pixels' and \
            filter_params['min_distance_traveled'] is not None:
            cfilter_params['min_distance_traveled'] = \
                filter_params['min_distance_traveled']*mpp
        if 'pixels' in filter_params['units']:
            ids = [i for i,x in enumerate(filter_params['units']) if x=='pixels']
            for i in ids:
                if filter_params['min_thresholds'][i] is not None:
                    cfilter_params['min_thresholds'][i]= \
                        filter_params['min_thresholds'][i]*mpp
                if filter_params['max_thresholds'][i] is not None:
                    cfilter_params['max_thresholds'][i]= \
                        filter_params['max_thresholds'][i]*mpp
    return cfilter_params, True
import time
import subprocess
import json
import logging
def gatherMetrics(varnishstatExe, fields):
    """Gather varnish metrics using varnishstat.

    :param varnishstatExe: Path to the varnishstat executable.
    :param fields: Field inclusion glob for varnishstat.
    :return: A tuple containing the timestamp of the measurement and the measured metric values.
        The timestamp is the time in milliseconds since the epoch.
        The metrics are the json-decoded output of varnishstat (or None when
        decoding fails). For example:
        {
            "timestamp": "2019-03-28T12:30:51",
            "MAIN.cache_hit": {
                "description": "Cache hits",
                "type": "MAIN", "flag": "c", "format": "i",
                "value": 33726681
            },
            "MAIN.cache_miss": {
                "description": "Cache misses",
                "type": "MAIN", "flag": "c", "format": "i",
                "value": 14
            }
        }
    """
    # Build the command line: one "-f <glob>" pair per requested field.
    command = [varnishstatExe, "-j"]
    for field in fields:
        command += ["-f", field]
    timestamp = int(round(time.time() * 1000))
    proc = subprocess.Popen(command, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    metrics = None
    try:
        # Decode the JSON report straight from the pipe.
        metrics = json.load(proc.stdout)
    except json.JSONDecodeError:
        logging.exception("failed to decode JSON output of varnishstat")
    # Wait for process to terminate
    proc.communicate()
    return (timestamp, metrics)
def squared_jumps(tracks, n_frames=1, start_frame=None, pixel_size_um=0.16,
    pos_cols=["y", "x"]):
    """
    Given a set of trajectories, return all of the the squared jumps
    as an ndarray.

    args
    ----
        tracks          :   pandas.DataFrame. Must contain the "trajectory"
                            and "frame" columns, along with whatever columns
                            are specified in *pos_cols*
        n_frames        :   int, the number of frame intervals over which
                            to compute the jump. For instance, if *n_frames*
                            is 1, only compute jumps between consecutive
                            frames.
        start_frame     :   int, exclude all jumps before this frame
        pixel_size_um   :   float, size of pixels in um
        pos_cols        :   list of str, the columns with the spatial
                            coordinates of each detection *in pixels*

    returns
    -------
        *jumps*, a 2D ndarray of shape (n_jumps, 5+). Each row corresponds
            to a single jump from the dataset.

            The columns of *vecs* have the following meaning:
                jumps[:,0] -> length of the origin trajectory in frames
                jumps[:,1] -> index of the origin trajectory
                jumps[:,2] -> frame corresponding to the first point in
                              the jump
                jumps[:,3] -> sum of squared jumps across all spatial dimensions
                              in squared microns
                jumps[:,4:] -> Euclidean jumps in each dimension in microns
    """
    # Empty-result shape matches the documented column layout.
    def bail():
        return np.zeros((0, 6), dtype=np.float64)
    # If passed an empty dataframe, bail
    if tracks.empty: return bail()
    # Do not modify the original dataframe
    tracks = tracks.copy()
    # Calculate the original trajectory length and exclude
    # singlets and negative trajectory indices
    tracks = track_length(tracks)
    tracks = tracks[np.logical_and(
        tracks["trajectory"] >= 0,
        tracks["track_length"] > 1
    )]
    # Only consider trajectories after some start frame
    if not start_frame is None:
        tracks = tracks[tracks["frame"] >= start_frame]
    # If no trajectories remain, bail
    if tracks.empty: return bail()
    # Convert from pixels to um
    tracks[pos_cols] *= pixel_size_um
    # Work with an ndarray, for speed.
    # NOTE: pos_cols[0] is included twice on purpose — the duplicate column
    # (index 3) acts as a placeholder that is later overwritten with the
    # summed squared jump.
    tracks = tracks.sort_values(by=["trajectory", "frame"])
    T = np.asarray(tracks[["track_length", "trajectory", "frame", pos_cols[0]] + pos_cols])
    # Allowing for gaps, consider every possible comparison that
    # leads to the correct frame interval
    target_jumps = []
    for j in range(1, n_frames+1):
        # Compute jumps
        jumps = T[j:,:] - T[:-j,:]
        # Only consider vectors between points originating
        # from the same trajectory and from the target frame
        # interval (a row offset of j can span n_frames frames
        # when the trajectory has gaps)
        same_track = jumps[:,1] == 0
        target_interval = jumps[:,2] == n_frames
        take = np.logical_and(same_track, target_interval)
        # Map the corresponding track lengths, track indices,
        # and frame indices back to each jump
        jumps[:,:3] = T[:-j,:3]
        jumps = jumps[take, :]
        # Calculate the corresponding 2D squared jump and accumulate
        if jumps.shape[0] > 0:
            jumps[:,3] = (jumps[:,4:]**2).sum(axis=1)
            target_jumps.append(jumps)
    # Concatenate
    if len(target_jumps) > 0:
        return np.concatenate(target_jumps, axis=0)
    else:
        return bail()
from typing import get_args
def main():
"""Change vowels of the text into the one selected ("a" as default)"""
args = get_args()
new_text = ''
for char in args.text:
if char in ['a','e','i','o','u']:
new_text = args.text.replace(char, args.vowel) ## doesn't work like this, look at solutions!
return(new_text)
print(new_text) | 638f00d93f53ac56ea6a6c4c1a1893a7abf8dfc3 | 3,630,390 |
def _mt_transpose_ ( self ) :
    """Transpose the multigraph (transpose every contained graph):
    >>> graph   = ...
    >>> graph_T = graph.transpose ()
    >>> graph_T = graph.T() ## ditto
    """
    new_graph = ROOT.TMultiGraph()
    # Keep Python-side references so the transposed graphs are not
    # garbage-collected while the multigraph is alive.
    new_graph._graphs = []
    ## FIX: use `self` (the multigraph being transposed) instead of the
    ## undefined name `mgraph`, and the correct ROOT method name
    ## `GetListOfGraphs` (was misspelled `GetListOfGraps`).
    _graphs = self.GetListOfGraphs()
    for g in _graphs :
        tg = g.T()
        new_graph.Add ( tg )
        new_graph._graphs.append ( tg )
    return new_graph
import collections
def recursive_dictionary_update(d, u):
    """
    Given two dictionaries, update the first one with new values provided by
    the second. Works for nested dictionary sets.
    :param d: First Dictionary, to base off of (mutated in place).
    :param u: Second Dictionary, to provide updated values.
    :return: Dictionary. Merged dictionary with bias towards the second.
    """
    # FIX: `collections.Mapping` was an alias removed in Python 3.10; use the
    # ABC from `collections.abc` instead.
    from collections.abc import Mapping
    for k, v in u.items():
        if isinstance(v, Mapping):
            # Recurse into nested mappings, creating the sub-dict if missing.
            d[k] = recursive_dictionary_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def uname(space):
    """ uname() -> (sysname, nodename, release, version, machine)

    Return a tuple identifying the current operating system.
    """
    # NOTE: Python 2 except-syntax — this is PyPy interpreter-level (RPython)
    # code where `space` is the object space used to wrap app-level values.
    try:
        r = os.uname()
    except OSError, e:
        # Translate the OS error into an app-level exception.
        raise wrap_oserror(space, e)
    # Wrap the first five uname fields as app-level strings.
    l_w = [space.wrap(i) for i in [r[0], r[1], r[2], r[3], r[4]]]
    return space.newtuple(l_w)
def load_model(
    model_path=filepath + "/trained_models/cmu/", model_file_name="model.h5"
):
    """Load a pre-trained word boundary model from disk.

    :param model_path: either a bundled model name (currently only "cmu") or
        a directory path containing the model's config.pkl and weights file
    :param model_file_name: name of the saved model file inside that directory
    :return: Seq2Seq object with the trained weights loaded
    """
    # A bare bundled-model name is expanded to its packaged directory.
    if model_path in ["cmu"]:
        model_path = filepath + "/trained_models/{}/".format(model_path)
    config = SConfig(configuration_file=model_path + "config.pkl")
    model = Seq2Seq(config)
    model.load_model(path_to_model=model_path, model_file_name=model_file_name)
    return model
def _tol_sweep(arr, tol=1e-15, orders=5):
"""
Find best tolerance 'around' tol to choose nonzero values of arr.
# Sweeps over tolerances +- 'orders' orders of magnitude around tol and picks the most
# stable one (one corresponding to the most repeated number of nonzero entries).
Parameters
----------
arr : ndarray
The array requiring computation of nonzero values.
tol : float
Tolerance. We'll sweep above and below this by 'orders' of magnitude.
orders : int
Number of orders of magnitude for one direction of our sweep.
Returns
-------
float
Chosen tolerance.
int
Number of repeated nonzero counts for the given tolerance.
int
Number of tolerances tested in the sweep.
int
Number of zero entries at chosen tolerance.
"""
nzeros = defaultdict(list)
itol = tol * 10.**orders
smallest = tol / 10.**orders
n_tested = 0
while itol >= smallest:
if itol < 1.:
num_zero = arr[arr <= itol].size
nzeros[num_zero].append(itol)
n_tested += 1
itol /= 10.
# pick lowest tolerance corresponding to the most repeated number of 'zero' entries
sorted_items = sorted(nzeros.items(), key=lambda x: len(x[1]), reverse=True)
good_tol = sorted_items[0][1][-1]
return good_tol, len(sorted_items[0][1]), n_tested, sorted_items[0][0] | 6acfce610fe98f4a1aeb4e3417796dd56901f402 | 3,630,395 |
def test_coo_attr():
    """
    Feature: Test COOTensor GetAttr in Graph and PyNative.
    Description: Test COOTensor.indices, COOTensor.values, COOTensor.shape.
    Expectation: Success.
    """
    indices = Tensor([[0, 1], [1, 2]])
    values = Tensor([1, 2], dtype=mstype.float32)
    shape = (3, 4)
    coo = COOTensor(indices, values, shape)
    # Closures exercised once eagerly (PyNative) and once compiled (Graph).
    def test_pynative_1():
        return coo.indices, coo.values, coo.shape
    def test_pynative_2():
        return coo.astype(mstype.int32)
    def test_pynative_3():
        return coo.to_tuple()
    # Compile each closure into graph mode via ms_function.
    test_graph_1 = ms_function(test_pynative_1)
    test_graph_2 = ms_function(test_pynative_2)
    test_graph_3 = ms_function(test_pynative_3)
    py_indices, py_values, py_shape = test_pynative_1()
    py_coo = test_pynative_2()
    py_tuple = test_pynative_3()
    g_indices, g_values, g_shape = test_graph_1()
    g_coo = test_graph_2()
    g_tuple = test_graph_3()
    # Rebuild COOTensors from the attribute triples for comparison.
    coo1 = COOTensor(py_indices, py_values, py_shape)
    coo2 = COOTensor(g_indices, g_values, g_shape)
    # check coo attr
    compare_coo(coo1, coo2)
    # check astype
    compare_coo(py_coo, g_coo)
    # check to_tuple: element-wise equality, tensors compared by value.
    assert len(py_tuple) == len(g_tuple)
    for i, _ in enumerate(py_tuple):
        if isinstance(py_tuple[i], Tensor):
            assert (py_tuple[i].asnumpy() == g_tuple[i].asnumpy()).all()
        else:
            assert py_tuple[i] == g_tuple[i]
def get_resource_path_if_exists(atlas_category, id):
    """Return the atlas-qualified path for *id*, or None if it is absent.

    :param atlas_category: key into ATLAS_CATEGORIES selecting the atlas
    :param id: texture identifier to look up
    """
    atlas_path = ATLAS_CATEGORIES.get(atlas_category)
    if id not in get_atlas(atlas_path).textures:
        return None
    logger.debug(f'Found {id} in atlas')
    return f'{atlas_path}/{id}'
def get_all_parties():
    """Return all registered political parties.

    Responds 404 when none have been created yet, otherwise 200 with the
    complete list.
    """
    parties = app.api.routes.models.political.parties
    if parties:
        return Responses.complete_response(parties), 200
    return Responses.not_found("No created parties yet"), 404
def send(channel, apdu: list) -> bytes:
    """
    Transmit *apdu* over *channel*, follow ISO 7816 status words, and
    return the response data.

    Handles 61xx (more data available -> GET RESPONSE for sw2 bytes) and
    6Cxx (wrong Le -> resend with Le = sw2). Any other non-9000 status
    prints an error, disconnects the channel and exits the process.
    """
    data, sw1, sw2 = channel.transmit(apdu)
    # success
    if (sw1, sw2) == (0x90, 0x00):
        return bytes(data)
    # more data to read: issue GET RESPONSE of sw2 bytes
    if sw1 == 0x61:
        return send(channel, [0x00, 0xC0, 0x00, 0x00, sw2])
    # wrong Le: resend the same APDU header with Le = sw2
    if sw1 == 0x6C:
        return send(channel, apdu[0:4] + [sw2])
    # anything else is treated as a fatal error condition
    formatted_apdu = " ".join(["{:02x}".format(x) for x in apdu]).upper()
    print(
        "Error: %02x %02x, sending APDU: %s"
        % (sw1, sw2, formatted_apdu)
    )
    channel.disconnect()
    exit(1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.