| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
|
def bytes_to_msg(seq, standard="utf-8"):
"""Decode bytes to text."""
return seq.decode(standard)
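
A one-line check with an illustrative value:

print(bytes_to_msg(b"caf\xc3\xa9"))  # "café" under the default "utf-8" standard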
|
5664d97b3fec5d119daa2171bcb431ca5a4b5f33
| 3,640,400
|
from tqdm.auto import tqdm
from typing import Union
import random
import time
import statistics
def cross_validate(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: Union[str, list] = "all",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
show_time: bool = True,
training_score: bool = False,
**kwargs,
):
"""
---------------------------------------------------------------------------
Computes the K-Fold cross validation of an estimator.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str/list, optional
Metric used to do the model evaluation. It can also be a list of metrics.
all: The model will compute all the possible metrics.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
best_cutoff : Cutoff which optimised the ROC Curve prediction.
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
aic : Akaike’s information criterion
bic : Bayesian information criterion
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
show_time: bool, optional
If set to True, the time and the average time will be added to the report.
training_score: bool, optional
If set to True, the training score will be computed with the validation score.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(X, str):
X = [X]
check_types(
[
("X", X, [list],),
("input_relation", input_relation, [str, vDataFrame],),
("y", y, [str],),
("metric", metric, [str, list],),
("cv", cv, [int, float],),
("cutoff", cutoff, [int, float],),
]
)
if isinstance(input_relation, str):
input_relation = vdf_from_relation(input_relation, cursor=estimator.cursor)
if cv < 2:
raise ParameterError("Cross Validation is only possible with at least 2 folds")
if category_from_model_type(estimator.type)[0] == "regressor":
all_metrics = [
"explained_variance",
"max_error",
"median_absolute_error",
"mean_absolute_error",
"mean_squared_error",
"root_mean_squared_error",
"r2",
"r2_adj",
"aic",
"bic",
]
elif category_from_model_type(estimator.type)[0] == "classifier":
all_metrics = [
"auc",
"prc_auc",
"accuracy",
"log_loss",
"precision",
"recall",
"f1_score",
"mcc",
"informedness",
"markedness",
"csi",
]
else:
raise Exception(
"Cross Validation is only possible for Regressors and Classifiers"
)
if metric == "all":
final_metrics = all_metrics
elif isinstance(metric, str):
final_metrics = [metric]
else:
final_metrics = metric
result = {"index": final_metrics}
if training_score:
result_train = {"index": final_metrics}
total_time = []
if verticapy.options["tqdm"] and ("tqdm" not in kwargs or ("tqdm" in kwargs and kwargs["tqdm"])):
loop = tqdm(range(cv))
else:
loop = range(cv)
for i in loop:
try:
estimator.drop()
except:
pass
random_state = verticapy.options["random_state"]
        random_state = (
            # randint requires integer bounds; 10e6 is a float
            random.randint(-10_000_000, 10_000_000) if not random_state else random_state + i
        )
train, test = input_relation.train_test_split(
test_size=float(1 / cv), order_by=[X[0]], random_state=random_state
)
start_time = time.time()
estimator.fit(
train, X, y, test,
)
total_time += [time.time() - start_time]
if category_from_model_type(estimator.type)[0] == "regressor":
if metric == "all":
result["{}-fold".format(i + 1)] = estimator.regression_report().values[
"value"
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train[
"{}-fold".format(i + 1)
] = estimator.regression_report().values["value"]
elif isinstance(metric, str):
result["{}-fold".format(i + 1)] = [estimator.score(metric)]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [estimator.score(metric)]
else:
result["{}-fold".format(i + 1)] = [estimator.score(m) for m in metric]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(m) for m in metric
]
else:
if (len(estimator.classes_) > 2) and (pos_label not in estimator.classes_):
raise ParameterError(
"'pos_label' must be in the estimator classes, it must be the main class to study for the Cross Validation"
)
elif (len(estimator.classes_) == 2) and (
pos_label not in estimator.classes_
):
pos_label = estimator.classes_[1]
try:
if metric == "all":
result["{}-fold".format(i + 1)] = estimator.classification_report(
labels=[pos_label], cutoff=cutoff
).values["value"][0:-1]
if training_score:
estimator.test_relation = estimator.input_relation
result_train[
"{}-fold".format(i + 1)
] = estimator.classification_report(
labels=[pos_label], cutoff=cutoff
).values[
"value"
][
0:-1
]
elif isinstance(metric, str):
result["{}-fold".format(i + 1)] = [
estimator.score(metric, pos_label=pos_label, cutoff=cutoff)
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(metric, pos_label=pos_label, cutoff=cutoff)
]
else:
result["{}-fold".format(i + 1)] = [
estimator.score(m, pos_label=pos_label, cutoff=cutoff)
for m in metric
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(m, pos_label=pos_label, cutoff=cutoff)
for m in metric
]
except:
if metric == "all":
result["{}-fold".format(i + 1)] = estimator.classification_report(
cutoff=cutoff
).values["value"][0:-1]
if training_score:
estimator.test_relation = estimator.input_relation
result_train[
"{}-fold".format(i + 1)
] = estimator.classification_report(cutoff=cutoff).values[
"value"
][
0:-1
]
elif isinstance(metric, str):
result["{}-fold".format(i + 1)] = [
estimator.score(metric, cutoff=cutoff)
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(metric, cutoff=cutoff)
]
else:
result["{}-fold".format(i + 1)] = [
estimator.score(m, cutoff=cutoff) for m in metric
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(m, cutoff=cutoff) for m in metric
]
try:
estimator.drop()
except:
pass
n = len(final_metrics)
total = [[] for item in range(n)]
for i in range(cv):
for k in range(n):
total[k] += [result["{}-fold".format(i + 1)][k]]
if training_score:
total_train = [[] for item in range(n)]
for i in range(cv):
for k in range(n):
total_train[k] += [result_train["{}-fold".format(i + 1)][k]]
result["avg"], result["std"] = [], []
if training_score:
result_train["avg"], result_train["std"] = [], []
for item in total:
result["avg"] += [statistics.mean([float(elem) for elem in item])]
result["std"] += [statistics.stdev([float(elem) for elem in item])]
if training_score:
for item in total_train:
result_train["avg"] += [statistics.mean([float(elem) for elem in item])]
result_train["std"] += [statistics.stdev([float(elem) for elem in item])]
total_time += [
statistics.mean([float(elem) for elem in total_time]),
statistics.stdev([float(elem) for elem in total_time]),
]
result = tablesample(values=result).transpose()
if show_time:
result.values["time"] = total_time
if training_score:
result_train = tablesample(values=result_train).transpose()
if show_time:
result_train.values["time"] = total_time
if training_score:
return result, result_train
else:
return result
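
A hedged usage sketch (illustrative, not from the source): it assumes an open Vertica connection, verticapy's old-style LinearRegression estimator carrying a database cursor, and made-up table/column names.

# Sketch only -- needs a reachable Vertica database; the import path and the
# constructor arguments of LinearRegression are assumptions, and my_cursor is
# a hypothetical open database cursor.
from verticapy.learn.linear_model import LinearRegression

model = LinearRegression("public.price_model", cursor=my_cursor)
report = cross_validate(
    model,
    input_relation="public.houses",  # str or vDataFrame
    X=["surface", "n_rooms"],
    y="price",
    cv=5,
)
print(report)  # tablesample with per-fold scores plus avg/std (and time, since show_time=True)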
|
fc59bb42c9e776e91b455f5d79e762721246754d
| 3,640,401
|
def bonferroni_correction(pvals):
"""
Bonferroni correction.
Reference: http://en.wikipedia.org/wiki/Bonferroni_correction
"""
n = len(pvals)
    return [min(x * n, 1.0) for x in pvals]
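
A quick worked example:

print(bonferroni_correction([0.01, 0.04, 0.5]))  # -> [0.03, 0.12, 1.0] (up to float rounding)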
|
f57ffd6b77a0a74a61904334604d1cb0eb08f8ff
| 3,640,402
|
from typing import Any
from typing import List
from typing import Optional
from typing import Dict
def to_r4(fhir_json: JsonObj, opts: Namespace, ifn: str) -> JsonObj:
"""
Convert the FHIR Resource in "o" into the R4 value notation
:param fhir_json: FHIR resource
:param opts: command line parser arguments
:param ifn: input file name
:return: reference to "o" with changes applied. Warning: object is NOT copied before change
"""
server = opts.fhirserver # If absent, the FILE becomes the base of the context
def is_choice_element(name):
# TODO: we really do need to be a lot more clever if this is to scale in the longer term. For now, we
# assume that valueX is a choice unless it is an exception
return name.startswith(VALUE_KEY) and name[VALUE_KEY_LEN:] and name[VALUE_KEY_LEN].isupper() and\
name[VALUE_KEY_LEN:] not in ['Set']
def map_element(element_key: str, element_value: Any, container_type: str, path: List[str],
container: JsonObj, id_map: Optional[Dict[str, str]] = None, in_container: bool = False) -> None:
"""
Transform element_value into the R4 RDF json structure
:param element_key: Key for element value
:param element_value: Element itself. Can be any JSON object
:param container_type: The type of the containing resource
:param path: The path from the containing resource down to the element excluding element_key
:param container: Dictionary that contains key/value
:param id_map: Map from local resource to URI if inside a bundle
:param in_container: True means don't tack the resource type onto the identifier
"""
if element_key.startswith('@'): # Ignore JSON-LD components
return
if not is_choice_element(element_key):
path.append(element_key)
if path == ['Coding', 'system']:
add_type_arc(container)
inner_type = local_name(getattr(container, RESOURCETYPE_KEY, None))
if isinstance(element_value, JsonObj): # Inner object -- process each element
dict_processor(element_value, resource_type, path, id_map)
elif isinstance(element_value, list): # List -- process each member individually
container[element_key] = list_processor(element_key, element_value, resource_type, path, id_map)
# We have a primitive JSON value
elif element_key == RESOURCETYPE_KEY and not element_value.startswith('fhir:'):
container[element_key] = 'fhir:' + element_value
container['@context'] = f"{opts.contextserver}{element_value.lower()}.context.jsonld"
elif element_key == ID_KEY: # Internal ids are relative to the document
if in_container or getattr(container, RESOURCETYPE_KEY, None) is None:
relative_id = '#' + element_value
else:
relative_id = element_value if element_value.startswith('#') else \
((inner_type or container_type) + '/' + element_value)
container_id = id_map.get(relative_id, relative_id) if id_map else relative_id
if not hasattr(container, '@id'):
# Bundle ids have already been added elsewhere
container['@id'] = container_id
container[element_key] = to_value(element_value)
elif element_key not in [NODEROLE_KEY, INDEX_KEY, DIV_KEY]: # Convert most other nodes to value entries
container[element_key] = to_value(element_value)
if not isinstance(element_value, list):
add_type_arcs(element_key, container[element_key], container, path, opts, server, id_map)
if not is_choice_element(element_key):
path.pop()
def dict_processor(container: JsonObj, resource_type: Optional[str] = None, path: List[str] = None,
id_map: Optional[Dict[str, str]] = None, in_container: bool = False) -> None:
"""
Process the elements in container
:param container: JSON dictionary to be processed
:param resource_type: type of resource that container appears in
:param path: Full path from the base resource type to the actual element
:param id_map: Map from local resource to URI if inside a bundle
:param in_container: If True then don't tack they type onto the identifier
"""
# Rule: Whenever we find an embedded resourceType, we assume that we've encountered a brand new resource
# Update the passed resource type (example: container is Observation, we're processing the subject node
# and the inner resourceType is Patient
#
# Note: If there isn't a declared resourceType, it may be able to be extracted from the URL if the URL matches
# the predefined FHIR structure
if hasattr(container, RESOURCETYPE_KEY):
resource_type = container[RESOURCETYPE_KEY]
path = [resource_type]
# If we've got bundle, build an id map to use in the interior
id_map = bundle_urls(container) # Note that this will also assign ids to bundle entries
# Add any contained resources to the contained URL map
add_contained_urls(container, id_map)
# Process each of the elements in the dictionary
# Note: use keys() and re-look up to prevent losing the JsonObj characteristics of the values
for k in [k for k in as_dict(container).keys() if not k.startswith('_')]:
if is_choice_element(k):
map_element(k, container[k], resource_type, [k[VALUE_KEY_LEN:]], container, id_map, in_container)
else:
map_element(k, container[k], resource_type, path, container, id_map, in_container)
# Merge any extensions (keys that start with '_') into the base
# This happens when either:
# A) there is only an extension and no base
# B) there is a base, but it isn't a JSON object
for ext_key in [k for k in as_dict(container).keys() if k.startswith('_')]:
base_key = ext_key[1:]
ext_value = container[ext_key]
del(container[ext_key])
if not hasattr(container, base_key):
container[base_key] = ext_value # No base -- move the extension in
elif not isinstance(container[base_key], JsonObj):
container[base_key] = to_value(container[base_key]) # Base is not a JSON object
container[base_key]['extension'] = ext_value['extension'] \
if isinstance(ext_value, JsonObj) else ext_value
else:
container[base_key]['extension'] = ext_value['extension']
map_element(base_key, ext_value, EXTENSION_RESOURCE_TYPE, [EXTENSION_RESOURCE_TYPE], container, id_map)
def list_processor(list_key: str, list_object: List[Any], resource_type: str, path: List[str] = None,
id_map: Optional[Dict[str, str]] = None) -> List[Any]:
"""
Process the elements in the supplied list adding indices and doing an iterative transformation on the
interior nodes
:param list_key: JSON key at the start of the list
:param list_object: List to be processed
:param resource_type: The type of resource containing the list
:param path: JSON path to list element. Head of path is the root resource type
:param id_map: Map from local resource to URI if inside a bundle
:return Ordered list of entries
"""
def list_element(entry: Any, pos: int) -> Any:
"""
Add a list index to list element "e"
:param entry: Element in a list
:param pos: position of element
:return: adjusted object
"""
if isinstance(entry, JsonObj):
dict_processor(entry, resource_type, path, id_map, list_key == CONTAINED_KEY)
if getattr(entry, INDEX_KEY, None) is not None and '_' not in opts.fsv.flat_path(path):
print(f'{ifn} - problem: "{list_key}: {opts.fsv.flat_path(path)}" element {pos} already has an index')
else:
entry.index = pos # Add positioning
if list_key == CODING_KEY:
add_type_arc(entry)
elif isinstance(entry, list):
print(f"{ifn} - problem: {list_key} has a list in a list")
else:
entry = to_value(entry)
add_type_arcs(list_key, entry, entry, path, opts, server, id_map)
entry.index = pos
return entry
return [list_element(list_entry, pos) for pos, list_entry in enumerate(list_object)]
# =========================
# Start of to_r4 base code
# =========================
# Do the recursive conversion
resource_type = fhir_json[RESOURCETYPE_KEY] # Pick this up before it processed for use in context below
dict_processor(fhir_json)
# Add nodeRole
fhir_json['nodeRole'] = "fhir:treeRoot"
# Traverse the graph adjusting relative URL's
adjust_urls(fhir_json)
# Add the "ontology header"
hdr = JsonObj()
if '@id' in fhir_json:
hdr["@id"] = fhir_json['@id'] + ".ttl"
hdr["owl:versionIRI"] = (opts.versionbase + ('' if opts.versionbase[-1] == '/' else '') +
hdr['@id']) if opts.versionbase else hdr["@id"]
hdr["owl:imports"] = "fhir:fhir.ttl"
hdr["@type"] = 'owl:Ontology'
fhir_json["@included"] = hdr
else:
print(f"{ifn} does not have an identifier")
# Fill out the rest of the context
if opts.addcontext:
fhir_json['@context'] = [f"{opts.contextserver}{resource_type.lower()}.context.jsonld"]
fhir_json['@context'].append(f"{opts.contextserver}root.context.jsonld")
local_context = JsonObj()
local_context["nodeRole"] = JsonObj(**{"@type": "@id", "@id": "fhir:nodeRole"})
if server:
local_context["@base"] = server
local_context['owl:imports'] = JsonObj(**{"@type": "@id"})
local_context['owl:versionIRI'] = JsonObj(**{"@type": "@id"})
fhir_json['@context'].append(local_context)
return fhir_json
|
c1e249387fa5045be65bc4ca742ebf086745a8a8
| 3,640,403
|
from itertools import accumulate, chain, repeat
def make_fib():
"""Returns a function that returns the next Fibonacci number
every time it is called.
>>> fib = make_fib()
>>> fib()
0
>>> fib()
1
>>> fib()
1
>>> fib()
2
>>> fib()
3
>>> fib2 = make_fib()
>>> fib() + sum([fib2() for _ in range(5)])
12
"""
return map(lambda x_y: x_y[0], accumulate(chain(((0, 1),), repeat(None)), lambda x_y, _: (x_y[1], x_y[0] + x_y[1]))).__next__
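
For comparison, a behaviourally equivalent closure-based sketch of the same contract:

def make_fib_closure():
    """Same behaviour as make_fib(), written with an explicit closure."""
    state = {"cur": 0, "nxt": 1}
    def fib():
        value = state["cur"]
        state["cur"], state["nxt"] = state["nxt"], state["cur"] + state["nxt"]
        return value
    return fib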
|
e546ce79c4b441418f5325b0ac5d7c3faf6ac35e
| 3,640,404
|
import codecs
import os
def find_issue(case):
"""
Find the issue sentence for a given case.
"""
if ".txt" not in case:
case += ".txt"
f = codecs.open(os.path.join(BASE_DIR, case), encoding="utf-8", errors="replace")
issue, switch = "", False
for line in f.readlines():
if line.startswith("THE ISSUE"):
switch = True
elif switch and line.strip() != "" and line == line.upper():
break
elif switch:
issue += line
return "The issue is " + " ".join(issue.split())
|
d7e063cb131b1658a40bd04155c4d839889825b5
| 3,640,405
|
def render_injected(http_resp, extra_html):
"""
render_injected(http_resp, extra_html) -> HttpResponse
Inject the extra html into the content of the http_resp.
``extra_html`` can be a string or an object with an ``html`` method/field.
"""
assert isinstance(http_resp, HttpResponse)
if 'text/html' not in http_resp.get('content-type', ''):
return http_resp
# Look for the </body> tag and inject the popup <div>
markers = ('</body>', '</BODY>')
content = http_resp.content
for marker in markers:
pos = content.rfind(marker)
if pos != -1:
break
else:
# No </body>
return http_resp
if hasattr(extra_html, 'html'):
extra_html = extra_html.html
if callable(extra_html):
extra_html = extra_html()
http_resp.content = ''.join((content[:pos], extra_html, content[pos:]))
return http_resp
|
a3f49419359a68ecc72f78f00ae3e4a18f4957d6
| 3,640,406
|
import os
def _extract_username(filename):
"""Return username (if found) from the credentials"""
if not os.path.exists(filename):
logger.warning("Cifs credentials file %s does not exist", filename)
return
    with open(filename) as f:
        for line in f:
            if ("username" in line) and ("=" in line):
                return line.split("=")[1].strip()
|
51c72e7febfb435f61eee32aaaa628a4af6f1f85
| 3,640,407
|
from unittest.mock import Mock
from unittest.mock import patch
def mock_gitlab_api_projects(save=None, mergerequests_list=None):
"""A pseudo mock"""
def get(*args, **kwargs):
project = Mock('gitlab.v4.objects.Project')
project.save = save
project.mergerequests = \
Mock('gitlab.v4.objects.ProjectMergeRequestManager')
project.mergerequests.list = mergerequests_list
return project
with patch('gitlab.Gitlab'):
gitlab = GitlabAPI(uri=None, token=None, insecure=False)
gitlab.api.projects = Mock('gitlab.v4.objects.ProjectManager')
gitlab.api.projects.get = get
return gitlab.api
|
52b48a0f9083a22fdafcfa3b797d6580967b1f02
| 3,640,408
|
def text_in_bytes(text, binary_data, encoding="utf-8"):
"""Return True of the text can be found in the decoded binary data"""
return text in binary_data.decode(encoding)
|
e416057989c452718fa27b5f84286e347b986117
| 3,640,409
|
import json
from base64 import b64encode
def make_auth(sub, tenant=None):
"""
Prepare an almost-valid JWT token header, suitable for consumption by our identity middleware (needs sub and optionally mender.tenant claims).
The token contains valid base64-encoded payload, but the header/signature are bogus.
This is enough for the identity middleware to interpret the identity
and select the correct db; note that there is no gateway in the test setup, so the signature
is never verified.
If 'tenant' is specified, the 'mender.tenant' claim is added.
"""
payload = {"sub": sub}
if tenant is not None:
payload["mender.tenant"] = tenant
payload = json.dumps(payload)
payloadb64 = b64encode(payload.encode("utf-8"))
jwt = "bogus_header." + payloadb64.decode() + ".bogus_sign"
return {"Authorization": "Bearer " + jwt}
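
A usage sketch showing the resulting header shape (values illustrative):

hdr = make_auth("user-1234", tenant="acme")
# hdr == {"Authorization": "Bearer bogus_header.<base64 JSON payload>.bogus_sign"}
print(hdr["Authorization"].split(".")[0])  # 'Bearer bogus_header'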
|
c8f8896814d1571bb4bb54a5507eda19e5ffd46c
| 3,640,410
|
import re
def available_mem(cores, mem, fmtstring=True):
"""Calculate available memory for a process
Params:
cores (int): number of cores
mem (str): set memory as string with conversion (M, G, g)
fmtstring (bool): return memory as formatted string
"""
prefix = "G"
m = re.match("[0-9]+([a-zA-Z]*)", str(mem))
if m:
prefix = m.groups()[0]
requested_mem_per_core = int(re.sub("[a-zA-Z]*", "", str(mem)))
core_mem = mem_per_core(prefix)
requested_cores = min(cores, available_cpu_count())
mem = min(requested_cores * core_mem,
requested_cores * requested_mem_per_core)
if fmtstring:
return "{}{}".format(mem, prefix)
else:
return mem
|
241ad5133917d84b78bc0670fc974184b88e3978
| 3,640,411
|
def normalize_v(v):
""" Normalize velocity to [-1, 1].
Ref: https://github.com/microsoft/AirSim-Drone-Racing-VAE-Imitation/blob/e651be52ff8274c9f595e88b13fe42d51302403d/racing_utils/dataset_utils.py#L20 """
# normalization of velocities from whatever to [-1, 1] range
v_x_range = [-1, 7]
v_y_range = [-3, 3]
v_z_range = [-3, 3]
v_yaw_range = [-1, 1]
if len(v.shape) == 1:
# means that it's a 1D vector of velocities
v[0] = 2.0 * (v[0] - v_x_range[0]) / (v_x_range[1] - v_x_range[0]) - 1.0
v[1] = 2.0 * (v[1] - v_y_range[0]) / (v_y_range[1] - v_y_range[0]) - 1.0
v[2] = 2.0 * (v[2] - v_z_range[0]) / (v_z_range[1] - v_z_range[0]) - 1.0
v[3] = 2.0 * (v[3] - v_yaw_range[0]) / (v_yaw_range[1] - v_yaw_range[0]) - 1.0
elif len(v.shape) == 2:
# means that it's a 2D vector of velocities
v[:, 0] = 2.0 * (v[:, 0] - v_x_range[0]) / (v_x_range[1] - v_x_range[0]) - 1.0
v[:, 1] = 2.0 * (v[:, 1] - v_y_range[0]) / (v_y_range[1] - v_y_range[0]) - 1.0
v[:, 2] = 2.0 * (v[:, 2] - v_z_range[0]) / (v_z_range[1] - v_z_range[0]) - 1.0
v[:, 3] = 2.0 * (v[:, 3] - v_yaw_range[0]) / (v_yaw_range[1] - v_yaw_range[0]) - 1.0
else:
raise Exception('Error in data format of V shape: {}'.format(v.shape))
return v
# Note: The version used in Shuang's code base is below, which should be equivalent to the above version.
# self.targets[:, 0] = 2. * (self.targets[:, 0] + 1.) / (7. + 1.) - 1.
# self.targets[:, 1] = 2. * (self.targets[:, 1] + 3.) / (3. + 3.) - 1.
# self.targets[:, 2] = 2. * (self.targets[:, 2] + 3.) / (3. + 3.) - 1.
# self.targets[:, 3] = 2. * (self.targets[:, 3] + 1.) / (1. + 1.) - 1.
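
A small worked check (the function mutates and returns the same numpy array):

import numpy as np

v = np.array([3.0, 0.0, -3.0, 1.0])  # [v_x, v_y, v_z, v_yaw]
print(normalize_v(v))  # -> [ 0.  0. -1.  1.], e.g. v_x: 2*(3-(-1))/(7-(-1)) - 1 = 0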
|
cd47c8d3498e677a1f566b64199224f23a4b5896
| 3,640,412
|
def allele_counts_dataframe(read_evidence_generator):
"""
Creates a DataFrame containing number of reads supporting the
ref vs. alt alleles for each variant.
"""
return dataframe_from_generator(
element_class=ReadEvidence,
variant_and_elements_generator=read_evidence_generator,
# DataFrameBuilder will take the length of these fields' values
rename_dict={
"ref_reads": "num_ref_reads",
"alt_reads": "num_alt_reads",
"other_reads": "num_other_reads",
},
extra_column_fns={
"num_ref_fragments": lambda _, x: len(x.ref_read_names),
"num_alt_fragments": lambda _, x: len(x.alt_read_names),
"num_other_fragments": lambda _, x: len(x.other_read_names)
})
|
dd76b3b168977eb185ba1205a75ba4642becc913
| 3,640,413
|
def validate_rule_paths(sched: schedule.Schedule) -> schedule.Schedule:
"""A validator to be run after schedule creation to ensure
each path contains at least one rule with an expression or value.
A ValueError is raised when this check fails."""
for path in sched.unfold():
if path.is_final and not list(path.rules_with_expr_or_value):
raise ValueError(
"No expression or value specified along the path {}."
.format(path)
)
return sched
|
2ddfd6f9607687f6a3e3c955ed3470913fdf14bd
| 3,640,414
|
def spiralcontrolpointsvert(
x: int, y: int,
step: int,
growthfactor: float,
turns: int):
"""Returns a list[(int, int)]
of 2D vertices along a path
defined by a square spiral
Args:
x, y: int centerpoint
coordinates
step: int step increment
growthfactor: float multiplier
to step increment
to make exponential
spirals
turns: number of turns of the
spiral
    Returns:
list of vertices of the spiral
list[[x: int, y: int]]
"""
v = [[x, y]]
inc = step
while turns > 0:
x += step
v.append([x, y])
step += inc
y += step
v.append([x, y])
step += inc
x -= step
v.append([x, y])
step += inc
y -= step
v.append([x, y])
turns -= 1
step += inc
inc *= growthfactor
return v
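
A small worked example (one turn, step 10, no growth):

print(spiralcontrolpointsvert(0, 0, 10, 1.0, 1))
# -> [[0, 0], [10, 0], [10, 20], [-20, 20], [-20, -20]]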
|
de9f577ed826b227d44b69e638dd33e08ea9c430
| 3,640,415
|
def determine_current_taxid(given_taxid):
"""Determine NCBI's current taxonomic ID given an (old) taxonomic ID
Args:
given_taxid: previously used NCBI taxonomic ID
Returns:
most current NCBI taxonomic ID
"""
taxid = given_taxid
aka_taxid_in_xml = None
redirects = 0
# Set at most 5 redirects to avoid infinite looping
while ((aka_taxid_in_xml != 0) and (redirects <= 5)):
tax_url = ncbi_taxonomy_url(taxid)
tax_raw_xml = urlopen_with_tries(tax_url)
aka_taxid_in_xml = parse_taxonomy_xml_for_aka_taxid(tax_raw_xml)
if aka_taxid_in_xml != 0:
logger.warning("Taxid %d is being redirected to taxid %d" %
(taxid, aka_taxid_in_xml))
taxid = aka_taxid_in_xml
else:
logger.debug("Taxid %d did not need to be redirected" %
(taxid))
redirects += 1
return taxid
|
f924309e8e19b5ffd9a46bf8c80054577bb7a28b
| 3,640,416
|
import os
def validate_dependencies():
"""Validate external dependencies.
This function does NOT have to exist. If it does exist the runtime will call and execute it during api
initialization. The purpose of this function is to verify that external dependencies required to auto-generate
a problem are properly installed and configured on this system. Some common tasks that may be performed are
checking that a certain program is installed (such as 'javac') and that it is executable. You may also want to
verify that template files that the generator modifies exist in the templates/ directory. If any dependency
check fails the function should print out the respective error message and return False. If all checks pass
correctly the function should return True. If the function does not exist the API initializer will assume that
all dependencies are met and will add the generator to the pre-fetched generator list assuming there is an
    auto-generated problem in the database that has the given generator set for its 'generator' field.
The following code demonstrates how to check that the java compiler (javac) is present on the system and can be
executed by the current user.
"""
    print("DEPENDENCY CHECK - TEMPLATE.py (TEMPLATE)")
    javac_path = "/usr/bin/javac"  # This should have scope across the entire module but doesn't for template purposes
    if not os.path.exists(javac_path):
        print("ERROR - TEMPLATE - The specified java compiler (%s) does not appear to exist." % javac_path)
        return False
    if not os.access(javac_path, os.X_OK):
        print("ERROR - TEMPLATE - javac is not executable by the python runtime.")
        return False
return True
|
0bea4c7bbf6198bf18514233c902f5bfa62dc8f8
| 3,640,417
|
import numpy as np
def find_closest_vertex(desired_hop, available_vertices):
    """ Find the closest downstream (greater than or equal) vertex
    in available vertices. If nothing exists, then return -1.
    Keyword arguments:
    desired_hop -- float representing the desired hop location
    available_vertices -- np array of available vertices in model
    Returns:
    vertex -- the closest available vertex that is >= desired_hop
    """
available_vertices = np.sort(available_vertices)
forward_vertices = available_vertices[available_vertices >= desired_hop]
if forward_vertices.size < 1:
vertex = -1
else:
vertex = forward_vertices[0]
return vertex
|
c0aa85238faf58ff30cc937bf73b53ce2cc0ee48
| 3,640,418
|
def second_smallest(numbers):
"""Find second smallest element of numbers."""
m1, m2 = float('inf'), float('inf')
for x in numbers:
if x <= m1:
m1, m2 = x, m1
elif x < m2:
m2 = x
return m2
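
Two quick checks (a repeated minimum counts as the second smallest):

print(second_smallest([5, 3, 9, 1]))  # 3
print(second_smallest([2, 2, 7]))     # 2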
|
0ca7b297da68651e4a8b56377e08f09d4d82cfb7
| 3,640,419
|
import numpy
def calc_sft_ccs_by_dictionary(dict_crystal, dict_in_out, flag_use_precalculated_data: bool = False):
"""Calculate structure factor tensor in CCS (X||a*, Z||c) based on the information given in dictionary.
Output information is written in the same dictionary.
"""
dict_crystal_keys = dict_crystal.keys()
dict_in_out_keys = dict_in_out.keys()
necessary_crystal_keys = set(["unit_cell_parameters", ])
diff_set_crystal = necessary_crystal_keys.difference(set(dict_crystal_keys))
if len(diff_set_crystal) != 0:
raise AttributeError(f"The following attributes have to be defined {diff_set_crystal:}")
flag_reduced_symm_elems = len(set(["reduced_symm_elems", "centrosymmetry", "translation_elems"]).difference(set(dict_crystal_keys))) == 0
flag_full_symm_elems = len(set(["full_symm_elems", ]).difference(set(dict_crystal_keys))) == 0
flag_full_mcif_elems = len(set(["full_mcif_elems", ]).difference(set(dict_crystal_keys))) == 0
if not(flag_reduced_symm_elems or flag_full_symm_elems or flag_full_mcif_elems):
raise AttributeError("The symmetry elements have to be defined.")
necessary_in_out_keys = set(["index_hkl", ])
diff_set_in_out = necessary_in_out_keys.difference(set(dict_in_out_keys))
if len(diff_set_in_out) != 0:
raise AttributeError(f"The following attributes have to be defined {diff_set_in_out:}")
index_hkl = dict_in_out["index_hkl"]
non_zero_keys = set(["mag_atom_lande_factor", "mag_atom_kappa",
"mag_atom_j0_parameters", "mag_atom_j2_parameters"])
diff_set_crystal = non_zero_keys.difference(set(dict_crystal_keys))
if len(diff_set_crystal) != 0:
sft_ccs = numpy.zeros((9, index_hkl.shape[-1]), dtype=complex)
dder = {}
return sft_ccs, dder
if "flag_only_orbital" in dict_in_out_keys:
flag_only_orbital = dict_in_out["flag_only_orbital"]
else:
flag_only_orbital = False
if flag_reduced_symm_elems:
reduced_symm_elems = dict_crystal["reduced_symm_elems"]
centrosymmetry = dict_crystal["centrosymmetry"]
if centrosymmetry:
centrosymmetry_position = dict_crystal["centrosymmetry_position"]
else:
centrosymmetry_position = None
translation_elems = dict_crystal["translation_elems"]
elif flag_full_symm_elems:
full_symm_elems = dict_crystal["full_symm_elems"]
reduced_symm_elems = full_symm_elems
centrosymmetry = False
centrosymmetry_position = None
translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
elif flag_full_mcif_elems:
full_mcif_elems = dict_crystal["full_mcif_elems"]
reduced_symm_elems = full_mcif_elems[:13]
centrosymmetry = False
centrosymmetry_position = None
translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
unit_cell_parameters = dict_crystal["unit_cell_parameters"]
atom_para_index = dict_crystal["atom_para_index"]
atom_para_fract_xyz = dict_crystal["atom_fract_xyz"][:, atom_para_index]
atom_para_sc_fract = dict_crystal["atom_site_sc_fract"][:, atom_para_index]
atom_para_sc_b = dict_crystal["atom_site_sc_b"][:, atom_para_index]
atom_para_fract_xyz = calc_m_v(
atom_para_sc_fract, numpy.mod(atom_para_fract_xyz, 1), flag_m=False, flag_v=False)[0] + atom_para_sc_b
atom_para_occupancy = dict_crystal["atom_occupancy"][atom_para_index]
atom_para_b_iso = dict_crystal["atom_b_iso"][atom_para_index]
atom_beta = dict_crystal["atom_beta"]
if "atom_site_aniso_sc_beta" in dict_crystal_keys:
atom_site_aniso_sc_beta = dict_crystal["atom_site_aniso_sc_beta"]
atom_site_aniso_index = dict_crystal["atom_site_aniso_index"]
atom_sc_beta = numpy.zeros((6,)+atom_beta.shape, dtype=float)
atom_sc_beta[:, :, atom_site_aniso_index] = atom_site_aniso_sc_beta
atom_beta = (atom_sc_beta*numpy.expand_dims(atom_beta, axis=0)).sum(axis=1)
atom_para_beta = atom_beta[:, atom_para_index]
mag_atom_para_index = dict_crystal["mag_atom_para_index"]
atom_para_lande_factor = dict_crystal["mag_atom_lande_factor"][mag_atom_para_index]
atom_para_kappa = dict_crystal["mag_atom_kappa"][mag_atom_para_index]
atom_para_j0_parameters = dict_crystal["mag_atom_j0_parameters"][:, mag_atom_para_index]
atom_para_j2_parameters = dict_crystal["mag_atom_j2_parameters"][:, mag_atom_para_index]
atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]
flag_unit_cell_parameters = numpy.any(dict_crystal["flags_unit_cell_parameters"])
flag_atom_para_fract_xyz = numpy.any(dict_crystal["flags_atom_fract_xyz"][:, atom_para_index])
flag_atom_para_occupancy = numpy.any(dict_crystal["flags_atom_occupancy"][atom_para_index])
flag_atom_para_b_iso = numpy.any(dict_crystal["flags_atom_b_iso"][atom_para_index])
flag_atom_para_beta = numpy.any(dict_crystal["flags_atom_beta"][:, atom_para_index])
flag_atom_para_susceptibility = numpy.any(dict_crystal["flags_atom_para_susceptibility"])
flag_atom_para_lande_factor = numpy.any(dict_crystal["flags_mag_atom_lande_factor"][mag_atom_para_index])
flag_atom_para_kappa = numpy.any(dict_crystal["flags_mag_atom_kappa"][mag_atom_para_index])
sft_ccs, dder = calc_sft_ccs(index_hkl,
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems,
unit_cell_parameters, atom_para_fract_xyz, atom_para_occupancy, atom_para_susceptibility, atom_para_b_iso, atom_para_beta,
atom_para_lande_factor, atom_para_kappa, atom_para_j0_parameters, atom_para_j2_parameters, atom_para_sc_chi,
dict_in_out=dict_in_out, flag_only_orbital=flag_only_orbital,
flag_unit_cell_parameters=flag_unit_cell_parameters, flag_atom_para_fract_xyz=flag_atom_para_fract_xyz,
flag_atom_para_occupancy=flag_atom_para_occupancy, flag_atom_para_susceptibility=flag_atom_para_susceptibility,
flag_atom_para_b_iso=flag_atom_para_b_iso, flag_atom_para_beta=flag_atom_para_beta,
flag_atom_para_lande_factor=flag_atom_para_lande_factor, flag_atom_para_kappa=flag_atom_para_kappa,
flag_use_precalculated_data=flag_use_precalculated_data)
return sft_ccs, dder
|
6905b0c1b4d4b3b65099600822ca0f6077fe1393
| 3,640,420
|
import numpy as np
def normalize_mesh(mesh, in_place=True):
"""Rescales vertex positions to lie inside unit cube."""
scale = 1.0 / np.max(mesh.bounds[1, :] - mesh.bounds[0, :])
centroid = mesh.centroid
scaled_vertices = (mesh.vertices - centroid) * scale
if in_place:
scaled_mesh = mesh
scaled_mesh.vertices = scaled_vertices
else:
scaled_mesh = mesh.copy()
scaled_mesh.vertices = scaled_vertices
scaled_mesh.fix_normals()
return scaled_mesh
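
A hedged usage sketch, assuming a trimesh.Trimesh-like mesh (anything exposing bounds, centroid, vertices, copy() and fix_normals() would do):

import trimesh  # assumed dependency for the example

box = trimesh.creation.box(extents=[2.0, 4.0, 6.0])
unit = normalize_mesh(box, in_place=False)
print(unit.bounds[1] - unit.bounds[0])  # ~[0.333 0.667 1.0]: the longest extent is scaled to 1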
|
e70ff4dea9173a541267c2a5bd040f12de4499c8
| 3,640,421
|
from datetime import datetime
import socket
def get_run_name():
""" A unique name for each run """
return datetime.now().strftime(
'%b%d-%H-%M-%S') + '_' + socket.gethostname()
|
26f57e72912e896fe192de61b6477ef65051fccd
| 3,640,422
|
import traceback
import pandas as pd
from selenium.common.exceptions import NoSuchElementException
def process_request(identifier, browser, document_type='Annual Return', num_doc=1, status_df=None):
"""
Search ICRIS for the passed identifier, analyze the returned documents,
and cart the documents depending on whether we purchased
the document before.
Parameters
----------
identifier : str
Name or Companies Registry Number of the company to
purchase documents for
browser : selenium.webdriver.remote.webdriver.WebDriver
An instance of Selenium WebDriver
document_type : str, optional
Type of document to be purchased, default `Annual Return`
num_doc : int, optional
Number of documents of type `document_type` to be purchased
status_df : pandas.DataFrame
Dataframe object to append data related to the status of the
operations to
Returns
-------
status_df : pandas.DataFrame
Dataframe object containing information about the status of
the carting operations with the following columns
"""
if status_df is None:
status_df = pd.DataFrame()
cart_number = 0
try:
try: # Check if there were no matches for the passed identifier
companies = CompaniesIndexPage(browser)
companies.NO_MATCHES()
raise Exception(f"No matches found for identifier: {identifier}")
except NoSuchElementException:
pass
main_menu, search, companies, info, doc_index = init_webpages(browser)
exception = 'None'
main_menu.navigate_to_search_page()
if identifier.isdigit():
search.crNo_search(identifier)
else:
search.name_search(identifier)
if identifier.isdigit():
try:
companies.choose_number(identifier)
except TimeoutError:
raise Exception(f"No companies found for company number {identifier}")
else:
try:
companies.choose_name(identifier)
except TimeoutError:
raise Exception(f"No companies found for company name {identifier}")
info.proceed()
doc_index.list_documents()
cart_status, cart_number = doc_index.index_and_cart(document_type, num_doc)
row = pd.Series([identifier,document_type, str(cart_status).upper(), cart_number, exception])
status_df = status_df.append(row, ignore_index = True)
return status_df
except Exception:
exception = traceback.format_exc(7)
try:
cart_status
except NameError:
cart_status = False
row = pd.Series([identifier, document_type, str(cart_status).upper(), cart_number, exception])
status_df = status_df.append(row, ignore_index = True)
return status_df
|
7a6071488aba447959264d80d5bb201deb9a2339
| 3,640,423
|
import py_trees
from py_trees.blackboard import Blackboard, Client
def create_blackboard():
"""
Create a blackboard with a few variables.
Fill with as many different types as we need to get full coverage on
pretty printing blackboard tests.
"""
Blackboard.clear()
blackboard = Client(name="Tester")
for key in {"foo", "some_tuple", "nested", "nothing"}:
blackboard.register_key(
key=key,
access=py_trees.common.Access.READ
)
for key in {"foo", "some_tuple", "nested", "nothing"}:
blackboard.register_key(
key=key,
access=py_trees.common.Access.WRITE
)
blackboard.foo = "bar"
blackboard.some_tuple = (1, "bar")
blackboard.nested = Nested()
blackboard.nothing = None
return blackboard
|
776129ba57a545ef3bcfca75c99816fd198bfc3d
| 3,640,424
|
def adminRoomDelete(*args, **kwargs):
""" 删除房间 """
params = kwargs['params']
filters = {
Room.room_uuid == params['room_uuid']
}
Room().delete(filters)
filters = {
UserRoomRelation.room_uuid == params['room_uuid']
}
UserRoomRelation().delete(filters)
return BaseController().successData()
|
d8f9a29b46aac908eda9e5fedbc113ce93bc2bf4
| 3,640,425
|
import numpy as np
def who_is_it(image_path, database, model):
"""
Arguments:
image_path -- path to an image
database -- database containing image encodings along with the name of the person on the image
model -- your Inception model instance in Keras
Returns:
min_dist -- the minimum distance between image_path encoding and the encodings from the database
identity -- string, the name prediction for the person on image_path
"""
### START CODE HERE ###
    ## Step 1: Compute the target "encoding" for the image using create_encoding(). ## (≈ 1 line)
encoding = create_encoding(image_path, model)
## Step 2: Find the closest encoding ##
# Initialize "min_dist" to a large value, say 100 (≈1 line)
min_dist = 100
# Loop over the database dictionary's names and encodings.
for (name, db_enc) in database.items():
# Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
dist = np.linalg.norm(encoding-db_enc)
# If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
if dist < min_dist:
min_dist = dist
identity = name
### END CODE HERE ###
if min_dist > 0.85:
print("Not in the database.")
print("distance", min_dist)
identity = "Unknown"
else:
print ("it's " + str(identity) + ", the distance is " + str(min_dist))
return min_dist, identity
|
e2301b2504cc7bbb4c362b98541cad325b4b8587
| 3,640,426
|
def compute_win_state_str_row(n_rows, n_cols, n_connects):
"""Each win state will be a string of 0s and 1s which can
then converted into an integer in base 2.
I assume that at the maximum n_rows = n_cols = 5, which means
that a 31 bit integer (since in Python it's always signed)
should be more than enough for a 25 bit string.
"""
n_cells = n_rows * n_cols
win_states = list()
# each iteration in the for loop computes the possible
# winning states for a particular row, e.g.,
# - if n_connects == n_cols, there's just one winning state
for row_ind in range(n_rows):
prefix = '0' * (row_ind * n_cols)
row_end = (row_ind * n_cols) + n_cols
win_start_ind = row_ind * n_cols
win_end_ind = win_start_ind + n_connects
while win_end_ind <= row_end:
# save the winning state
suffix = '0' * (n_cells - win_end_ind)
win_state = prefix + '1' * n_connects + suffix
win_states.append(win_state)
# update for the next possible win state of the row
win_start_ind = win_start_ind + 1
win_end_ind = win_start_ind + n_connects
prefix += '0'
return win_states
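
A worked example on a 2 x 3 board with 2-in-a-row wins; each bit string marks the winning cells of one row segment:

print(compute_win_state_str_row(2, 3, 2))
# -> ['110000', '011000', '000110', '000011']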
|
b0f0f2846de7506b4b69f90bb8a0b1641a421659
| 3,640,427
|
import hmac
import hashlib
def hmac_sha512(key: bytes, data: bytes) -> bytes:
"""
Return the SHA512 HMAC for the byte sequence ``data`` generated with the
secret key ``key``.
Corresponds directly to the "HMAC-SHA512(Key = ..., Data = ...)" function
in BIP32
(https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).
:param key: The secret key used for HMAC calculation.
:param data: The data for which an HMAC should be calculated.
:return: A byte sequence containing the HMAC of ``data`` generated with the
secret key ``key``.
"""
h = hmac.new(key, data, hashlib.sha512)
return h.digest()
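
A minimal call (BIP32 derives the master key with the ASCII key b"Bitcoin seed"; the 16-byte seed here is illustrative):

digest = hmac_sha512(b"Bitcoin seed", b"\x00" * 16)
print(len(digest))  # 64 -- BIP32 splits this into a 32-byte key and a 32-byte chain code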
|
64850ea2d5e921138d8e0ebc2d021f8eaf5a7357
| 3,640,428
|
def __scheduler_trigger(cron_time_now, now_sec_tuple, crontask, deltasec=2):
"""
SchedulerCore logic
actual time: cron_time_now format: (WD, H, M, S)
actual time in sec: now_sec_tuple: (H sec, M sec, S)
crontask: ("WD:H:M:S", "LM FUNC")
deltasec: sample time window: +/- sec: -sec--|event|--sec-
"""
# Resolve "normal" time
check_time = tuple(int(t.strip()) if t.isdigit() else t.strip() for t in crontask[0].split(':'))
# Resolve "time tag" to "normal" time
if len(check_time) < 3:
tag = crontask[0].strip()
value = Sun.TIME.get(tag, None)
if value is None or len(value) < 3:
errlog_add('cron syntax error: {}:{}'.format(tag, value))
return False
check_time = ('*', value[0], value[1], value[2])
# Cron actual time (now) parts summary in sec
check_time_now_sec = now_sec_tuple[0] + now_sec_tuple[1] + now_sec_tuple[2]
# Cron overall requested time in sec - hour in sec, minute in sec, sec
check_time_scheduler_sec = int(now_sec_tuple[0] if check_time[1] == '*' else check_time[1] * 3600) \
+ int(now_sec_tuple[1] if check_time[2] == '*' else check_time[2] * 60) \
+ int(now_sec_tuple[2] if check_time[3] == '*' else check_time[3])
# Time frame +/- corrections
tolerance_min_sec = 0 if check_time_now_sec - deltasec < 0 else check_time_now_sec - deltasec
tolerance_max_sec = check_time_now_sec + deltasec
task_id = "{}:{}|{}".format(check_time[0], check_time_scheduler_sec, str(crontask[1]).replace(' ', ''))
# Check WD - WEEK DAY
if check_time[0] == '*' or check_time[0] == cron_time_now[0]:
# Check H, M, S in sec format between tolerance range
if tolerance_min_sec <= check_time_scheduler_sec <= tolerance_max_sec:
__cron_task_cache_manager(check_time_now_sec, deltasec)
if check_time[3] == '*' or task_id not in LAST_CRON_TASKS:
lm_state = False
if isinstance(crontask[1], str):
# [1] Execute Load Module as a string (user LMs)
lm_state = exec_lm_core_schedule(crontask[1].split())
else:
try:
# [2] Execute function reference (built-in functions)
console_write("[builtin cron] {}".format(crontask[1]()))
lm_state = True
except Exception as e:
errlog_add("[cron] function exec error: {}".format(e))
if not lm_state:
console_write("[cron]now[{}] {} <-> {} conf[{}] exec[{}] LM: {}".format(cron_time_now,
__convert_sec_to_time(tolerance_min_sec),
__convert_sec_to_time(tolerance_max_sec),
crontask[0],
lm_state,
crontask[1]))
# SAVE TASK TO CACHE
if check_time[3] != '*':
# SAVE WHEN SEC not *
LAST_CRON_TASKS.append(task_id)
return True
return False
|
8c3cc2f23bf94bfe7f817db542f50345be8f1a20
| 3,640,429
|
from typing import Dict
import os
import yaml
import keyring
from getpass import getpass
def gen_canvas_config() -> Dict:
"""Generates yaml config from user input for canvas interface
Returns
-------
Dict
canvas config dict
"""
# check for existing canvas config
if (
os.path.exists(CANVAS_CONF_PATH) and
(input("Delete existing canvas config and restart [y/N] ?: ").lower() != "y")
):
with open(CANVAS_CONF_PATH, "r") as canvas_conf_in:
return yaml.load(canvas_conf_in, Loader=yaml.Loader)
# init canvas conf
canvas_conf = {}
# get API url, username, key
canvas_conf["api_url"] = input("Canvas URL: ")
canvas_conf["api_username"] = input("Canvas Username: ")
keyring.set_password('canvas-token', canvas_conf["api_username"], getpass("Canvas Key: "))
# dump config
with open(CANVAS_CONF_PATH, "w") as canvas_conf_out:
yaml.dump(canvas_conf, canvas_conf_out)
return canvas_conf
|
2141e0793444b1687dbb2c5d786327cdb3d935cc
| 3,640,430
|
def get13FAmendmentType(accNo, formType=None) :
"""
Gets the amendment type for a 13F-HR/A filing - may be RESTATEMENT or NEW HOLDINGS.
This turned out to be unreliable (often missing or wrong), so I don't use it to get
the combined holdings for an investor. Instead I just look at the number of holdings
in an amendment compared to the previous filing, and treat it as a restatement
if the new number of holdings is more than half the old number.
"""
info = basicInfo.getSecFormInfo(accNo, formType)
xmlUrls = [l[-1] for l in info['links'] if l[0].lower().endswith('xml')]
xmlSummTab = utils.downloadSecUrl(xmlUrls[0],toFormat='xml')
coverPage = findChildSeries(xmlSummTab,['formdata','coverpage'])
isAmendment = findChildEndingWith(coverPage,'isamendment')
if isAmendment is None or isAmendment.text.strip().lower() not in ['true','yes'] :
return None
return findChildSeries(coverPage,['amendmentinfo','amendmenttype']).text.strip()
|
a8ff184b4d3eb43ea8da75a64e83cb136908364e
| 3,640,431
|
import numpy as np
def tabs_to_cover_string(string):
"""
Get the number of tabs required to be at least the same length as a given string.
:param string: The string
:return: The number of tabs to cover it
:rtype: int
"""
num_tabs = int(np.floor(len(string) / 8) + 1)
return num_tabs
|
242496271bafc78a2180c8e8798b9ed4892afb29
| 3,640,432
|
import numpy as np
def kl(p, q):
"""
Kullback-Leibler divergence for discrete distributions
Parameters
----------
p: ndarray
probability mass function
q: ndarray
probability mass function
Returns
--------
float : D(P || Q) = sum(p(i) * log(p(i)/q(i))
Discrete probability distributions.
"""
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
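
A quick numeric check (numpy arrays, natural logarithm):

import numpy as np

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(kl(p, q))  # ~0.511 = 0.5*ln(0.5/0.9) + 0.5*ln(0.5/0.1)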
|
905903324958414972329e32becf9c8848f54029
| 3,640,433
|
def map_uris(uris):
"""Map URIs from external URI to HDFS
:return:
"""
pkgs_path = __pillar__['hdfs']['pkgs_path']
ns = nameservice_names()
return map(lambda x: 'hdfs://{0}{1}/{2}'.format(ns[0], pkgs_path, __salt__['system.basename'](x)), uris)
|
8dd682e932c2ac4dd495cfd11e88abdf58e78800
| 3,640,434
|
from typing import Any
from fastapi import Depends, HTTPException
from sqlalchemy.orm import Session
def delete_task(id: str, db: Session = Depends(get_db)) -> Any:
"""Delete a task"""
try:
todo_interactor = ToDoInteractor(db=db)
todo_interactor.remove(id=id)
return {"success": f"removed task: {id}"}
    except HTTPException as e:
        logger.exception(e)
        raise  # re-raise the original HTTPException instead of wrapping it in a new one
except Exception as e:
logger.exception(e)
raise ToDoInteractorError(e)
|
93841bd4975bcd3851ee4cc6e38f62c735b85183
| 3,640,435
|
def get_body(name):
"""Retrieve the Body structure of a JPL .bsp file object
Args:
name (str)
Return:
:py:class:`~beyond.constants.Body`
"""
return Pck()[name]
|
d9949d9638c27b77f0bff203d2015aeb7af8c389
| 3,640,436
|
def is_available():
"""
Convenience function to check if the current platform is supported by this
module.
"""
return ProcessMemoryInfo().update()
|
d7d1d842009b39f79c650d54f776db664b30ea14
| 3,640,437
|
import numpy as np
def render_path_spiral(c2w, up, rads, focal, zrate, rots, N):
"""
enumerate list of poses around a spiral
used for test set visualization
"""
render_poses = []
rads = np.array(list(rads) + [1.])
for theta in np.linspace(0., 2. * np.pi * rots, N+1)[:-1]:
c = np.dot(c2w[:3,:4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta*zrate), 1.]) * rads)
z = normalize(c - np.dot(c2w[:3,:4], np.array([0,0,-focal, 1.])))
render_poses.append(viewmatrix(z, up, c))
return render_poses
|
0eb608be5f29425cca62b15ce48db02e2297be17
| 3,640,438
|
from typing import Dict
from typing import List
from typing import Any
from collections import defaultdict
def placeAnchorSourceToLagunaTX(
common_anchor_connections: Dict[str, List[Dict[str, Any]]]
) -> List[str]:
"""
The anchors are placed on the Laguna RX registers
We move the source cell of the anchor onto the corresponding TX registers
"""
anchor_to_source_cell = _getAnchorToSourceCell(common_anchor_connections)
slr_to_source_cell_to_loc = defaultdict(dict)
for anchor, loc in anchor_2_loc.items():
assert 'LAGUNA' in loc and 'RX_REG' in loc
source_cell = anchor_to_source_cell[anchor]
# if two anchor registers are connected
if 'q0_reg' in source_cell:
assert False, source_cell
target_tx = getPairingLagunaTXOfRX(loc)
slr_index = getSLRIndexOfLaguna(target_tx)
slr_to_source_cell_to_loc[slr_index][source_cell] = target_tx
script = []
for slr_index, source_cell_to_loc in slr_to_source_cell_to_loc.items():
script.append('catch { place_cell { \\')
for source_cell, loc in source_cell_to_loc.items():
script.append(f' {source_cell} {loc} \\')
script.append('} }')
# if both the TX and the RX lagunas are in the FIXED state, the router will not perform hold violation fix
    script.append('catch { set_property IS_LOC_FIXED 0 [get_cells -hierarchical -filter { BEL =~ *LAGUNA*TX* }] }')
open('place_laguna_anchor_source_cells.tcl', 'w').write('\n'.join(script))
return script
|
d30cef6a42b846a3d82467eabebce1275a3d81ed
| 3,640,439
|
def post_example_form():
"""Example of a post form."""
return render_template("post-form.html")
|
e5e934dfe0d2b81081cda5deeca483f46fae89fe
| 3,640,440
|
def validate(data):
"""Validates incoming data
Args:
data(dict): the incoming data
Returns:
True if the data is valid
Raises:
ValueError: the data is not valid
"""
if not isinstance(data, dict):
raise ValueError("data should be dict")
if "text" not in data or not isinstance(data["text"],
str) or len(data["text"]) < 1:
raise ValueError("text field is required and should not be empty")
if "markdown" in data and not isinstance(data["markdown"], bool):
raise ValueError("markdown field should be bool")
if "attachments" in data:
if not isinstance(data["attachments"], list):
raise ValueError("attachments field should be list")
for attachment in data["attachments"]:
if "text" not in attachment and "title" not in attachment:
raise ValueError("text or title is required in attachment")
return True
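
Two quick checks of the contract:

print(validate({"text": "hello", "markdown": False}))  # True

try:
    validate({"text": "x", "attachments": [{"color": "red"}]})
except ValueError as err:
    print(err)  # text or title is required in attachment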
|
ae8b7e74bd7607a7c8f5079014a0f5e3af5bc011
| 3,640,441
|
def stripExtra(name):
    """This function removes parentheses from a string
    *Can later be implemented for other uses like removing other characters from string
    Args:
        name (string): character's name
    Returns:
        string: character's name without parentheses
    """
startIndexPer=name.find('(')
start = 0
if(startIndexPer!=-1):
start = startIndexPer
if(start==0):
return name
else:
return name[0:start-1]
|
fd9b8c2d6f513f06d8b1df067520c7f05cff023d
| 3,640,442
|
def google_maps(maiden: str) -> str:
"""
generate Google Maps URL from Maidenhead grid
Parameters
----------
maiden : str
Maidenhead grid
Results
-------
url : str
Google Maps URL
"""
latlon = toLoc(maiden)
    url = "https://www.google.com/maps/@?api=1&map_action=map" "&center={},{}".format(
        latlon[0], latlon[1]
    )
return url
|
04c2a6d730831746dc63ce6c733b16322d0696da
| 3,640,443
|
def format_signed(feature, # type: Dict[str, Any]
formatter=None, # type: Callable[..., str]
**kwargs
):
# type: (...) -> str
"""
Format unhashed feature with sign.
>>> format_signed({'name': 'foo', 'sign': 1})
'foo'
>>> format_signed({'name': 'foo', 'sign': -1})
'(-)foo'
>>> format_signed({'name': ' foo', 'sign': -1}, lambda x: '"{}"'.format(x))
'(-)" foo"'
"""
txt = '' if feature['sign'] > 0 else '(-)'
name = feature['name'] # type: str
if formatter is not None:
name = formatter(name, **kwargs)
return '{}{}'.format(txt, name)
|
4adeecb92b0d102ae512c2c8acf89d38454b4e4e
| 3,640,444
|
from rdkit import Chem
def load_ligand(sdf):
"""Loads a ligand from an sdf file and fragments it.
Args:
sdf: Path to sdf file containing a ligand.
"""
lig = next(Chem.SDMolSupplier(sdf, sanitize=False))
frags = generate_fragments(lig)
return lig, frags
|
984ba4bf61af6f8197f96a80f0f493b7dae84f08
| 3,640,445
|
def CMDpending(parser, args):
"""Lists pending jobs."""
parser.add_option('-b',
'--builder',
dest='builders',
action='append',
default=[],
help='Builders to filter on')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
if not options.builders:
options.builders = buildbot.builders.keys
for builder in options.builders:
builder = buildbot.builders[builder]
pending_builds = builder.data.get('pendingBuilds', 0)
if not pending_builds:
continue
print('Builder %s: %d' % (builder.name, pending_builds))
if not options.quiet:
for pending in builder.pending_builds.data:
if 'revision' in pending['source']:
print(' revision: %s' % pending['source']['revision'])
for change in pending['source']['changes']:
print(' change:')
print(' comment: %r' % unicode(change['comments'][:50]))
print(' who: %s' % change['who'])
return 0
|
a9d56333fa84f2c92a969135c0dcc02bf94b972f
| 3,640,446
|
import numpy as np
def numpy_translation(xyz):
"""Returns the dual quaternion for a pure translation.
"""
res = np.zeros(8)
res[3] = 1.0
res[4] = xyz[0]/2.0
res[5] = xyz[1]/2.0
res[6] = xyz[2]/2.0
return res
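
A worked example; the rotation part is the identity (index 3) and the dual part carries half the translation:

print(numpy_translation((2.0, 4.0, 6.0)))  # -> [0. 0. 0. 1. 1. 2. 3. 0.]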
|
8180449ec6128237f63b4519117553e85a2d1369
| 3,640,447
|
def sort_car_models(car_db):
"""return a copy of the cars dict with the car models (values)
sorted alphabetically"""
sorted_db = {}
for model in car_db:
sorted_db[model] = sorted(car_db[model])
return sorted_db
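
A quick example (a new dict with sorted model lists is returned):

cars = {"Ford": ["Focus", "Fiesta"], "VW": ["Polo", "Golf"]}
print(sort_car_models(cars))
# -> {'Ford': ['Fiesta', 'Focus'], 'VW': ['Golf', 'Polo']}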
|
a478f16ece83058ba411480b91584e4c61026141
| 3,640,448
|
import requests
from urllib.parse import urljoin
def test_module(params) -> str:
    """Tests API connectivity and authentication.
    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.
    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
try:
url = params.get('url')[:-1] if str(params.get('url')).endswith('/') \
else params.get('url')
credentials = params.get('apikey')
creds = "Bearer " + credentials
headers = {"Authorization": creds}
url = urljoin(url, '/customer/getSubmission/7bf5ba92-30e1-4d42-821f-6d4ac94c3be1')
response = requests.request("GET", url, headers=headers)
status = response.status_code
if status != 200:
if 'UnauthorizedError' in str(response.content):
return 'Authorization Error: make sure API Key is correctly set'
else:
return str(status)
except Exception as e:
raise e
return 'ok'
|
64d45742c82854a9eb3d4e04aadbe05d458f9aca
| 3,640,449
|
from typing import Tuple
from typing import Container
import asyncio
def create(auto_remove: bool = False) -> Tuple[str, str]:
"""
Creates a database inside a docker container
:return: container name, database name
:rtype: Tuple[str, str]
"""
piccolo_docker_repository = PiccoloDockerRepository(auto_remove=auto_remove)
piccolo_docker_repository.create_container()
container: Container = piccolo_docker_repository.container
database_name: str = ""
if container:
loop = asyncio.get_event_loop()
database_name = loop.run_until_complete(
piccolo_docker_repository.create_database()
)
return container.name, database_name
|
c875b034b564d85e104655086b7faa1ed7a2df2c
| 3,640,450
|
def get_by_id(db: Session, work_item_id):
"""Get a specified WorkItem and return it."""
workitem = db.get(WorkItem, work_item_id)
if workitem:
return workitem
if not workitem:
logger.debug("Item not found")
raise HTTPException(status_code=404, detail="Item not found")
|
c9e8899ec9d159115e03f91ab9e9656984f20cf9
| 3,640,451
|
import os
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader, SequentialSampler
from transformers import DataCollatorForLanguageModeling
def evaluate(args, model, eval_dataset, tokenizer, step, prefix=""):
"""
Evaluation of model
:param args: input arguments from parser
:param model: pytorch model to be evaluated
:param eval_dataset: dataset used for evaluation
:param tokenizer: tokenizer used by the model
:param step: the current step in training
:param prefix: prescript to be added to the beginning of save file
:return: results of evaluation
"""
# Loop to handle MNLI double evaluation (matched, mis-matched)
print('')
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
eval_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=True,
mlm_probability=0.15
)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,
batch_size=eval_batch_size,
collate_fn=data_collator
)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader,
desc="Evaluating",
position=0,
leave=True):
with torch.no_grad():
outputs = model(input_ids=batch['input_ids'].to(args.device),
labels=batch['labels'].to(args.device))
loss = outputs['loss']
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss /= nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {
"perplexity": perplexity,
'loss': eval_loss,
"Iteration": str(step)
}
output_eval_file = os.path.join(eval_output_dir, prefix,
"eval_results.txt")
with open(output_eval_file, "a") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write('\n')
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s, " % (key, str(result[key])))
writer.close()
return result
|
720898cff8566b7f48f5c11abde8de0875797f87
| 3,640,452
|
import os
import subprocess
import time
import itertools
def test(path, shell, indent=2):
"""Run test at path and return input, output, and diff.
This returns a 3-tuple containing the following:
(list of lines in test, same list with actual output, diff)
diff is a generator that yields the diff between the two lists.
If a test exits with return code 80, the actual output is set to
None and diff is set to [].
"""
indent = ' ' * indent
cmdline = '%s$ ' % indent
conline = '%s> ' % indent
f = open(path)
abspath = os.path.abspath(path)
env = os.environ.copy()
env['TESTDIR'] = os.path.dirname(abspath)
env['TESTFILE'] = os.path.basename(abspath)
p = subprocess.Popen([shell, '-'], bufsize=-1, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, env=env,
preexec_fn=makeresetsigpipe(),
close_fds=os.name == 'posix')
salt = 'CRAM%s' % time.time()
after = {}
refout, postout = [], []
i = pos = prepos = -1
stdin = []
for i, line in enumerate(f):
refout.append(line)
if line.startswith(cmdline):
after.setdefault(pos, []).append(line)
prepos = pos
pos = i
stdin.append('echo "\n%s %s $?"\n' % (salt, i))
stdin.append(line[len(cmdline):])
elif line.startswith(conline):
after.setdefault(prepos, []).append(line)
stdin.append(line[len(conline):])
elif not line.startswith(indent):
after.setdefault(pos, []).append(line)
stdin.append('echo "\n%s %s $?"\n' % (salt, i + 1))
output = p.communicate(input=''.join(stdin))[0]
if p.returncode == 80:
return (refout, None, [])
# Add a trailing newline to the input script if it's missing.
if refout and not refout[-1].endswith('\n'):
refout[-1] += '\n'
    # We use str.split instead of splitlines to get consistent
    # behavior between Python 2 and 3. In 3, we use unicode strings,
    # which recognise more line-break characters than just \n and \r.
pos = -1
ret = 0
for i, line in enumerate(output[:-1].split('\n')):
line += '\n'
if line.startswith(salt):
presalt = postout.pop()
if presalt != '%s\n' % indent:
postout.append(presalt[:-1] + ' (no-eol)\n')
ret = int(line.split()[2])
if ret != 0:
postout.append('%s[%s]\n' % (indent, ret))
postout += after.pop(pos, [])
pos = int(line.split()[1])
else:
if needescape(line):
line = escape(line)
postout.append(indent + line)
postout += after.pop(pos, [])
diffpath = os.path.basename(abspath)
diff = unified_diff(refout, postout, diffpath, diffpath + '.err')
for firstline in diff:
return refout, postout, itertools.chain([firstline], diff)
return refout, postout, []
|
a973190b180ca2bf2c46ed93ee91c55275235d1c
| 3,640,453
|
import re
def load_expected_results(file, pattern):
"""Reads the file, named file, which contains test results separated
by the regular expression pattern.
The test results are returned as a dictionary.
"""
expected = {}
compiled_pattern = re.compile(pattern)
    with open(file, encoding='utf-8') as f:
        test = None
        for line in f:
            line = line.rstrip()
match = compiled_pattern.search(line)
if match:
test = match.groups()[0]
expected[test] = ''
else:
expected[test] += line + '\n'
return expected
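# Added usage sketch (not part of the original snippet): writes a small
# hypothetical results file and splits it on a "== name ==" header pattern.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as _f:
        _f.write("== test_a ==\nline 1\n== test_b ==\nline 2\n")
    print(load_expected_results(_f.name, r"== (\w+) =="))  # {'test_a': 'line 1\n', 'test_b': 'line 2\n'}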
|
05e20e2e6932c2a4db634f48046ea3e3f6e5dedc
| 3,640,454
|
def following(request):
"""View all posts from followed users"""
if request.method == "GET":
user = User.objects.get(pk=request.user.id)
following = user.follow_list.following.all()
# Post pagination: https://docs.djangoproject.com/en/3.1/topics/pagination/
posts = Post.objects.filter(user__in=following).order_by("-date")
following_paginator = Paginator(posts, 10)
following_page = request.GET.get('page')
page_obj = following_paginator.get_page(following_page)
else:
return redirect("index")
context = {"page_obj": page_obj}
return render(request, "network/following.html", context)
|
1b350c835d6bbf6d51c1a99d56d405a989f681a2
| 3,640,455
|
import numpy as np
from shapely.geometry import Polygon  # assumed: Polygon is not defined in this snippet
def create_random_polygon(min_x, min_y, max_x, max_y, vertex_num):
"""Create a random polygon with the passed x and y bounds and the passed number of vertices; code adapted from: https://stackoverflow.com/a/45841790"""
# generate the point coordinates within the bounds
x = np.random.uniform(min_x, max_x, vertex_num)
y = np.random.uniform(min_y, max_y, vertex_num)
# determine the center of all points
center = (sum(x) / vertex_num, sum(y) / vertex_num)
# find the angle of each point from the center
angles = np.arctan2(x - center[0], y - center[1])
# sort points by their angle from the center to avoid self-intersections
points_sorted_by_angle = sorted([(i, j, k) for i, j, k in zip(x, y, angles)], key=lambda t: t[2])
# the process fails if there are duplicate points
if len(points_sorted_by_angle) != len(set(points_sorted_by_angle)):
return None
# structure points as x-y tuples
points = [(x, y) for (x, y, a) in points_sorted_by_angle]
# create the polygon
return Polygon(points)
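# Added usage sketch (not part of the original snippet): the helper returns
# None when duplicate points are drawn, so callers typically retry.
if __name__ == "__main__":
    _poly = None
    while _poly is None:
        _poly = create_random_polygon(0.0, 0.0, 10.0, 10.0, vertex_num=6)
    print(_poly.is_valid, _poly.area)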
|
ad04591daf524bd0c97890a36722233cd08c4e5e
| 3,640,456
|
def document_hidden(session):
"""Polls for the document to become hidden."""
def hidden(session):
return session.execute_script("return document.hidden")
return Poll(session, timeout=3, raises=None).until(hidden)
|
21376291398aea859ed0f4d080a7bf617d93521f
| 3,640,457
|
from flask import Blueprint
def create_blueprint():
    """Creates a Blueprint"""
    # `tasks` below is assumed to be an imported views module exposing create/patch/list handlers
blueprint = Blueprint('Tasks Blueprint', __name__, url_prefix='/tasks')
blueprint.route('/', methods=['POST'])(tasks.create)
blueprint.route('/', methods=['PATCH'])(tasks.patch)
blueprint.route('/', methods=['GET'])(tasks.list)
return blueprint
|
c0e0412e599e4f4378efa2e2749f957a8b58043b
| 3,640,458
|
import gzip
import numpy as np
import pybedtools as pbt  # assumed alias; `pbt` is not defined in this snippet
def estimate_null_variance_gs(gs_lists, statslist, Wsq, single_gs_hpo=False,
n_or_bins=1):
"""
Estimates null variance from the average of a list of known causal windows
"""
statspaths = {h : p for h, p in [x.rstrip().split('\t')[:2] \
for x in open(statslist).readlines()]}
with gzip.open(list(statspaths.values())[0], 'rt') as ex_statfile:
statscols = ex_statfile.readline().rstrip().split('\t')
# Estimate null variance for each entry in gs_lists
for gspath in gs_lists:
for hpo, statspath in statspaths.items():
# Intersect sumstats for phenotype with GS regions
gsdf = pbt.BedTool(statspath).\
intersect(pbt.BedTool(gspath), u=True, f=1.0).\
to_dataframe(names=statscols)
gsdf['window'] = gsdf[['#chr', 'start', 'end']].astype(str).\
aggregate('_'.join, axis=1)
# Read effect sizes per window and convert to mean variance
stats = gsdf.loc[:, 'window meta_lnOR'.split()].\
rename(columns={'meta_lnOR' : 'lnOR'})
gs_var = np.nanmean((stats.lnOR.astype(float) / 1.96) ** 2)
# Update Wsq estimates for all sig. and effect size quantiles
if single_gs_hpo:
for hpo in Wsq.keys():
for sig in 'gw fdr'.split():
for i in range(n_or_bins):
Wsq[hpo][sig][i].append(gs_var)
break
else:
for sig in 'gw fdr'.split():
for i in range(n_or_bins):
Wsq[hpo][sig][i].append(gs_var)
return Wsq
|
755f35c108b9045d237a6cbfa442e7b6b0c24829
| 3,640,459
|
import torch
def create_model(config):
"""Create the score model."""
model_name = config.model.name
score_model = get_model(model_name)(config)
score_model = score_model.to(config.device)
score_model = torch.nn.DataParallel(score_model)
return score_model
|
ca0b9fa1c68d83c1697d82273fc740ba35826c87
| 3,640,460
|
import gc
def at(addr):
"""Look up an object by its id."""
for o in gc.get_objects():
if id(o) == addr:
return o
return None
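# Added usage sketch (not part of the original snippet): id() of a live,
# gc-tracked object round-trips back to the same object.
if __name__ == "__main__":
    _obj = [1, 2, 3]
    assert at(id(_obj)) is _obj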
|
f408b9a63afad1638f156163c6249e0e8095bff4
| 3,640,461
|
import warnings
import numpy as np
def money_flow_index(close_data, high_data, low_data, volume, period):
"""
Money Flow Index.
Formula:
MFI = 100 - (100 / (1 + PMF / NMF))
"""
catch_errors.check_for_input_len_diff(
close_data, high_data, low_data, volume
)
catch_errors.check_for_period_error(close_data, period)
mf = money_flow(close_data, high_data, low_data, volume)
tp = typical_price(close_data, high_data, low_data)
flow = [tp[idx] > tp[idx-1] for idx in range(1, len(tp))]
pf = [mf[idx] if flow[idx] else 0 for idx in range(0, len(flow))]
nf = [mf[idx] if not flow[idx] else 0 for idx in range(0, len(flow))]
pmf = [sum(pf[idx+1-period:idx+1]) for idx in range(period-1, len(pf))]
nmf = [sum(nf[idx+1-period:idx+1]) for idx in range(period-1, len(nf))]
# Dividing by 0 is not an issue, it turns the value into NaN which we would
# want in that case
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
money_ratio = np.array(pmf) / np.array(nmf)
mfi = 100 - (100 / (1 + money_ratio))
mfi = fill_for_noncomputable_vals(close_data, mfi)
return mfi
|
7c122ae10ef406fabf56f63ac80a35557999c2ee
| 3,640,462
|
def Get_Weights(dict_rank):
"""Converts rankings into weights."""
Weights = adapt.create_Weightings(dict_rank)
return Weights
|
d50b9dcad7803a34c969f357cd4659ffe49ab740
| 3,640,463
|
import os
import torch
def load_client_model(models_path, config):
"""
Returns Pytorch client model loaded given the client model's path
"""
device = load_device()
client_hparams = config.get("client_hparams")
for needed_param in client_hparams.get("needed", []):
client_hparams[needed_param] = config.get(needed_param)
model_file = os.path.join(models_path, "client_model.pt")
model = models.ResNet18Client(client_hparams)
try:
model.load_state_dict(torch.load(model_file, map_location=device))
    except Exception:  # retry with a cleansed state dict if direct loading fails
model = models.ResNet18Client(client_hparams)
state_dict = cleanse_state_dict(torch.load(model_file, map_location=device))
model.load_state_dict(state_dict)
if torch.cuda.is_available():
model.cuda()
return model
|
c9af03430daf779c4d5f84cf873e2bf02131ddef
| 3,640,464
|
def psisloo(log_likelihood):
"""
Summarize the model fit using Pareto-smoothed importance sampling (PSIS)
and approximate Leave-One-Out cross-validation (LOO).
Takes as input an ndarray of posterior log likelihood terms [ p( y_i | theta^s ) ]
per observation unit.
    e.g., if using pystan:
loosummary = stanity.psisloo(stan_fit.extract()['log_lik'])
Returns a Psisloo object. Useful methods such as print_summary() & plot().
References
----------
Aki Vehtari, Andrew Gelman and Jonah Gabry (2015). Efficient implementation
of leave-one-out cross-validation and WAIC for evaluating fitted Bayesian
models. arXiv preprint arXiv:1507.04544.
Aki Vehtari and Andrew Gelman (2015). Pareto smoothed importance sampling.
arXiv preprint arXiv:1507.02646.
"""
return Psisloo(log_likelihood)
|
5500ebd85eb9ac796b0410756e4b51f674890ca6
| 3,640,465
|
from typing import Tuple
from typing import Optional
from typing import Pattern
import magic  # python-magic (assumed); provides magic.from_buffer
def rxdelim(content: str) -> Tuple[Optional[Pattern], Optional[Pattern]]:
"""
Return suitable begin and end delimiters for the content `content`.
If no matching delimiters are found, return `None, None`.
"""
tp = magic.from_buffer(content).lower()
for rxtp, rxbegin, rxend in DELIMITERS:
if rxtp.match(tp):
return rxbegin, rxend
return None, None
|
884efcd13846da9938b6f120cb3d8963addd0b42
| 3,640,466
|
def GenKeyOrderAttrs(soappy_service, ns, type_name):
"""Generates the order and attributes of keys in a complex type.
Args:
soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object encapsulating
the information stored in the WSDL.
ns: string The namespace the given WSDL-defined type belongs to.
type_name: string The name of the WSDL-defined type to search for.
Returns:
list A list of dictionaries containing the attributes of keys within a
complex type, in order.
"""
complex_type = soappy_service.wsdl.types[ns].types[type_name]
if IsASubType(type_name, ns, soappy_service):
# This is an extension of another type.
key_order = GenKeyOrderAttrs(
soappy_service,
complex_type.content.derivation.attributes['base'].getTargetNamespace(),
complex_type.content.derivation.attributes['base'].getName())
if hasattr(complex_type.content.derivation.content, 'content'):
key_order.extend([element.attributes for element in
complex_type.content.derivation.content.content])
return key_order
else:
# This is a base type.
return [element.attributes for element in complex_type.content.content]
|
794f74502b5db305f51bd560b388c64d305cd4e2
| 3,640,467
|
import numpy as np
def read_binary_stl(filename):
"""Reads a 3D triangular mesh from an STL file (binary format).
:param filename: path of the stl file
:type filename: str
:return: The vertices, normals and index array of the mesh
:rtype: Mesh
:raises: ValueError
"""
with open(filename, 'rb') as stl_file:
stl_file.seek(80)
face_count = np.frombuffer(stl_file.read(4), dtype=np.int32)[0]
record_dtype = np.dtype([
('normals', np.float32, (3,)),
('vertices', np.float32, (3, 3)),
('attr', '<i2', (1,)),
])
data = np.fromfile(stl_file, dtype=record_dtype)
if face_count != data.size:
raise ValueError('stl data has incorrect size')
vertices = data['vertices'].reshape(-1, 3)
indices = np.arange(face_count * 3).astype(np.uint32)
normals = np.repeat(data['normals'], 3, axis=0)
return Mesh(vertices, indices, normals, clean=True)
|
53ae2a1806413280719286813e091392d965ce76
| 3,640,468
|
import numpy as np
def monotonise_tree(tree, n_feats, incr_feats, decr_feats):
    """Helper to turn a tree into a set of rules by zeroing out leaf values
    whose decision path violates the increasing/decreasing feature constraints.
    """
PLUS = 0
MINUS = 1
mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
def traverse_nodes(node_id=0,
operator=None,
threshold=None,
feature=None,
path=None):
if path is None:
path = np.zeros([n_feats, 2])
else:
path[feature, PLUS if operator[0] == '>' else MINUS] = 1
if not node_is_leaf(
tree,
node_id):
feature = tree.feature[node_id]
threshold = tree.threshold[node_id]
left_node_id = tree.children_left[node_id]
traverse_nodes(left_node_id, "<=", threshold, feature, path.copy())
right_node_id = tree.children_right[node_id]
traverse_nodes(right_node_id, ">", threshold, feature, path.copy())
else: # a leaf node
if np.sum(path) > 0:
# check if all increasing
all_increasing = np.sum(np.asarray([path[i_feat,
MINUS] if i_feat + 1 in incr_feats else path[i_feat,
PLUS] for i_feat in mt_feats - 1])) == 0
all_decreasing = np.sum(np.asarray([path[i_feat,
MINUS] if i_feat + 1 in decr_feats else path[i_feat,
PLUS] for i_feat in mt_feats - 1])) == 0
counts = np.asarray(tree.value[node_id][0])
probs = counts / np.sum(counts)
predicted_value = np.sign(probs[1] - 0.5)
if predicted_value >= 0 and all_increasing: # ok
pass
elif predicted_value <= 0 and all_decreasing: # ok
pass
else: # not a valid rule
tree.value[node_id][0] = [0., 0.]
else:
print('Tree has only one node (i.e. the root node!)')
return None
if len(mt_feats) > 0:
traverse_nodes()
return tree
|
78405b1d2bf5c2617b248210058caca0b062b668
| 3,640,469
|
import lzma
import pickle as _pickle
def pickle(obj, filename):
    """Write `obj` to `filename` as an xz-compressed pickle.
    Minimal stand-in (assumed) for the package helper the original code calls;
    the stdlib `pickle` module itself is not callable.
    """
    with lzma.open(filename, "wb") as f:
        _pickle.dump(obj, f)
def pythonify_and_pickle(file, out_filename):
"""Convert all the data in the XML file and save as pickled files for
nodes, ways, relations and tags separately.
:param file: Filename (the file will be opened 4 times, so passing a file
object will not work). Can be anything which :module:`digest` can parse.
:param out_filename: If is `test` then writes files `test_nodes.pic.xz`
through `test_tags.pic.xz`
:return: A tuple of the 4 output filenames for nodes, ways, relations
and tags.
"""
obj = NodesPacked(file)
out = [out_filename + "_nodes.pic.xz"]
pickle(obj, out[0])
for typpe, name in [(Ways, "ways"), (Relations, "relations"),
(Tags, "tags")]:
obj = None
obj = typpe(file)
name = "{}_{}.pic.xz".format(out_filename, name)
pickle(obj, name)
out.append(name)
return out
|
4140c9e66b9a43b6880b152c50facf89ba723339
| 3,640,470
|
def compute_inverse_volatility_weights(df: pd.DataFrame) -> pd.Series:
"""
Calculate inverse volatility relative weights.
:param df: cols contain log returns
:return: series of weights
"""
dbg.dassert_isinstance(df, pd.DataFrame)
dbg.dassert(not df.columns.has_duplicates)
# Compute inverse volatility weights.
# The result of `compute_volatility_normalization_factor()`
# is independent of the `target_volatility`.
weights = df.apply(
lambda x: compute_volatility_normalization_factor(
x, target_volatility=0.1
)
)
# Replace inf's with 0's in weights.
weights.replace([np.inf, -np.inf], np.nan, inplace=True)
# Rescale weights to percentages.
weights /= weights.sum()
weights.name = "weights"
# Replace NaN with zero for weights.
weights = hdataf.apply_nan_mode(weights, mode="fill_with_zero")
return weights
|
347343729dc271dd161f419a394151b99f1ce876
| 3,640,471
|
def resattnet164(**kwargs):
"""
ResAttNet-164 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resattnet(blocks=164, model_name="resattnet164", **kwargs)
|
35023591d70a577526280489c19f47fffb0800a2
| 3,640,472
|
def stype(obj):
"""
Return string shape representation of structured objects.
>>> import numpy as np
>>> a = np.zeros((3,4), dtype='uint8')
>>> b = np.zeros((1,2), dtype='float32')
>>> stype(a)
'<ndarray> 3x4:uint8'
>>> stype(b)
'<ndarray> 1x2:float32'
>>> stype([a, (b, b)])
'[<ndarray> 3x4:uint8, (<ndarray> 1x2:float32, <ndarray> 1x2:float32)]'
>>> stype([1, 2.0, [a], [b]])
'[<int> 1, <float> 2.0, [<ndarray> 3x4:uint8], [<ndarray> 1x2:float32]]'
>>> stype({'a':a, 'b':b, 'c':True})
'{a:<ndarray> 3x4:uint8, b:<ndarray> 1x2:float32, c:<bool> True}'
:param object obj: Any object
:return: String representation of object where arrays are replace by their
shape and dtype descriptions
:rtype: str
"""
typestr = lambda obj: '<' + type(obj).__name__ + '> '
mklist = lambda obj: ', '.join(stype(o) for o in obj)
mkset = lambda obj: ', '.join(stype(o) for o in sorted(obj))
mkdict = lambda obj: ', '.join(
str(k) + ':' + stype(v) for k, v in sorted(obj.items()))
if istensor(obj, ['shape', 'dtype']):
return typestr(obj) + shapestr(obj, True)
if isinstance(obj, list):
return '[' + mklist(obj) + ']'
if isinstance(obj, tuple):
return '(' + mklist(obj) + ')'
if isinstance(obj, set):
return '{' + mkset(obj) + '}'
if isinstance(obj, dict):
return '{' + mkdict(obj) + '}'
return typestr(obj) + str(obj)
|
76b805684361a13f03955692dacd02c045c43bd9
| 3,640,473
|
def board2key(Z):
""" Turn a "Game of Life" board into a key.
"""
return(bin2hex(array2string(Z[1:-1, 1:-1].reshape((1, 512 * 512 * 4))[0])))
|
101b4ecdf03e9a9a832434d59e2eaa9a6bed2ef5
| 3,640,474
|
def CipherArray(Array = [[" "]," "], Random = 1):
"""
    Array - array to encode
    Random - key number used for encoding
    It is a function that encodes elements.
    Returns an array consisting of encoded elements
    """
    if (type(Array) != list):
        raise TypeError("Invalid array format")
    if (type(Random) != int):
        raise TypeError("Invalid key format")
for i in range(len(Array)):
for j in range(len(Array[i])):
Array[i][j] = chr(ord(Array[i][j]) * Random)
return Mover(Array)
|
a2d472cd49a803a4a08f0fba5362b54cce24c37a
| 3,640,475
|
def voltage(raw_value, v_min=0, v_max=10, res=32760, gain=1):
"""Converts a raw value to a voltage measurement.
``V = raw_value / res * (v_max - v_min) * gain``
"""
return (float(raw_value) / res * (v_max - v_min) * gain, "V")
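# Added worked example (not part of the original snippet): half of the 32760
# raw range over a 0-10 V span with unit gain corresponds to 5 V.
if __name__ == "__main__":
    assert voltage(16380) == (5.0, "V")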
|
b4ea7d2521e1fa856a21b98ace2a9490f8a3b043
| 3,640,476
|
def extract_characteristics_from_string(species_string):
"""
    For SBML export, species are named as species_name_dot_characteristic1_dot_characteristic2,
    so this transforms such a string into a set of its components.
Parameters:
species_string (str) = species string in MobsPy for SBML format (with _dot_ instead of .)
"""
return set(species_string.split('_dot_'))
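# Added usage sketch (not part of the original snippet): the species name and
# its characteristics come back as an unordered set.
if __name__ == "__main__":
    assert extract_characteristics_from_string("Ecoli_dot_red_dot_fast") == {"Ecoli", "red", "fast"}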
|
abfcc0d3e425e8f43d776a02254a04b0e85dc6d1
| 3,640,477
|
def _get_bzr_version():
"""Looks up bzr version by calling bzr --version.
:raises: VcsError if bzr is not installed"""
try:
value, output, _ = run_shell_command('bzr --version',
shell=True,
us_env=True)
if value == 0 and output is not None and len(output.splitlines()) > 0:
version = output.splitlines()[0]
else:
raise VcsError("bzr --version returned %s," +
" maybe bzr is not installed" %
value)
except VcsError as e:
raise VcsError("Coud not determine whether bzr is installed: %s" % e)
return version
|
fb0171fe286e6251b25536dd40323c4af73a1255
| 3,640,478
|
def C(source):
"""Compile at runtime and run code in-line"""
return _embed_or_inline_c(source, True)
|
d1bd11370a1df3c93209b8d60046c077ac872d3e
| 3,640,479
|
import numpy as np
def normalize(x):
    """Min-max scale the original data set to the range [0, 1]."""
max_x = np.max(x, axis=0)
min_x = np.min(x, axis=0)
x = (x-min_x) / (max_x-min_x)
return x
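# Added usage sketch (not part of the original snippet): each column is scaled
# independently to the [0, 1] range.
if __name__ == "__main__":
    _x = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    print(normalize(_x))  # both columns become [0.0, 0.5, 1.0]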
|
eba8ff32dca072b134c689d727e0246d7563a95d
| 3,640,480
|
def _diff_bearings(bearings, bearing_thresh=40):
"""
Identify kinked nodes (nodes that change direction of an edge) by diffing
Args:
bearings (list(tuple)): containing (start_node, end_node, bearing)
bearing_thresh (int): threshold for identifying kinked nodes (range 0, 360)
Returns:
list[str] of kinked nodes
"""
kinked_nodes = []
# diff bearings
nodes = [b[0] for b in bearings]
bearings_comp = [b[2] for b in bearings]
bearing_diff = [y - x for x, y in zip(bearings_comp, bearings_comp[1:])]
node2bearing_diff = list(zip(nodes[1:-1], bearing_diff))
# id nodes to remove
for n in node2bearing_diff:
# controlling for differences on either side of 360
        if min(abs(n[1]) % 360, 360 - (abs(n[1]) % 360)) > bearing_thresh:
kinked_nodes.append(n[0])
return kinked_nodes
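# Added usage sketch (not part of the original snippet): the middle node turns
# by about 90 degrees, well past the default 40 degree threshold, so it is the
# only node reported as kinked.
if __name__ == "__main__":
    _bearings = [("a", "b", 10), ("b", "c", 100), ("c", "d", 105)]
    print(_diff_bearings(_bearings))  # ['b']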
|
a29c3cdd009065d7a73dd993ae66f81853d5e2bc
| 3,640,481
|
from typing import Any
import json
import aiohttp
import re
async def request(method: str,
url: str,
params: dict = None,
data: Any = None,
credential: Credential = None,
no_csrf: bool = False,
json_body: bool = False,
**kwargs):
"""
向接口发送请求。
Args:
method (str) : 请求方法。
url (str) : 请求 URL。
params (dict, optional) : 请求参数。
data (Any, optional) : 请求载荷。
credential (Credential, optional): Credential 类。
no_csrf (bool, optional) : 不要自动添加 CSRF。
json_body (bool, optional) 载荷是否为 JSON
Returns:
接口未返回数据时,返回 None,否则返回该接口提供的 data 或 result 字段的数据。
"""
if credential is None:
credential = Credential()
method = method.upper()
    # Non-GET requests require bili_jct unless no_csrf is True
    if method != 'GET' and not no_csrf:
        credential.raise_for_no_bili_jct()
    # Use Referer and User-Agent headers to get past anti-crawler checks
DEFAULT_HEADERS = {
"Referer": "https://www.bilibili.com",
"User-Agent": "Mozilla/5.0"
}
headers = DEFAULT_HEADERS
if params is None:
params = {}
    # Automatically add the CSRF token
if not no_csrf and method in ['POST', 'DELETE', 'PATCH']:
if data is None:
data = {}
data['csrf'] = credential.bili_jct
data['csrf_token'] = credential.bili_jct
# jsonp
if params.get("jsonp", "") == "jsonp":
params["callback"] = "callback"
config = {
"method": method,
"url": url,
"params": params,
"data": data,
"headers": headers,
"cookies": credential.get_cookies()
}
config.update(kwargs)
if json_body:
config["headers"]["Content-Type"] = "application/json"
config["data"] = json.dumps(config["data"])
    # Set the proxy if the user provided one
if settings.proxy:
config["proxy"] = settings.proxy
session = get_session()
async with session.request(**config) as resp:
        # Check the status code
try:
resp.raise_for_status()
except aiohttp.ClientResponseError as e:
raise NetworkException(e.status, e.message)
        # Check the Content-Length response header
content_length = resp.headers.get("content-length")
if content_length and int(content_length) == 0:
return None
        # Check the Content-Type response header
        content_type = resp.headers.get("content-type")
        # Not application/json
        if content_type is None or content_type.lower().find("application/json") == -1:
            raise ResponseException("Response is not of type application/json")
raw_data = await resp.text()
resp_data: dict
if 'callback' in params:
            # JSONP request
resp_data = json.loads(
re.match("^.*?({.*}).*$", raw_data, re.S).group(1))
else:
# JSON
resp_data = json.loads(raw_data)
        # Check the response code
code = resp_data.get("code", None)
if code is None:
            raise ResponseCodeException(-1, "The API response does not contain a code field", resp_data)
if code != 0:
msg = resp_data.get('msg', None)
if msg is None:
msg = resp_data.get('message', None)
if msg is None:
msg = "接口未返回错误信息"
raise ResponseCodeException(code, msg, resp_data)
real_data = resp_data.get("data", None)
if real_data is None:
real_data = resp_data.get("result", None)
return real_data
|
68b68df293f474ecfff5fdd7ae93f0431d700d50
| 3,640,482
|
def InflRate():
"""Inflation rate"""
return asmp.InflRate()
|
33fcaa24cc00875e059850574469a95bcab3b469
| 3,640,483
|
def author_single_view(request, slug):
"""
Render Single User
:param request:
:param slug:
:return:
"""
author = get_object_or_404(Profile, slug=slug)
author_forum_list = Forum.objects.filter(forum_author=author.id).order_by("-is_created")[:10]
author_comments = Comment.objects.filter(comment_author=author.id).order_by("-is_created")[:10]
total_forums = Forum.objects.filter(forum_author=author.id).annotate(num_comments=Count('forum_author'))
total_comments = Comment.objects.filter(comment_author=author.id).annotate(num_comments=Count('comment_author'))
template = 'app_author/author_single.html'
context = {
'author': author,
'author_forum_list': author_forum_list,
'author_comments': author_comments,
'total_forums': total_forums,
'total_comments': total_comments
}
return render(request, template, context)
|
506ec5f980d5ee59809358ac2add7cfcd0327a60
| 3,640,484
|
def get_predefined(schedule):
"""
Predefined learn rate changes at specified epochs
    :param schedule: dictionary that maps epochs to learn rate values.
"""
def update(lr, epoch):
if epoch in schedule:
return floatX(schedule[epoch])
else:
return floatX(lr)
return update
|
5cb9fab3bb3b4b4d868504953d78e3f93f5a7198
| 3,640,485
|
def launch_ec2_instances(config, nb=1):
"""
Launch new ec2 instance(s)
"""
conf = config[AWS_CONFIG_SECTION]
ami_image_id = conf.get(AMI_IMAGE_ID_FIELD)
ami_name = conf.get(AMI_IMAGE_NAME_FIELD)
if ami_image_id and ami_name:
        raise ValueError('The fields ami_image_id and ami_image_name cannot be both '
                         'specified at the same time. Please specify either ami_image_id '
                         'or ami_image_name')
if ami_name:
ami_image_id = _get_image_id(config, ami_name)
instance_type = conf[INSTANCE_TYPE_FIELD]
key_name = conf[KEY_NAME_FIELD]
security_group = conf[SECURITY_GROUP_FIELD]
logger.info('Launching {} new ec2 instance(s)...'.format(nb))
# tag all instances using RAMP_AWS_BACKEND_TAG to be able
# to list all instances later
tags = [{
'ResourceType': 'instance',
'Tags': [
{'Key': RAMP_AWS_BACKEND_TAG, 'Value': '1'},
]
}]
sess = _get_boto_session(config)
resource = sess.resource('ec2')
instances = resource.create_instances(
ImageId=ami_image_id,
MinCount=nb,
MaxCount=nb,
InstanceType=instance_type,
KeyName=key_name,
TagSpecifications=tags,
SecurityGroups=[security_group],
)
return instances
|
27b3aa745021f3a1516b09746db1c8111b8905d2
| 3,640,486
|
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, LogisticRegression
def residual_error(X_train, X_test, y_train, y_test, reg="linear"):
    """
    Plot the residual error of the Regression model for the input data,
and return the fitted Regression model.
-------------------------------------------------------------------
# Parameters
# X_train,X_test,y_train,y_test (np.arrays): Given X, a 2-D array of Data,
and y, an array of target data, we can use:
sklearn.model_selection.train_test_split(X,y)
to obtain X_train, X_test, y_train, and y_test.
    # reg (string): Whether the regression model is linear or logistic (default="linear").
"""
if reg.lower() == "linear":
reg=LinearRegression()
reg.fit(X_train,y_train)
elif reg.lower() == "logistic":
reg=LogisticRegression()
reg.fit(X_train,y_train)
## setting plot style
plt.style.use('fivethirtyeight')
## plotting residual errors in training data
plt.scatter(reg.predict(X_train), reg.predict(X_train) - y_train,
color = "green", s = 10, label = 'Train data')
## plotting residual errors in test data
plt.scatter(reg.predict(X_test), reg.predict(X_test) - y_test,
color = "blue", s = 10, label = 'Test data')
## plotting line for zero residual error
plt.hlines(y = 0, xmin = 0, xmax = 50, linewidth = 2)
## plotting legend
plt.legend(loc = 'upper right')
## plot title
plt.title("Residual errors")
return reg
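# Added usage sketch (not part of the original snippet): fits a plain linear
# regression on synthetic data and shows the residual plot.
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    from sklearn.model_selection import train_test_split
    _X, _y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=0)
    _model = residual_error(*train_test_split(_X, _y, random_state=0))
    plt.show()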
|
38513473122ff1430f6f6cf44eb984086bcdda72
| 3,640,487
|
from PIL import Image
def centerSquare(pil_img: Image.Image):
"""Adds padding on both sides to make an image square. (Centered)"""
pil_img = pil_img.convert('RGBA') # ensure transparency
background_color = (0, 0, 0, 0)
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, (width - height) // 2))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, ((height - width) // 2, 0))
return result
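# Added usage sketch (not part of the original snippet): a 200x100 image comes
# back as a 200x200 RGBA image with transparent padding above and below.
if __name__ == "__main__":
    _img = Image.new("RGB", (200, 100), (255, 0, 0))
    _squared = centerSquare(_img)
    assert _squared.size == (200, 200) and _squared.mode == "RGBA"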
|
a991fa4a7a877334bba9e93126e4c8561c27e6f7
| 3,640,488
|
def _convert_steplist_to_string(step_data):
"""Converts list of step data into a single string.
Parameters
----------
step_data : list
List of step data
Returns
-------
str
A space delimited string where every 6th value is followed by a newline.
"""
text = ''
for i, datum in enumerate(step_data):
if i == 0:
text += f'\n{datum}\n'
else:
if i%6 == 0:
text += f'{datum}\n'
else:
text += f'{datum} '
return text
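# Added usage sketch (not part of the original snippet): the first value gets
# its own line and every sixth value after it closes a line.
if __name__ == "__main__":
    print(repr(_convert_steplist_to_string(list(range(8)))))  # '\n0\n1 2 3 4 5 6\n7 '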
|
112495edbafc3db39946d7abeefff6466e2dff94
| 3,640,489
|
from typing import Optional
import pulumi
def get_global_public_delegated_prefix(project: Optional[str] = None,
public_delegated_prefix: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGlobalPublicDelegatedPrefixResult:
"""
Returns the specified global PublicDelegatedPrefix resource.
"""
__args__ = dict()
__args__['project'] = project
__args__['publicDelegatedPrefix'] = public_delegated_prefix
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:compute/beta:getGlobalPublicDelegatedPrefix', __args__, opts=opts, typ=GetGlobalPublicDelegatedPrefixResult).value
return AwaitableGetGlobalPublicDelegatedPrefixResult(
creation_timestamp=__ret__.creation_timestamp,
description=__ret__.description,
fingerprint=__ret__.fingerprint,
ip_cidr_range=__ret__.ip_cidr_range,
is_live_migration=__ret__.is_live_migration,
kind=__ret__.kind,
name=__ret__.name,
parent_prefix=__ret__.parent_prefix,
public_delegated_sub_prefixs=__ret__.public_delegated_sub_prefixs,
region=__ret__.region,
self_link=__ret__.self_link,
status=__ret__.status)
|
7c48f4ccce1fb1640d3e3851d9e5481a9dd6a281
| 3,640,490
|
import numpy as np
def has_conformer(molecule, check_two_dimension=False):
"""
Check if conformer exists for molecule. Return True or False
Parameters
----------
molecule
check_two_dimension: bool, optional. Default False
If True, will also check if conformation is a 2D conformation (all z coordinates are zero) and return False if
conformation is 2D
Returns
-------
"""
conformer_bool = True
try:
if molecule.NumConfs() <= 1:
# Check if xyz coordinates are not zero
for conf in molecule.GetConfs():
# print(conf.GetCoords().__len__())
# coords = molecule.GetCoords()
# values = np.asarray(list(coords.values()))
# print(values)
# print(values.all())
# if not values.all():
# conformer_bool = False
#for i in range(conf.GetCoords().__len__()):
values = np.asarray([conf.GetCoords().__getitem__(i) == (0.0, 0.0, 0.0) for i in
conf.GetCoords()])
if values.all():
conformer_bool = False
except AttributeError:
conformer_bool = False
if conformer_bool and check_two_dimension:
for conf in molecule.GetConfs():
values = np.asarray([conf.GetCoords().__getitem__(i)[-1] == 0.0 for i in conf.GetCoords()])
if values.all():
conformer_bool = False
return conformer_bool
|
fd0501a70f3ad002612be7d0625678ccc9f24dc9
| 3,640,491
|
import numpy as np
def pad_sequence(yseqs, batch_first=False, padding_value=0):
    """Numpy implementation of torch.nn.utils.rnn.pad_sequence
    Args:
        yseqs (list of np.ndarray): List of arrays. (B, *)
        batch_first (bool):
        padding_value (int, optional): Padding value. Defaults to 0.
    Returns:
        np.ndarray
    Examples:
        >>> a = np.ones((25, 300))
        >>> b = np.ones((22, 300))
        >>> c = np.ones((15, 300))
        >>> pad_sequence([a, b, c]).shape
        (25, 3, 300)
        >>> pad_sequence([a, b, c], batch_first=True).shape
        (3, 25, 300)
"""
if len(yseqs) == 1:
return np.array(yseqs)
max_idx = np.argmax([y.shape[0] for y in yseqs])
max_shape = yseqs[max_idx].shape
base = np.ones((len(yseqs), *max_shape)) * padding_value
for i, y in enumerate(yseqs):
base[i][:y.shape[0]] = y
if batch_first:
return base
else:
return base.transpose(1, 0, *np.arange(2, len(base.shape)))
|
42fe65a15b39227a31b6022f8dae84cabd1888fb
| 3,640,492
|
async def fetch_sequence_id(session: _session.Session) -> int:
"""Fetch sequence ID."""
params = {
"limit": 0,
"tags": ["INBOX"],
"before": None,
"includeDeliveryReceipts": False,
"includeSeqID": True,
}
log.debug("Fetching MQTT sequence ID")
# Same doc id as in `Client.fetch_threads`
(j,) = await session._graphql_requests(_graphql.from_doc_id("1349387578499440", params))
sequence_id = j["viewer"]["message_threads"]["sync_sequence_id"]
if not sequence_id:
raise _exception.NotLoggedIn("Failed fetching sequence id")
return int(sequence_id)
|
f7fee50e13002ffa02110358c21a1d8794008f30
| 3,640,493
|
import sys
def main(args=None):
"""
Processes command line parameters into options and files, then checks
or update FITS DATASUM and CHECKSUM keywords for the specified files.
"""
errors = 0
fits_files = handle_options(args or sys.argv[1:])
setup_logging()
for filename in fits_files:
errors += process_file(filename)
if errors:
log.warning(f'{errors} errors')
return int(bool(errors))
|
563d7c7366e25932d42e6e01468b1db244faf64b
| 3,640,494
|
import re
def parse_transceiver_dom_sensor(output_lines):
"""
    @summary: Parse the list of transceivers from the DB table TRANSCEIVER_DOM_SENSOR content
@param output_lines: DB table TRANSCEIVER_DOM_SENSOR content output by 'redis' command
@return: Return parsed transceivers in a list
"""
result = []
p = re.compile(r"TRANSCEIVER_DOM_SENSOR\|(Ethernet\d+)")
for line in output_lines:
m = p.match(line)
assert m, "Unexpected line %s" % line
result.append(m.group(1))
return result
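# Added usage sketch (not part of the original snippet): each redis key line
# yields its interface name; any non-matching line trips the assert.
if __name__ == "__main__":
    _lines = ["TRANSCEIVER_DOM_SENSOR|Ethernet0", "TRANSCEIVER_DOM_SENSOR|Ethernet4"]
    assert parse_transceiver_dom_sensor(_lines) == ["Ethernet0", "Ethernet4"]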
|
367d6a744add04e7649c971ef8fec3788ed8db88
| 3,640,495
|
import math
import numpy as np
def superimposition_matrix(
v0: np.ndarray,
v1: np.ndarray,
scaling: bool = False,
usesvd: bool = True
) -> np.ndarray:
"""
Return matrix to transform given vector set into second vector set.
Args:
----
v0: shape (3, *) or (4, *) arrays of at least 3 vectors.
v1: shape (3, *) or (4, *) arrays of at least 3 vectors.
scaling: True scaling is desired.
usesvd: True if SVD decomposition is used.
If usesvd is True, the weighted sum of squared deviations (RMSD) is
minimized according to the algorithm by W. Kabsch [8]. Otherwise the
quaternion based algorithm by B. Horn [9] is used (slower when using
this Python implementation).
The returned matrix performs rotation, translation and uniform scaling
(if specified).
"""
v0 = np.array(v0, dtype=np.float64, copy=False)[:3]
v1 = np.array(v1, dtype=np.float64, copy=False)[:3]
if v0.shape != v1.shape or v0.shape[1] < 3:
raise ValueError('Vector sets are of wrong shape or type.')
# move centroids to origin
t0 = np.mean(v0, axis=1)
t1 = np.mean(v1, axis=1)
v0 = v0 - t0.reshape(3, 1)
v1 = v1 - t1.reshape(3, 1)
if usesvd:
# Singular Value Decomposition of covariance matrix
u, s, vh = np.linalg.svd(np.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = np.dot(u, vh)
if np.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= np.outer(u[:, 2], vh[2, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = np.identity(4)
M[:3, :3] = R
else:
# compute symmetric matrix N
xx, yy, zz = np.sum(v0 * v1, axis=1)
xy, yz, zx = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1)
N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx),
(yz-zy, xx-yy-zz, xy+yx, zx+xz),
(zx-xz, xy+yx, -xx+yy-zz, yz+zy),
(xy-yx, zx+xz, yz+zy, -xx-yy+zz))
# quaternion: eigenvector corresponding to most positive eigenvalue
l, V = np.linalg.eig(N)
q = V[:, np.argmax(l)]
q /= vector_norm(q) # unit quaternion
q = np.roll(q, -1) # move w component to end
# homogeneous transformation matrix
M = quaternion_matrix(q)
# scale: ratio of rms deviations from centroid
if scaling:
v0 *= v0
v1 *= v1
M[:3, :3] *= math.sqrt(np.sum(v1) / np.sum(v0))
# translation
M[:3, 3] = t1
T = np.identity(4)
T[:3, 3] = -t0
M = np.dot(M, T)
return M
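# Added usage sketch (not part of the original snippet): mapping a point cloud
# onto a translated copy of itself should recover the translation (identity
# rotation) in the last column of the homogeneous matrix.
if __name__ == "__main__":
    _v0 = np.random.rand(3, 10)
    _v1 = _v0 + np.array([[1.0], [2.0], [3.0]])
    _M = superimposition_matrix(_v0, _v1)
    print(np.round(_M[:3, 3], 6))  # approximately [1. 2. 3.]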
|
a83fb9532a59cffdd986c364825c32fa682a45dc
| 3,640,496
|
def get_graph_metadata(graph_id: int):
"""Returns the metadata for a single graph. This is automatically generated
by the datasource classes.
Parameters
----------
graph_id : int
Graph ID.
Returns 404 if the graph ID is not found
Returns
-------
Dict
A dictionary representing the metadata of the current graph.
"""
graph_obj = Graph.query.filter_by(id=graph_id).first()
if not graph_obj:
return make_response(jsonify({"message": "Graph not found"}), 404)
response = jsonify(graph_obj.meta)
return response
|
a3eb61fcaf901d8caa47da345a8279e4e7058a84
| 3,640,497
|
def username_exists(username, original=""):
"""Returns true if the given username exists."""
return username != original and User.objects.filter(username=username).count() > 0
|
16f9a53922d0141459327e79aba5678af9446536
| 3,640,498
|
import multiprocessing as mp
import pandas as pd
def set_n_jobs(n_jobs: int, x_df: pd.DataFrame) -> int:
"""
Sets the number of n_jobs, processes to run in parallel. If n_jobs is not specified, the max number of CPUs is
used. If n_jobs is set to a higher amount than the number of observations in x_df, n_jobs is rebalanced to match
the length of x_df.
:param n_jobs: number of jobs to run in parallel
:param x_df: x dataframe
:return: number of jobs to run in parallel, using the above logic
"""
if not n_jobs:
n_jobs = mp.cpu_count()
if n_jobs > len(x_df):
n_jobs = len(x_df)
return n_jobs
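# Added usage sketch (not part of the original snippet): with five observations
# the requested worker count is capped at five, and a falsy n_jobs falls back
# to the machine's CPU count before the cap is applied.
if __name__ == "__main__":
    _df = pd.DataFrame({"x": range(5)})
    assert set_n_jobs(16, _df) == 5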
|
e081f0f2ee6ceeac7587cb362c62ffef0a114a56
| 3,640,499
|