content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def EvalCode(code_str, parse_ctx, comp_lookup=None, mem=None, aliases=None):
    """
    Unit tests can evaluate code strings and then use the resulting
    CommandEvaluator.

    Args:
      code_str: shell source code to parse and execute.
      parse_ctx: parse context supplying the arena and MakeOshParser().
      comp_lookup: optional completion.Lookup; a fresh one is made if None.
      mem: optional state.Mem; a fresh one is made if None.
      aliases: optional alias mapping forwarded to InitCommandEvaluator.

    Returns:
      The CommandEvaluator after the code has been parsed and executed.
    """
    arena = parse_ctx.arena
    # Fall back to fresh instances so each test gets isolated state.
    comp_lookup = comp_lookup or completion.Lookup()
    mem = mem or state.Mem('', [], arena, [])
    parse_opts, exec_opts, mutable_opts = state.MakeOpts(mem, None)
    mem.exec_opts = exec_opts
    state.InitMem(mem, {}, '0.1')
    mutable_opts.Init()
    line_reader, _ = InitLexer(code_str, arena)
    c_parser = parse_ctx.MakeOshParser(line_reader)
    cmd_ev = InitCommandEvaluator(parse_ctx=parse_ctx, comp_lookup=comp_lookup,
                                  arena=arena, mem=mem, aliases=aliases)
    main_loop.Batch(cmd_ev, c_parser, arena)  # Parse and execute!
    return cmd_ev
import csv
def read_forces(fname):
    """Read a .forces file (Cortex MAC force-plate export).

    The ASCII .forces file contains calibrated force plate data in Newtons
    and Newton-meters; each data line corresponds to one analog sample.

    Parameters
    ----------
    fname : string
        full file name of the .forces file to be opened

    Returns
    -------
    h : Python dictionary
        .forces header information
        keys: name, nforceplates, data_rate, nsamples, ch_names
    data : pandas dataframe
        force plate data with shape (nsamples, 7*nforceplates)
    """
    with open(file=fname, mode='rt', encoding='utf-8', newline='') as f:
        # The first five tab-delimited lines form the header.
        reader = csv.reader(f, delimiter='\t')
        head_rows = [next(reader) for _ in range(5)]
        h = {
            'name': head_rows[0][0],
            'nforceplates': int(head_rows[1][0].split('=')[1]),
            'data_rate': float(head_rows[2][0].split('=')[1]),
            'nsamples': int(head_rows[3][0].split('=')[1]),
            'ch_names': head_rows[4][1:],
        }
        # The rest of the handle is the sample data; skip the leading
        # sample-number column and keep 7 channels per force plate.
        data = pd.read_csv(f, sep='\t', names=h['ch_names'], index_col=False,
                           usecols=np.arange(1, 1 + 7 * h['nforceplates']),
                           engine='c')
    return h, data
def mpw_plot_points(slope, years, values):
    """
    Compute the two endpoints of the line describing the MPW best fit.

    :param float slope: line slope
    :param float array years: x-coordinates
    :param float array values: y-coordinates
    :returns: [[x1,x2], [y1,y2]]
    """
    # Masked means so masked/missing entries are ignored.
    x_mean = np.ma.mean(years)
    y_mean = np.ma.mean(values)
    xs = [years[0], years[-1]]
    ys = [slope * (x - x_mean) + y_mean for x in xs]
    return xs, ys
def get_target(module, array):
    """Return Pod or None"""
    try:
        pod = array.get_pod(module.params['target'])
    except Exception:
        # Any lookup/API failure is treated as "target does not exist".
        pod = None
    return pod
def int_kinenergy(basis):
    """Calculates the kinenergy matrix for a given basis.

    Parameters
    ----------
    basis : BasisSet

    Returns
    -------
    kinenergy : np.ndarray
        Kinetic energy matrix with shape M x M, where `M = len(basis)`.
    """
    n_fns = len(basis)
    kin = np.zeros((n_fns, n_fns))
    # The matrix is symmetric, so only the diagonal and upper triangle are
    # computed explicitly; the lower triangle is mirrored.
    for i, fn_a in enumerate(basis):
        for j, fn_b in enumerate(basis[i:], i):
            element = 0
            # Sum the primitive-pair contributions for this basis pair.
            for prim_a in fn_a:
                for prim_b in fn_b:
                    element += (prim_a.d * prim_b.d *
                                prim_a.N * prim_b.N *
                                kinenergy_primitive(prim_a.a, prim_b.a,
                                                    prim_a.alpha, prim_b.alpha,
                                                    prim_a.A, prim_b.A))
            kin[i, j] = element
            kin[j, i] = element
    return kin
from datetime import datetime
import requests
def test_commit_in_last_year_yes(monkeypatch):
    """
    11. Has there been a commit in the last year?
    """
    headers = {}

    def fake_get(*args, **kwargs):
        return MockResponseCommitsYes()

    last_commit = datetime.strptime(GOOD_DATE_Z, "%Y-%m-%dT%H:%M:%SZ")
    age_days = (datetime.now() - last_commit).days
    # Route requests.get to the canned "yes" response.
    monkeypatch.setattr(requests, "get", fake_get)
    actual = commit_in_last_year("https://fakeurl", headers=headers)
    expected = (
        f"[green]Yes. The last commit was on "
        f"{datetime.strftime(last_commit, '%m-%d-%Y')} which was {age_days} days ago"
    )
    assert actual == expected
def all_columns_functional(use_cache=True, update_cache=True):
    """Return all functional categories identified in the data set."""
    return get_columns(
        'pathabundance_columns.pickle',
        'pathabundance_relab',
        use_cache,
        update_cache,
    )
def json_replace(json_obj, **values):
"""
Search for elements of `{"{{REPLACE_PARAM}}": "some_key"}` and replace
with the result of `values["some_key"]`.
"""
if type(json_obj) is list:
return [json_replace(x, **values) for x in json_obj]
elif type(json_obj) is dict:
new = {}
for key, value in json_obj.items():
if type(value) is dict and list(value) == ["{{REPLACE_PARAM}}"]:
param_name = value["{{REPLACE_PARAM}}"]
new[key] = values[param_name]
else:
new[key] = json_replace(value, **values)
return new
else:
return json_obj | f6a8b44b5dd10d37140445b9dc8ebd71107df0a2 | 3,629,407 |
def slit_scan_area_comp(slits, yag, x_width=1.0, y_width=1.0, samples=1):
    """Find the ratio of real space/pixel in the PIM.

    1. Send slits to specified position
    2. Measure pixel dimensions of passed light.
    The width/height values are pulled from the PIMPulnixDetector instance.

    Should diffraction issues (as observed with the test laser) persist
    when using the x-ray laser, moving the gap a small distance and observing
    the position change of the passed light would be an alternative
    calibration method. Only investigate if issues persist in x-ray.

    Parameters
    ----------
    slits : pcdsdevices.slits.Slits
        Ophyd slits object from pcdsdevices.slits.Slits
    yag : pcdsdevices.sim.pim.PIM (subject to change?)
        Ophyd object that exposes the measured width/height fields
    x_width : int
        Target x width of the gap in the slits. Units: mm
    y_width : int
        Target y width of the gap in the slits. Units: mm
    samples : int
        number of samples to use and average over when measuring width, height

    Returns
    -------
    (float, float)
        tuple of x and y scaling respectively. Units mm/pixels

    Raises
    ------
    ValueError
        if either measured width is zero or negative (no usable image).
    """
    # set slits to specified gap size
    yield from abs_set(slits, x=x_width, y=y_width, wait=True)
    # read profile dimensions from image (width plugin pending)
    yag_measurements = yield from measure_average(
        [yag],
        num=samples
    )
    # extract measurements of interest from returned dict
    yag_measured_x_width = yag_measurements[field_prepend('xwidth', yag)]
    yag_measured_y_width = yag_measurements[field_prepend('ywidth', yag)]
    logger.debug("Measured x width: {}".format(yag_measured_x_width))
    logger.debug("Measured y width: {}".format(yag_measured_y_width))
    # err if image not received or image has 0 width, height.
    # (Fixed: the original had unreachable assignments to an undefined
    # name `nan` after this raise, and a missing space in the message.)
    if yag_measured_x_width <= 0 or yag_measured_y_width <= 0:
        raise ValueError("A measurement less than or equal to zero has been "
                         "measured. Unable to calibrate")
    # data format: Real space / pixel
    x_scaling = x_width / yag_measured_x_width
    y_scaling = y_width / yag_measured_y_width
    return x_scaling, y_scaling
def diff(f, *x, allowed_nonsmoothness="discontinuous"):
    """
    A differentiator which computes :math:`\\partial f / \\partial x` and understands
    :class:`Field`\\ s. If ``x`` is one of ``t``, ``x``, ``y``, or ``z`` and ``f``
    is a :class:`DynamicField`, the corresponding derivative :class:`Field` is
    returned.

    Examples:

    .. doctest::

        >>> f = ps.DynamicField("f")
        >>> print(ps.diff(f**3, f))
        3*f**2
        >>> print(ps.diff(f**3, f, f))
        3*2*f
        >>> print(ps.diff(f**3, "t"))
        3*f**2*dfdt
        >>> print(ps.diff(f**3, f, "t"))
        3*2*f*dfdt
        >>> print(ps.diff(f + 2, "x"))
        dfdx[0]

    :arg f: A :mod:`pymbolic` expression to be differentiated.
    :arg x: A :class:`pymbolic.primitives.Expression` or a string to be parsed
        (or multiple thereof). If multiple positional arguments are provided,
        derivatives are taken with respect to each in order.
        (See the examples above.)
    :arg allowed_nonsmoothness: forwarded to
        :class:`FieldDifferentiationMapper` for every differentiation step.
    """
    if len(x) > 1:
        # FIX: the original recursion dropped ``allowed_nonsmoothness`` in
        # both recursive calls, so multi-variable derivatives always used
        # the default. Propagate it explicitly.
        inner = diff(f, x[0], allowed_nonsmoothness=allowed_nonsmoothness)
        return diff(inner, *x[1:], allowed_nonsmoothness=allowed_nonsmoothness)
    else:
        differentiator = FieldDifferentiationMapper(
            pp.make_variable(x[0]), allowed_nonsmoothness=allowed_nonsmoothness
        )
        return differentiator(f)
def append_store_prices(ticker_list, loc, start='01/01/1990'):
    """
    Given an existing store located at ``loc``, check to make sure
    the tickers in ``ticker_list`` are not already in the data
    set, and then insert the tickers into the store.

    :ARGS:

        ticker_list: :class:`list` of tickers to add to the
        :class:`pandas.HDStore`

        loc: :class:`string` of the path to the
        :class:`pandas.HDStore`

        start: :class:`string` of the date to begin the price data

    :RETURNS:

        :class:`NoneType` but appends the store and comments the
        successes ands failures
    """
    try:
        store = pandas.HDFStore(path=loc, mode='a')
    except IOError:
        # Invalid path: report and bail out without raising.
        # (Fixed: the original used a Python 2 print statement, which is a
        # SyntaxError under Python 3.)
        print(loc + " is not a valid path to an HDFStore Object")
        return
    store_keys = [key.strip('/') for key in store.keys()]
    not_in_store = numpy.setdiff1d(ticker_list, store_keys)
    new_prices = tickers_to_dict(not_in_store, start=start)
    # Fixed: a bare map() is lazy in Python 3, so the store.put side
    # effects would never run; use an explicit loop instead.
    for ticker in not_in_store:
        store.put(ticker, new_prices[ticker])
    store.close()
    return None
from typing import List
def trace_for_kbps(kbps: int) -> List[int]:
    """ Returns Mahimahi trace lines whose average kbps approximates the passed in value """
    # Express the target rate as packets-per-millisecond. Fraction reduces
    # the ratio, and limiting the denominator caps how long the trace gets.
    ratio = Fraction(numerator=kbps, denominator=PACKET_SIZE).limit_denominator(MAX_TRACE_MS)
    # numerator: total packets (trace lines); denominator: trace duration in
    # ms. Mahimahi traces wrap around, so packets/ms is the effective rate.
    total_packets = ratio.numerator
    trace_ms = ratio.denominator
    # A single packet: send one packet every `trace_ms` milliseconds.
    if total_packets == 1:
        return [trace_ms]
    base, extra = divmod(total_packets, trace_ms)
    lines = []
    for offset in range(1, trace_ms + 1):
        # Every offset gets `base` packets; the `extra` remainder packets go
        # to the last `extra` offsets of the trace.
        count = base + (1 if (trace_ms - offset) < extra else 0)
        lines.extend([offset] * count)
    return lines
def barotropic_input_qref_to_compute_lwa(ylat, qref, vort, area, dmu, planet_radius=6.378e+6):  # used to be Eqlat_LWA
    """
    Compute LWA based on a *prescribed* Qref instead of a Qref obtained from
    the vorticity field on a barotropic sphere.

    Parameters
    ----------
    ylat : sequence or array_like
        1-d numpy array of latitude (in degree) with equal spacing in ascending order; dimension = nlat.
    qref : sequence or array_like
        1-d numpy array of prescribed reference value of vorticity at each latitude; dimension = nlat.
    vort : ndarray
        2-d numpy array of vorticity values; dimension = (nlat, nlon).
    area : ndarray
        2-d numpy array specifying differential areal element of each grid point; dimension = (nlat, nlon).
    dmu: sequence or array_like
        1-d numpy array of latitudinal differential length element (e.g. dmu = cos(lat) d(lat)). Size = nlat.
    planet_radius : float, default 6.378e+6
        radius of spherical planet of interest consistent with input 'area'.

    Returns
    -------
    lwa_result : ndarray
        2-d numpy array of local wave activity values; dimension = [nlat_s x nlon]
    """
    # NOTE(review): ylat, area and planet_radius are not used in this body;
    # they appear to be kept for interface parity — confirm against callers.
    nlat = vort.shape[0]
    nlon = vort.shape[1]
    return lwa(nlon, nlat, vort, qref, dmu)
def merge_models(old, new):
    """Merge the rows of ``new`` into ``old``, skipping rows whose CRC
    already exists in ``old``; returns the merged model.

    Fixes over the original:
      * ``xrange`` is a NameError on Python 3 — use ``range``.
      * the duplicate check had no ``for/else``, so every row was appended
        regardless of a CRC match.
      * ``takeRow(row)`` with precomputed indices shifts the remaining rows,
        invalidating later indices — walk the rows bottom-up instead.
    """
    if old.rowCount() == 0:
        return new
    if new.rowCount() == 0:
        return old
    existing_crcs = {old.item(row, COLUMN_CRC).text()
                     for row in range(old.rowCount())}
    # Take non-duplicate rows from the bottom up so takeRow() never
    # invalidates an index we have yet to visit.
    taken = []
    for row in range(new.rowCount() - 1, -1, -1):
        if new.item(row, COLUMN_CRC).text() not in existing_crcs:
            taken.append(new.takeRow(row))
    # Re-append in the original top-to-bottom order.
    for items in reversed(taken):
        old.appendRow(items)
    return old
import torch
def get_operator_norm(*ops):
    """ Computes the l2-operator norm of a product of linear operators.

    The product is taken as ops[0] @ ops[1] @ ... @ ops[-1]; every operator
    must expose a dense matrix via ``get_matrix()``.
    """
    if not all(hasattr(op, "get_matrix") for op in ops):
        raise ValueError(
            "Could not compute operator norm. At least one of "
            "the provided operators does not implement a matrix "
            "representation, which is required."
        )
    # Multiply right-to-left so the product matches application order.
    product = ops[-1].get_matrix()
    for op in reversed(ops[:-1]):
        product = torch.matmul(op.get_matrix(), product)
    return np.linalg.norm(product.cpu().numpy(), 2)
def fileexistspolicy(
    overwrite_if_exists: bool, skip_if_file_exists: bool
) -> FileExistsPolicy:
    """Return the policy for overwriting existing files."""
    # Guard clauses instead of a chained conditional expression; note that
    # skip_if_file_exists is only consulted when overwriting is allowed.
    if not overwrite_if_exists:
        return FileExistsPolicy.RAISE
    if skip_if_file_exists:
        return FileExistsPolicy.SKIP
    return FileExistsPolicy.OVERWRITE
import requests
def createDummy(name, type):
    """
    Create a dummy entry in the SSRQ Person/Place db.

    :param name: label for the new entry
    :param type: one of "person", "organization" or "place"
        (NOTE: the parameter shadows the ``type`` builtin; kept for
        backward compatibility with keyword callers.)
    :return link_id (string): the new id, "" on failure, or a short
        error message for rate-limiting / auth problems
    """
    try:
        print("Requesting dummy id for " + name)
        if type == "person":
            r = requests.get("https://www.ssrq-sds-fds.ch/persons-db-api/?create_per={}".format(name))
        elif type == "organization":
            r = requests.get("https://www.ssrq-sds-fds.ch/persons-db-api/?create_org={}".format(name))
        elif type == "place":
            r = requests.get("https://www.ssrq-sds-fds.ch/places-db-edit/edit/create-place.xq?name={}".format(name),
                             auth=("linking-tool", PLACE_DB_PW))
        else:
            print("{} type not recognized.".format(type))
            return ""
    except requests.exceptions.RequestException:
        # Narrowed from a bare except: only network/HTTP errors mean the
        # request could not be made; programming errors now propagate.
        print("No Internet connection. Can't create dummy id.")
        return ""
    if r.status_code == requests.codes.ok:
        if r.text == "Request limit reached for this hour":
            return "Too many requests."
        elif r.text == "not logged in as linking-tool":
            return "Not logged in."
        elif type == "place":
            link_id = r.text.strip('"')
        else:
            link_id = r.json()["ID"]
    else:
        print(r.status_code)
        print(r.headers)
        print("Failed to create dummy id for " + name)
        link_id = ""
    return link_id
def generate(fin, fout):
    """
    Search first author last name, year, and first meaningful word in title.
    Assemble Google-Scholar-style keys (lastname+year+titlefirstword).
    Replace original keys ``RN+number" with new keys.
    Also adds letter suffixes to repeated keys
    (e.g. peter1998researcha, peter1998researchb).

    Args:
        fin: path of the input .bib file.
        fout: path of the output .bib file to write.

    Returns:
        (keyList, len(keyList), count) — the generated keys, how many there
        are, and how many '@' entries the input contains. The output file is
        only written when the two counts match; otherwise nothing is written.
    """
    keyList = []
    # Stop words skipped when picking the first "meaningful" title word.
    meaninglesswordList = ['a', 'an', 'the', 'on', 'in', 'upon', 'before', 'after',
                           'with', 'by', 'for', 'at', 'about', 'under', 'of', 'to', 'from',
                           'is', 'are', 'am', 'why', 'what', 'where', 'when', 'who', 'how',
                           'because', 'as', 'since', 'between', 'beyond', 'near', 'off', 'over',
                           'through', 'toward', 'towards', 'per', 'past', 'without'] ## should be enough..
    symbolList = list(punctuation)
    # Pass 1: scan the file and build one key per '@' entry.
    with open(fin, 'r') as f:
        lastname, title, year = '', '', ''
        done = False
        for line in f:
            if '@' in line:
                # A new entry starts: reset the collected fields.
                lastname, title, year = '', '', ''
                done = False
            if line.replace(' ', '').startswith('author='):
                lastname = line.split('=')[1]
                # Strip punctuation, then keep the first word lowercased.
                for symbol in symbolList:
                    lastname = lastname.replace(symbol, ' ')
                lastname = lastname.split()[0].lower()
            if line.replace(' ', '').startswith('title='):
                title = line.split('=')[1].replace('{', '').replace('}', '')
                # Dimension prefixes like "1-D" are kept (dashes dropped);
                # otherwise take the first non-stop-word of the title.
                if title.split()[0] in ['1-D', '2-D', '3-D']:
                    title = title.split()[0].replace('-', '').lower()
                else:
                    for symbol in symbolList:
                        title = title.replace(symbol, ' ')
                    for title in title.split():
                        title = title.lower()
                        if title in meaninglesswordList:
                            continue
                        else:
                            break
            if line.replace(' ', '').startswith('year='):
                year = line.split('{')[1].split('}')[0]
            if len(lastname) != 0 and len(title) != 0 and len(year) != 0 and done == False:
                # All three pieces found for this entry: emit the key once.
                key = lastname + year + title
                keyList.append(key)
                done = True
    # Disambiguate duplicate keys by appending a, b, c, ...
    repeatedkeyList = []
    for key in keyList:
        if keyList.count(key) > 1:
            repeatedkeyList.append(key)
    repeatedkeyList = list(set(repeatedkeyList))
    for key in repeatedkeyList:
        for letter in ascii_lowercase:
            # Replace one occurrence per letter via a space-joined string.
            # NOTE(review): the last list element has no trailing space, so a
            # duplicate in the final position may be missed — verify.
            _list2str = ' '.join(keyList)
            _list2str = _list2str.replace(key+' ', key+letter+' ', 1)
            keyList = _list2str.split(' ')
    with open(fin, 'r') as f:
        count = '\n'.join(f.readlines()).count('@')
    if len(keyList) == count:
        # Pass 2: rewrite the file, swapping in the new keys and applying
        # assorted BibTeX clean-ups line by line.
        with open(fout, 'w') as fo:
            with open(fin, 'r') as fi:
                i = 0
                for line in fi:
                    if '@' in line: ## assemble keys
                        line = line.replace(line.split('{')[1].split(',')[0], keyList[i])
                        i += 1
                    if line.replace(' ', '').startswith('title=') or line.replace(' ', '').startswith('journal='):
                        # NOTE(review): '\&' is an invalid escape sequence
                        # (DeprecationWarning); a raw string would be cleaner.
                        line = line.replace('&', '\&') ## make '&' symbol visible in BibTex
                        line = line.replace('{', '{{') ## lock title and journal name to avoid...
                        line = line.replace('}', '}}') ## ...automatic UPPER to lower case change
                    if line.replace(' ', '').startswith('journal='):
                        # Restore common acronym capitalization in journal names.
                        if 'Ieee' in line:
                            line = line.replace('Ieee', 'IEEE')
                        if 'Asce' in line:
                            line = line.replace('Asce', 'ASCE')
                        if 'Asme' in line:
                            line = line.replace('Asme', 'ASME')
                    if line.replace(' ', '').startswith('university='): ## it seems only 'school' works for...
                        line = line.replace('university', 'school', 1) ## ...phdthesis type; 'university' not working
                    if line.replace(' ', '').startswith('DOI='): ## fix common issues in DOI
                        # Drop EndNote artifacts trailing the DOI value.
                        for useless in ['Artn','ARTN','Unsp','UNSP', 'Pii', 'PII']:
                            if useless in line:
                                line = line.split(useless)[0]
                        if line.replace(' ', '').startswith('DOI={Doi'):
                            line = line.replace('Doi ','')
                        if line.replace(' ', '').startswith('DOI={DOI'):
                            # Reverse trick removes the LAST 'DOI ' occurrence.
                            line = line[::-1].replace(' IOD','',1)[::-1]
                        if line.replace(' ', '').startswith('DOI={Book_Doi'):
                            line = line.replace('Book_Doi ','')
                    if line.split(' ')[0] in ['Artn','ARTN','Unsp','UNSP', 'Pii', 'PII']:
                        # Whole line is an artifact continuation: drop it.
                        continue
                    if line.startswith('Doi 10'):
                        line = line.replace('Doi ','')
                    if line.startswith('DOI 10'):
                        line = line.replace('DOI ','')
                    if line.replace(' ', '').startswith('url=') or line.replace(' ', '').startswith('http'):
                        continue ## remove long url if [Find Full Text]ed in EndNote
                    fo.write(line)
    return keyList, len(keyList), count
import torch
from typing import Optional
from typing import Union
def fbeta_score(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    beta: float = 1.0,
    eps: float = 1e-7,
    argmax_dim: int = -1,
    num_classes: Optional[int] = None,
) -> Union[float, torch.Tensor]:
    """
    Counts fbeta score for given ``outputs`` and ``targets``.

    Args:
        outputs: A list of predicted elements
        targets: A list of elements that are to be predicted
        beta: beta param for f_score
        eps: epsilon to avoid zero division
        argmax_dim: int, that specifies dimension for argmax transformation
            in case of scores/probabilities in ``outputs``
        num_classes: int, that specifies number of classes if it known

    Raises:
        ValueError: If ``beta`` is a negative number.

    Returns:
        float: F_beta score.
    """
    if beta < 0:
        # ValueError is the idiomatic type for a bad argument value; it is a
        # subclass of Exception, so existing broad handlers still work.
        raise ValueError("beta parameter should be non-negative")
    _p, _r, fbeta, _ = precision_recall_fbeta_support(
        outputs=outputs,
        targets=targets,
        beta=beta,
        eps=eps,
        argmax_dim=argmax_dim,
        num_classes=num_classes,
    )
    return fbeta
def gc_blocks(seq, block_size):
    """Divide a sequence into non-overlapping full blocks and compute the
    GC content of each block; any trailing partial block is ignored."""
    n_full = len(seq) // block_size
    return tuple(
        gc_content(seq[k * block_size:(k + 1) * block_size])
        for k in range(n_full)
    )
def is_android(filename):
    """
    Heuristically check whether ``filename`` is an apk file.

    Only the first 4096 bytes are scanned for the literal
    ``AndroidManifest.xml`` entry name.
    """
    with open(filename, "rb") as handle:
        head = handle.read(4096)
    return b"AndroidManifest.xml" in head
def enrich(alert, rules):
    """Determine if an alert meets an enrichment rule.

    :param alert: The alert to test
    :param rules: An array of enrichment rules to test against; each rule is
        called with the alert and may return a mapping of fields to set
    :returns: Alert - The enriched Alert object (mutated in place)
    """
    for rule in rules:
        extra_fields = rule(alert)
        # Falsy results (None / empty mapping) mean the rule did not match.
        if extra_fields:
            for field, field_value in extra_fields.items():
                alert[field] = field_value
    return alert
def max_ea():
    """
    Return the highest mapped address of the IDB.

    Thin wrapper on :meth:`BipIdb.max_ea`; provided as a module-level
    convenience function.
    """
    return BipIdb.max_ea()
from ..architectures import create_unet_model_3d
from ..utilities import get_pretrained_network
from ..utilities import get_antsxnet_data
from ..utilities import preprocess_brain_image
def desikan_killiany_tourville_labeling(t1,
                                        do_preprocessing=True,
                                        antsxnet_cache_directory=None,
                                        verbose=False):
    """
    Cortical and deep gray matter labeling using Desikan-Killiany-Tourville

    Perform DKT labeling using deep learning

    The labeling is as follows:

    Inner labels:
    Label 0: background
    Label 4: left lateral ventricle
    Label 5: left inferior lateral ventricle
    Label 6: left cerebellem exterior
    Label 7: left cerebellum white matter
    Label 10: left thalamus proper
    Label 11: left caudate
    Label 12: left putamen
    Label 13: left pallidium
    Label 15: 4th ventricle
    Label 16: brain stem
    Label 17: left hippocampus
    Label 18: left amygdala
    Label 24: CSF
    Label 25: left lesion
    Label 26: left accumbens area
    Label 28: left ventral DC
    Label 30: left vessel
    Label 43: right lateral ventricle
    Label 44: right inferior lateral ventricle
    Label 45: right cerebellum exterior
    Label 46: right cerebellum white matter
    Label 49: right thalamus proper
    Label 50: right caudate
    Label 51: right putamen
    Label 52: right palladium
    Label 53: right hippocampus
    Label 54: right amygdala
    Label 57: right lesion
    Label 58: right accumbens area
    Label 60: right ventral DC
    Label 62: right vessel
    Label 72: 5th ventricle
    Label 85: optic chasm
    Label 91: left basal forebrain
    Label 92: right basal forebrain
    Label 630: cerebellar vermal lobules I-V
    Label 631: cerebellar vermal lobules VI-VII
    Label 632: cerebellar vermal lobules VIII-X

    Outer labels:
    Label 1002: left caudal anterior cingulate
    Label 1003: left caudal middle frontal
    Label 1005: left cuneus
    Label 1006: left entorhinal
    Label 1007: left fusiform
    Label 1008: left inferior parietal
    Label 1009: left inferior temporal
    Label 1010: left isthmus cingulate
    Label 1011: left lateral occipital
    Label 1012: left lateral orbitofrontal
    Label 1013: left lingual
    Label 1014: left medial orbitofrontal
    Label 1015: left middle temporal
    Label 1016: left parahippocampal
    Label 1017: left paracentral
    Label 1018: left pars opercularis
    Label 1019: left pars orbitalis
    Label 1020: left pars triangularis
    Label 1021: left pericalcarine
    Label 1022: left postcentral
    Label 1023: left posterior cingulate
    Label 1024: left precentral
    Label 1025: left precuneus
    Label 1026: left rostral anterior cingulate
    Label 1027: left rostral middle frontal
    Label 1028: left superior frontal
    Label 1029: left superior parietal
    Label 1030: left superior temporal
    Label 1031: left supramarginal
    Label 1034: left transverse temporal
    Label 1035: left insula
    Label 2002: right caudal anterior cingulate
    Label 2003: right caudal middle frontal
    Label 2005: right cuneus
    Label 2006: right entorhinal
    Label 2007: right fusiform
    Label 2008: right inferior parietal
    Label 2009: right inferior temporal
    Label 2010: right isthmus cingulate
    Label 2011: right lateral occipital
    Label 2012: right lateral orbitofrontal
    Label 2013: right lingual
    Label 2014: right medial orbitofrontal
    Label 2015: right middle temporal
    Label 2016: right parahippocampal
    Label 2017: right paracentral
    Label 2018: right pars opercularis
    Label 2019: right pars orbitalis
    Label 2020: right pars triangularis
    Label 2021: right pericalcarine
    Label 2022: right postcentral
    Label 2023: right posterior cingulate
    Label 2024: right precentral
    Label 2025: right precuneus
    Label 2026: right rostral anterior cingulate
    Label 2027: right rostral middle frontal
    Label 2028: right superior frontal
    Label 2029: right superior parietal
    Label 2030: right superior temporal
    Label 2031: right supramarginal
    Label 2034: right transverse temporal
    Label 2035: right insula

    Preprocessing on the training data consisted of:
       * n4 bias correction,
       * denoising,
       * brain extraction, and
       * affine registration to MNI.
    The input T1 should undergo the same steps.  If the input T1 is the raw
    T1, these steps can be performed by the internal preprocessing, i.e. set
    do_preprocessing = True

    Arguments
    ---------
    t1 : ANTsImage
        raw or preprocessed 3-D T1-weighted brain image.

    do_preprocessing : boolean
        See description above.

    antsxnet_cache_directory : string
        Destination directory for storing the downloaded template and model weights.
        Since these can be resused, if is None, these data will be downloaded to a
        ~/.keras/ANTsXNet/.

    verbose : boolean
        Print progress to the screen.

    Returns
    -------
    List consisting of the segmentation image and probability images for
    each label.

    Example
    -------
    >>> image = ants.image_read("t1.nii.gz")
    >>> flash = desikan_killiany_tourville_labeling(image)
    """

    if t1.dimension != 3:
        raise ValueError( "Image dimension must be 3." )

    # The cache directory name is passed through to the download helpers.
    if antsxnet_cache_directory == None:
        antsxnet_cache_directory = "ANTsXNet"

    ################################
    #
    # Preprocess images
    #
    ################################

    t1_preprocessed = t1
    if do_preprocessing == True:
        t1_preprocessed = t1
        t1_preprocessing = preprocess_brain_image(t1,
            truncate_intensity=(0.01, 0.99),
            do_brain_extraction=True,
            template="croppedMni152",
            template_transform_type="AffineFast",
            do_bias_correction=True,
            do_denoising=True,
            antsxnet_cache_directory=antsxnet_cache_directory,
            verbose=verbose)
        # Mask out non-brain voxels from the bias-corrected/denoised image.
        t1_preprocessed = t1_preprocessing["preprocessed_image"] * t1_preprocessing['brain_mask']

    ################################
    #
    # Download spatial priors for outer model
    #
    ################################

    spatial_priors_file_name_path = get_antsxnet_data("priorDktLabels",
        antsxnet_cache_directory=antsxnet_cache_directory)
    spatial_priors = ants.image_read(spatial_priors_file_name_path)
    priors_image_list = ants.ndimage_to_list(spatial_priors)

    ################################
    #
    # Build outer model and load weights
    #
    ################################

    template_size = (96, 112, 96)
    # Outer (cortical) label set: background + left/right DKT cortical labels.
    labels = (0, 1002, 1003, *tuple(range(1005, 1032)), 1034, 1035,
                 2002, 2003, *tuple(range(2005, 2032)), 2034, 2035)
    # One channel for the image plus one channel per spatial prior.
    channel_size = 1 + len(priors_image_list)

    unet_model = create_unet_model_3d((*template_size, channel_size),
        number_of_outputs = len(labels),
        number_of_layers = 4, number_of_filters_at_base_layer = 16, dropout_rate = 0.0,
        convolution_kernel_size = (3, 3, 3), deconvolution_kernel_size = (2, 2, 2),
        weight_decay = 1e-5, add_attention_gating=True)

    weights_file_name = None
    weights_file_name = get_pretrained_network("dktOuterWithSpatialPriors", antsxnet_cache_directory)
    unet_model.load_weights(weights_file_name)

    ################################
    #
    # Do prediction and normalize to native space
    #
    ################################

    if verbose == True:
        print("Outer model Prediction.")

    downsampled_image = ants.resample_image(t1_preprocessed, template_size, use_voxels=True, interp_type=0)
    image_array = downsampled_image.numpy()
    # Standardize intensities before feeding the network.
    image_array = (image_array - image_array.mean()) / image_array.std()

    batchX = np.zeros((1, *template_size, channel_size))
    batchX[0,:,:,:,0] = image_array
    for i in range(len(priors_image_list)):
        resampled_prior_image = ants.resample_image(priors_image_list[i], template_size, use_voxels=True, interp_type=0)
        batchX[0,:,:,:,i+1] = resampled_prior_image.numpy()

    predicted_data = unet_model.predict(batchX, verbose=verbose)

    origin = downsampled_image.origin
    spacing = downsampled_image.spacing
    direction = downsampled_image.direction

    probability_images = list()
    for i in range(len(labels)):
        probability_image = \
            ants.from_numpy(np.squeeze(predicted_data[0, :, :, :, i]),
            origin=origin, spacing=spacing, direction=direction)
        resampled_image = ants.resample_image( probability_image, t1_preprocessed.shape, use_voxels=True, interp_type=0)
        if do_preprocessing == True:
            # Map the probabilities back to native space via the inverse
            # affine from preprocessing.
            probability_images.append(ants.apply_transforms(fixed=t1,
                moving=resampled_image,
                transformlist=t1_preprocessing['template_transforms']['invtransforms'],
                whichtoinvert=[True], interpolator="linear", verbose=verbose))
        else:
            probability_images.append(resampled_image)

    image_matrix = ants.image_list_to_matrix(probability_images, t1 * 0 + 1)
    segmentation_matrix = np.argmax(image_matrix, axis=0)
    segmentation_image = ants.matrix_to_images(
        np.expand_dims(segmentation_matrix, axis=0), t1 * 0 + 1)[0]

    dkt_label_image = ants.image_clone(segmentation_image)
    # Convert argmax indices to the actual DKT label numbers.
    for i in range(len(labels)):
        dkt_label_image[segmentation_image==i] = labels[i]

    ################################
    #
    # Build inner model and load weights
    #
    ################################

    template_size = (160, 192, 160)
    # NOTE(review): this inner label tuple disagrees with the docstring list
    # above (e.g. 14 appears here; 5, 25, 57, 62, 72, 85 do not) — confirm
    # against the trained "dktInner" model's output classes.
    labels = (0, 4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 26,
              28, 30, 43, 44, 45, 46, 49, 50, 51, 52, 53, 54, 58, 60, 91, 92, 630, 631, 632)

    unet_model = create_unet_model_3d((*template_size, 1),
        number_of_outputs = len(labels),
        number_of_layers = 4, number_of_filters_at_base_layer = 8, dropout_rate = 0.0,
        convolution_kernel_size = (3, 3, 3), deconvolution_kernel_size = (2, 2, 2),
        weight_decay = 1e-5, add_attention_gating=True)

    weights_file_name = get_pretrained_network("dktInner", antsxnet_cache_directory=antsxnet_cache_directory)
    unet_model.load_weights(weights_file_name)

    ################################
    #
    # Do prediction and normalize to native space
    #
    ################################

    if verbose == True:
        print("Prediction.")

    # Crop to the region covered by the inner model's template.
    cropped_image = ants.crop_indices(t1_preprocessed, (12, 14, 0), (172, 206, 160))

    batchX = np.expand_dims(cropped_image.numpy(), axis=0)
    batchX = np.expand_dims(batchX, axis=-1)
    batchX = (batchX - batchX.mean()) / batchX.std()

    predicted_data = unet_model.predict(batchX, verbose=verbose)

    origin = cropped_image.origin
    spacing = cropped_image.spacing
    direction = cropped_image.direction

    probability_images = list()
    for i in range(len(labels)):
        probability_image = \
            ants.from_numpy(np.squeeze(predicted_data[0, :, :, :, i]),
            origin=origin, spacing=spacing, direction=direction)
        # Background (i == 0) is padded with ones so the argmax outside the
        # crop stays background.
        if i > 0:
            decropped_image = ants.decrop_image(probability_image, t1_preprocessed * 0)
        else:
            decropped_image = ants.decrop_image(probability_image, t1_preprocessed * 0 + 1)
        if do_preprocessing == True:
            probability_images.append(ants.apply_transforms(fixed=t1,
                moving=decropped_image,
                transformlist=t1_preprocessing['template_transforms']['invtransforms'],
                whichtoinvert=[True], interpolator="linear", verbose=verbose))
        else:
            probability_images.append(decropped_image)

    image_matrix = ants.image_list_to_matrix(probability_images, t1 * 0 + 1)
    segmentation_matrix = np.argmax(image_matrix, axis=0)
    segmentation_image = ants.matrix_to_images(
        np.expand_dims(segmentation_matrix, axis=0), t1 * 0 + 1)[0]

    ################################
    #
    # Incorporate the inner model results into the final label image.
    # Note that we purposely prioritize the inner label results.
    #
    ################################

    for i in range(len(labels)):
        if labels[i] > 0:
            dkt_label_image[segmentation_image==i] = labels[i]

    return(dkt_label_image)
def empty(document, selection, selectmode=''):
    """Reduce the selection to a single uppermost empty interval."""
    start = selection[0][0]
    # A zero-length interval at the start of the first selected interval.
    return Selection(Interval(start, start))
def re(rm, rf, beta):
    """Return the cost of equity via the CAPM: rf + beta * (rm - rf)."""
    market_risk_premium = rm - rf
    return rf + beta * market_risk_premium
def find_path(matrix, start_x, start_y, end_x, end_y, tile_size, map_height):
    """
    Creates a path from a matrix of barriers, a start position,
    a desired end position, the size of tiles used in a map,
    and the map's height (in tiles).

    Returns a list of (x, y) pixel-coordinate waypoints; an empty list is
    returned when the endpoints fall outside the grid.
    """
    # TODO: Get map height from matrix instead of it being a parameter
    grid = Grid(matrix=matrix)
    # NOTE(review): clamp(v, 0, v) only enforces the lower bound 0;
    # presumably the upper bound should be the map extent -- confirm.
    start_x = clamp(start_x, 0, start_x)
    start_y = clamp(start_y, 0, start_y)
    end_x = clamp(end_x, 0, end_x)
    end_y = clamp(end_y, 0, end_y)
    # The grid's y axis is inverted relative to pixel coordinates, hence
    # the map_height - ... conversions below.
    # TODO: Figure out why bottom of the map causes IndexError
    try:
        start = grid.node(int(
            start_x / tile_size), map_height - int(start_y / tile_size) - 1
        )
        end = grid.node(int(
            end_x / tile_size), map_height - int(end_y / tile_size)
        )
        finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
        path, runs = finder.find_path(start, end, grid)
    except IndexError:
        print("Out of range.")
        path = []
    # Convert tile coordinates back to pixels, flipping y again.
    return [
        (position[0] * tile_size, (map_height - position[1]) * tile_size)
        for position in path
    ]
from typing import Dict
def _kwargs_to_bond_parameters(bond_type: Array,
                               kwargs: Dict[str, Array]) -> Dict[str, Array]:
  """Extract parameters from keyword arguments."""
  # NOTE(schsam): We could pull out the species case from the generic case.
  if bond_type is None:
    return kwargs
  for name in kwargs:
    kwargs[name] = _get_bond_type_parameters(kwargs[name], bond_type)
  return kwargs
import time
def wait_for_result(func, *args, matcher=simple_matcher(True), attempts=20,
                    interval=5, decode=decode_wrapper):
    """Run ``func(*args)`` repeatedly until ``matcher`` accepts the output.

    The call is attempted up to ``attempts`` times, sleeping ``interval``
    seconds between tries. Each raw result is passed through ``decode``
    (when set) before being matched.

    Returns the first matching (decoded) result.

    Raises
    ------
    TimeoutError
        If no attempt produced a matching result. TimeoutError subclasses
        Exception, so existing callers that catch Exception keep working.
    """
    for _ in range(attempts):
        out = func(*args)
        if decode:
            out = decode(out)
        if matcher(out):
            return out
        time.sleep(interval)
    raise TimeoutError(
        "Timed out waiting for result %s in %s(%s)" % (matcher, func, args)
    )
def host_is_trusted(hostname: str, trusted_list: t.Iterable[str]) -> bool:
    """Check if a host matches a list of trusted names.

    :param hostname: The name to check.
    :param trusted_list: A list of valid names to match. If a name
        starts with a dot it will match all subdomains.

    .. versionadded:: 0.9
    """
    if not hostname:
        return False

    if isinstance(trusted_list, str):
        trusted_list = [trusted_list]

    def _normalize(name: str) -> bytes:
        # Drop a trailing ":port" before IDNA-encoding.
        if ":" in name:
            name = name.rsplit(":", 1)[0]
        return _encode_idna(name)

    try:
        hostname_bytes = _normalize(hostname)
    except UnicodeError:
        return False

    for entry in trusted_list:
        is_suffix = entry.startswith(".")
        # A leading dot means "match any subdomain of this name".
        candidate = entry[1:] if is_suffix else entry

        try:
            candidate_bytes = _normalize(candidate)
        except UnicodeError:
            return False

        if candidate_bytes == hostname_bytes:
            return True

        if is_suffix and hostname_bytes.endswith(b"." + candidate_bytes):
            return True

    return False
from typing import Sequence
import copy
def average_data(flow_fields: "Sequence[GmxFlow]") -> "GmxFlow | None":
    """Average a given list of flow fields.

    The flow fields must be of identical shape and be regular. It is further
    assumed that they have the same origin and bin spacing, and that the bins
    have the same index ordering.

    Mass (`M`), number density (`N`) and temperature (`T`) are averaged
    arithmetically; velocities (`U`, `V`, `flow`) are mass-averaged.

    If the input list is empty, `None` is returned.

    """
    try:
        avg_flow = flow_fields[0].copy()
    except IndexError:
        return None

    # Mass-weight the first field's velocities so it enters the mass-average
    # on the same footing as the remaining fields (previously it was added
    # unweighted, which skewed the average -- e.g. a single-field "average"
    # returned U / M instead of U).
    avg_flow.data['U'] *= avg_flow.data['M']
    avg_flow.data['V'] *= avg_flow.data['M']
    avg_flow.data['flow'] *= avg_flow.data['M']

    for flow in flow_fields[1:]:
        avg_flow.data['M'] += flow.data['M']
        avg_flow.data['N'] += flow.data['N']

        # Averaging the actual temperature properly requires access to the
        # number of degrees of freedom for atoms in the bin, which we do not
        # have. We thus simply take the arithmetic mean.
        avg_flow.data['T'] += flow.data['T']

        # Velocities are mass-averaged.
        avg_flow.data['U'] += flow.data['M'] * flow.data['U']
        avg_flow.data['V'] += flow.data['M'] * flow.data['V']
        avg_flow.data['flow'] += flow.data['M'] * flow.data['flow']

    num_data = float(len(flow_fields))

    # We do not want to divide the velocities with 0. Thus we set the mass in
    # empty bins to a non-zero number. Since no data is present in those bins,
    # the velocities will be 0 after the division.
    mass_div = copy.deepcopy(avg_flow.data['M'])
    mass_div[mass_div == 0.] = num_data

    avg_flow.data['U'] /= mass_div
    avg_flow.data['V'] /= mass_div
    avg_flow.data['flow'] /= mass_div

    avg_flow.data['M'] /= num_data
    avg_flow.data['N'] /= num_data
    avg_flow.data['T'] /= num_data

    return avg_flow
import typing
import os
import glob
import warnings
def identify_background_video_folder(parent_folder: typing.Union[str, bytes, os.PathLike], fname: str, fname_format: str, optional_settings: dict = {}) -> typing.Tuple[bool,str]:
    """
    Identifies a background folder that matches a given experimental fname.

    Identifies a background folder tagged with appropriate parameters such that
    it matches the given base folder name for an experimental video.

    Parameters
    ----------
    parent_folder: path-like
        Path in which to look for background video folders.
    fname: str
        The base name of the experimental video folder.
        ex. "20210929_6M-PEO_fps-25k_1"
    fname_format: str
        The format of the fname with parameter names separated
        by the deliminator specified by fname_split. Must contain the "vtype"
        tag.
        ex. "date_sampleinfo_fps_run_vtype"
    optional_settings: dict
        A dictionary of optional settings.

    Optional Settings and Defaults
    ------------------------------
    fname_split: string
        The deliminator for splitting folder/file names, used in fname_format.
        Default is "_".
    background_tag: string
        The tag for identifying background videos. May not be empty.
        Default is "bg".
    one_background: bool
        True to use one background for a group of experiments only differing by
        run number. False to pair backgrounds and experiments 1:1.
        Default is True.

    Returns
    -------
    matched_bg: bool
        True if a matching background is found, False otherwise.
    bg_folder: string
        Name of background folder if a matching one is found, '' otherwise.

    Raises
    ------
    ValueError
        If the given fname_format does not contain the tag "vtype."

    Warns
    -----
    UserWarning
        If multiple matched backgrounds are found for a given fname.
    """
    # NOTE(review): the mutable default for optional_settings is safe only
    # while it is never mutated here -- confirm integration.set_defaults
    # does not modify its argument.
    settings = integration.set_defaults(optional_settings)
    fname_split = settings["fname_split"]
    background_tag = settings["background_tag"]
    one_background = settings["one_background"]

    # Checks for "vtype" tag since it is needed for further processing.
    if not tags.check_fname_format_for_tag(fname_format,"vtype",fname_split):
        # fname_format must have vtype to be able to match videos.
        raise ValueError("fname_format must contain the tag 'vtype' (video type) to identify background vs. experimental videos.")

    # Starts by inserting background_tag in vtype location.
    # Work with a format stripped of "remove" tags so positions line up.
    if tags.check_fname_format_for_tag(fname_format,"remove",fname_split):
        no_remove_format = tags.remove_tag_from_fname(fname_format,fname_format,"remove",fname_split)
    else:
        no_remove_format = fname_format
    bg_fname = tags.insert_tag_in_fname(fname,no_remove_format,"vtype",background_tag,fname_split)
    # Then puts "*" where "remove" tags would exist (glob wildcard).
    bg_fname = tags.insert_tag_in_fname(bg_fname,fname_format,"remove","*",fname_split)

    if one_background:
        # If only one background, handles two cases: no run number or
        # still has a run number but we are using the first background for
        # every run.
        bg_norun_fname = tags.remove_tag_from_fname(bg_fname,fname_format,"run",fname_split)
        bg_norun_folders = glob.glob(os.path.join(parent_folder,bg_norun_fname))
        # 2nd case, sub the run tag with *, then search.
        bg_run_fname = tags.replace_tag_in_fname(bg_fname,fname_format,"run","*",fname_split)
        bg_run_folders = glob.glob(os.path.join(parent_folder,bg_run_fname))
        # Combines, sorts, then takes the 1st.
        # dict.fromkeys deduplicates while preserving the sorted order.
        bg_folders = bg_run_folders + bg_norun_folders
        bg_folders = list(dict.fromkeys(sorted(bg_folders)))
        if bg_folders == []:
            bg_folder = ''
            matched_bg = False
        else:
            bg_folder = os.path.basename(bg_folders[0])
            matched_bg = True
    else:
        # If matched backgrounds, matchs by run number.
        bg_folders = glob.glob(os.path.join(parent_folder,bg_fname))
        bg_folders = sorted(bg_folders)
        if bg_folders == []:
            bg_folder = ''
            matched_bg = False
        else:
            bg_folder = os.path.basename(bg_folders[0])
            matched_bg = True

    # Warns if there are multiple matching backgrounds (either branch).
    if len(bg_folders) > 1:
        warnings.warn("Multiple folders matched background for " + str(fname) + ". First used.", UserWarning)

    return matched_bg, bg_folder
def user_dashboard_request_view(request, **kwargs):
    """User dashboard request details view.

    Renders the detail page for ``request`` on the user dashboard:
    a community submission resolves its topic draft and renders the
    submission template; a community invitation renders the invitation
    template.
    """
    # Expand the current user's avatar link for the page header.
    avatar = current_user_resources.users_service.links_item_tpl.expand(
        current_user
    )["avatar"]

    request_type = request["type"]
    is_draft_submission = request_type == CommunitySubmission.type_id
    is_invitation = request_type == CommunityInvitation.type_id
    request_is_accepted = request["status"] == AcceptAction.status_to

    if is_draft_submission:
        topic = _resolve_topic_draft(request)

        return render_template(
            "invenio_requests/community-submission/index.html",
            base_template="invenio_app_rdm/users/base.html",
            user_avatar=avatar,
            request=request.to_dict(),
            record=topic["record_ui"],
            permissions=topic["permissions"],
            is_preview=True,
            draft_is_accepted=request_is_accepted,
            files=[],
        )

    elif is_invitation:
        return render_template(
            "invenio_requests/community-invitation/user_dashboard.html",
            base_template="invenio_app_rdm/users/base.html",
            user_avatar=avatar,
            request=request.to_dict(),
            invitation_accepted=request_is_accepted,
        )
    # NOTE(review): any other request type falls through and returns None,
    # which Flask treats as an error -- confirm this path is unreachable.
def _load_discrim_net(path: str, venv: VecEnv) -> common.RewardFn:
    """Load test reward output from discriminator."""
    del venv  # Unused.
    discrim = th.load(path)
    # TODO(gleave): expose train reward as well? (hard due to action probs?)
    return discrim.predict_reward_test
from typing import Sequence
def _is_generator_like(data):
"""Checks if data is a generator, Sequence, or Iterator."""
return (hasattr(data, 'next') or hasattr(data, '__next__') or isinstance(
data, (Sequence, iterator_ops.Iterator, iterator_ops.IteratorV2))) | 444516eed895f1877cd63d1970254ad3890c9cd9 | 3,629,434 |
import socket
def check_tcp_port(host, port, timeout=3):
    """
    Try connecting to a given TCP port.

    :param host: Host to connect to
    :param port: TCP port to connect to
    :param timeout: Connection timeout, in seconds
    :return: True if the port is open, False otherwise.
    """
    # Use the socket as a context manager so it is always closed -- the
    # previous version leaked the socket when the connection attempt failed.
    with socket.socket() as s:
        s.settimeout(timeout)
        try:
            s.connect((host, port))
        except socket.error:
            return False
        return True
import logging
def create_logger(logger=None, loglevel=DEFAULT_LOGLEVEL):
    """
    Attaches or creates a new logger and creates console handlers if not present
    """
    if logger is None:
        logger = logging.getLogger(
            '{0}.SSHTunnelForwarder'.format(__name__))
    if not logger.handlers:
        # No handlers yet: set the level and attach a console handler.
        logger.setLevel(loglevel)
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            '%(asctime)s | %(levelname)-8s| %(message)s'))
        logger.addHandler(handler)

    # Add a console handler for paramiko.transport's logger if not present
    paramiko_logger = logging.getLogger('paramiko.transport')
    if not paramiko_logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            '%(asctime)s | %(levelname)-8s| PARAMIKO: '
            '%(lineno)03d@%(module)-10s| %(message)s'))
        paramiko_logger.addHandler(handler)
    return logger
def psi_from(grid, axis_ratio, core_radius):
    """
    Returns the $\Psi$ term in expressions for the calculation of the deflection
    of an elliptical isothermal mass distribution. This is used in the
    `Isothermal` and `Chameleon` `MassProfile`'s.

    The expression for Psi is:

    $\Psi = \sqrt(q^2(s^2 + x^2) + y^2)$

    Parameters
    ----------
    grid
        The (y,x) coordinates of the grid, in an arrays of shape (total_coordinates, 2)
    axis_ratio
        Ratio of profiles ellipse's minor and major axes (b/a)
    core_radius
        The radius of the inner core

    Returns
    -------
    float
        The value of the Psi term.
    """
    y_coords = grid[:, 0]
    x_coords = grid[:, 1]
    return np.sqrt(
        axis_ratio ** 2.0 * (np.square(x_coords) + core_radius ** 2.0)
        + np.square(y_coords)
    )
from io import StringIO
def create_station_dataframe():
    """Creates a pandas dataframe from the csv list of all stations
    downloaded from the uhrqds.

    Station-info lines are recognised by a leading three-digit station
    index; header/separator lines are skipped.

    Returns
    -------
    pandas.DataFrame
        One row per station. The frame is also written to
        ``constants.STATIONFILE``.
    """
    files = get_station_files()
    station_lines = []
    for item in files:
        for line in item.read().decode().splitlines():
            try:
                # A line whose first three characters parse as an int is a
                # station info line; keep it for the combined table.
                int(line[:3])
            except ValueError:
                # Header/separator line: skip it.
                continue
            station_lines.append(line + "\n")
    combinedlst = "".join(station_lines)
    stat_df = pd.read_fwf(StringIO(combinedlst),
                          header=None,
                          names=["stat_idx", "oc_idx", "x", "loc_name",
                                 "country", "Lat", "Lon", "data_years",
                                 "CI", "Contributor"])
    # Dump to file, creating the output directory first if needed.
    # (The previous if/else skipped the save whenever the directory had to
    # be created, so fresh directories never received the file.)
    if not constants.SAVEFILELOCATION.exists():
        constants.SAVEFILELOCATION.mkdir(parents=True)
    stat_df.to_csv(constants.STATIONFILE)
    return stat_df
def _get_value_from_value_pb(value_pb):
    """Given a protobuf for a Value, get the correct value.

    The Cloud Datastore Protobuf API returns a Property Protobuf which
    has one value set and the rest blank. This function retrieves the
    the one value provided.

    Some work is done to coerce the return value into a more useful type
    (particularly in the case of a timestamp value, or a key value).

    :type value_pb: :class:`gcloud.datastore._generated.entity_pb2.Value`
    :param value_pb: The Value Protobuf.

    :returns: The value provided by the Protobuf.
    :raises: :class:`ValueError <exceptions.ValueError>` if no value type
             has been set.
    """
    kind = value_pb.WhichOneof('value_type')
    # Scalar fields map straight through.
    if kind == 'null_value':
        return None
    if kind == 'boolean_value':
        return value_pb.boolean_value
    if kind == 'double_value':
        return value_pb.double_value
    if kind == 'integer_value':
        return value_pb.integer_value
    if kind == 'string_value':
        return value_pb.string_value
    if kind == 'blob_value':
        return value_pb.blob_value
    # Structured fields are coerced into richer Python types.
    if kind == 'timestamp_value':
        return _pb_timestamp_to_datetime(value_pb.timestamp_value)
    if kind == 'key_value':
        return key_from_protobuf(value_pb.key_value)
    if kind == 'entity_value':
        return entity_from_protobuf(value_pb.entity_value)
    if kind == 'array_value':
        return [_get_value_from_value_pb(item)
                for item in value_pb.array_value.values]
    if kind == 'geo_point_value':
        return GeoPoint(value_pb.geo_point_value.latitude,
                        value_pb.geo_point_value.longitude)
    raise ValueError('Value protobuf did not have any value set')
def load_audio(path):
    """Load audio data, mp3 or wav format.

    The extension check is case-insensitive, so ``.WAV``/``.MP3`` also load.

    Parameters
    ----------
    path : str
        audio file path.

    Returns
    -------
    fs : int
        Sampling frequency in Hz.
    data : array-like
        Audio data.

    Raises
    ------
    ValueError
        If the file extension is neither ``.wav`` nor ``.mp3``.
    """
    lower_path = path.lower()
    if lower_path.endswith(".wav"):
        fs, data = load_wav(path)
    elif lower_path.endswith(".mp3"):
        fs, data = load_mp3(path)
    else:
        raise ValueError("Wrong file format, use mp3 or wav")
    return fs, data
from . import operators
import inspect
def export_rule_data(variables, actions):
    """ export_rule_data is used to export all information about the
    variables, actions, and operators to the client. This will return a
    dictionary with three keys:
    - variables: a list of all available variables along with their label, type and options
    - actions: a list of all actions along with their label and params
    - variable_type_operators: a dictionary of all field_types -> list of available operators
    """
    all_actions = actions.get_all_actions()
    all_variables = variables.get_all_variables()
    # Collect every operator container exported by the operators module.
    operators_by_type = {}
    exported = inspect.getmembers(
        operators, lambda obj: getattr(obj, 'export_in_rule_data', False))
    for _name, type_class in exported:  # getmembers yields (name, value)
        operators_by_type[type_class.name] = type_class.get_all_operators()
    return {"variables": all_variables,
            "actions": all_actions,
            "variable_type_operators": operators_by_type}
def produce_nm_phase_locked_sig(sig, phase_lag, n, m, wn_base, sfreq, nonsin_mode=2, kappa=None):
    """Produce a complex signal that is n:m phase-locked to ``sig``.

    :param sig: input signal (real or analytic/complex); 1-D or (1, n_samples)
    :param phase_lag: constant phase lag [rad] added to the produced signal
    :param n: locking ratio numerator (input side)
    :param m: locking ratio denominator (output side)
    :param wn_base: (low, high) base band; only used when ``nonsin_mode != 2``
        to draw a fresh amplitude envelope from band-limited noise
    :param sfreq: sampling frequency
    :param nonsin_mode: 2 -> reuse the amplitude envelope of ``sig``,
        otherwise draw an envelope from band-limited noise
    :param kappa:
        if None, the signals are completely locked to each other
    :return: complex analytic signal with shape (1, n_samples)
    :raises NotImplementedError: if ``kappa`` is not None (partial locking
        is not implemented yet)
    """
    if kappa is not None:
        # TODO: kappa von mises. Previously this fell through to a
        # NameError on the unbound result; fail explicitly instead.
        raise NotImplementedError('kappa (von Mises phase jitter) is not implemented')
    if not np.iscomplexobj(sig):
        sig = hilbert_(sig)
    if sig.ndim == 1:
        sig = sig[np.newaxis, :]
    sig_angle = np.angle(sig)
    n_samples = sig.shape[1]
    if nonsin_mode == 2:  # the same amplitude envelopes
        sig_abs = np.abs(sig)
    else:  # fresh band-limited noise envelope around m * wn_base
        sig_ = filtered_randn(m * wn_base[0], m * wn_base[1], sfreq, n_samples)
        sig_abs = np.abs(sig_)
    sig_hat = sig_abs * np.exp(1j * m / n * sig_angle + 1j * phase_lag)
    return sig_hat
import functools
def patch_schedule_and_run():
    """
    Patches ``luigi.interface._schedule_and_run`` to invoke all callbacks registered via
    :py:func:`before_run` right before luigi starts running scheduled tasks. This is achieved by
    patching ``luigi.worker.Worker.run`` within the scope of ``luigi.interface._schedule_and_run``.
    """
    # Keep a reference to the unpatched function; the wrapper below delegates
    # to it after temporarily installing the Worker.run patch.
    _schedule_and_run_orig = luigi.interface._schedule_and_run

    @functools.wraps(_schedule_and_run_orig)
    def _schedule_and_run(*args, **kwargs):
        run_orig = luigi.worker.Worker.run

        @functools.wraps(run_orig)
        def run(self):
            # invoke all registered before_run functions
            for func in _before_run_funcs:
                if callable(func):
                    logger.debug("calling before_run function {}".format(func))
                    func()
                else:
                    logger.warning("registered before_run function {} is not callable".format(func))

            return run_orig(self)

        # Patch Worker.run only for the duration of this scheduling call;
        # patch_object restores the original afterwards.
        with law.util.patch_object(luigi.worker.Worker, "run", run):
            return _schedule_and_run_orig(*args, **kwargs)

    luigi.interface._schedule_and_run = _schedule_and_run

    logger.debug("patched luigi.interface._schedule_and_run")
def randonness_test(ts, lag=None):
    """Randomness tests of a sample series.

    Runs the run test (H0: the sequence was produced in a random manner)
    and the variance ratio test (H0: the series is ~ random walk), printing
    both summaries.

    :param ts: the series to test; NaNs are dropped first
    :param lag: lag for the variance ratio test (defaults to 2)
    :return: (statistic, pval) of the run test
    """
    ts = ts.dropna()

    # The Run Test
    statistic, pval = runstest_1samp(ts, correction=False)
    print(" The Run Test Result \n"
          "=====================================\n"
          "Statistic = {0}\n"
          "P-Value = {1}\n"
          "-------------------------------------\n".format(statistic, pval))

    # Variance Ratio Test
    if lag is None:
        lag = 2
    vr = VarianceRatio(ts, lags=lag)
    print(vr.summary().as_text())

    return (statistic, pval, )
import torch
def batch_norm1d ( input         # in_minibatch x 1 x in_size
                 , running_mean  # 1 x in_size (not no minibatch)
                 , running_var   # 1 x in_size (not no minibatch)
                 , weight=None   # 1 x in_size
                 , bias=None     # 1 x in_size
                 , eps=1E-5
                 , rigor=False
                 , verbose=False):
    """
    Correspond torch.nn.functional.batch_norm(input, running_mean, running_var,
                                              weight, bias,
                                              training=False, momentum=0.1, eps)
    Returns output tensor on success

    Applies a batch normalization over an input data.
    Note that all nd-array lists are PyTorch tensor (immutable).

    :param input: input data, input[in_minibatch][in_channel][in_size]
    :param running_mean: running_mean[in_channel]
    :param running_var: running_var[in_channel]
    :param weight: None or weight[in_channel]
    :param bias: None or bias[in_channel]
    :param eps: value added to the denominator for numerical stability
    :param rigor: check values rigorously when 'True'
    :param verbose: output message more when 'True'
    :return: out_data on success, None on failure.
    """
    if rigor:
        # Accumulate shape/size violations; any violation aborts with None.
        error = 0
        if (input.dim()!=3) and (input.dim()!=2): error += 1
        if (input.dim()==3):
            in_channel = input.shape[1]
            # NOTE(review): dead check -- we are already inside the
            # dim()==3 branch, so this condition can never be true.
            if (input.dim()!=3): error += 1
            if (running_mean.dim()!=1): error += 1 # mind channel
            if (running_var.dim()!=1): error += 1 # mind channel
            if (running_mean.numel()!=in_channel): error += 1
            if (running_var.numel()!=in_channel): error += 1
            if (weight is not None) and (weight.dim()!=1): error += 1
            if (weight is not None) and (weight.numel()!=in_channel): error += 1
            if (bias is not None) and (bias.dim()!=1): error += 1
            if (bias is not None) and (bias.numel()!=in_channel): error += 1
        # NOTE(review): the _dlr.DlrError return value is discarded here;
        # if it is meant to raise or log, confirm against the _dlr API.
        else: error += 1; _dlr.DlrError("only supported for data with channel")
        if error!=0: return None
    dtype = input.dtype
    in_minibatch = input.shape[0]
    out_data = torch.empty(input.shape, dtype=dtype)
    # Normalize each mini-batch sample independently through the native
    # _dlr helper, which writes into the per-sample output view in place.
    for mb in range(in_minibatch):
        xout_data = out_data[mb]
        xin_data  = input[mb]
        status = _dlr.Norm1dBatch( xout_data.data.numpy() # ndim x out_size
                                 , xin_data.data.numpy() # ndim x in_size
                                 , running_mean.data.numpy() # out_size x in_size
                                 , running_var.data.numpy() # out_size x in_size
                                 , None if weight is None else weight.data.numpy() # out_size
                                 , None if bias is None else bias.data.numpy() # out_size
                                 , eps
                                 , rigor=rigor
                                 , verbose=verbose)
        if not status: return None
        # NOTE(review): xout_data is a view into out_data, so this
        # write-back appears redundant (but harmless) -- confirm.
        out_data[mb] = xout_data
    return out_data
def isolated_margin_account(self, **kwargs):
    """Query Isolated Margin Account Info (USER_DATA)

    GET /sapi/v1/margin/isolated/account

    https://binance-docs.github.io/apidocs/spot/en/#query-isolated-margin-account-info-user_data

    Keyword Args:
        symbols (str, optional): Max 5 symbols can be sent; separated by ",". e.g. "BTCUSDT,BNBUSDT,ADAUSDT"
        recvWindow (int, optional): The value cannot be greater than 60000
    """
    url_path = "/sapi/v1/margin/isolated/account"
    return self.sign_request("GET", url_path, kwargs)
import random
def multiplex_erdos_renyi(mg, seed=None, include_all=True):
    """Return a Multinet such that each layer is an Erdos-Renyi network with
    same p as the original Multinet given.

    Parameters
    ----------
    mg : Multinet
        Multiplex network to be configured.
    seed : object
        Seed for the model.

    Return
    ------
    A new Multinet instance.
    """
    layers = mg.layers()

    rng = random.Random()
    rng.seed(seed)

    directed = mg.is_directed()
    nmg = mn.DiMultinet() if directed else mn.Multinet()

    remove_isolates = not include_all
    for layer in layers:
        sg = mg.sub_layer(layer, remove_isolates=remove_isolates)
        nodes = sg.nodes()
        nnode = sg.number_of_nodes()
        nedge = sg.number_of_edges()

        # Edge probability reproducing the layer's density.
        ordered_pairs = nnode * (nnode - 1)
        p = nedge / ordered_pairs if directed else 2 * nedge / ordered_pairs

        rand_graph = nx.erdos_renyi_graph(
            nnode, p, seed=rng, directed=directed)
        # Map the generated integer node labels back onto the layer's nodes.
        mapping = dict(zip(rand_graph.nodes(), nodes))
        nmg.add_layer(nx.relabel_nodes(rand_graph, mapping), layer)

    return nmg
import os
def load_model(model_dir,
               model_file=None,
               model_name=None,
               serialize_model=True):
  """Loads the model from the catalog or a definition file.

  Args:
    model_dir: The model directory.
    model_file: An optional model configuration.
      Mutually exclusive with :obj:`model_name`.
    model_name: An optional model name from the catalog.
      Mutually exclusive with :obj:`model_file`.
    serialize_model: Serialize the model definition in the model directory to
      make it optional for future runs.

  Returns:
    A :class:`opennmt.models.Model` instance.

  Raises:
    ValueError: if both :obj:`model_file` and :obj:`model_name` are set.
    RuntimeError: if no configuration was provided and no serialized
      description exists in :obj:`model_dir`.
  """
  if model_file and model_name:
    raise ValueError("only one of model_file and model_name should be set")
  model_name_or_path = model_file or model_name
  model_description_path = os.path.join(model_dir, "model_description.py")

  if model_name_or_path:
    # An explicit configuration was given; warn if it may conflict with the
    # architecture of an existing checkpoint.
    if tf.train.latest_checkpoint(model_dir) is not None:
      tf.get_logger().warning(
          "You provided a model configuration but a checkpoint already exists. "
          "The model configuration must define the same model as the one used for "
          "the initial training. However, you can change non structural values like "
          "dropout.")

    if model_file:
      model = load_model_from_file(model_file)
      if serialize_model:
        # Copy the definition next to the checkpoints so future runs can
        # omit the --model flag.
        tf.io.gfile.copy(model_file, model_description_path, overwrite=True)
    elif model_name:
      model = load_model_from_catalog(model_name)
      if serialize_model:
        # Serialize a small stub that re-imports the catalog model by name.
        with tf.io.gfile.GFile(model_description_path, mode="w") as model_description_file:
          model_description_file.write("from opennmt.models import catalog\n")
          model_description_file.write("model = catalog.%s\n" % model_name)
  elif tf.io.gfile.exists(model_description_path):
    # No explicit configuration: fall back to the serialized description.
    tf.get_logger().info("Loading model description from %s", model_description_path)
    model = load_model_from_file(model_description_path)
  else:
    raise RuntimeError("A model configuration is required: you probably need to "
                       "set --model or --model_type on the command line.")
  return model
def homography(points1, points1_indices, points2, points2_indices, num_points=4, min_num_points=4):
    """
    Computes a homography matrix for given two sets of points using the
    Direct Linear Transform (DLT).

    :param points1: First point set, shape (N, 1, 2) with (x, y) per point
    :param points1_indices: Indices into ``points1`` of the correspondences
    :param points2: Second point set, shape (N, 1, 2)
    :param points2_indices: Indices into ``points2`` of the correspondences
    :param num_points: Number of points to use for calculating homography
    :param min_num_points: Minimum number of points required (degrees of freedom)
    :return: A 3x3 homography matrix normalized so that ``h[2, 2] == 1``
    """
    assert num_points >= min_num_points
    # Build the 2*num_points x 9 DLT coefficient matrix: each point
    # correspondence contributes two rows.
    a_matrix = np.zeros((num_points * 2, 9))
    idx = 0
    for i, j in zip(points1_indices, points2_indices):
        x1, y1 = points1[i, 0, 0], points1[i, 0, 1]
        x2, y2 = points2[j, 0, 0], points2[j, 0, 1]
        a_matrix[idx, :] = [-x1, -y1, -1,
                            0, 0, 0,
                            x2 * x1, x2 * y1, x2]
        a_matrix[idx + 1, :] = [0, 0, 0,
                                -x1, -y1, -1,
                                y2 * x1, y2 * y1, y2]
        idx += 2
    # The homography is the right singular vector associated with the
    # smallest singular value (last row of v), reshaped to 3x3.
    _, _, v = np.linalg.svd(a_matrix)
    h_unnormalized = v[8].reshape(3, 3)
    # Normalize scale (and sign) so the bottom-right element equals 1.
    h = (1 / h_unnormalized[2, 2]) * h_unnormalized
    return h
def get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic):
    """
    Get site ID - transcript ID combination score, based on selected
    transcripts for each of the 10 different filter settings.

    10 transcript quality filter settings:
    EIR, EXB, TSC, ISRN, ISR, ISRFC, SEO, FUCO, TCOV, TSL

    idfilt2best_trids_dic:
        "site_id,filter_id"
        -> top transcript ID(s) after applying filter on exon IDs > min_eir

    >>> site_id = "s1"
    >>> idfilt2best_trids_dic = {"s1,EIR" : ["t1"], "s1,EXB" : ["t1"], "s1,TSC" : ["t1"], "s1,ISRN" : ["t1"], "s1,ISR" : ["t1"], "s1,ISRFC" : ["t1"], "s1,SEO" : ["t1"], "s1,FUCO" : ["t1"], "s1,TCOV" : ["t1"], "s1,TSL" : ["t1"]}
    >>> tr_ids_list = ["t1"]
    >>> get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic)
    {'t1': 10}
    >>> idfilt2best_trids_dic = {"s1,EIR" : ["t1", "t2"], "s1,EXB" : ["t1", "t2"], "s1,TSC" : ["t1"], "s1,ISRN" : ["t2"], "s1,ISR" : ["t1"], "s1,ISRFC" : ["t1"], "s1,SEO" : ["t1"], "s1,FUCO" : ["t1", "t2"], "s1,TCOV" : ["t1"], "s1,TSL" : ["t2"]}
    >>> tr_ids_list = ["t1", "t2", "t3"]
    >>> get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic)
    {'t1': 8, 't2': 5, 't3': 0}

    """
    assert tr_ids_list, "tr_ids_list empty"
    filter_ids = ["EIR", "EXB", "TSC", "ISRN", "ISR", "ISRFC", "SEO", "FUCO", "TCOV", "TSL"]
    # Score = number of filters whose winning transcript set contains the ID.
    return {
        tr_id: sum(
            1 for fid in filter_ids
            if tr_id in idfilt2best_trids_dic["%s,%s" % (site_id, fid)]
        )
        for tr_id in tr_ids_list
    }
def extract_address_from_dnb_company(dnb_company, prefix, ignore_when_missing=()):
    """
    Extract address from dnb company data. This takes a `prefix` string to
    extract address fields that start with a certain prefix.

    Returns None when any field listed in `ignore_when_missing` is empty.
    """
    country_code = dnb_company.get(f'{prefix}_country')
    country = None
    if country_code:
        country = Country.objects.filter(iso_alpha2_code=country_code).first()

    extracted_address = {
        'line_1': dnb_company.get(f'{prefix}_line_1') or '',
        'line_2': dnb_company.get(f'{prefix}_line_2') or '',
        'town': dnb_company.get(f'{prefix}_town') or '',
        'county': dnb_company.get(f'{prefix}_county') or '',
        'postcode': dnb_company.get(f'{prefix}_postcode') or '',
        'country': country.id if country else None,
    }
    if any(not extracted_address[field] for field in ignore_when_missing):
        return None
    return extracted_address
def prepare_inverse_operator(orig, nave, lambda2, dSPM):
    """Prepare an inverse operator for actually computing the inverse

    Parameters
    ----------
    orig: dict
        The inverse operator structure read from a file
    nave: int
        Number of averages (scales the noise covariance)
    lambda2: float
        The regularization factor. Recommended to be 1 / SNR**2
    dSPM: bool
        If True, compute the noise-normalization factors for dSPM.

    Returns
    -------
    inv: dict
        Prepared inverse operator
    """
    if nave <= 0:
        raise ValueError('The number of averages should be positive')

    print 'Preparing the inverse operator for use...'
    # NOTE(review): dict.copy() is shallow, so the rebinding of nested dict
    # entries below also alters `orig`'s nested dicts -- confirm intended.
    inv = orig.copy()
    #
    #   Scale some of the stuff
    #
    # Covariances scale inversely with the number of averages.
    scale = float(inv['nave']) / nave
    inv['noise_cov']['data'] = scale * inv['noise_cov']['data']
    inv['noise_cov']['eig'] = scale * inv['noise_cov']['eig']
    inv['source_cov']['data'] = scale * inv['source_cov']['data']
    #
    if inv['eigen_leads_weighted']:
        inv['eigen_leads']['data'] = sqrt(scale) * inv['eigen_leads']['data']

    print ('\tScaled noise and source covariance from nave = %d to '
           'nave = %d' % (inv['nave'], nave))
    inv['nave'] = nave
    #
    #   Create the diagonal matrix for computing the regularized inverse
    #
    # Tikhonov-regularized inverse of the singular values.
    inv['reginv'] = inv['sing'] / (inv['sing'] ** 2 + lambda2)
    print '\tCreated the regularized inverter'
    #
    #   Create the projection operator
    #
    inv['proj'], ncomp, _ = make_projector(inv['projs'],
                                           inv['noise_cov']['names'])
    if ncomp > 0:
        print '\tCreated an SSP operator (subspace dimension = %d)' % ncomp

    #
    #   Create the whitener
    #
    if not inv['noise_cov']['diag']:
        inv['whitener'] = np.zeros((inv['noise_cov']['dim'],
                                    inv['noise_cov']['dim']))
        #
        #   Omit the zeroes due to projection
        #
        eig = inv['noise_cov']['eig']
        nzero = (eig > 0)
        inv['whitener'][nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
        #
        #   Rows of eigvec are the eigenvectors
        #
        inv['whitener'] = np.dot(inv['whitener'], inv['noise_cov']['eigvec'])
        print ('\tCreated the whitener using a full noise covariance matrix '
               '(%d small eigenvalues omitted)' % (inv['noise_cov']['dim']
                                                  - np.sum(nzero)))
    else:
        #
        #   No need to omit the zeroes due to projection
        #
        inv['whitener'] = np.diag(1.0 /
                                  np.sqrt(inv['noise_cov']['data'].ravel()))
        print ('\tCreated the whitener using a diagonal noise covariance '
               'matrix (%d small eigenvalues discarded)' % ncomp)

    #
    #   Finally, compute the noise-normalization factors
    #
    if dSPM:
        print '\tComputing noise-normalization factors...',
        noise_norm = np.zeros(inv['eigen_leads']['nrow'])
        nrm2, = linalg.get_blas_funcs(('nrm2',), (noise_norm,))
        # Per-source norm of the regularized-inverse row; when the eigen
        # leads are already weighted the source covariance factor is baked in.
        if inv['eigen_leads_weighted']:
            for k in range(inv['eigen_leads']['nrow']):
                one = inv['eigen_leads']['data'][k, :] * inv['reginv']
                noise_norm[k] = nrm2(one)
        else:
            for k in range(inv['eigen_leads']['nrow']):
                one = sqrt(inv['source_cov']['data'][k]) * \
                            inv['eigen_leads']['data'][k, :] * inv['reginv']
                noise_norm[k] = nrm2(one)

        #
        #   Compute the final result
        #
        if inv['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
            #
            #   The three-component case is a little bit more involved
            #   The variances at three consequtive entries must be squared and
            #   added together
            #
            #   Even in this case return only one noise-normalization factor
            #   per source location
            #
            noise_norm = combine_xyz(noise_norm[:, None]).ravel()

        inv['noisenorm'] = 1.0 / np.abs(noise_norm)
        print '[done]'
    else:
        inv['noisenorm'] = []

    return inv
def filter_non_primary_chromosomes(df):
    """Filter out all variants that do not reside on the primary chromosomes
    (i.e. 1-22, X, Y, MT).

    The dataframe is modified in place (rows dropped) and also returned.

    :param df: pandas DataFrame with a 'Chromosome' column of string values
    :return: the same DataFrame with non-primary-chromosome rows removed
    """
    # BUG FIX: the original used Python 2 print statements, which are a
    # syntax error under Python 3.
    primary = [str(chrom) for chrom in range(1, 23)] + ['X', 'Y', 'MT']
    exclude_indices = df.index[~df['Chromosome'].isin(primary)]
    # Print excluded variants for debugging/logging purpose.
    if len(exclude_indices) > 0:
        print("##")
        print("## WARNING: Filtering out all variants that do not reside on the primary chromosomes:")
        print("##")
        print(df[df.index.isin(exclude_indices)])
    # Drop the variants
    df.drop(exclude_indices, inplace=True)
    return df
def float_convert(d, include_keys=None, exclude_keys=None):
    """Convert values of a document to floats.

    By default every key is traversed.  ``include_keys`` restricts the
    conversion to the listed dotted keys (e.g. ``a.b``, ``a.b.c``);
    ``exclude_keys`` converts everything except the listed keys.

    :param d: a dictionary to traverse keys on
    :param include_keys: only convert these keys (optional)
    :param exclude_keys: exclude all other keys except these keys (optional)
    :return: generate key, value pairs
    """
    # Delegate the traversal/conversion to the shared helper with a
    # float-casting callback.
    return value_convert_incexcl(d, to_float, include_keys, exclude_keys)
def bin_downsample(Ain, dsfac):
    """
    Downsample an array by binning.

    Each non-overlapping ``dsfac`` x ``dsfac`` tile of the input is
    averaged into one output element.

    Parameters
    ----------
    Ain : 2-D array
        The matrix to be downsampled
    dsfac : int
        Downsampling factor for the matrix

    Returns
    -------
    Aout : 2-D array
        Downsampled array

    Raises
    ------
    ValueError
        If the inputs are malformed or the shape of ``Ain`` is not
        divisible by ``dsfac``.
    """
    # Error checks on inputs
    check.twoD_array(Ain, 'Ain', ValueError)
    check.positive_scalar_integer(dsfac, 'dsfac', ValueError)
    # Array sizes must tile evenly.
    ny0, nx0 = Ain.shape
    if (nx0 % dsfac != 0) or (ny0 % dsfac != 0):
        # BUG FIX: the original constructed the ValueError but never
        # raised it, so invalid shapes fell through to reshape errors.
        raise ValueError('The size of Ain must be divisible by dsfac.')
    nx1 = nx0 // dsfac
    ny1 = ny0 // dsfac
    # Vectorized binning: reshape so each tile forms axes 1 and 3, then
    # average over those axes (equivalent to the sum / dsfac**2 of the
    # original double loop, but done at C speed).
    Aout = Ain.reshape(ny1, dsfac, nx1, dsfac).mean(axis=(1, 3))
    return Aout
def testComPolValidity(compol):
    """
    Validate the syntax of a P3P header.

    The P3P header syntax must be one of the followings:
     * P3P: CP="...", policyref="..."
     * P3P: policyref="...", CP="..."
     * P3P: CP="..."
     * P3P: policyref="..."

    'policyref="..."' contains ONE link to a policy reference file;
    'CP="..."' contains compact policy tokens with len(token) <= 4.
    If all requirements are met the compact policy seems valid and 1 is
    returned, otherwise 0.  A found reference link is resolved against the
    module-level ``url`` and passed to ``getRef()``.

    NOTE: the original used Python 2 print statements; they were converted
    to the print() function and the redundant ``else: pass`` was removed.
    """
    p3pheadCP = re.findall(r'CP="(.*?)"', compol)
    p3pheadPR = re.findall(r'policyref="(.*?)"', compol)
    # More than one CP or policyref attribute makes the header invalid.
    if len(p3pheadCP) > 1 or len(p3pheadPR) > 1:
        return 0
    if p3pheadPR:
        linkRef = p3pheadPR[0]
        if 'http' not in linkRef:
            # Turn a relative reference into an absolute /w3c/ URL.
            if linkRef[:5] == '/w3c/':
                linkRef = 'http://' + url + linkRef
            elif linkRef[:1] == '/':
                linkRef = 'http://' + url + '/w3c' + linkRef
            else:
                linkRef = 'http://' + url + '/w3c/' + linkRef
        print('* reference file should be at: ' + linkRef)
        getRef(linkRef)
    compolval = 1
    if p3pheadCP:
        # Every compact-policy token must be at most 4 characters long.
        for token in p3pheadCP[0].split(' '):
            if len(token) > 4:
                compolval = 0
    return compolval
from typing import List
from typing import Optional
from typing import Dict
from typing import Any
import re
def sentry_event_filter(  # noqa: WPS231
    event, hint, ignored_types: List[str] = None, ignored_messages: List[str] = None
) -> Optional[Dict[str, Any]]:
    """Avoid sending events to Sentry that match the specified types or regexes.

    In order to avoid flooding Sentry with events that are not useful and prevent
    wasting those network resources it is possible to filter those events. This
    function accepts a list of import paths for exception objects and/or a list of
    regular expressions to match against the exception message.

    :param event: Sentry event
    :type event: Sentry event object
    :param hint: Sentry event hint
        https://docs.sentry.io/platforms/python/configuration/filtering/hints/
    :type hint: Sentry event hint object
    :param ignored_types: List of exception classes that should be ignored by Sentry.
        Written as the full import path for the given exception type. For builtins this
        is just the string representation of the builtin class (e.g. 'ValueError' or
        'requests.exceptions.HTTPError')
    :type ignored_types: List[str]
    :param ignored_messages: List of regular expressions to be matched against the
        contents of the exception message for filtering specific instances of a given
        exception type.
    :type ignored_messages: List[str]
    :returns: An unedited event object or None in the event that the event should be
        filtered.
    :rtype: Optional[Dict[str, Any]]
    """
    exception_class = None
    exception_value = ""
    exception_info = hint.get("exc_info")
    if exception_info:
        # exc_info is the standard (type, value, traceback) triple.
        exception_class, exception_value, _traceback = exception_info
    for ignored_type in ignored_types or []:
        # Assumes _load_exception_class returns the exception *class* for
        # the given import path -- TODO confirm against its definition.
        ignored_exception_class = _load_exception_class(ignored_type)
        # BUG FIX: the original used
        # isinstance(exception_class, type(ignored_exception_class)), which
        # is True for *every* exception class (all classes are instances of
        # ``type``) and therefore filtered every event once any ignored
        # type was configured.  Use a real subclass check instead.
        if exception_class is not None and issubclass(
            exception_class, ignored_exception_class
        ):
            return None
    for ignored_message in ignored_messages or []:
        # BUG FIX: re.search requires a string; exception_value is the
        # raised exception *instance*, so stringify it first (the original
        # raised TypeError whenever exc_info was present).
        if re.search(ignored_message, str(exception_value or "")):
            return None
    return event
def map_dymola_and_json(results, case, res_fin, case_dict):
    """
    Couple the Dymola ``.mat`` result variables with the final ``.json``
    report variables for a BESTEST weather-driver case.

    :param results: Result obtained from the _extract_data function
    :param case: Dictionary that specifies the BESTEST case (e.g. 'WD100')
    :param res_fin: Dictionary with the same format as the desired json file;
        it is filled in and returned.
    :param case_dict: in case_dict is stored TestN (which .json file format\
    should be used)"
    """
    # Mapping of .json variable names -> .mat variable names for HOURLY
    # results.  Entries with 'matIso'/'matPer' (or 'matHor'/'matDew') carry
    # two alternative .mat names for the different sky/tilt models;
    # extrapolate_results picks the one matching the current result set.
    dict_hourly = [{'json': 'dry_bulb_temperature',
                    'mat': 'weaBusHHorIR.TDryBul'},
                   {'json': 'relative_humidity',
                    'mat': 'weaBusHHorIR.relHum'},
                   {'json': 'humidity_ratio',
                    'mat': 'toDryAir.XiDry'},
                   {'json': 'dewpoint_temperature',
                    'mat': 'weaBusHHorIR.TDewPoi'},
                   {'json': 'wet_bulb_temperature',
                    'mat': 'weaBusHHorIR.TWetBul'},
                   {'json': 'wind_speed',
                    'mat': 'weaBusHHorIR.winSpe'},
                   {'json': 'wind_direction',
                    'mat': 'weaBusHHorIR.winDir'},
                   {'json': 'station_pressure',
                    'mat': 'weaBusHHorIR.pAtm'},
                   {'json': 'total_cloud_cover',
                    'mat': 'weaBusHHorIR.nTot'},
                   {'json': 'opaque_cloud_cover',
                    'mat': 'weaBusHHorIR.nOpa'},
                   {'json': 'sky_temperature',
                    'matHor': 'weaBusHHorIR.TBlaSky',
                    'matDew': 'weaBusTDryBulTDewPoiOpa.TBlaSky'},
                   {'json': 'total_horizontal_radiation',
                    'matIso': 'azi000til00.H',
                    'matPer': 'azi000til00.HPer'},
                   {'json': 'beam_horizontal_radiation',
                    'mat': 'azi000til00.HDir.H'},
                   {'json': 'diffuse_horizontal_radiation',
                    'matIso': 'azi000til00.HDiffIso.H',
                    'matPer': 'azi000til00.HDiffPer.H'},
                   {'json': 'total_radiation_s_90',
                    'matIso': 'azi000til90.H',
                    'matPer': 'azi000til90.HPer'},
                   {'json': 'beam_radiation_s_90',
                    'mat': 'azi000til90.HDir.H'},
                   {'json': 'diffuse_radiation_s_90',
                    'matIso': 'azi000til90.HDiffIso.H',
                    'matPer': 'azi000til90.HDiffPer.H'},
                   {'json': 'total_radiation_e_90',
                    'matIso': 'azi270til90.H',
                    'matPer': 'azi270til90.HPer'},
                   {'json': 'beam_radiation_e_90',
                    'mat': 'azi270til90.HDir.H'},
                   {'json': 'diffuse_radiation_e_90',
                    'matIso': 'azi270til90.HDiffIso.H',
                    'matPer': 'azi270til90.HDiffPer.H'},
                   {'json': 'total_radiation_n_90',
                    'matIso': 'azi180til90.H',
                    'matPer': 'azi180til90.HPer'},
                   {'json': 'beam_radiation_n_90',
                    'mat': 'azi180til90.HDir.H'},
                   {'json': 'diffuse_radiation_n_90',
                    'matIso': 'azi180til90.HDiffIso.H',
                    'matPer': 'azi180til90.HDiffPer.H'},
                   {'json': 'total_radiation_w_90',
                    'matIso': 'azi090til90.H',
                    'matPer': 'azi090til90.HPer'},
                   {'json': 'beam_radiation_w_90',
                    'mat': 'azi090til90.HDir.H'},
                   {'json': 'diffuse_radiation_w_90',
                    'matIso': 'azi090til90.HDiffIso.H',
                    'matPer': 'azi090til90.HDiffPer.H'},
                   {'json': 'total_radiation_45_e_90',
                    'matIso': 'azi315til90.H',
                    'matPer': 'azi315til90.HPer'},
                   {'json': 'beam_radiation_45_e_90',
                    'mat': 'azi315til90.HDir.H'},
                   {'json': 'diffuse_radiation_45_e_90',
                    'matIso': 'azi315til90.HDiffIso.H',
                    'matPer': 'azi315til90.HDiffPer.H'},
                   {'json': 'total_radiation_45_w_90',
                    'matIso': 'azi045til90.H',
                    'matPer': 'azi045til90.HPer'},
                   {'json': 'beam_radiation_45_w_90',
                    'mat': 'azi045til90.HDir.H'},
                   {'json': 'diffuse_radiation_45_w_90',
                    'matIso': 'azi045til90.HDiffIso.H',
                    'matPer': 'azi045til90.HDiffPer.H'},
                   {'json': 'total_radiation_e_30',
                    'matIso': 'azi270til30.H',
                    'matPer': 'azi270til30.HPer'},
                   {'json': 'beam_radiation_e_30',
                    'mat': 'azi270til30.HDir.H'},
                   {'json': 'diffuse_radiation_e_30',
                    'matIso': 'azi270til30.HDiffIso.H',
                    'matPer': 'azi270til30.HDiffPer.H'},
                   {'json': 'total_radiation_s_30',
                    'matIso': 'azi000til30.H',
                    'matPer': 'azi000til30.HPer'},
                   {'json': 'beam_radiation_s_30',
                    'mat': 'azi000til30.HDir.H'},
                   {'json': 'diffuse_radiation_s_30',
                    'matIso': 'azi000til30.HDiffIso.H',
                    'matPer': 'azi000til30.HDiffPer.H'},
                   {'json': 'total_radiation_w_30',
                    'matIso': 'azi090til30.H',
                    'matPer': 'azi090til30.HPer'},
                   {'json': 'beam_radiation_w_30',
                    'mat': 'azi090til30.HDir.H'},
                   {'json': 'diffuse_radiation_w_30',
                    'matIso': 'azi090til30.HDiffIso.H',
                    'matPer': 'azi090til30.HDiffPer.H'}]
    # Same mapping restricted to the variables reported at SUB-HOURLY
    # resolution (plus the integrated horizontal radiation entries).
    dict_sub_hourly = [{'json': 'dry_bulb_temperature',
                        'mat': 'weaBusHHorIR.TDryBul'},
                       {'json': 'relative_humidity',
                        'mat': 'weaBusHHorIR.relHum'},
                       {'json': 'total_horizontal_radiation',
                        'matIso': 'azi000til00.H',
                        'matPer': 'azi000til00.HPer'},
                       {'json': 'beam_horizontal_radiation',
                        'mat': 'azi000til00.HDir.H'},
                       {'json': 'diffuse_horizontal_radiation',
                        'matIso': 'azi000til00.HDiffIso.H',
                        'matPer': 'azi000til00.HDiffPer.H'},
                       {'json': 'total_radiation_s_90',
                        'matIso': 'azi000til90.H',
                        'matPer': 'azi000til90.HPer'},
                       {'json': 'beam_radiation_s_90',
                        'mat': 'azi000til90.HDir.H'},
                       {'json': 'diffuse_radiation_s_90',
                        'matIso': 'azi000til90.HDiffIso.H',
                        'matPer': 'azi000til90.HDiffPer.H'},
                       {'json': 'total_radiation_e_90',
                        'matIso': 'azi270til90.H',
                        'matPer': 'azi270til90.HPer'},
                       {'json': 'beam_radiation_e_90',
                        'mat': 'azi270til90.HDir.H'},
                       {'json': 'diffuse_radiation_e_90',
                        'matIso': 'azi270til90.HDiffIso.H',
                        'matPer': 'azi270til90.HDiffPer.H'},
                       {'json': 'total_radiation_n_90',
                        'matIso': 'azi180til90.H',
                        'matPer': 'azi180til90.HPer'},
                       {'json': 'beam_radiation_n_90',
                        'mat': 'azi180til90.HDir.H'},
                       {'json': 'diffuse_radiation_n_90',
                        'matIso': 'azi180til90.HDiffIso.H',
                        'matPer': 'azi180til90.HDiffPer.H'},
                       {'json': 'total_radiation_w_90',
                        'matIso': 'azi090til90.H',
                        'matPer': 'azi090til90.HPer'},
                       {'json': 'beam_radiation_w_90',
                        'mat': 'azi090til90.HDir.H'},
                       {'json': 'diffuse_radiation_w_90',
                        'matIso': 'azi090til90.HDiffIso.H',
                        'matPer': 'azi090til90.HDiffPer.H'},
                       {'json': 'total_radiation_45_e_90',
                        'matIso': 'azi315til90.H',
                        'matPer': 'azi315til90.HPer'},
                       {'json': 'beam_radiation_45_e_90',
                        'mat': 'azi315til90.HDir.H'},
                       {'json': 'diffuse_radiation_45_e_90',
                        'matIso': 'azi315til90.HDiffIso.H',
                        'matPer': 'azi315til90.HDiffPer.H'},
                       {'json': 'total_radiation_45_w_90',
                        'matIso': 'azi045til90.H',
                        'matPer': 'azi045til90.HPer'},
                       {'json': 'beam_radiation_45_w_90',
                        'mat': 'azi045til90.HDir.H'},
                       {'json': 'diffuse_radiation_45_w_90',
                        'matIso': 'azi045til90.HDiffIso.H',
                        'matPer': 'azi045til90.HDiffPer.H'},
                       {'json': 'total_radiation_e_30',
                        'matIso': 'azi270til30.H',
                        'matPer': 'azi270til30.HPer'},
                       {'json': 'beam_radiation_e_30',
                        'mat': 'azi270til30.HDir.H'},
                       {'json': 'diffuse_radiation_e_30',
                        'matIso': 'azi270til30.HDiffIso.H',
                        'matPer': 'azi270til30.HDiffPer.H'},
                       {'json': 'total_radiation_s_30',
                        'matIso': 'azi000til30.H',
                        'matPer': 'azi000til30.HPer'},
                       {'json': 'beam_radiation_s_30',
                        'mat': 'azi000til30.HDir.H'},
                       {'json': 'diffuse_radiation_s_30',
                        'matIso': 'azi000til30.HDiffIso.H',
                        'matPer': 'azi000til30.HDiffPer.H'},
                       {'json': 'total_radiation_w_30',
                        'matIso': 'azi090til30.H',
                        'matPer': 'azi090til30.HPer'},
                       {'json': 'beam_radiation_w_30',
                        'mat': 'azi090til30.HDir.H'},
                       {'json': 'diffuse_radiation_w_30',
                        'matIso': 'azi090til30.HDiffIso.H',
                        'matPer': 'azi090til30.HDiffPer.H'},
                       {'json': 'integrated_total_horizontal_radiation',
                        'matIso': 'azi000til00.H',
                        'matPer': 'azi000til00.HPer'},
                       {'json': 'integrated_beam_horizontal_radiation',
                        'mat': 'azi000til00.HDir.H'},
                       {'json': 'integrated_diffuse_horizontal_radiation',
                        'matIso': 'azi000til00.HDiffIso.H',
                        'matPer': 'azi000til00.HDiffPer.H'}]
    # Mapping for the ANNUAL (whole-year average/total) results.
    dict_yearly = [{'json': 'average_dry_bulb_temperature',
                    'mat': 'weaBusHHorIR.TDryBul'},
                   {'json': 'average_relative_humidity',
                    'mat': 'weaBusHHorIR.relHum'},
                   {'json': 'average_humidity_ratio',
                    'mat': 'toDryAir.XiDry'},
                   {'json': 'average_wet_bulb_temperature',
                    'mat': 'weaBusHHorIR.TWetBul'},
                   {'json': 'average_dew_point_temperature',
                    'mat': 'weaBusHHorIR.TDewPoi'},
                   {'json': 'total_horizontal_solar_radiation',
                    'matIso': 'azi000til00.H',
                    'matPer': 'azi000til00.HPer'},
                   {'json': 'total_horizontal_beam_solar_radiation',
                    'mat': 'azi000til00.HDir.H'},
                   {'json': 'total_horizontal_diffuse_solar_radiation',
                    'matIso': 'azi000til00.HDiffIso.H',
                    'matPer': 'azi000til00.HDiffPer.H'},
                   {'json': 'total_radiation_s_90',
                    'matIso': 'azi000til90.H',
                    'matPer': 'azi000til90.HPer'},
                   {'json': 'total_beam_radiation_s_90',
                    'mat': 'azi000til90.HDir.H'},
                   {'json': 'total_diffuse_radiation_s_90',
                    'matIso': 'azi000til90.HDiffIso.H',
                    'matPer': 'azi000til90.HDiffPer.H'},
                   {'json': 'total_radiation_e_90',
                    'matIso': 'azi270til90.H',
                    'matPer': 'azi270til90.HPer'},
                   {'json': 'total_beam_radiation_e_90',
                    'mat': 'azi270til90.HDir.H'},
                   {'json': 'total_diffuse_radiation_e_90',
                    'matIso': 'azi270til90.HDiffIso.H',
                    'matPer': 'azi270til90.HDiffPer.H'},
                   {'json': 'total_radiation_n_90',
                    'matIso': 'azi180til90.H',
                    'matPer': 'azi180til90.HPer'},
                   {'json': 'total_beam_radiation_n_90',
                    'mat': 'azi180til90.HDir.H'},
                   {'json': 'total_diffuse_radiation_n_90',
                    'matIso': 'azi180til90.HDiffIso.H',
                    'matPer': 'azi180til90.HDiffPer.H'},
                   {'json': 'total_radiation_w_90',
                    'matIso': 'azi090til90.H',
                    'matPer': 'azi090til90.HPer'},
                   {'json': 'total_beam_radiation_w_90',
                    'mat': 'azi090til90.HDir.H'},
                   {'json': 'total_diffuse_radiation_w_90',
                    'matIso': 'azi090til90.HDiffIso.H',
                    'matPer': 'azi090til90.HDiffPer.H'},
                   {'json': 'total_radiation_45_e_90',
                    'matIso': 'azi315til90.H',
                    'matPer': 'azi315til90.HPer'},
                   {'json': 'total_beam_radiation_45_e_90',
                    'mat': 'azi315til90.HDir.H'},
                   {'json': 'total_diffuse_radiation_45_e_90',
                    'matIso': 'azi315til90.HDiffIso.H',
                    'matPer': 'azi315til90.HDiffPer.H'},
                   {'json': 'total_radiation_45_w_90',
                    'matIso': 'azi045til90.H',
                    'matPer': 'azi045til90.HPer'},
                   {'json': 'total_beam_radiation_45_w_90',
                    'mat': 'azi045til90.HDir.H'},
                   {'json': 'total_diffuse_radiation_45_w_90',
                    'matIso': 'azi045til90.HDiffIso.H',
                    'matPer': 'azi045til90.HDiffPer.H'},
                   {'json': 'total_radiation_e_30',
                    'matIso': 'azi270til30.H',
                    'matPer': 'azi270til30.HPer'},
                   {'json': 'total_beam_radiation_e_30',
                    'mat': 'azi270til30.HDir.H'},
                   {'json': 'total_diffuse_radiation_e_30',
                    'matIso': 'azi270til30.HDiffIso.H',
                    'matPer': 'azi270til30.HDiffPer.H'},
                   {'json': 'total_radiation_s_30',
                    'matIso': 'azi000til30.H',
                    'matPer': 'azi000til30.HPer'},
                   {'json': 'total_beam_radiation_s_30',
                    'mat': 'azi000til30.HDir.H'},
                   {'json': 'total_diffuse_radiation_s_30',
                    'matIso': 'azi000til30.HDiffIso.H',
                    'matPer': 'azi000til30.HDiffPer.H'},
                   {'json': 'total_radiation_w_30',
                    'matIso': 'azi090til30.H',
                    'matPer': 'azi090til30.HPer'},
                   {'json': 'total_beam_radiation_w_30',
                    'mat': 'azi090til30.HDir.H'},
                   {'json': 'total_diffuse_radiation_w_30',
                    'matIso': 'azi090til30.HDiffIso.H',
                    'matPer': 'azi090til30.HDiffPer.H'}]
    # Reporting windows per BESTEST case: day labels plus their start/stop
    # times in seconds from the beginning of the year ('yearly' entries use
    # placeholder zeros and are handled separately below).
    Days = {'WD100': {'days': ['yearly', 'may4', 'jul14', 'sep6'],
                      'tstart': [0, 10627200, 16761600, 21427200],
                      'tstop': [0, 10713600, 16848000, 21513600]},
            'WD200': {'days': ['yearly', 'may24', 'aug26'],
                      'tstart': [0, 12355200, 20476800, 0],
                      'tstop': [0, 12441600, 20563200, 31536000]},
            'WD300': {'days': ['yearly', 'feb7', 'aug13'],
                      'tstart': [0, 3196800, 19353600],
                      'tstop': [0, 3283200, 19440000]},
            'WD400': {'days': ['yearly', 'jan24', 'jul1'],
                      'tstart': [0, 1987200, 15638400],
                      'tstop': [0, 2073600, 15724800]},
            'WD500': {'days': ['yearly', 'mar1', 'sep14'],
                      'tstart': [0, 5097600, 22118400],
                      'tstop': [0, 5184000, 22204800]},
            'WD600': {'days': ['yearly', 'may4', 'jul14', 'sep6'],
                      'tstart': [0, 10627200, 16761600, 21427200],
                      'tstop': [0, 10713600, 16848000, 21513600]}}
    # Alternative windows for the 'TestN' json format: one whole-year
    # window (plus one hour) labeled 'test2' for every case.
    Days2 = {'WD100': {'days': ['test2'],
                       'tstart': [0],
                       'tstop': [31536000+3600]},
             'WD200': {'days': ['test2'],
                       'tstart': [0],
                       'tstop': [31536000+3600]},
             'WD300': {'days': ['test2'],
                       'tstart': [0],
                       'tstop': [31536000+3600]},
             'WD400': {'days': ['test2'],
                       'tstart': [0],
                       'tstop': [31536000+3600]},
             'WD500': {'days': ['test2'],
                       'tstart': [0],
                       'tstop': [31536000+3600]},
             'WD600': {'days': ['test2'],
                       'tstart': [0],
                       'tstop': [31536000+3600]}}
    # Variable mapping used for the 'test2' (TestN) format.
    dictTest2 = [{'json': 'dry_bulb_temperature',
                  'mat': 'weaBusHHorIR.TDryBul'},
                 {'json': 'relative_humidity',
                  'mat': 'weaBusHHorIR.relHum'},
                 {'json': 'humidity_ratio',
                  'mat': 'toDryAir.XiDry'},
                 {'json': 'dewpoint_temperature',
                  'mat': 'weaBusHHorIR.TDewPoi'},
                 {'json': 'wet_bulb_temperature',
                  'mat': 'weaBusHHorIR.TWetBul'},
                 {'json': 'wind_speed',
                  'mat': 'weaBusHHorIR.winSpe'},
                 {'json': 'wind_direction',
                  'mat': 'weaBusHHorIR.winDir'},
                 {'json': 'station_pressure',
                  'mat': 'weaBusHHorIR.pAtm'},
                 {'json': 'total_cloud_cover',
                  'mat': 'weaBusHHorIR.nTot'},
                 {'json': 'opaque_cloud_cover',
                  'mat': 'weaBusHHorIR.nOpa'},
                 {'json': 'sky_temperature',
                  'matHor': 'weaBusHHorIR.TBlaSky',
                  'matDew': 'weaBusTDryBulTDewPoiOpa.TBlaSky'},
                 {'json': 'total_horizontal_radiation',
                  'matIso': 'azi000til00.H',
                  'matPer': 'azi000til00.HPer'},
                 {'json': 'beam_horizontal_radiation',
                  'mat': 'azi000til00.HDir.H'},
                 {'json': 'diffuse_horizontal_radiation',
                  'matIso': 'azi000til00.HDiffIso.H',
                  'matPer': 'azi000til00.HDiffPer.H'},
                 {'json': 'total_radiation_s_90',
                  'matIso': 'azi000til90.H',
                  'matPer': 'azi000til90.HPer'},
                 {'json': 'beam_radiation_s_90',
                  'mat': 'azi000til90.HDir.H'},
                 {'json': 'diffuse_radiation_s_90',
                  'matIso': 'azi000til90.HDiffIso.H',
                  'matPer': 'azi000til90.HDiffPer.H'},
                 {'json': 'total_radiation_e_90',
                  'matIso': 'azi270til90.H',
                  'matPer': 'azi270til90.HPer'},
                 {'json': 'beam_radiation_e_90',
                  'mat': 'azi270til90.HDir.H'},
                 {'json': 'diffuse_radiation_e_90',
                  'matIso': 'azi270til90.HDiffIso.H',
                  'matPer': 'azi270til90.HDiffPer.H'},
                 {'json': 'total_radiation_n_90',
                  'matIso': 'azi180til90.H',
                  'matPer': 'azi180til90.HPer'},
                 {'json': 'beam_radiation_n_90',
                  'mat': 'azi180til90.HDir.H'},
                 {'json': 'diffuse_radiation_n_90',
                  'matIso': 'azi180til90.HDiffIso.H',
                  'matPer': 'azi180til90.HDiffPer.H'},
                 {'json': 'total_radiation_w_90',
                  'matIso': 'azi090til90.H',
                  'matPer': 'azi090til90.HPer'},
                 {'json': 'beam_radiation_w_90',
                  'mat': 'azi090til90.HDir.H'},
                 {'json': 'diffuse_radiation_w_90',
                  'matIso': 'azi090til90.HDiffIso.H',
                  'matPer': 'azi090til90.HDiffPer.H'},
                 {'json': 'total_radiation_45_e_90',
                  'matIso': 'azi315til90.H',
                  'matPer': 'azi315til90.HPer'},
                 {'json': 'beam_radiation_45_e_90',
                  'mat': 'azi315til90.HDir.H'},
                 {'json': 'diffuse_radiation_45_e_90',
                  'matIso': 'azi315til90.HDiffIso.H',
                  'matPer': 'azi315til90.HDiffPer.H'},
                 {'json': 'total_radiation_45_w_90',
                  'matIso': 'azi045til90.H',
                  'matPer': 'azi045til90.HPer'},
                 {'json': 'beam_radiation_45_w_90',
                  'mat': 'azi045til90.HDir.H'},
                 {'json': 'diffuse_radiation_45_w_90',
                  'matIso': 'azi045til90.HDiffIso.H',
                  'matPer': 'azi045til90.HDiffPer.H'},
                 {'json': 'total_radiation_e_30',
                  'matIso': 'azi270til30.H',
                  'matPer': 'azi270til30.HPer'},
                 {'json': 'beam_radiation_e_30',
                  'mat': 'azi270til30.HDir.H'},
                 {'json': 'diffuse_radiation_e_30',
                  'matIso': 'azi270til30.HDiffIso.H',
                  'matPer': 'azi270til30.HDiffPer.H'},
                 {'json': 'total_radiation_s_30',
                  'matIso': 'azi000til30.H',
                  'matPer': 'azi000til30.HPer'},
                 {'json': 'beam_radiation_s_30',
                  'mat': 'azi000til30.HDir.H'},
                 {'json': 'diffuse_radiation_s_30',
                  'matIso': 'azi000til30.HDiffIso.H',
                  'matPer': 'azi000til30.HDiffPer.H'},
                 {'json': 'total_radiation_w_30',
                  'matIso': 'azi090til30.H',
                  'matPer': 'azi090til30.HPer'},
                 {'json': 'beam_radiation_w_30',
                  'mat': 'azi090til30.HDir.H'},
                 {'json': 'diffuse_radiation_w_30',
                  'matIso': 'azi090til30.HDiffIso.H',
                  'matPer': 'azi090til30.HDiffPer.H'}]
    # Select the reporting windows: 'TestN' runs use the whole-year 'test2'
    # window, otherwise the case-specific day list above.  Each entry of
    # caseDays is one window dict with 'days'/'tstart'/'tstop' scalars.
    if case_dict['TestN']:
        caseDays = [{key: value[i] for key, value in Days2[case].items()}
                    for i in range(len(Days2[case]['days']))]
    else:
        caseDays = [{key: value[i] for key, value in Days[case].items()}
                    for i in range(len(Days[case]['days']))]
    out_dir = res_fin
    # Variables that could not be matched (for debugging; currently not
    # returned to the caller).
    missing = list()
    for dR in results:
        for day in caseDays:
            if day['days'] in 'yearly':
                # Annual results: one scalar per json variable.
                res = extrapolate_results(dict_yearly, dR, day)
                if not res:
                    missing.append(day['days'] + '_' + dR['variable'])
                else:
                    # float(res['res'])
                    out_dir[case]['annual_results'][res['json']] =\
                        float(res['res'])
            elif day['days'] in 'test2':
                # TestN format: dump the full time series per variable.
                # NOTE(review): ressH is used without checking for an empty
                # match here, unlike the other branches -- TODO confirm.
                ressH = extrapolate_results(dictTest2, dR, day)
                if 'dry_bulb_temperature' in ressH['json']:
                    out_dir[case]['hour_of_year'] = (ressH['time']/
                                                     3600).tolist()
                out_dir[case][ressH['json']] = ressH['res'].tolist()
            else:
                # Specific-day windows: hourly and subhourly series.
                resH = extrapolate_results(dict_hourly, dR, day)
                ressH = extrapolate_results(dict_sub_hourly, dR, day)
                if not resH:
                    missing.append(day['days'] + '_hourly_' + dR['variable'])
                else:
                    # Hourly output: keep every 4th sample of the
                    # (15-minute) series.
                    resH['res'] = resH['res'][0::4]
                    resH['time'] = resH['time'][0::4]
                    HRlist = list()
                    k = 0
                    for HR in resH['res']:
                        HRdict = {}
                        # Time is reported in hours from the window start.
                        HRdict['time'] = float((resH['time'][k] -
                                                resH['time'][0]) / 3600)
                        HRdict['value'] = float(HR)
                        HRlist.append(HRdict)
                        k += 1
                    out_dir[case]['hourly_results'][day['days']]\
                        [resH['json']] = HRlist
                if not ressH:
                    missing.append(day['days'] + '_subhourly_' +
                                   dR['variable'])
                else:
                    sHRlist = list()
                    k = 0
                    for sHR in ressH['res']:
                        sHRdict = {}
                        sHRdict['time'] = float((ressH['time'][k] -
                                                 ressH['time'][0]) / 3600)
                        # NOTE(review): both branches are identical; the
                        # radiation special-case looks like an unfinished
                        # edit.
                        if 'radiation' in ressH['json']:
                            sHRdict['value'] = float(sHR)
                        else:
                            sHRdict['value'] = float(sHR)
                        sHRlist.append(sHRdict)
                        k += 1
                    out_dir[case]['subhourly_results'][day['days']]\
                        [ressH['json']] = sHRlist
                    # Manually update integrated values for 'integrated'
                    # variables for subhourly results
                    if 'horizontal_radiation' in ressH['json']:
                        # (no-op assignment kept as in the original)
                        ressH['time'] = ressH['time']
                        # Interpolate onto the hourly grid before reporting
                        # the integrated series.
                        time_int = ressH['time'][0::4]
                        H_int = np.interp(time_int, ressH['time'],
                                          ressH['res'])
                        sHRlist = list()
                        k = 0
                        for sHR in H_int:
                            sHRdict = {}
                            sHRdict['time'] = float((time_int[k] -
                                                     time_int[0]) / 3600)
                            sHRdict['value'] = float(sHR)
                            sHRlist.append(sHRdict)
                            k += 1
                        out_dir[case]['subhourly_results']\
                            [day['days']]['integrated_' +
                                          ressH['json']] = sHRlist
    return out_dir
import os
def upload_word_book(request):
    """
    Handle an uploaded word-book (vocabulary list) file.

    Validates the upload (present, ``.txt`` extension, no underscore or
    extra dot in the name, not a duplicate), stores it in the
    ``word_book`` folder as ``<username>_<bookname>`` and imports its
    words into the database.  On any validation or import failure the
    word-book page is re-rendered with an error message; on success the
    user is redirected to ``/word_book/``.

    :param request: Django HTTP request carrying POST data and the file
    :return: HttpResponse (rendered page or redirect)
    """
    username = request.POST.get("username")
    # The uploaded file; defaults to None when nothing was sent.
    word_book = request.FILES.get("word_book", None)
    user = request.session['user']
    # Gather the data needed to re-render the page on validation errors.
    book_list = get_book_list(user)
    the_current_book = get_current_book(user)
    current_book = []
    if the_current_book:
        current_book = the_current_book[0]
    # Distribute the books alternately over the two display columns.
    left_book_list = []
    right_book_list = []
    if book_list:
        flag = True
        for book in book_list:
            if flag:
                left_book_list.append(book[0])
                flag = False
            else:
                right_book_list.append(book[0])
                flag = True
    context = {'user': user,
               'left_book_list': left_book_list,
               'right_book_list': right_book_list,
               'current_book': current_book}
    # No file uploaded.  BUG FIX: this check must run before touching
    # word_book.name -- the original dereferenced .name first and raised
    # AttributeError whenever the upload was missing.
    if not word_book:
        context['error'] = '没有上传文件'
        return render(request, 'word_book.html', context)
    book_name = str(word_book.name)
    # Uploaded file is not a txt file.
    if not book_name.endswith(".txt"):
        context['error'] = '上传单词本不是txt格式'
        return render(request, 'word_book.html', context)
    # The name must not contain an underscore or an extra dot (both are
    # used as separators in the stored filename).
    if len(book_name.split('_')) > 1 or len(book_name.split('.')) > 2:
        context['error'] = '命名不能带有下划线"_"或者"."'
        return render(request, 'word_book.html', context)
    # A book with the same name was already uploaded.
    if is_exist_book(username, book_name):
        context['error'] = '已经上传了相同名字的单词本'
        return render(request, 'word_book.html', context)
    # Store first, then import into MySQL.  Stored name: username_bookname.
    filename = username + "_" + book_name
    # Write the upload chunk by chunk; ``with`` guarantees the handle is
    # closed even on error (the original leaked it on exceptions).
    with open(os.path.join(os.getcwd() + r"\word_book", filename), 'wb+') as destination:
        for chunk in word_book.chunks():
            destination.write(chunk)
    if debug:
        print("文件 " + filename + " 写入word_book文件夹完成!")
    # Import the word book into the database.
    result = word_book_to_sql(filename)
    is_success = result[0]
    error_word = result[1]
    if not is_success:
        # The book contains words that are not in the word bank.
        error = '插入的单词本有不在单词库的单词:'
        for word in error_word:
            error += word + ' '
        context['error'] = error
        return render(request, 'word_book.html', context)
    return HttpResponseRedirect('/word_book/')
def statusName(dictname):
    """Return the underlying key used for access to the status of the
    dictlist named dictname.
    """
    # Status entries are keyed by the dictlist name paired with the "S" tag.
    return dictname, "S"
def stydiffstat(dataNameList, SELECT_RANGE, dateStart, dateEnd):
    """
    Compute per-participant summary statistics of consecutive-fix distance
    and time gaps, to help semi-automatically choose stay-point thresholds.

    Parameters
    ----------
    dataNameList : list - list of strings of all participant id with shared data
    SELECT_RANGE: var - flag to define if select certain period
    dateStart: str - the start date of the period if selecting certain period
    dateEnd: str - the end date of the period if selecting certain period

    Returns
    -------
    staythredstat: pandas DataFrame with one row per participant and
        columns dataName, dist_max/min/range/mean/median/quarter and
        time_max/min/range/mean/median/quarter.

    Notes
    -----
    Refactored from heavily duplicated per-statistic lists; the original
    ``np.transpose`` calls on 1-D arrays were no-ops and an unused
    ``accuracy_threshold`` was removed.  Output values and column order
    are unchanged.
    """
    stat_names = ('max', 'min', 'mean', 'median', 'quarter')
    # Accumulators: cols['dist_max'], cols['time_quarter'], ...
    cols = {'%s_%s' % (prefix, stat): []
            for prefix in ('dist', 'time') for stat in stat_names}

    def _collect(prefix, values):
        # Append one participant's summary statistics for a 1-D array.
        cols[prefix + '_max'].append(np.max(values))
        cols[prefix + '_min'].append(np.min(values))
        cols[prefix + '_mean'].append(np.mean(values))
        cols[prefix + '_median'].append(np.median(values))
        cols[prefix + '_quarter'].append(np.quantile(values, .25))

    for dataName in dataNameList:
        dataPathLocs, dataPathTrips = hlp.getDataPaths(dataName)
        if SELECT_RANGE:
            dataPathLocs, dataPathTrips = hlp.selectRange(
                dataPathLocs, dataPathTrips, dateStart, dateEnd)
        locs, _ = hlp.parseLocs(dataPathLocs)
        # Distance between consecutive fixes (0 appended for the last fix).
        d_diff = np.append(
            haversine_dist(locs.longitudeE7[1:], locs.latitudeE7[1:],
                           locs.longitudeE7[:-1], locs.latitudeE7[:-1]), 0)
        # Time gap between consecutive fixes in seconds (0 appended).
        t_diff = np.append(
            (locs.index[1:] - locs.index[:-1]).total_seconds(), 0)
        _collect('dist', d_diff)
        _collect('time', t_diff)

    # Assemble the result preserving the original column order.
    thredstat = {'dataName': np.array(dataNameList)}
    for prefix in ('dist', 'time'):
        max_arr = np.array(cols[prefix + '_max'])
        min_arr = np.array(cols[prefix + '_min'])
        thredstat[prefix + '_max'] = max_arr
        thredstat[prefix + '_min'] = min_arr
        thredstat[prefix + '_range'] = max_arr - min_arr
        thredstat[prefix + '_mean'] = np.array(cols[prefix + '_mean'])
        thredstat[prefix + '_median'] = np.array(cols[prefix + '_median'])
        thredstat[prefix + '_quarter'] = np.array(cols[prefix + '_quarter'])
    staythredstat = pd.DataFrame(thredstat)
    return staythredstat
def ergsperSecondtoLsun(ergss):
    """
    Convert a luminosity in erg/s into units of the solar luminosity.

    :param ergss: ergs per second
    :type ergss: float or ndarray
    :return: luminosity in L_sun
    :rtype: float or ndarray
    """
    solar_luminosity_ergs = 3.839e33  # L_sun expressed in erg/s
    return ergss / solar_luminosity_ergs
def svd(A, maxiter=30):
    """Compute the singular value decomposition A = U.W.VT.

    The input is first reduced to bidiagonal form, then each singular
    value is isolated with the Golub-Kahan QR step.  Singular values are
    returned sorted in decreasing order.

    - The matrix U is output mxm matrix. This means matrix U will have
      the same number of rows as matrix A.
    - The matrix W is output as the diagonal mxn matrix that contains
      the singular values.
    - The matrix V (not the transpose VT) is output as an nxn matrix.

    :param A: input matrix.
    :param maxiter: maximum Golub-Kahan iterations per singular value.
    :return: tuple ``(U, W, V)``.
    """
    # Reduce A to bidiagonal form; e holds the superdiagonal elements.
    U, W, V, e = bidiagonalize(A, tosvd=True)
    # Diagonalization of the bidiagonal form:
    # - loop over singular values from the last column backwards
    # - for each singular value apply the Golub-Kahan method
    for k in np.arange(U.shape[1])[::-1]:
        golub_kahan(U, W, V, e, k, maxiter=maxiter)
    m, n = U.shape  # NOTE(review): n is unused below
    # Sort singular values (and their vectors) in decreasing order.
    idsorted = np.argsort(-W)
    U = U[:,idsorted]
    W = W[idsorted]
    V = V[:,idsorted]
    return U[:,:m], np.diag(W)[:m,:], V
import os
def sys_unlink(kernel: Kernel, pathname_addr: Uint):
    """
    int sys_unlink(const char * pathname)

    Read the pathname string from kernel memory and try to remove the
    file; returns 0 on success and -1 on failure, logging either way.
    """
    pathname = kernel.kernel_read_string(pathname_addr).decode()
    logger.info('sys_unlink(const char * pathname = %r)', pathname)
    try:
        os.unlink(pathname)
    except OSError:
        # Removal failed (missing file, permissions, ...): emulate the
        # syscall's -1 error return.
        logger.info('\tsys_unlink: [ERR] failed to unlink %r', pathname)
        return -1
    logger.info('\tsys_unlink: [SUCC] unlinked %r', pathname)
    return 0
import os
def make(merger, toc, default_folder, parent, bookmarks, evenpages):
    """Join several pdf files to target."""
    for title, pdf, childs in toc:
        # Expand the FOLDER placeholder into the default folder path.
        if pdf.startswith(FOLDER):
            pdf = os.path.join(default_folder, pdf.replace(FOLDER, ''))
        # The bookmark must point at the page count *before* appending.
        new_parent = bookmarks.add(title, merger.getNumPages(), parent)
        if pdf:
            print(pdf)
            merger.appendPagesFromReader(PdfFileReader(open(pdf, 'rb')))  # pylint: disable=consider-using-with
        # Pad to an even page count (also for heading-only entries).
        if evenpages and merger.getNumPages() % 2:
            merger.addBlankPage()
        if childs:
            # Recurse into the sub-table of contents under this bookmark.
            make(merger, childs, default_folder, new_parent, bookmarks,
                 evenpages)
    return 0
import os
from datetime import datetime
def directory_for_model(args):
    """
    Build (and create where needed) the output directories for a model run.

    :param args: parsed CLI options providing models_folder, model and
        tensorboard_folder
    :return: (model_dir, model_img_dir, model_video_dir, metrics_path,
        tboard_dir)
    """
    model_dir = os.path.join(args.models_folder, args.model)
    # Create the media sub-directories under the model directory.
    media_dirs = {}
    for kind in ('images', 'videos'):
        path = os.path.join(model_dir, kind)
        check_dir(path)
        media_dirs[kind] = path
    metrics_path = os.path.join(model_dir, 'metrics.pkl')
    # One timestamped TensorBoard directory per run.
    run_name = datetime.now().strftime("%Y%m%d-%H%M%S")
    tboard_dir = os.path.join(args.tensorboard_folder, args.model, run_name)
    check_dir(tboard_dir)
    return (model_dir, media_dirs['images'], media_dirs['videos'],
            metrics_path, tboard_dir)
from typing import Any
def produces_record(obj: Any) -> bool:
    """Check if `obj` is annotated to generate records.

    An object is considered record-producing when it exposes a
    ``get_data_specs`` attribute.
    """
    # hasattr already yields the boolean we want; no if/else needed.
    return hasattr(obj, 'get_data_specs')
import json
def readJson(fname):
    """ Read a JSON-lines file and load it line-by-line into data.

    Lines that fail to parse are skipped with an "error" message that
    includes their 1-based line number.

    :param fname: path of the file to read
    :return: list of the successfully parsed objects, in file order
    """
    data = []
    with open(fname, encoding="utf-8") as f:
        for line_num, line in enumerate(f, start=1):
            try:
                data.append(json.loads(line))
            except json.JSONDecodeError:
                # BUG FIX: the original bare ``except:`` swallowed every
                # exception (including KeyboardInterrupt); only malformed
                # JSON should be skipped.
                print("error", line_num)
    return data
def parse_address(address):
    """Parse an address and return it as an Integer."""
    # Hex literals are parsed directly; anything else is evaluated by GDB.
    if not is_hex(address):
        return to_unsigned_long(gdb.parse_and_eval(address))
    return int(address, 16)
def index(request):
    """Index page for upload images"""
    form = ImageForm(request.POST or None, files=request.FILES or None)
    if form.is_valid():
        # Valid upload: persist it, queue the pixel count, show the result.
        image = form.save()
        pixel_count_add(image.id)
        return redirect('result', image_id=image.id)
    # Invalid (or initial GET): re-render the upload form.
    return render(request, "index.html", {"form": form})
def binary_search(arr, val):
    """
    Search a sorted array for a value using binary search.

    Parameters:
        arr (list): A sorted sequence of comparable values
        val: The value to look for
    Returns:
        int: Index of a matching element, or -1 if val is not present.
    """
    start = 0
    end = len(arr) - 1
    while start <= end:
        middle_index = (start + end) // 2
        if arr[middle_index] == val:
            # Found: return immediately instead of tracking a boolean
            # flag (the original's ``found == False`` loop).
            return middle_index
        # Narrow the window to the half that can still contain val.
        if val < arr[middle_index]:
            end = middle_index - 1
        else:
            start = middle_index + 1
    return -1
import os
def get_authorized_http():
    """Create an httplib2.Http wrapped with OAuth credentials.

    Looks in the user's configuration directory for stored credentials and
    uses them when present.  Otherwise a browser window is opened so the
    user can sign in and authorize access to their email address, after
    which the credentials are stored for next time.

    Returns:
        The wrapped Http instance.
    """
    # TODO: make config dir path overrideable at the command line
    # TODO: command line option to disable browser prompt (and just fail)
    config_dir = os.path.expanduser(USER_CONFIG_DIR)
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    storage = file.Storage(os.path.join(config_dir, CREDENTIALS_FILENAME))
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        oauth_flow = client.OAuth2WebServerFlow(
            client_id=CLIENT_ID,
            client_secret=CLIENT_SECRET,
            scope='https://www.googleapis.com/auth/userinfo.email')
        credentials = tools.run(oauth_flow, storage)
    return credentials.authorize(httplib2.Http())
def update_resource(resource, incoming_request):
    """Replace the contents of a resource with the request data and return an
    appropriate *Response*.

    :param resource: :class:`sandman.model.Model` to be updated
    :param incoming_request: request carrying the new field values
    """
    new_values = get_resource_data(incoming_request)
    resource.from_dict(new_values)
    _perform_database_action('merge', resource)
    return no_content_response()
import types
def new_object_graph(
    modules=finding.ALL_IMPORTED_MODULES, classes=None, binding_specs=None,
    only_use_explicit_bindings=False, allow_injecting_none=False,
    configure_method_name='configure',
    dependencies_method_name='dependencies',
    get_arg_names_from_class_name=(
        bindings.default_get_arg_names_from_class_name),
    get_arg_names_from_provider_fn_name=(
        providing.default_get_arg_names_from_provider_fn_name),
    id_to_scope=None, is_scope_usable_from_scope=lambda _1, _2: True,
    use_short_stack_traces=True):
    """Creates a new object graph.
    Args:
      modules: the modules in which to search for classes for which to create
          implicit bindings; if None, then no modules; by default, all
          modules imported at the time of calling this method
      classes: the classes for which to create implicit bindings; if None (the
          default), then no classes
      binding_specs: the BindingSpec subclasses to get bindings and provider
          methods from; if None (the default), then no binding specs
      only_use_explicit_bindings: whether to use only explicit bindings (i.e.,
          created by binding specs or @pinject.injectable, etc.)
      allow_injecting_none: whether to allow a provider method to provide None
      configure_method_name: the name of binding specs' configure method
      dependencies_method_name: the name of binding specs' dependencies method
      get_arg_names_from_class_name: a function mapping a class name to a
          sequence of the arg names to which those classes should be
          implicitly bound (if any)
      get_arg_names_from_provider_fn_name: a function mapping a provider
          method name to a sequence of the arg names for which that method is
          a provider (if any)
      id_to_scope: a map from scope ID to the concrete Scope implementation
          instance for that scope
      is_scope_usable_from_scope: a function taking two scope IDs and
          returning whether an object in the first scope can be injected into
          an object from the second scope; by default, injection is allowed
          from any scope into any other scope
      use_short_stack_traces: whether to shorten the stack traces for
          exceptions that Pinject raises, so that they don't contain the
          innards of Pinject
    Returns:
      an ObjectGraph
    Raises:
      Error: the object graph is not creatable as specified
    """
    try:
        # Validate user-supplied arguments up front so that misuse surfaces
        # as errors.Error before any graph construction starts.
        if modules is not None and modules is not finding.ALL_IMPORTED_MODULES:
            _verify_types(modules, types.ModuleType, 'modules')
        if classes is not None:
            # NOTE(review): types.TypeType exists only on Python 2 (where it
            # aliases ``type``); on Python 3 this line raises AttributeError.
            _verify_types(classes, types.TypeType, 'classes')
        if binding_specs is not None:
            _verify_subclasses(
                binding_specs, bindings.BindingSpec, 'binding_specs')
        if get_arg_names_from_class_name is not None:
            _verify_callable(get_arg_names_from_class_name,
                             'get_arg_names_from_class_name')
        if get_arg_names_from_provider_fn_name is not None:
            _verify_callable(get_arg_names_from_provider_fn_name,
                             'get_arg_names_from_provider_fn_name')
        if is_scope_usable_from_scope is not None:
            _verify_callable(is_scope_usable_from_scope,
                             'is_scope_usable_from_scope')
        # Scope machinery: which scopes exist, and which scopes are allowed
        # to inject into which other scopes.
        injection_context_factory = injection_contexts.InjectionContextFactory(
            is_scope_usable_from_scope)
        id_to_scope = scoping.get_id_to_scope_with_defaults(id_to_scope)
        bindable_scopes = scoping.BindableScopes(id_to_scope)
        known_scope_ids = id_to_scope.keys()
        # Implicit bindings come from discovered classes, unless the caller
        # asked for explicit-only binding.
        found_classes = finding.find_classes(modules, classes)
        if only_use_explicit_bindings:
            implicit_class_bindings = []
        else:
            implicit_class_bindings = bindings.get_implicit_class_bindings(
                found_classes, get_arg_names_from_class_name)
        explicit_bindings = bindings.get_explicit_class_bindings(
            found_classes, get_arg_names_from_class_name)
        binder = bindings.Binder(explicit_bindings, known_scope_ids)
        required_bindings = required_bindings_lib.RequiredBindings()
        if binding_specs is not None:
            # Process binding specs as a worklist: a spec's dependencies()
            # may enqueue more specs, and the processed set guards against
            # reprocessing (and against cycles between specs).
            binding_specs = list(binding_specs)
            processed_binding_specs = set()
            while binding_specs:
                binding_spec = binding_specs.pop()
                if binding_spec in processed_binding_specs:
                    continue
                processed_binding_specs.add(binding_spec)
                all_kwargs = {'bind': binder.bind,
                              'require': required_bindings.require}
                has_configure = hasattr(binding_spec, configure_method_name)
                if has_configure:
                    configure_method = getattr(binding_spec, configure_method_name)
                    # Only pass the kwargs that the configure method
                    # actually declares in its signature.
                    configure_kwargs = _pare_to_present_args(
                        all_kwargs, configure_method)
                    if not configure_kwargs:
                        raise errors.ConfigureMethodMissingArgsError(
                            configure_method, all_kwargs.keys())
                    try:
                        configure_method(**configure_kwargs)
                    except NotImplementedError:
                        # A spec may deliberately leave configure()
                        # unimplemented and rely solely on dependencies
                        # and/or provider methods.
                        has_configure = False
                dependencies = None
                if hasattr(binding_spec, dependencies_method_name):
                    dependencies_method = (
                        getattr(binding_spec, dependencies_method_name))
                    dependencies = dependencies_method()
                    binding_specs.extend(dependencies)
                provider_bindings = bindings.get_provider_bindings(
                    binding_spec, known_scope_ids,
                    get_arg_names_from_provider_fn_name)
                explicit_bindings.extend(provider_bindings)
                # A spec that contributes nothing at all is almost certainly
                # a user error.
                if (not has_configure and
                    not dependencies and
                    not provider_bindings):
                    raise errors.EmptyBindingSpecError(binding_spec)
        binding_key_to_binding, collided_binding_key_to_bindings = (
            bindings.get_overall_binding_key_to_binding_maps(
                [implicit_class_bindings, explicit_bindings]))
        binding_mapping = bindings.BindingMapping(
            binding_key_to_binding, collided_binding_key_to_bindings)
        binding_mapping.verify_requirements(required_bindings.get())
    except errors.Error as e:
        if use_short_stack_traces:
            # ``raise e`` restarts the traceback at this frame, hiding
            # Pinject internals from the user's stack trace.
            raise e
        else:
            raise
    is_injectable_fn = {True: decorators.is_explicitly_injectable,
                        False: (lambda cls: True)}[only_use_explicit_bindings]
    obj_provider = object_providers.ObjectProvider(
        binding_mapping, bindable_scopes, allow_injecting_none)
    return ObjectGraph(
        obj_provider, injection_context_factory, is_injectable_fn,
        use_short_stack_traces, get_arg_names_from_class_name)
def telegram_settings():
    """Return the telegram client configuration payload (currently empty)."""
    settings = {'result': []}
    return settings
def find_pointing_documents(path, index):
    """
    Returns the Metadata of the documents that use a given block as a pointer.
    Args:
        path(str): Path to the file
        index(int): Index of the block in the file
    Returns:
        list(Metadata): List of documents that used the block as a pointer
    """
    metadata = get_block_metadata(path, index)
    # The block's metadata records which documents it is entangled with.
    return Files().get_files(metadata.entangled_with)
def make_shell_context():
    """Register the app, the database instance, and the models so that these
    objects can be imported directly into the shell."""
    return dict(app=app, db=db, User=User, Post=Post, Category=Category,
                Tag=Tag, Role=Role, Permission=Permission)
import torch


def compute_jacobian(x, y, structured_tensor=False,
                     retain_graph=False):
    """Compute the Jacobian matrix of output with respect to input.

    If input and/or output have more than one dimension, the Jacobian of the
    flattened output with respect to the flattened input is returned if
    `structured_tensor` is `False`. If `structured_tensor` is `True`, the
    Jacobian is structured in dimensions `[y_shape, flattened_x_shape]`.
    Note that `y_shape` can contain multiple dimensions.

    Args:
        x (list or torch.Tensor): Input tensor or sequence of tensors with the
            parameters to which the Jacobian should be computed. Important:
            the `requires_grad` attribute of input needs to be `True` while
            computing output in the forward pass.
        y (torch.Tensor): Output tensor with the values of which the
            Jacobian is computed.
        structured_tensor (bool): A flag indicating if the Jacobian should be
            structured in a tensor of shape `[y_shape, flattened_x_shape]`
            instead of `[flattened_y_shape, flattened_x_shape]`.
        retain_graph (bool): Whether the autograd graph should be kept alive
            after the final backward pass (e.g. for further Jacobian calls).

    Returns:
        (torch.Tensor): 2D tensor containing the Jacobian of output with
            respect to input if `structured_tensor` is `False`.
            If `structured_tensor` is `True`, the Jacobian is structured in a
            tensor of shape `[y_shape, flattened_x_shape]`.
    """
    if isinstance(x, torch.Tensor):
        x = [x]
    output_flat = y.view(-1)
    num_outputs = output_flat.numel()
    num_inputs = sum(t.numel() for t in x)
    # torch.empty instead of the legacy torch.Tensor(rows, cols) constructor;
    # every row is overwritten below, so no initialization is needed.
    jacobian = torch.empty(num_outputs, num_inputs)
    for i, output_elem in enumerate(output_flat):
        # The graph must be retained for all but the last output element
        # (each grad call re-traverses it); for the last element, honor the
        # caller's `retain_graph` setting.
        keep_graph = True if i < num_outputs - 1 else retain_graph
        gradients = torch.autograd.grad(output_elem, x,
                                        retain_graph=keep_graph,
                                        create_graph=False,
                                        only_inputs=True)
        # One Jacobian row: gradient of this output element w.r.t. every
        # flattened input parameter. reshape (not view) tolerates
        # non-contiguous gradients.
        jacobian[i, :] = torch.cat(
            [g.reshape(-1).detach() for g in gradients])
    if structured_tensor:
        jacobian = jacobian.view(list(y.shape) + [-1])
    return jacobian
def unwarp_chunk_slices_backward(mat3D, xcenter, ycenter, list_fact,
                                 start_index, stop_index):
    """
    Generate a chunk of unwarped slices [:,start_index: stop_index, :] used
    for tomographic data.
    Parameters
    ----------
    mat3D : array_like
        3D array. Correction is applied along axis 1.
    xcenter : float
        Center of distortion in x-direction.
    ycenter : float
        Center of distortion in y-direction.
    list_fact : list of floats
        Polynomial coefficients of a backward model.
    start_index : int
        Starting index of slices.
    stop_index : int
        Stopping index of slices. -1 means "up to the last row".
    Returns
    -------
    array_like
        3D array. Distortion-corrected slices.
    """
    if (len(mat3D.shape) < 3):
        raise ValueError("Input must be a 3D data")
    (depth, height, width) = mat3D.shape
    index_list = np.arange(height, dtype=np.int16)
    if stop_index == -1:
        stop_index = height
    if (start_index not in index_list) or (stop_index not in index_list):
        raise ValueError("Selected index is out of the range")
    # Undistorted x-coordinates relative to the distortion center.
    xu_list = np.arange(0, width) - xcenter
    # Evaluate the polynomial distortion factor along the first requested
    # row to find where it maps to in the distorted image.
    yu1 = start_index - ycenter
    ru_list = np.sqrt(xu_list**2 + yu1**2)
    flist = np.sum(
        np.asarray([factor * ru_list**i for i,
                    factor in enumerate(list_fact)]), axis=0)
    yd_list1 = np.clip(ycenter + flist * yu1, 0, height - 1)
    # Same for the last requested row.
    yu2 = stop_index - ycenter
    ru_list = np.sqrt(xu_list**2 + yu2**2)
    flist = np.sum(
        np.asarray([factor * ru_list**i for i,
                    factor in enumerate(list_fact)]), axis=0)
    yd_list2 = np.clip(ycenter + flist * yu2, 0, height - 1)
    # Range of distorted rows needed to interpolate the requested slices;
    # only this band of each image is passed to the mapping step.
    yd_min = np.int16(np.floor(np.amin(yd_list1)))
    yd_max = np.int16(np.ceil(np.amax(yd_list2))) + 1
    # Full grid of distorted coordinates for the requested rows.
    yu_list = np.arange(start_index, stop_index + 1) - ycenter
    xu_mat, yu_mat = np.meshgrid(xu_list, yu_list)
    ru_mat = np.sqrt(xu_mat**2 + yu_mat**2)
    fact_mat = np.sum(
        np.asarray([factor * ru_mat**i for i,
                    factor in enumerate(list_fact)]), axis=0)
    xd_mat = np.float32(np.clip(xcenter + fact_mat * xu_mat, 0, width - 1))
    # y-coordinates are shifted into the cropped band's local frame.
    yd_mat = np.float32(
        np.clip(ycenter + fact_mat * yu_mat, 0, height - 1)) - yd_min
    # _mapping (defined elsewhere in this module) resamples each image of
    # the stack at the distorted coordinates.
    sino_chunk = np.asarray(
        [_mapping(mat3D[i, yd_min: yd_max, :],
                  xd_mat, yd_mat) for i in range(depth)])
    return sino_chunk
def load_data(database_filepath):
    """
    Function to load data from a database
    Inputs:
        database_filepath (path): location of the database
    Returns:
        X (pandas dataframe): messages (features)
        Y (pandas dataframe): categories (targets)
        category_names (list): category names
    """
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql('select * from dataset', engine)
    features = df['message']
    # Everything after the first four columns is a category target.
    targets = df.iloc[:, 4:]
    return features, targets, list(targets.columns)
def ndarray_duplicate_element_by_array(arr1, arr2):
    """
    Duplicate each element arr1[i] by the corresponding count arr2[i].

    If arr2[i] == 0, arr1[i] is dropped from the output; if all elements of
    arr2 are zero, None is returned.

    Parameters:
    -----------
    arr1, arr2: 1-dim ndarray with equal length, arr2 must be integer type.
    """
    if len(arr1.shape) > 1 or len(arr2.shape) > 1:
        raise TypeError("could only be 1-dim array")
    if len(arr1) != len(arr2):
        raise TypeError("the length not equal for two input array")
    if not issubclass(arr2.dtype.type, np.integer):
        raise TypeError("arr2 is not integer type!")
    # np.repeat implements the duplicate-and-drop semantics in one C-level
    # call (the original built a Python list of arrays and concatenated).
    # Note: negative counts now raise ValueError from numpy instead of being
    # silently treated as zero.
    result = np.repeat(arr1, arr2)
    return result if result.size else None
import collections
import collections.abc


def namedtuple(typename, field_names, default_value=None, default_values=()):
    """namedtuple with default value.

    Args:
        typename (str): type name of this namedtuple
        field_names (list[str]): name of each field
        default_value (Any): the default value for all fields
        default_values (list|dict): default value for each field
    Returns:
        the type for the namedtuple
    """
    T = collections.namedtuple(typename, field_names)
    # First apply the blanket default to every field so that a partial
    # ``default_values`` mapping/sequence can be materialized as a prototype.
    T.__new__.__defaults__ = (default_value, ) * len(T._fields)
    # collections.Mapping was removed in Python 3.10; the ABC lives in
    # collections.abc (which also works on older Python 3 versions).
    if isinstance(default_values, collections.abc.Mapping):
        prototype = T(**default_values)
    else:
        prototype = T(*default_values)
    # The prototype's field values become the per-field defaults.
    T.__new__.__defaults__ = tuple(prototype)
    return T
def log_quaternion_loss_batch(predictions, labels, params):
  """A helper function to compute the error between quaternions.
  Args:
    predictions: A Tensor of size [batch_size, 4].
    labels: A Tensor of size [batch_size, 4].
    params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.
  Returns:
    A Tensor of size [batch_size], denoting the error between the quaternions.
  """
  use_logging = params['use_logging']
  assertions = []
  if use_logging:
    # Both inputs must be unit quaternions; assert their squared norms are 1.
    for tensor, message in (
        (predictions,
         'The l2 norm of each prediction quaternion vector should be 1.'),
        (labels,
         'The l2 norm of each label quaternion vector should be 1.')):
      norm_is_one = tf.reduce_all(
          tf.less(
              tf.abs(tf.reduce_sum(tf.square(tensor), [1]) - 1),
              1e-4))
      assertions.append(tf.Assert(norm_is_one, [message]))
  with tf.control_dependencies(assertions):
    dot_products = tf.reduce_sum(tf.multiply(predictions, labels), [1])
  if use_logging:
    dot_products = tf.Print(
        dot_products,
        [dot_products, tf.shape(dot_products)],
        'internal_dot_products:')
  # log(1 - |<p, l>|), stabilized by the 1e-4 epsilon.
  logcost = tf.log(1e-4 + 1 - tf.abs(dot_products))
  return logcost
from typing import List
import requests
import time
def scrape_users(basic_users: List[BasicUser]) -> List[User]:
    """Scrape user pages for list of User objects.

    Fetches each user's page, parses the ratings/reviews counts, and
    throttles requests (short delays, with long "major" delays every 200
    users or when the server blocks requests).
    """
    def major_delay(base: int, offset: int) -> None:
        # Long, randomized pause shown with a countdown; used both for
        # periodic throttling and for backing off when blocked.
        delay = getdelay(base, offset)
        print(f"\nDelaying server request for {delay} seconds...")
        countdown(delay)
        print()
    users = []
    print(f"Scraping user data from {len(basic_users)} user pages started...")
    for i, basic_user in enumerate(basic_users, start=1):
        markup = requests.get(get_user_url(basic_user.name), headers=HEADERMAP).text
        parser = None
        counter = 0
        # Retry parsing while the server signals a block, backing off with a
        # major delay each time, up to MAX_BLOCKS_COUNT retries.
        # NOTE(review): the markup is NOT re-fetched inside this loop, so
        # each retry re-parses the same blocked response -- confirm intended.
        while True:
            try:
                parser = UserPageParser(markup)
            except RequestBlockedError:
                if counter > MAX_BLOCKS_COUNT:
                    break
                print("*** Server has blocked requests! ***")
                major_delay(MAJOR_USER_LATENCY_BASE, MAJOR_USER_LATENCY_OFFSET)
                counter += 1
                continue
            break
        if not parser:
            raise ValueError("Nothing to parse. Server is blocking requests.")
        user = User(basic_user.name, basic_user.score, parser.ratingscount, parser.reviewscount)
        users.append(user)
        print(f"Scraped user #{i} '{user}'")
        if i % 200 == 0:
            # Every 200th user: take a long break.
            major_delay(USER_LATENCY_BASE, USER_LATENCY_OFFSET)
        else:
            # Otherwise: short randomized delay between requests.
            latency = getdelay(MINOR_USER_LATENCY_BASE, MINOR_USER_LATENCY_OFFSET)
            time.sleep(latency)
    return users
def angle_section(
    d: float,
    b: float,
    t: float,
    r_r: float,
    r_t: float,
    n_r: int,
    material: pre.Material = pre.DEFAULT_MATERIAL,
) -> geometry.Geometry:
    """Constructs an angle section with the bottom left corner at the origin *(0, 0)*, with depth
    *d*, width *b*, thickness *t*, root radius *r_r* and toe radius *r_t*, using *n_r* points to
    construct the radii.
    :param float d: Depth of the angle section
    :param float b: Width of the angle section
    :param float t: Thickness of the angle section
    :param float r_r: Root radius of the angle section
    :param float r_t: Toe radius of the angle section
    :param int n_r: Number of points discretising the radii
    :param material: Material to associate with this geometry
    :type material: Optional[sectionproperties.pre.pre.Material]
    :return: Angle section geometry
    :rtype: sectionproperties.pre.geometry.Geometry
    :raises ValueError: If the toe radius *r_t* is larger than the thickness *t*
    The following example creates an angle section with a depth of 150, a width of 100, a thickness
    of 8, a root radius of 12 and a toe radius of 5, using 16 points to discretise the radii. A
    mesh is generated with a maximum triangular area of 2.0::
        from sectionproperties.pre.library.steel_sections import angle_section
        geometry = angle_section(d=150, b=100, t=8, r_r=12, r_t=5, n_r=16)
        geometry.create_mesh(mesh_sizes=[2.0])
    .. figure:: ../images/sections/angle_geometry.png
        :align: center
        :scale: 75 %
        Angle section geometry.
    .. figure:: ../images/sections/angle_mesh.png
        :align: center
        :scale: 75 %
        Mesh generated from the above geometry.
    """
    if r_t > t:
        raise ValueError(
            "The radius of the toe (r_t) cannot be larger than the toe thickness (t)."
        )
    # Build the outline counter-clockwise starting from the origin.
    points = []
    # add first two points
    points.append([0, 0])
    points.append([b, 0])
    # construct the bottom toe radius
    pt = [b - r_t, t - r_t]
    points += draw_radius(pt, r_t, 0, n_r)
    # construct the root radius (concave, hence the reversed direction flag)
    pt = [t + r_r, t + r_r]
    points += draw_radius(pt, r_r, 1.5 * np.pi, n_r, False)
    # construct the top toe radius
    pt = [t - r_t, d - r_t]
    points += draw_radius(pt, r_t, 0, n_r)
    # add the next point
    points.append([0, d])
    polygon = Polygon(points)
    return geometry.Geometry(polygon, material)
def normalize_adj(adj, type='sym'):
    """Normalize an adjacency matrix.

    Args:
        adj: adjacency matrix (dense array or scipy sparse matrix).
        type: 'sym' for symmetric normalization D^-1/2 A D^-1/2, or
            'rw' for random-walk normalization D^-1 A.
    Returns:
        The normalized adjacency: sparse COO matrix for 'sym'; for 'rw' the
        result type follows scipy's ``dot`` with the given input.
    Raises:
        ValueError: if ``type`` is neither 'sym' nor 'rw' (previously this
            silently returned None).
    """
    if type == 'sym':
        adj = sp.coo_matrix(adj)
        rowsum = np.array(adj.sum(1))
        # Isolated nodes have degree 0; zero out the resulting infinities.
        d_inv_sqrt = np.power(rowsum, -0.5).flatten()
        d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
        d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
        return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
    elif type == 'rw':
        rowsum = np.array(adj.sum(1))
        d_inv = np.power(rowsum, -1.0).flatten()
        d_inv[np.isinf(d_inv)] = 0.
        d_mat_inv = sp.diags(d_inv)
        return d_mat_inv.dot(adj)
    raise ValueError("Unknown normalization type: {!r}".format(type))
def get_mb(
    num):
    """get_mb
    Convert a number of bytes (as an ``integer``) to megabytes with
    2 decimal points of precision (the rounding is done by ``to_f``).
    :param num: integer - number of bytes
    :return: megabytes as a float with 2 decimal points of precision
    """
    return to_f(num / NUM_BYTES_IN_AN_MB)
def GetComments(node, layers='core') :
    """Get the rdfs:comment(s) we find on this node within any of the specified layers.

    Args:
        node: the schema Unit whose comments are requested.
        layers: layer name(s) to search (default 'core').
    Returns:
        The targets of rdfs:comment triples on *node* within those layers.
    """
    return GetTargets(Unit.GetUnit("rdfs:comment", True), node, layers=layers )
def _ValidateObbFileList(arg_internal_name, arg_value):
    """Validates that 'obb-files' contains at most 2 entries."""
    values = ValidateStringList(arg_internal_name, arg_value)
    if len(values) <= 2:
        return values
    raise test_exceptions.InvalidArgException(
        arg_internal_name, 'At most two OBB files may be specified.')
def _depol_error_value_one_qubit(gate_error, gate_time=0, t1=inf, t2=inf):
"""Return 2-qubit depolarizing channel probability for device model"""
# Check trivial case where there is no gate error
if gate_error is None:
return None
if gate_error == 0:
return 0
# Check t1 and t2 are valid
if t1 <= 0:
raise NoiseError("Invalid T_1 relaxation time parameter: T_1 <= 0.")
if t2 <= 0:
raise NoiseError("Invalid T_2 relaxation time parameter: T_2 <= 0.")
if t2 - 2 * t1 > 0:
raise NoiseError("Invalid T_2 relaxation time parameter: T_2 greater than 2 * T_1.")
# If T1 or T2 we have only a depolarizing error model
# in this case p_depol = dim * gate_error / (dim - 1)
# with dim = 2 for 1-qubit
if gate_time is None:
gate_time = 0
if gate_time == 0 or (t1 == inf and t2 == inf):
if gate_error is not None and gate_error > 0:
return 2 * gate_error
else:
return 0
# Otherwise we calculate the depolarizing error probability to account
# for the difference between the relaxation error and gate error
if t1 == inf:
par1 = 1
else:
par1 = exp(-gate_time / t1)
if t2 == inf:
par2 = 1
else:
par2 = exp(-gate_time / t2)
p_depol = 1 + 3 * (2 * gate_error - 1) / (par1 + 2 * par2)
return p_depol | 135304ea8853f81aba5ad00bf7a556072fcb8370 | 3,629,490 |
def meh(text):
    """
    >>> meh(EXAMPLE_INPUT)
    [3, 8, 9, 1, 2, 5, 4, 6, 7]
    """
    # Each character of the input is a single digit.
    return list(map(int, text))
def get_latest_dataset():
    """Return the latest dataset that was created, or None if there is none.

    The original called ``getattr(qcodes.DataSet._latest, None)``, which
    always raises TypeError because getattr requires a string attribute
    name; the intent was a defaulted attribute lookup on DataSet.
    """
    return getattr(qcodes.DataSet, '_latest', None)
def insert_newlines(text, line_length):
    """
    Given text and a desired line length, wrap the text as a typewriter would.
    Insert a newline character ("\n") after each word that reaches or exceeds
    the desired line length.
    text: a string containing the text to wrap.
    line_length: the number of characters to include on a line before wrapping
        the next word.
    returns: a string, with newline characters inserted appropriately.
    """
    # Delegates to a recursive helper defined elsewhere in this file; the
    # third argument seeds the remaining-characters counter at
    # line_length - 1.  NOTE(review): confirm the helper's exact contract.
    return insert_newlines_rec(text, line_length, line_length-1)
def gen_annular_fpm(inputs):
    """
    Generate an annular FPM using PROPER.
    Outside the outer ring is opaque. If rhoOuter = infinity, then the outer
    ring is omitted and the mask is cropped down to the size of the inner spot.
    The inner spot has a specifyable amplitude value. The output array is the
    smallest size that fully contains the mask.
    Parameters
    ----------
    inputs : dict
        dictionary of input values. Required keys: "pixresFPM", "rhoInner",
        "rhoOuter". Optional keys: "xOffset", "yOffset", "centering"
        ('pixel' or 'interpixel'), "FPMampFac", "Narray".
    Returns
    -------
    mask : array_like
        2-D FPM representation
    """
    check.is_dict(inputs, 'inputs')
    # Required keys
    pixresFPM = inputs["pixresFPM"]
    rhoInner = inputs["rhoInner"]
    rhoOuter = inputs["rhoOuter"]
    # Optional keys
    xOffset = inputs.get("xOffset", 0)
    yOffset = inputs.get("yOffset", 0)
    centering = inputs.get("centering", "pixel")
    FPMampFac = inputs.get("FPMampFac", 0)
    dx = 1.0 / pixresFPM  # lambda_c/D per pixel.
    # Choose the smallest even array that fully contains the (offset) mask;
    # pixel centering needs one extra pixel for the center sample.
    maxAbsOffset = np.max(np.array([np.abs(xOffset), np.abs(yOffset)]))
    if np.isinf(rhoOuter):
        if centering == "interpixel":
            Narray = ceil_even(2*rhoInner/dx + 2*maxAbsOffset/dx)
        elif centering == "pixel":
            Narray = ceil_even(2*rhoInner/dx + 2*maxAbsOffset/dx + 1)
        Dmask = 2 * pixresFPM * rhoInner  # Diameter of the mask
    else:
        if centering == "interpixel":
            Narray = ceil_even(2*rhoOuter/dx + 2*maxAbsOffset/dx)
        elif centering == "pixel":
            Narray = ceil_even(2*rhoOuter/dx + 2*maxAbsOffset/dx + 1)
        Dmask = 2 * pixresFPM * rhoOuter  # Diameter of the mask
    # Caller may force a specific array size.
    if "Narray" in inputs:
        Narray = inputs["Narray"]
    Darray = Narray * dx  # width of array in lambda_c/D
    bdf = Dmask / Darray
    wl_dummy = 1e-6  # wavelength (m); Dummy value
    # Interpixel centering places the mask center between pixels, which
    # requires a half-pixel shift of the aperture centers.
    if centering == "interpixel":
        cshift = -Darray / 2 / Narray
    else:
        cshift = 0
    # INITIALIZE PROPER. Note that: bm.dx = Darray / bdf / np;
    wf = proper.prop_begin(Dmask, wl_dummy, Narray, bdf)
    proper.prop_set_antialiasing(101)
    if not np.isinf(rhoOuter):
        # Outer opaque ring of FPM
        cx_OD = 0 + cshift + xOffset
        cy_OD = 0 + cshift + yOffset
        proper.prop_circular_aperture(wf, rhoOuter, cx_OD, cy_OD)
    # Inner spot of FPM (Amplitude transmission can be nonzero)
    cx_ID = 0 + cshift + xOffset
    cy_ID = 0 + cshift + yOffset
    innerSpot = proper.prop_ellipse(wf, rhoInner, rhoInner, cx_ID, cy_ID,
                                    DARK=True) * (1 - FPMampFac) + FPMampFac
    mask = np.fft.ifftshift(np.abs(wf.wfarr))  # undo PROPER's fftshift
    return mask * innerSpot
import time
import re
def _get_time_ts(string: str) -> float:
"""
通过传入的字符串获取时间戳
"""
year = time.localtime().tm_year
month = re.search(r"(\d+)月", string).group(1)
day = re.search(r"(\d+)日", string).group(1)
return time.mktime(time.strptime(f"{year}-{month:>02}-{day:>02}", "%Y-%m-%d")) | bce5550c23bb3f60ada2e0a8fdc1054a347fdecc | 3,629,495 |
def octresnet10_ad2(**kwargs):
    """
    Oct-ResNet-10 (alpha=1/2) model from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks
    with Octave Convolution,' https://arxiv.org/abs/1904.05049.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    Returns
    -------
    The model instance built by ``get_octresnet`` (defined elsewhere) with
    10 blocks and octave alpha fixed at 0.5.
    """
    return get_octresnet(blocks=10, oct_alpha=0.5, model_name="octresnet10_ad2", **kwargs)
import base64
import os
import stat
import sys
import pickle
import subprocess


def create_standalone_plot(fig, fname, backend=None):
    """
    Create a script which can be executed to plot the given figure.
    Pickles the figure and stores it as string in the script.
    Parameter
    ---------
    fig : matplotlib.figure.Figure
        Matplotlib figure to store.
    fname : str
        File name.
    backend : str or None, optional
        Sets the used backend. Default is None.
        Expamle: 'Qt5Agg, TkAgg'
    Examples
    --------
    Normal
    >>> create_standalone_plot(fig, 'pmf')
    Changing the backend
    >>> create_standalone_plot(fig, 'pmf', backend='Qt5Agg')
    """
    def in_ipynb():
        # Heuristic: ipykernel is only imported when running inside Jupyter.
        return 'ipykernel' in sys.modules
    # Protocol 0 keeps the pickle ASCII-safe for embedding in the script.
    pkl_string = pickle.dumps(fig, protocol=0)
    with open(fname, 'w') as fp:
        fp.write('#!/usr/bin/env python{}.{} \n'.format(sys.version_info.major, sys.version_info.minor))
        fp.write('import pickle \n')
        if backend is not None:
            fp.write('import matplotlib \n')
            fp.write('matplotlib.use("{}")\n'.format(backend))
        fp.write('import matplotlib.pyplot as plt \n')
        if sys.version_info.major < 3:
            # Python 2: embed the pickle base64-encoded.
            fp.write("import base64 \n")
            fp.write("pkl_string = b'''{}''' \n".format(base64.b64encode(pkl_string)))
            fp.write('fig = pickle.loads( base64.b64decode(pkl_string) ) \n')
        else:
            # Python 3: the bytes repr is a valid literal.
            fp.write("pkl_string = {} \n".format(pkl_string))
            fp.write('fig = pickle.loads(pkl_string) \n')
        if in_ipynb():
            # Inside a notebook the unpickled figure has no canvas manager;
            # attach it to a dummy figure of the same size/dpi.
            fp.write('fig._original_dpi = {} \n'.format(fig.get_dpi()))
            fp.write('dummy = plt.figure(figsize={}, dpi={}) \n'.format(
                tuple(fig.get_size_inches()), fig.get_dpi()))
            fp.write('new_manager = dummy.canvas.manager \n')
            fp.write('new_manager.canvas.figure = fig \n')
            fp.write('fig.set_canvas(new_manager.canvas) \n')
        fp.write('plt.show() \n')
    # Make the script executable. Previously this spawned
    # ``subprocess.Popen("chmod +x " + fname, shell=True)``, which is
    # shell-injectable via fname, non-portable, and asynchronous.
    os.chmod(fname,
             os.stat(fname).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    print("Created : \033[0;31m{}\033[0m".format(fname))
import os
def getdict_userid_username():
    """
    Make a dictionary to map from Moodle user IDs to user names,
    read from the backup's users.xml file.
    """
    doc = parse(os.path.join("backup", "users.xml"))
    return {
        int(user.getAttribute("id")):
            user.getElementsByTagName("username")[0].firstChild.nodeValue
        for user in doc.getElementsByTagName("user")
    }
def generate_template(global_entity, sentence, sent_ent, kb_arr, domain):
    """
    code from GLMP: https://github.com/jasonwu0731/GLMP/blob/master/utils/utils_Ent_kvr.py
    Based on the system response and the provided entity table, the output is the sketch response.
    """
    # No entities to replace: just re-join the tokens (this also collapses
    # any repeated whitespace, matching the entity path below).
    if sent_ent == []:
        return " ".join(sentence.split())
    tokens = []
    for word in sentence.split():
        # Non-entity words are copied through unchanged.
        if word not in sent_ent:
            tokens.append(word)
            continue
        ent_type = None
        # Prefer the entity type recorded in the KB (skipped for weather).
        if domain != 'weather':
            for kb_item in kb_arr:
                if word == kb_item[0]:
                    ent_type = kb_item[1]
                    break
        # Fall back to the global entity table (also covers weather).
        if ent_type is None:
            for key in global_entity.keys():
                if key != 'poi':
                    # Note: mutates global_entity by lowercasing in place,
                    # exactly as the original implementation did.
                    global_entity[key] = [x.lower() for x in global_entity[key]]
                    if word in global_entity[key] or word.replace('_', ' ') in global_entity[key]:
                        ent_type = key
                        break
                else:
                    poi_list = [d['poi'].lower() for d in global_entity['poi']]
                    if word in poi_list or word.replace('_', ' ') in poi_list:
                        ent_type = key
                        break
        tokens.append('@' + ent_type)
    return " ".join(tokens)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.