| content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def get_rgeo(coordinates):
"""Geocode specified coordinates
:argument coordinates: address coordinates
:type coordinates: tuple
    :returns: tuple of (response, coordinates)
"""
params = {'language': GEOCODING_LANGUAGE,
'latlng': ','.join([str(crdnt) for crdnt in coordinates])}
result = get(url=GEOCODING_URL, params=params)
return result, coordinates
|
ca8d07f526260d48955dee1b32d18bf14b21f9f6
| 3,643,600
|
def norm_lib_size_log(assay, counts: daskarr) -> daskarr:
"""
Performs library size normalization and then transforms the
values into log scale.
Args:
assay: An instance of the assay object
counts: A dask array with raw counts data
Returns: A dask array (delayed matrix) containing normalized data.
"""
return np.log1p(assay.sf * counts / assay.scalar.reshape(-1, 1))
|
3fdcde36daa3c3c491c3b85f718d75e6276af8fa
| 3,643,601
|
def compare_dicts(cloud1, cloud2):
"""
Compare the dicts containing cloud images or flavours
"""
if len(cloud1) != len(cloud2):
return False
for item in cloud1:
if item in cloud2:
if cloud1[item] != cloud2[item]:
return False
else:
return False
return True
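A minimal usage sketch (with hypothetical image dicts) showing the expected behaviour:
images_a = {'img-1': 'abc123', 'img-2': 'def456'}
images_b = {'img-1': 'abc123', 'img-2': 'def456'}
assert compare_dicts(images_a, images_b)
assert not compare_dicts(images_a, {'img-1': 'abc123'})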
|
4c13ed92da2cd40b543b75fac119b5da302717e3
| 3,643,602
|
import json
def ajax_stats():
"""
    Get customer statistics (line chart data for middleman and end-user customers)
:return:
"""
time_based = request.args.get('time_based', 'hour')
result_customer_middleman = customer_middleman_stats(time_based)
result_customer_end_user = customer_end_user_stats(time_based)
line_chart_data = {
'labels': [label for label, _ in result_customer_middleman],
'datasets': [
{
'label': '同行',
'backgroundColor': 'rgba(220,220,220,0.5)',
'borderColor': 'rgba(220,220,220,1)',
'pointBackgroundColor': 'rgba(220,220,220,1)',
'pointBorderColor': '#fff',
'pointBorderWidth': 2,
'data': [data for _, data in result_customer_middleman]
},
{
'label': '终端',
'backgroundColor': 'rgba(151,187,205,0.5)',
'borderColor': 'rgba(151,187,205,1)',
'pointBackgroundColor': 'rgba(151,187,205,1)',
'pointBorderColor': '#fff',
'pointBorderWidth': 2,
'data': [data for _, data in result_customer_end_user]
}
]
}
return json.dumps(line_chart_data, default=json_default)
|
a467bd656535695333030ded34ccb299d57c8ef7
| 3,643,603
|
import string
def str2int(string_with_int):
""" Collect digits from a string """
return int("".join([char for char in string_with_int if char in string.digits]) or 0)
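Quick check of the behaviour, including the fallback to 0 when no digits are present:
assert str2int("abc123def45") == 12345
assert str2int("no digits here") == 0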
|
86955812fa3b2e6af0b98a04a1516897ccf95c25
| 3,643,604
|
def grid_to_3d(reward: np.ndarray) -> np.ndarray:
"""Convert gridworld state-only reward R[i,j] to 3D reward R[s,a,s']."""
assert reward.ndim == 2
reward = reward.flatten()
ns = reward.shape[0]
return state_to_3d(reward, ns, 5)
|
f848900b3b9ba7eb94fc1539fb1b24107e3db551
| 3,643,605
|
def find_routes(paths) -> list:
"""returns routes as tuple from path as list\
like 1,2,3 --> (1,2)(2,3)"""
routes = []
for path in paths:
for i in range(len(path)):
try:
route = (path[i], path[i + 1])
if route not in routes:
routes.append(route)
except IndexError:
pass
return routes
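For example (a route shared by several paths is reported only once):
assert find_routes([[1, 2, 3], [2, 3, 4]]) == [(1, 2), (2, 3), (3, 4)]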
|
67fb8eb575dd45879f5e5b465a7886f2a2387b26
| 3,643,606
|
def z_step_ncg_hess_(Z, v, Y, F, phi, C_Z, eta_Z):
"""A wrapper of the hess-vector product for ncg calls."""
return z_step_tron_hess(v, Y, F, phi, C_Z, eta_Z)
|
2c6e800040e5090333cbba0924985bf7fe17c873
| 3,643,607
|
def list_servers(**kwargs) -> "list[NovaServer]":
"""List all servers under the current project.
Args:
kwargs: Keyword arguments, which will be passed to
:func:`novaclient.v2.servers.list`. For example, to filter by
instance name, provide ``search_opts={'name': 'my-instance'}``
Returns:
All servers associated with the current project.
"""
return nova().servers.list(**kwargs)
|
3e12a6e24687e74942cc86bc616d57ebdb5a6521
| 3,643,608
|
from typing import Optional
from typing import cast
def resolve_xref(
app: Sphinx,
env: BuildEnvironment,
node: nodes.Node,
contnode: nodes.Node,
) -> Optional[nodes.reference]:
"""
Resolve as-yet-unresolved XRefs for :rst:role:`tconf` roles.
:param app: The Sphinx application.
:param env: The Sphinx build environment.
    :param node: The cross reference node which has not yet been resolved.
:param contnode: The child node of the reference node, which provides the formatted text.
"""
if not isinstance(node, nodes.Element): # pragma: no cover
return None
if node.get("refdomain", None) != "std": # pragma: no cover
return None
elif node.get("reftype", None) != "tconf": # pragma: no cover
return None
elif not node.get("reftarget"): # pragma: no cover
return None
std_domain = cast(StandardDomain, env.get_domain("std"))
objtypes = std_domain.objtypes_for_role("tconf") or []
reftarget = node["reftarget"]
candidates = []
for (obj_type, obj_name), (docname, labelid) in std_domain.objects.items():
if not docname: # pragma: no cover
continue
if obj_type in objtypes:
if obj_name.endswith(f".{reftarget}"):
candidates.append((docname, labelid, obj_name))
if not candidates:
return None # pragma: no cover
elif len(candidates) > 1:
logger.warning(
__("more than one target found for cross-reference %r: %s"),
reftarget,
", ".join(c[2] for c in candidates),
type="ref",
subtype="tconf",
location=node,
)
return make_refnode(
app.builder,
env.docname,
candidates[0][0], # docname
candidates[0][1], # labelid
contnode,
)
|
d4bc46765de1e892aa6753678fab5ad2ff693f68
| 3,643,609
|
def deploy_tester_contract(
web3,
contracts_manager,
deploy_contract,
contract_deployer_address,
get_random_address,
):
"""Returns a function that can be used to deploy a named contract,
    using the contracts manager to compile the bytecode and get the ABI"""
def f(contract_name, libs=None, args=None):
json_contract = contracts_manager.get_contract(contract_name)
contract = deploy_contract(
web3,
contract_deployer_address,
json_contract['abi'],
json_contract['bin'],
args,
)
return contract
return f
|
ee925e9632f3bfd66a843d336bd287c92543b2ed
| 3,643,610
|
def make_hashable_params(params):
"""
    Convert the submitted parameters into a hashable tuple, raising if they cannot be hashed.
    Args:
        params (dict): keyword parameters to convert
    Returns:
        tuple: a hashable tuple representation of ``params``
"""
tuple_params = []
for key, value in params.items():
if isinstance(value, dict):
dict_tuple = tuple([(key2, value2) for key2, value2 in value.items()])
tuple_params.append(dict_tuple)
else:
if isinstance(value, (list, set)):
tuple_params.append((key, tuple(value)))
else:
tuple_params.append((key, value))
tuple_params = tuple(tuple_params)
try:
hash(tuple_params)
except TypeError:
raise TypeError('The values of keywords given to this class must be hashable.')
return tuple_params
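A quick illustration of the returned structure (note that, as written, the top-level key of a dict-valued entry is not retained):
params = {'a': {'x': 1}, 'b': [1, 2], 'c': 3}
assert make_hashable_params(params) == ((('x', 1),), ('b', (1, 2)), ('c', 3))
hash(make_hashable_params(params))  # does not raise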
|
39d5de594b8caf776d2732e0e58b1c11127e5047
| 3,643,611
|
def check_member_role(member: discord.Member, role_id: int) -> bool:
"""
Checks if the Member has the Role
"""
return any(role.id == role_id for role in member.roles)
|
500c9c33dd0e25a6a4704165add3d39c05d510d2
| 3,643,612
|
import itertools
def tag_bedpe(b, beds, verbose=False):
"""
Tag each end of a BEDPE with a set of (possibly many) query BED files.
For example, given a BEDPE of interacting fragments from a Hi-C experiment,
identify the contacts between promoters and ChIP-seq peaks. In this case,
promoters and ChIP-seq peaks of interest would be provided as BED files.
The strategy is to split the BEDPE into two separate files. Each file is
intersected independently with the set of queries. The results are then
iterated through in parallel to tie the ends back together. It is this
iterator that is returned (see example below).
Parameters
----------
    b : str
        BEDPE-format file. Must be name-sorted.
    beds : dict
Dictionary of BED/GFF/GTF/VCF files to use. After splitting the BEDPE,
these query files (values in the dictionary) will be passed as the `-b`
arg to `bedtools intersect`. The keys are passed as the `names`
argument for `bedtools intersect`.
Returns
-------
Tuple of (iterator, n, extra).
`iterator` is described below. `n` is the total number of lines in the
BEDPE file, which is useful for calculating percentage complete for
downstream work. `extra` is the number of extra fields found in the BEDPE
(also useful for downstream processing).
`iterator` yields tuples of (label, end1_hits, end2_hits) where `label` is
the name field of one line of the original BEDPE file. `end1_hits` and
`end2_hits` are each iterators of BED-like lines representing all
identified intersections across all query BED files for end1 and end2 for
this pair.
Recall that BEDPE format defines a single name and a single score for each
pair. For each item in `end1_hits`, the fields are::
chrom1
start1
end1
name
score
strand1
[extra fields]
query_label
fields_from_query_intersecting_end1
where `[extra fields]` are any additional fields from the original BEDPE,
`query_label` is one of the keys in the `beds` input dictionary, and the
remaining fields in the line are the intersecting line from the
corresponding BED file in the `beds` input dictionary.
Similarly, each item in `end2_hits` consists of:
chrom2
start2
end2
name
score
strand2
[extra fields]
query_label
fields_from_query_intersecting_end2
At least one line is reported for every line in the BEDPE file. If there
was no intersection, the standard BEDTools null fields will be shown. In
`end1_hits` and `end2_hits`, a line will be reported for each hit in each
query.
Example
-------
    Consider the following BEDPE (where "x1" is an arbitrary extra field).
>>> bedpe = pybedtools.example_bedtool('test_bedpe.bed')
>>> print(bedpe) # doctest: +NORMALIZE_WHITESPACE
chr1 1 10 chr1 50 90 pair1 5 + - x1
chr1 2 15 chr1 200 210 pair2 1 + + y1
<BLANKLINE>
And the following transcription start sites (TSSes) in BED4 format:
>>> tsses = pybedtools.example_bedtool('test_tsses.bed')
>>> print(tsses) # doctest: +NORMALIZE_WHITESPACE
chr1 5 6 gene1
chr1 60 61 gene2
chr1 88 89 gene3
<BLANKLINE>
And the following called peaks as BED6:
>>> peaks = pybedtools.example_bedtool('test_peaks.bed')
>>> print(peaks) # doctest: +NORMALIZE_WHITESPACE
chr1 3 4 peak1 50 .
<BLANKLINE>
Then we can get the following iterator, n, and extra:
>>> from pybedtools.contrib.long_range_interaction import tag_bedpe
>>> iterator, n, extra = tag_bedpe(bedpe, {'tss': tsses, 'pk': peaks})
>>> print(n)
2
>>> print(extra)
1
The following illustrates that each item in the iterator represents one
pair, and each item in each group represents an intersection with one end:
>>> for (label, end1_hits, end2_hits) in iterator:
... print('PAIR = {}'.format(label))
... print('end1_hits:')
... for i in end1_hits:
... print(i, end='')
... print('end2_hits:')
... for i in end2_hits:
... print(i, end='') # doctest: +NORMALIZE_WHITESPACE
PAIR = pair1
end1_hits:
chr1 1 10 pair1 5 + x1 pk chr1 3 4 peak1 50 . 1
chr1 1 10 pair1 5 + x1 tss chr1 5 6 gene1 1
end2_hits:
chr1 50 90 pair1 5 - x1 tss chr1 60 61 gene2 1
chr1 50 90 pair1 5 - x1 tss chr1 88 89 gene3 1
PAIR = pair2
end1_hits:
chr1 2 15 pair2 1 + y1 pk chr1 3 4 peak1 50 . 1
chr1 2 15 pair2 1 + y1 tss chr1 5 6 gene1 1
end2_hits:
chr1 200 210 pair2 1 + y1 . -1 -1 . -1 . 0
See the `cis_trans_interactions()` function for one way of summarizing
these data.
"""
b = pybedtools.BedTool(b)
# Figure out if the supplied bedpe had any extra fields. If so, the fields
# are repeated in each of the split output files.
observed = b.field_count()
extra = observed - 10
extra_inds = [10 + i for i in range(extra)]
end1_fn = pybedtools.BedTool._tmp()
end2_fn = pybedtools.BedTool._tmp()
# Performance notes:
# We don't need the overhead of converting every line into
# a pybedtools.Interval object just so we can grab the fields. Doing so
# takes 3.5x more time than simply splitting each line on a tab.
if verbose:
print('splitting BEDPE into separate files')
n = 0
with open(end1_fn, 'w') as end1_out, open(end2_fn, 'w') as end2_out:
for line in open(b.fn):
n += 1
f = line.strip().split('\t')
end1_out.write(
'\t'.join(
(f[i] for i in [0, 1, 2, 6, 7, 8] + extra_inds)) + '\n')
end2_out.write(
'\t'.join(
(f[i] for i in [3, 4, 5, 6, 7, 9] + extra_inds)) + '\n')
# Performance notes:
#
# For small BEDPE and large set of query files, it would be faster to sort
# these independently, intersect with sorted=True, and then re-sort by name
# for the grouping. For large BEDPE, I don't think the sorted=True
# performance gain outweighs the hit from sorting twice.
#
# On the other hand, if BEDPE was coord-sorted in the first place, only
# end2 would need to be sorted and re-sorted. On the other (third!?) hand,
# BEDPE creation from BAM implies name-sorting, so it's probably not
# reasonable to assume coord-sorted.
#
# In the end: don't do any sorting.
end1_bt = pybedtools.BedTool(end1_fn)
end2_bt = pybedtools.BedTool(end2_fn)
names, fns = [], []
for name, fn in beds.items():
names.append(name)
if isinstance(fn, pybedtools.BedTool):
fns.append(fn.fn)
else:
fns.append(fn)
if verbose:
print('intersecting end 1')
end1_hits = end1_bt.intersect(list(fns), names=names, wao=True)
if verbose:
print('intersecting end 2')
end2_hits = end2_bt.intersect(list(fns), names=names, wao=True)
grouped_end1 = itertools.groupby(end1_hits, lambda f: f[3])
grouped_end2 = itertools.groupby(end2_hits, lambda f: f[3])
def gen():
for (label1, group1), (label2, group2) \
                in zip(grouped_end1, grouped_end2):  # itertools.izip is Python 2 only
assert label1 == label2
yield label1, group1, group2
return gen(), n, extra
|
a1b95e04abd9401a6494fad2c2b6d48ecb14d414
| 3,643,613
|
from typing import Tuple
def point(x: float, y: float, z: float) -> Tuple:
    """Create a point (w component set to 1.0)."""
    # Note: `Tuple` is called as a constructor below, so it presumably refers to the
    # project's own Tuple/point class rather than typing.Tuple, which is not callable.
    return Tuple(x, y, z, 1.0)
|
035f01d990d16634867b147b7fcb7e9d5edf7f92
| 3,643,614
|
def partial_pipeline_data(backend, user=None, *args, **kwargs): # pragma: no cover
"""
    Load the partial pipeline data from the session referenced by the signed 'signature' parameter on the email validation request.
"""
data = backend.strategy.request_data()
if 'signature' in data:
try:
signed_details = signing.loads(data['signature'], key=settings.SECRET_KEY)
session = Session.objects.get(pk=signed_details['session_key'])
except (BadSignature, Session.DoesNotExist) as e:
raise InvalidEmail(backend)
session_details = session.get_decoded()
backend.strategy.session_set('email_validation_address', session_details['email_validation_address'])
backend.strategy.session_set('next', session_details.get('next'))
backend.strategy.session_set('partial_pipeline', session_details['partial_pipeline'])
backend.strategy.session_set(backend.name + '_state', session_details.get(backend.name + '_state'))
backend.strategy.session_set(backend.name + 'unauthorized_token_name',
session_details.get(backend.name + 'unauthorized_token_name'))
partial = backend.strategy.session_get('partial_pipeline', None)
if partial:
idx, backend_name, xargs, xkwargs = \
backend.strategy.partial_from_session(partial)
if backend_name == backend.name:
kwargs.setdefault('pipeline_index', idx)
if user: # don't update user if it's None
kwargs.setdefault('user', user)
kwargs.setdefault('request', backend.strategy.request_data())
xkwargs.update(kwargs)
return xargs, xkwargs
else:
backend.strategy.clean_partial_pipeline()
|
54c0124b49fead91fed238ded15f6c3167f0aed4
| 3,643,615
|
def arrayinv(F, Fx):
"""
    Solve ``Fx @ y = F`` for each sample, i.e. apply the inverse of the derivative.
    Args:
        F: dx.ds function value at x
        Fx: dx.dx.ds derivative of function at x
    Returns:
        dx.ds array, one linear solve per sample along the last axis
"""
return np.array([np.linalg.solve(a, b) for a, b in zip(Fx.swapaxes(0,2), F.T)]).T
|
ac412bf0cb03a77d0a18295b899aeabd8bcdbfb3
| 3,643,616
|
import os
import csv
def schedule_list(req):
"""List scheduled jobs
"""
schedule = []
if os.path.exists(SCHEDULE):
with open(SCHEDULE) as f:
for n, a, t in csv.reader(f):
schedule.append({'Name': n, 'Timer': t, 'Action': a})
return {'Err': '', 'Schedule': schedule}
|
cdcb9eb15b2faae83bd03765aeb329fd0f4ca6ae
| 3,643,617
|
def mil(val):
"""convert mil to mm"""
return float(val) * 0.0254
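Worked example: 1000 mil is one inch, i.e. 25.4 mm:
print(mil(1000))  # ~25.4 (floating point)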
|
9071b0116a7062ef93d6bee56a08db2b9bec906a
| 3,643,618
|
def ask_number(question, low, high):
"""Poproś o podanie liczby z określonego zakresu."""
response = None
while type(response) != int:
try:
response = int(input(question))
while response not in range(low, high):
response = int(input(question))
except ValueError:
print("Value must be a number")
return response
|
fdae37e6a0cd34d36b647a23f4a0f58cad46680a
| 3,643,619
|
import os
def CheckGypFile(gypfile):
"""Check |gypfile| for common mistakes."""
if not os.path.exists(gypfile):
# The file has been deleted.
return
with open(gypfile) as fp:
return CheckGypData(gypfile, fp.read())
|
edc9971616f0fd6e65872034f485f0156e219fae
| 3,643,620
|
import numpy
from typing import Tuple
import math
def _beams_longitude_latitude(
ping_header: PingHeader, along_track: numpy.ndarray, across_track: numpy.ndarray
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Calculate the longitude and latitude for each beam.
https://en.wikipedia.org/wiki/Geographic_coordinate_system
    For longitude and latitude calculations:
* lat_m_sf = A - B * cos(2 * lat) + C * cos(4 * lat) - D * cos(6 * lat)
* lon_m_sf = E * cos(lat) - F * cos(3 * lat) + G * cos(5 * lat)
"""
# see https://math.stackexchange.com/questions/389942/why-is-it-necessary-to-use-sin-or-cos-to-determine-heading-dead-reckoning # noqa: E501
lat_radians = math.radians(ping_header.latitude)
coef_a = WGS84Coefficients.A.value
coef_b = WGS84Coefficients.B.value
coef_c = WGS84Coefficients.C.value
coef_d = WGS84Coefficients.D.value
coef_e = WGS84Coefficients.E.value
coef_f = WGS84Coefficients.F.value
coef_g = WGS84Coefficients.G.value
lat_mtr_sf = (
coef_a
- coef_b * math.cos(2 * lat_radians)
+ coef_c * math.cos(4 * lat_radians)
- coef_d * math.cos(6 * lat_radians)
)
lon_mtr_sf = (
coef_e * math.cos(lat_radians)
- coef_f * math.cos(3 * lat_radians)
+ coef_g * math.cos(5 * lat_radians)
)
delta_x = math.sin(math.radians(ping_header.heading))
delta_y = math.cos(math.radians(ping_header.heading))
lon2 = (
ping_header.longitude
+ delta_y / lon_mtr_sf * across_track
+ delta_x / lon_mtr_sf * along_track
)
lat2 = (
ping_header.latitude
- delta_x / lat_mtr_sf * across_track
+ delta_y / lat_mtr_sf * along_track
)
return lon2, lat2
|
c43171830206c5db878a817a03a4830aae878765
| 3,643,621
|
def true_range_nb(high: tp.Array2d, low: tp.Array2d, close: tp.Array2d) -> tp.Array2d:
"""Calculate true range."""
prev_close = generic_nb.fshift_nb(close, 1)
tr1 = high - low
tr2 = np.abs(high - prev_close)
tr3 = np.abs(low - prev_close)
tr = np.empty(prev_close.shape, dtype=np.float_)
for col in range(tr.shape[1]):
for i in range(tr.shape[0]):
tr[i, col] = max(tr1[i, col], tr2[i, col], tr3[i, col])
return tr
|
7b7594a1a5adf4e280a53af3e01d9aec5bd3b80c
| 3,643,622
|
def laplacian_operator(data):
"""
apply laplacian operator on data
"""
lap = []
lap.append(0.0)
for index in range(1, len(data) - 1):
lap.append((data[index + 1] + data[index - 1]) / 2.0 - data[index])
lap.append(0.0)
return lap
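A small worked example; interior points get the discrete second-difference value, and the endpoints are padded with 0.0:
assert laplacian_operator([1.0, 4.0, 9.0, 16.0]) == [0.0, 1.0, 1.0, 0.0]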
|
3d7755cdc52352cc445d5942e34c09f65f3e11db
| 3,643,623
|
import re
def _stringmatcher(pattern):
"""
accepts a string, possibly starting with 're:' or 'literal:' prefix.
returns the matcher name, pattern, and matcher function.
missing or unknown prefixes are treated as literal matches.
helper for tests:
>>> def test(pattern, *tests):
... kind, pattern, matcher = _stringmatcher(pattern)
... return (kind, pattern, [bool(matcher(t)) for t in tests])
exact matching (no prefix):
>>> test('abcdefg', 'abc', 'def', 'abcdefg')
('literal', 'abcdefg', [False, False, True])
regex matching ('re:' prefix)
>>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
('re', 'a.+b', [False, False, True])
force exact matches ('literal:' prefix)
>>> test('literal:re:foobar', 'foobar', 're:foobar')
('literal', 're:foobar', [False, True])
unknown prefixes are ignored and treated as literals
>>> test('foo:bar', 'foo', 'bar', 'foo:bar')
('literal', 'foo:bar', [False, False, True])
"""
if pattern.startswith('re:'):
pattern = pattern[3:]
try:
regex = re.compile(pattern)
        except re.error as e:
raise error.ParseError(_('invalid regular expression: %s')
% e)
return 're', pattern, regex.search
elif pattern.startswith('literal:'):
pattern = pattern[8:]
return 'literal', pattern, pattern.__eq__
|
76a673133aaf7493b531b4f73364af2d16dd214b
| 3,643,624
|
def enu_to_ecef(ref_lat_rad, ref_lon_rad, ref_alt_m, e_m, n_m, u_m):
"""Convert ENU coordinates relative to reference location to ECEF coordinates.
This converts local east-north-up (ENU) coordinates relative to a given
reference position to earth-centered, earth-fixed (ECEF) cartesian
coordinates. The reference position is specified by its geodetic latitude,
longitude and altitude.
Parameters
----------
ref_lat_rad, ref_lon_rad : float or array
Geodetic latitude and longitude of reference position, in radians
ref_alt_m : float or array
Geodetic altitude of reference position, in metres above WGS84 ellipsoid
e_m, n_m, u_m : float or array
East, North, Up coordinates, in metres
Returns
-------
x_m, y_m, z_m : float or array
X, Y, Z coordinates, in metres
"""
# ECEF coordinates of reference point
ref_x_m, ref_y_m, ref_z_m = lla_to_ecef(ref_lat_rad, ref_lon_rad, ref_alt_m)
sin_lat, cos_lat = np.sin(ref_lat_rad), np.cos(ref_lat_rad)
sin_lon, cos_lon = np.sin(ref_lon_rad), np.cos(ref_lon_rad)
x_m = ref_x_m - sin_lon*e_m - sin_lat*cos_lon*n_m + cos_lat*cos_lon*u_m
y_m = ref_y_m + cos_lon*e_m - sin_lat*sin_lon*n_m + cos_lat*sin_lon*u_m
z_m = ref_z_m + cos_lat*n_m + sin_lat*u_m
return x_m, y_m, z_m
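A brief usage sketch, assuming `numpy as np` and the `lla_to_ecef` helper referenced in the body are available in the module:
import numpy as np
ref_lat, ref_lon, ref_alt = np.radians(52.0), np.radians(4.4), 10.0
x_m, y_m, z_m = enu_to_ecef(ref_lat, ref_lon, ref_alt, e_m=100.0, n_m=50.0, u_m=2.0)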
|
a6a7e8e3a67a17894d68d6c62b2ac7fcef7a09ec
| 3,643,625
|
import re
import requests
def is_file_url(share_url: str) -> bool:
"""判断是否为文件的分享链接"""
base_pat = r'https?://[a-zA-Z0-9-]*?\.?lanzou[a-z].com/.+' # 子域名可个性化设置或者不存在
user_pat = r'https?://[a-zA-Z0-9-]*?\.?lanzou[a-z].com/i[a-zA-Z0-9]{5,}/?' # 普通用户 URL 规则
if not re.fullmatch(base_pat, share_url):
return False
elif re.fullmatch(user_pat, share_url):
return True
    else:  # VIP users' URLs are fairly arbitrary, so check the page content
try:
html = requests.get(share_url, headers=headers).text
html = remove_notes(html)
return True if re.search(r'class="fileinfo"|id="file"|文件描述', html) else False
except (requests.RequestException, Exception):
return False
|
d9b56a2187cedeb79cb848192b544026a5d85e29
| 3,643,626
|
def get_compton_fraction_artis(energy):
"""Gets the Compton scattering/absorption fraction
and angle following the scheme in ARTIS
Parameters
----------
energy : float
Energy of the gamma-ray
Returns
-------
float
Scattering angle
float
Compton scattering fraction
"""
energy_norm = kappa_calculation(energy)
fraction_max = 1.0 + 2.0 * energy_norm
fraction_min = 1.0
normalization = np.random.random() * compton_opacity_partial(
energy_norm, fraction_max
)
epsilon = 1.0e20
count = 0
while epsilon > 1.0e-4:
fraction_try = (fraction_max + fraction_min) / 2.0
sigma_try = compton_opacity_partial(energy_norm, fraction_try)
if sigma_try > normalization:
fraction_max = fraction_try
epsilon = (sigma_try - normalization) / normalization
else:
fraction_min = fraction_try
epsilon = (normalization - sigma_try) / normalization
count += 1
if count > 1000:
print("Error, failure to get a Compton fraction")
break
angle = np.arccos(1.0 - ((fraction_try - 1) / energy_norm))
return angle, fraction_try
|
2121712c542c967ef7008a4bdf8b88a8e2bcdb6c
| 3,643,627
|
def is_argspec_compatible_with_types(argspec, *args, **kwargs):
"""Determines if functions matching 'argspec' accept given 'args'/'kwargs'.
Args:
    argspec: An instance of inspect.ArgSpec to verify against the arguments.
*args: Zero or more positional arguments, all of which must be instances of
computation_types.Type or something convertible to it by
computation_types.to_type().
**kwargs: Zero or more keyword arguments, all of which must be instances of
computation_types.Type or something convertible to it by
computation_types.to_type().
Returns:
True or false, depending on the outcome of the test.
Raises:
TypeError: if the arguments are of the wrong computation_types.
"""
try:
callargs = get_callargs_for_argspec(argspec, *args, **kwargs)
if not argspec.defaults:
return True
except TypeError:
return False
# As long as we have been able to construct 'callargs', and there are no
# default values to verify against the given types, there is nothing more
# to do here, otherwise we have to verify the types of defaults against
# the types we've been given as parameters to this function.
num_specargs_without_defaults = len(argspec.args) - len(argspec.defaults)
for idx, default_value in enumerate(argspec.defaults):
if default_value is not None:
arg_name = argspec.args[num_specargs_without_defaults + idx]
call_arg = callargs[arg_name]
if call_arg is not default_value:
arg_type = computation_types.to_type(call_arg)
default_type = type_utils.infer_type(default_value)
if not type_utils.is_assignable_from(arg_type, default_type):
return False
return True
|
5103fa00737f4faeda49441f9d67388f34599d09
| 3,643,628
|
def get_span_feats_stopwords(stopwords):
"""Get a span dependency tree unary function"""
return partial(get_span_feats, stopwords=stopwords)
|
86fd8c597f39f71c489665c05d164e0a3e1e69c0
| 3,643,629
|
def get_argument_parser(argparser):
"""Augments the given ArgumentParser for use with the Bonobo ETL framework."""
return bonobo.get_argument_parser(parser=argparser)
|
584fc867660f85998a679d1883828ea7a8c3896f
| 3,643,630
|
from pathlib import Path
def input_file_path(directory: str, file_name: str) -> Path:
"""Given the string paths to the result directory, and the input file
return the path to the file.
1. check if the input_file is an absolute path, and if so, return that.
2. if the input_file is a relative path, combine it with the result_directory
and return that.
    The resulting path must exist and be a file; otherwise a FileNotFoundError is raised.
"""
path_to_file = Path(file_name)
if path_to_file.is_absolute() and path_to_file.is_file():
return path_to_file
input_directory_path = Path(directory)
path_to_file = input_directory_path / path_to_file
if path_to_file.is_file():
return path_to_file.resolve()
else:
raise FileNotFoundError(
'did not find the input file using result_directory={directory}, input_file={input_file}'.format(
directory=directory, input_file=file_name
)
)
|
dd866a5f8b6f776238269844d64686f7fb28347c
| 3,643,631
|
def loss(S, K, n_samples=None):
"""Loss function for time-varying graphical lasso."""
if n_samples is None:
n_samples = np.ones(S.shape[0])
return sum(
-ni * logl(emp_cov, precision)
for emp_cov, precision, ni in zip(S, K, n_samples))
|
07ad436bf5aee5e8b1dc53e89b894c4c8883cedd
| 3,643,632
|
def flat_dict(df):
"""
    Expand each key/value pair of dict-valued columns in a dataframe into new columns
"""
for col in df.columns:
if type(df[col][0]) == dict:
df = pd.concat(
[df.drop([col], axis=1), df[col].apply(pd.Series)], axis=1)
# sometimes a column is dropped but column 0 stays
df = df.drop([0], axis=1, errors='ignore')
return df
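A small pandas sketch of the flattening behaviour (assumes `pandas as pd` is imported in the module):
import pandas as pd
df = pd.DataFrame({'id': [1, 2],
                   'meta': [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]})
flat = flat_dict(df)
# flat now has columns 'id', 'x' and 'y'; the nested 'meta' column is gone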
|
ec817b9c7a08aab95bb29981dafbb1f1e03821eb
| 3,643,633
|
from typing import List
async def run_setup_pys(
targets_with_origins: TargetsWithOrigins,
setup_py_subsystem: SetupPySubsystem,
console: Console,
python_setup: PythonSetup,
distdir: DistDir,
workspace: Workspace,
union_membership: UnionMembership,
) -> SetupPy:
"""Run setup.py commands on all exported targets addressed."""
validate_args(setup_py_subsystem.args)
# Get all exported targets, ignoring any non-exported targets that happened to be
# globbed over, but erroring on any explicitly-requested non-exported targets.
exported_targets: List[ExportedTarget] = []
explicit_nonexported_targets: List[Target] = []
for target_with_origin in targets_with_origins:
tgt = target_with_origin.target
if tgt.has_field(PythonProvidesField):
exported_targets.append(ExportedTarget(tgt))
elif isinstance(target_with_origin.origin, AddressLiteralSpec):
explicit_nonexported_targets.append(tgt)
if explicit_nonexported_targets:
raise TargetNotExported(
"Cannot run setup.py on these targets, because they have no `provides=` clause: "
f'{", ".join(so.address.spec for so in explicit_nonexported_targets)}'
)
if setup_py_subsystem.transitive:
# Expand out to all owners of the entire dep closure.
transitive_targets = await Get(
TransitiveTargets, Addresses(et.target.address for et in exported_targets)
)
owners = await MultiGet(
Get(ExportedTarget, OwnedDependency(tgt))
for tgt in transitive_targets.closure
if is_ownable_target(tgt, union_membership)
)
exported_targets = list(FrozenOrderedSet(owners))
py2 = is_python2(
python_setup.compatibilities_or_constraints(
target_with_origin.target.get(PythonInterpreterCompatibility).value
for target_with_origin in targets_with_origins
)
)
chroots = await MultiGet(
Get(SetupPyChroot, SetupPyChrootRequest(exported_target, py2))
for exported_target in exported_targets
)
# If args were provided, run setup.py with them; Otherwise just dump chroots.
if setup_py_subsystem.args:
setup_py_results = await MultiGet(
Get(
RunSetupPyResult,
RunSetupPyRequest(exported_target, chroot, setup_py_subsystem.args),
)
for exported_target, chroot in zip(exported_targets, chroots)
)
for exported_target, setup_py_result in zip(exported_targets, setup_py_results):
addr = exported_target.target.address.spec
console.print_stderr(f"Writing dist for {addr} under {distdir.relpath}/.")
workspace.write_digest(setup_py_result.output, path_prefix=str(distdir.relpath))
else:
# Just dump the chroot.
for exported_target, chroot in zip(exported_targets, chroots):
addr = exported_target.target.address.spec
provides = exported_target.provides
setup_py_dir = distdir.relpath / f"{provides.name}-{provides.version}"
console.print_stderr(f"Writing setup.py chroot for {addr} to {setup_py_dir}")
workspace.write_digest(chroot.digest, path_prefix=str(setup_py_dir))
return SetupPy(0)
|
713f0b7f3558e2a69dcca0a7a251f4991ee49073
| 3,643,634
|
def list_tasks():
"""
    List all tasks, to make task management easier.
:return:
"""
try:
task_id = request.args.get("task_id")
task_status = request.args.get('status')
        # build the conditional query tuple
task_info_list = list()
tasks = TaskService.get_tasks_url_num(task_id=task_id, task_status=task_status)
for task in tasks:
hook_rule = task.hook_rule
# RedisService.get_task(task.id)["hook_rule"]
unscaned_urls_num = task.unscaned_urls_num
scaned_urls_num = task.scaned_urls_num
total_url_num = unscaned_urls_num + scaned_urls_num
if task.task_status in [TaskStatus.KILLED, TaskStatus.DONE]:
percent = 100
else:
percent = 0 if total_url_num == 0 else int((scaned_urls_num / total_url_num) * 100)
task_info_list.append({'receiver_emails': task.receivers_email, 'task_name': task.task_name,
'create_time': task.created_time.strftime("%Y-%m-%d %H:%M"), 'percent': percent,
'unscaned_url_num': unscaned_urls_num, 'scaned_url_num': scaned_urls_num,
'total_url_num': total_url_num, 'hook_rule': hook_rule, 'task_id': task.id,
'task_access_key': task.access_key, 'task_status': task.task_status,
"create_user_name": task.create_user_name})
task_info_list.reverse()
response = jsonify(status=200, message="查询成功", data=task_info_list)
return response
except Exception as e:
        logger.exception("show_current_tasks raised an error")
if isinstance(e, BaseHunterException):
return jsonify(status=400, message=str(e), data={"extra_info": "查询任务时传入非法的task_id"})
return jsonify(status=500, message="未知异常", data={"extra_info": "查询任务时出现未知异常,请联系管理员查看异常日志"})
|
c6d205e95bd7a1a2e76baf7f89c917310b683bc0
| 3,643,635
|
import itertools
import torch
def make_fixed_size(
protein,
shape_schema,
msa_cluster_size,
extra_msa_size,
num_res=0,
num_templates=0,
):
"""Guess at the MSA and sequence dimension to make fixed size."""
pad_size_map = {
NUM_RES: num_res,
NUM_MSA_SEQ: msa_cluster_size,
NUM_EXTRA_SEQ: extra_msa_size,
NUM_TEMPLATES: num_templates,
}
for k, v in protein.items():
# Don't transfer this to the accelerator.
if k == "extra_cluster_assignment":
continue
shape = list(v.shape)
schema = shape_schema[k]
msg = "Rank mismatch between shape and shape schema for"
assert len(shape) == len(schema), f"{msg} {k}: {shape} vs {schema}"
pad_size = [
pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)
]
padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)]
padding.reverse()
padding = list(itertools.chain(*padding))
if padding:
protein[k] = torch.nn.functional.pad(v, padding)
protein[k] = torch.reshape(protein[k], pad_size)
return protein
|
1125e1cdbe8f12d6613fb8dd9374afdbf1fd065a
| 3,643,636
|
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_urlhaus package"""
reload_params = {"package": u"fn_urlhaus",
"incident_fields": [],
"action_fields": [],
"function_params": [u"urlhaus_artifact_type", u"urlhaus_artifact_value"],
"datatables": [],
"message_destinations": [u"fn_urlhaus"],
"functions": [u"fn_urlhaus", u"fn_urlhaus_submission"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_urlhaus_lookup", u"example_urlhaus_url_submission"],
"actions": [u"Example: URLhaus Lookup", u"Example: URLhaus URL Submission"]
}
return reload_params
|
1665121ab3305f517242b122e2aaae2b12fe57f0
| 3,643,637
|
def urls(page, baseurl=auto, direct=True, prev=True, next=True):
"""
    Return a list of pagination URLs extracted from the page.
When baseurl is None relative URLs are returned; pass baseurl
to get absolute URLs.
``prev``, ``next`` and ``direct`` arguments control whether to return
'next page', 'previous page' links and links to specific pages.
By default, all link types are returned.
"""
return get_shared_autopager().urls(page, baseurl, direct, prev, next)
|
70f0337b5ed1a1cd8c0cfd1f99f8ad67da85b23d
| 3,643,638
|
def sinc_filter(audio: tf.Tensor,
cutoff_frequency: tf.Tensor,
window_size: int = 512,
sample_rate: int = None,
padding: Text = 'same') -> tf.Tensor:
"""Filter audio with sinc low-pass filter.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
cutoff_frequency: Frequency cutoff for low-pass sinc filter. If the
sample_rate is given, cutoff_frequency is in Hertz. If sample_rate is
None, cutoff_frequency is normalized ratio (frequency/nyquist) in the
range [0, 1.0]. Shape [batch_size, n_time, 1].
window_size: Size of the Hamming window to apply to the impulse.
sample_rate: Optionally provide the sample rate.
padding: Either 'valid' or 'same'. For 'same' the final output to be the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
window_size - 1).
Returns:
Filtered audio. Tensor of shape
[batch, audio_timesteps + window_size - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
"""
impulse_response = sinc_impulse_response(cutoff_frequency,
window_size=window_size,
sample_rate=sample_rate)
return fft_convolve(audio, impulse_response, padding=padding)
|
ea13a320744bb380b20643c2a995be67fc9d1303
| 3,643,639
|
def _getDataFlows(blocks):
"""
    Given a block dictionary from bifrost.proclog.load_by_pid(), return a list
of chains that give the data flow.
"""
# Find out what rings we have to work with and which blocks are sources
# or sinks
rings = []
sources, sourceRings = [], []
sinks, sinkRings = [], []
for block in blocks.keys():
rins, routs = [], []
rFound = False
for log in blocks[block].keys():
if log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
rFound = True
value = blocks[block][log][key]
if value not in rings:
rings.append( value )
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
if rFound:
if len(rins) == 0:
sources.append( block )
sourceRings.extend( routs )
if len(routs) == 0:
sinks.append( block )
sinkRings.extend( rins )
# Find out the chains
chains = []
for refRing in rings:
for block in blocks.keys():
rins, routs = [], []
for log in blocks[block].keys():
if log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
value = blocks[block][log][key]
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
if refRing in routs:
refBlock = block
refROuts = routs
for block in blocks.keys():
rins, routs = [], []
dtype = None
for log in blocks[block].keys():
if log.startswith('sequence'):
try:
bits = blocks[block][log]['nbit']
if blocks[block][log]['complex']:
bits *= 2
name = 'cplx' if blocks[block][log]['complex'] else 'real'
dtype = '%s%i' % (name, bits)
except KeyError:
pass
elif log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
value = blocks[block][log][key]
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
for ring in rins:
if ring in refROuts:
#print refRing, rins, block
chains.append( {'link':(refBlock,block), 'dtype':dtype} )
# Find out the associations (based on core binding)
associations = []
for block in blocks:
refBlock = block
refCores = []
        for i in range(32):  # xrange is Python 2 only
try:
refCores.append( blocks[block]['bind']['core%i' % i] )
except KeyError:
break
if len(refCores) == 0:
continue
for block in blocks:
if block == refBlock:
continue
cores = []
            for i in range(32):  # xrange is Python 2 only
try:
cores.append( blocks[block]['bind']['core%i' % i] )
except KeyError:
break
if len(cores) == 0:
continue
for core in cores:
if core in refCores:
if (refBlock,block) not in associations:
if (block,refBlock) not in associations:
associations.append( (refBlock, block) )
return sources, sinks, chains, associations
|
197cc64b5bf7ecd8e5c7d912239c93a1feffcd14
| 3,643,640
|
def find_lowest_cost_node(costs: dict, processed: list):
    """Return the key of the cheapest node that has not been processed yet (or None)."""
lowest_cost = float("inf") # Infinity
lowest_cost_node = None
for node in costs:
cost = costs[node]
if cost < lowest_cost and node not in processed:
lowest_cost = cost
lowest_cost_node = node
return lowest_cost_node
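Typical use inside Dijkstra's algorithm:
costs = {"a": 6, "b": 2, "fin": float("inf")}
assert find_lowest_cost_node(costs, processed=["b"]) == "a"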
|
aeb0ef046619bc9280d3d712329c672f76e36c90
| 3,643,641
|
def scale_img(image, random_coordinate=False):
"""
    Rescale the image so that it fits into a fixed 640x640 canvas.
    :param image: input image array (H, W, C)
    :param random_coordinate: if True, paste the rescaled image at a random offset inside the canvas
    :return: (canvas image with values scaled to [0, 1], scale factor)
"""
h, w, c = image.shape
if max(h, w) > 640:
f_scale = min(640./h, 640./w) # scale factor
image = cv2.resize(src=image, dsize=None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_CUBIC)
else:
f_scale = 1.
h_s, w_s, c_s = image.shape # h scaled
image_full = 255 * np.zeros((640, 640, c), dtype=np.uint8)
if random_coordinate: # random coordinate
        h_random = np.random.randint(0, 640 - h_s + 1)  # use the scaled size so the paste always fits
        w_random = np.random.randint(0, 640 - w_s + 1)
image_full[h_random:h_random + h_s, w_random:w_random + w_s, :] = image.astype(np.uint8)
else:
image_full[0:h_s, 0:w_s, :] = image.astype(np.uint8)
return image_full / 255., f_scale
|
6a0b93f4564c6d83e60f6f7a250822f801e0b65b
| 3,643,642
|
import math
def magnitude(v: Vector) -> float:
"""computes the magnitude (length) of a vector"""
return math.sqrt(sum_of_squares(v))
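A minimal sketch, assuming `sum_of_squares` (defined elsewhere in the module) sums the squared components:
def sum_of_squares(v):  # assumed helper, shown here only to make the example self-contained
    return sum(x * x for x in v)

assert magnitude([3.0, 4.0]) == 5.0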
|
881f2a3e75520b3f8da7ea093765e36d78e48c57
| 3,643,643
|
import os
import requests
def pew(text):
"""PEW -- Percentage of Echomimetic (onomatopoeic) Words."""
pew = None
onomatopoeic_words_num = 0
path = '/tmp/onomatopoeic_words_en-1.0.txt'
if not os.path.exists(path):
url = 'https://raw.githubusercontent.com/korniichuk/phd/master/resources/onomatopoeic_words_en-1.0.txt' # noqa: E501
r = requests.get(url)
with open(path, 'w') as f:
f.write(r.text)
with open(path, 'r') as f:
onomatopoeic_words = f.read().splitlines()
words_num, words = word_counter(text, 'en')
for word in words:
word_lower = word.lower().strip()
if word_lower in onomatopoeic_words:
onomatopoeic_words_num += 1
if words_num != 0:
pew = onomatopoeic_words_num / words_num
return pew
|
781cb95ade2fd3b11440022e0447aef18cdde7fc
| 3,643,644
|
import os
import sys
def topngbytes(name, rows, x, y, **k):
"""
Convenience function for creating a PNG file "in memory" as
a string. Creates a :class:`Writer` instance using the keyword
arguments, then passes `rows` to its :meth:`Writer.write` method.
The resulting PNG file is returned as bytes. `name` is used
to identify the file for debugging.
"""
if os.environ.get('PYPNG_TEST_FILENAME'):
print(name, file=sys.stderr)
f = BytesIO()
w = png.Writer(x, y, **k)
w.write(f, rows)
if os.environ.get('PYPNG_TEST_TMP'):
w = open(name, 'wb')
w.write(f.getvalue())
w.close()
return f.getvalue()
|
569b913211640627132f0c0af13614218bec3f46
| 3,643,645
|
import time
def supply_domes1finesk():
"""
Real Name: b'"Supply Domes-1Finesk"'
Original Eqn: b'MIN("Domes-1 Demad finesk" (Time), (outflow Finesk) )'
Units: b'MCM/Month'
Limits: (None, None)
Type: component
b''
"""
return np.minimum(domes1_demad_finesk(time()), (outflow_finesk()))
|
e7bbbdc49e45044179053a02c4b76c1dda798bc0
| 3,643,646
|
def poll(handle):
"""
    Polls a push_pull handle to determine whether the underlying
asynchronous operation has completed. After `poll()` returns `True`, `synchronize()`
will return without blocking.
Arguments:
handle: A handle returned by an push_pull asynchronous
operation.
Returns:
A flag indicating whether the operation has completed.
"""
return c_lib.byteps_torch_poll(handle) != 0
|
e228183068517962e7886c020e662b8c1a1f2912
| 3,643,647
|
from typing import Tuple
def _increase_explicit_hydrogen_for_bond_atom(
rwmol: Chem.rdchem.RWMol,
remove_bidx: bool,
bidx: int,
remove_eidx: bool,
eidx: int,
ai_to_remove: list,
) -> Tuple[Chem.rdchem.RWMol, list]:
"""Increase number of explicit hydrogens for atom in a bond.
Args:
rwmol: An RDKit RWmolecule (rdkit.Chem.rdchem.RWMol)
remove_bidx: Begin atom in bond will increase explicit hydrogens (bool)
remove_eidx: End atom in bond will increase explicit hydrogens (bool)
Returns:
Tuple with an RDKit RWmolecule and an updated list to remove
(rdkit.Chem.rdchem.RWMol, list).
"""
if remove_bidx or remove_eidx:
if remove_bidx:
ai_to_remove.append(bidx)
_increase_explicit_hydrogens(rwmol, eidx)
if remove_eidx:
ai_to_remove.append(eidx)
_increase_explicit_hydrogens(rwmol, bidx)
rwmol.RemoveBond(bidx, eidx)
return rwmol, ai_to_remove
|
cf0276730ee0837d43098f9712f7c199ba93b268
| 3,643,648
|
import os
import tempfile
import urllib.request
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(work_directory):
gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not gfile.Exists(filepath):
with tempfile.NamedTemporaryFile() as tmpfile:
temp_file_name = tmpfile.name
urllib.request.urlretrieve(source_url, temp_file_name)
gfile.Copy(temp_file_name, filepath)
with gfile.GFile(filepath) as f:
size = f.Size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
|
ddf3912517ac65d9775302522c1d287cd6499b89
| 3,643,649
|
def plot_historical_actuals_forecast(e, title=None, ylabel='',
include_pred_int=False,
years_prior_include=2,
forecast_display_start=None,
e2=None):
"""Produce a plot of the ensemble forecasts
Returns
----------
plt object
"""
if e.forecast['consensus'] is None:
raise Exception('No forecast found.')
if title is None and e.validation['consensus'] is not None:
title = 'Training, forecast and actuals'
if title is None and e.validation['consensus'] is None:
title = 'Training and forecast'
fig, ax = plt.subplots(figsize=(13, 11))
fig.suptitle(title, fontsize=24)
plt.ylabel(ylabel, fontsize=20)
plt.rc('legend', fontsize=18)
plt.rc('ytick', labelsize=18)
plt.rc('xtick', labelsize=18)
plt.xticks(rotation = 30)
ax.xaxis.set_major_locator(mdates.AutoDateLocator(maxticks=12))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d-%y'))
ax.yaxis.set_major_formatter(FuncFormatter(human_format))
if forecast_display_start is None:
forecast_display_start = min(e.forecast['consensus'].dt)
forecast_mask = (e.forecast['consensus'].dt >= forecast_display_start)
forecast_len = forecast_mask.sum()
max_vals = []
for yp in list(range(1, years_prior_include + 1)):
if len(e.periods_agg) > 0 and max(e.periods_agg) > 1:
agg_str = 'period' + str(max(e.periods_agg))
range_train_yp = {'min':(forecast_display_start -
_datetime_delta(yp, 'Y') +
_datetime_delta(yp, 'D')),
'max':(max(e.forecast['consensus'].dt) -
_datetime_delta(yp, 'Y') +
_datetime_delta(yp, 'D'))}
training_mask = (
(e.training['aggregated'][agg_str].dt >= range_train_yp['min']) &
(e.training['aggregated'][agg_str].dt <= range_train_yp['max']))
train_len = training_mask.sum()
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask][:train_len],
e.training['aggregated'][agg_str].actual.loc[
training_mask][:forecast_len],
label='actuals ' + str(int(yp)) + 'YA',
linewidth=4)
history_len = e.training['aggregated'][agg_str].shape[0]
max_vals = max_vals + [max(
e.training['aggregated'][agg_str].actual.loc[
training_mask][:forecast_len])]
else:
range_train_yp = {'min':(forecast_display_start -
_datetime_delta(yp, 'Y')),
'max':(max(e.forecast['consensus'].dt) -
_datetime_delta(yp, 'Y'))}
training_mask = (
(e.training['history'].dt >= range_train_yp['min']) &
(e.training['history'].dt <= range_train_yp['max']))
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask],
e.training['history'].actual.loc[training_mask],
label='actuals ' + str(int(yp)) + 'YA', linewidth=2)
history_len = e.training['history'].shape[0]
max_vals = max_vals + [max(
e.training['history'].actual.loc[training_mask])]
total_len = history_len + e.forecast['consensus'].shape[0]
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask],
e.forecast['consensus'].forecast.loc[forecast_mask],
label='forecast',
linewidth=2 + 2 * int(total_len < 400),
c='indianred')
max_vals = max_vals + [max(
e.forecast['consensus'].forecast.loc[forecast_mask])]
if include_pred_int:
fp = plt.fill_between(e.forecast['consensus'].dt.loc[forecast_mask],
e.forecast['consensus'].forecast_lower.loc[
forecast_mask],
e.forecast['consensus'].forecast_upper.loc[
forecast_mask],
color='indianred', alpha=0.3,
label=str(round(
e.pred_level * 100)) + '% prediction band')
max_vals = max_vals + [max(e.forecast['consensus'].forecast_upper.loc[
forecast_mask])]
if (e.validation['consensus'] is not None and
len(e.validation['consensus']) > 0):
fp = plt.plot(e.validation['consensus'].dt.loc[forecast_mask],
e.validation['consensus'].actual.loc[forecast_mask],
label='actuals', c='mediumseagreen',
linewidth=2 + 2 * int(total_len < 400))
max_vals = max_vals + [max(
e.validation['consensus'].actual.loc[forecast_mask])]
if (e2 is not None and
len(e.forecast['consensus'].dt) > 0):
forecast_mask2 = (e2.forecast['consensus'].dt >= forecast_display_start)
fp = plt.plot(e2.forecast['consensus'].dt.loc[forecast_mask2],
e2.forecast['consensus'].forecast.loc[forecast_mask2],
label='latest forecast',
linewidth=2 + 2 * int(total_len < 400),
c='purple')
max_vals = max_vals + [max(
e2.forecast['consensus'].forecast.loc[forecast_mask2])]
plt.ylim([0, 1.05 * max(max_vals)])
plt.legend(loc='lower center', ncol=3, framealpha=0.05)
plt.grid()
return fp
|
e6604fe35ce6a65ff61ee45a387167d019be867a
| 3,643,650
|
import time
import threading
def f2(a, b):
"""
    concurrent_num = 600 is nothing to be afraid of: this is a smart thread pool, and if the function is fast it will not actually start that many threads.
    In this example the function's run time changes dynamically, so a fixed duration cannot be guessed from parameters set in advance. It tests whether QPS stays stable and whether the thread pool grows and shrinks automatically.
    Note that the printed thread count also includes a few other threads started by the framework itself, so it is not exactly equal to the number of worker threads required.
    ## Search the console output for the keyword 新启动线程 ("new thread started") to see when the pool decides to grow.
    ## Search the console output for the keyword 停止线程 ("thread stopped") to see when the pool decides to shrink.
"""
result = a + b
sleep_time = 0.01
    if time.time() - t_start > 60:  # first make the function gradually slower, to see whether the framework grows the thread pool on demand
sleep_time = 7
if time.time() - t_start > 120:
sleep_time = 31
if time.time() - t_start > 200:
sleep_time = 79
    if time.time() - t_start > 400:  # finally make the function fast again, to see whether the framework shrinks the thread pool automatically
sleep_time = 0.8
if time.time() - t_start > 500:
sleep_time = None
print(f'{time.strftime("%H:%M:%S")} ,当前线程数量是 {threading.active_count()}, {a} + {b} 的结果是 {result}, sleep {sleep_time} 秒')
if sleep_time is not None:
        time.sleep(sleep_time)  # simulate an operation that blocks for n seconds; concurrency is needed to get around the blocking
return result
|
4f555d2b684e06d171a821fde6c10d2a72596396
| 3,643,651
|
def minimize_loss_single_machine_manual(loss,
accuracy,
layer_collection,
device=None,
session_config=None):
"""Minimize loss with K-FAC on a single machine(Illustrative purpose only).
This function does inverse and covariance computation manually
for illustrative pupose. Check `minimize_loss_single_machine` for
automatic inverse and covariance op placement and execution.
A single Session is responsible for running all of K-FAC's ops. The covariance
and inverse update ops are placed on `device`. All model variables are on CPU.
Args:
loss: 0-D Tensor. Loss to be minimized.
accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
layer_collection: LayerCollection instance describing model architecture.
Used by K-FAC to construct preconditioner.
device: string or None. The covariance and inverse update ops are run on
this device. If empty or None, the default device will be used.
(Default: None)
session_config: None or tf.ConfigProto. Configuration for tf.Session().
Returns:
final value for 'accuracy'.
"""
device_list = [] if not device else [device]
# Train with K-FAC.
g_step = tf.train.get_or_create_global_step()
optimizer = kfac.KfacOptimizer(
learning_rate=0.0001,
cov_ema_decay=0.95,
damping=0.001,
layer_collection=layer_collection,
placement_strategy="round_robin",
cov_devices=device_list,
inv_devices=device_list,
trans_devices=device_list,
momentum=0.9)
(cov_update_thunks,
inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()
def make_update_op(update_thunks):
update_ops = [thunk() for thunk in update_thunks]
return tf.group(*update_ops)
cov_update_op = make_update_op(cov_update_thunks)
with tf.control_dependencies([cov_update_op]):
inverse_op = tf.cond(
tf.equal(tf.mod(g_step, _INVERT_EVERY), 0),
lambda: make_update_op(inv_update_thunks), tf.no_op)
with tf.control_dependencies([inverse_op]):
with tf.device(device):
train_op = optimizer.minimize(loss, global_step=g_step)
tf.logging.info("Starting training.")
with tf.train.MonitoredTrainingSession(config=session_config) as sess:
while not sess.should_stop():
global_step_, loss_, accuracy_, _ = sess.run(
[g_step, loss, accuracy, train_op])
if global_step_ % _REPORT_EVERY == 0:
tf.logging.info("global_step: %d | loss: %f | accuracy: %s",
global_step_, loss_, accuracy_)
return accuracy_
|
c5f53d7eddabe3ea5ac30ae4ecc050ee43ffa5e7
| 3,643,652
|
def bass_call_0(function, *args):
"""Makes a call to bass and raises an exception if it fails. Does not consider 0 an error."""
res = function(*args)
if res == -1:
code = BASS_ErrorGetCode()
raise BassError(code, get_error_description(code))
return res
|
9355f12b7277914e2397c64103666be0f5b801e5
| 3,643,653
|
def port_speed(value : str | None = None) -> int | None:
"""Port speed -> Mb/s parcer"""
if value is None:
return None
elif value == "X":
return 0
elif value == "M":
return 100
elif value == "G":
return 1000
elif value == "Q":
return 2500
else:
raise(AsusRouterNotImplementedError(value))
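For example:
assert port_speed("G") == 1000
assert port_speed(None) is None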
|
2bb41bf66211724a12bdf392ecf018c71836f42b
| 3,643,654
|
def convert_flag_frame_to_strings(flag_frame, sep=', ', empty='OK'):
"""
Convert the `flag_frame` output of :py:func:`~convert_mask_into_dataframe`
into a pandas.Series of strings which are the active flag names separated
by `sep`. Any row where all columns are false will have a value of `empty`.
Parameters
----------
flag_frame : pandas.DataFrame
Boolean DataFrame with descriptive column names
sep : str
String to separate column names by
empty : str
String to replace rows where no columns are True
Returns
-------
pandas.Series
Of joined column names from `flag_frame` separated by `sep` if True.
Has the same index as `flag_frame`.
"""
return np.logical_and(flag_frame, flag_frame.columns + sep).replace(
False, '').sum(axis=1).str.rstrip(sep).replace('', empty)
|
fa7f0cc427e4b6e4c703ea2011af59f1bad090ab
| 3,643,655
|
def pp_file_to_dataframe(pp_filename):
""" read a pilot point file to a pandas Dataframe
Parameters
----------
pp_filename : str
pilot point file
Returns
-------
df : pandas.DataFrame
a dataframe with pp_utils.PP_NAMES for columns
"""
df = pd.read_csv(pp_filename, delim_whitespace=True,
header=None, names=PP_NAMES,usecols=[0,1,2,3,4])
df.loc[:,"name"] = df.name.apply(str).apply(str.lower)
return df
|
777272db75f0e6c7bd1eee0b24d4879bf2ceb66a
| 3,643,656
|
import os
def get_ps_lib_dirs():
"""
Add directory to list as required
"""
polysync_install = os.path.join('/', 'usr', 'local', 'polysync')
polysync_lib = os.path.join(polysync_install, 'lib')
polysync_vendor = os.path.join(polysync_install, 'vendor', 'lib')
return [
polysync_lib,
polysync_vendor, ]
|
ce4745ef5dcdb4c00051eff6fae6082f98c90498
| 3,643,657
|
def edit_product(request, product_id):
""" Edit a product in the store """
if not request.user.is_superuser:
messages.error(request, 'Sorry, only store owners can do that.')
return redirect(reverse('home'))
product = get_object_or_404(Product, pk=product_id)
if request.method == 'POST':
form = ProductForm(request.POST, request.FILES, instance=product)
if form.is_valid():
form.save()
messages.success(request, 'Successfully updated product!')
return redirect(reverse('individual_product', args=[product.id]))
else:
messages.error(
request, 'Failed to update product. '
'Please ensure the form is valid.')
else:
form = ProductForm(instance=product)
messages.info(request, f'You are editing {product.name}')
template = 'products/edit_product.html'
context = {
'form': form,
'product': product,
}
return render(request, template, context)
|
0f22ca856ca71e973bd8eed85bba7f54ce3a3464
| 3,643,658
|
def _resolve_target(target, target_frame='icrs'):
"""Return an `astropy.coordinates.SkyCoord` form `target` and its frame."""
if target_frame == 'icrs':
return parse_coordinates(target)
return SkyCoord(target, frame=target_frame)
|
b2b8132ca15b6bcfbb6d67c90abf36760be6a2d1
| 3,643,659
|
import itertools
def iter_fragments(fragiter, start_frag_id = None, stop_frag_id = None):
"""Given a fragment iterator and a start and end fragment id,
return an iterator which yields only fragments within the range.
"""
if start_frag_id and stop_frag_id:
dpred = lambda f: fragment_id_lt(f.fragment_id, start_frag_id)
tpred = lambda f: fragment_id_le(f.fragment_id, stop_frag_id)
return itertools.takewhile(tpred, itertools.dropwhile(dpred, fragiter))
elif start_frag_id and not stop_frag_id:
dpred = lambda f: fragment_id_lt(f.fragment_id, start_frag_id)
return itertools.dropwhile(dpred, fragiter)
elif not start_frag_id and stop_frag_id:
tpred = lambda f: fragment_id_le(f.fragment_id, stop_frag_id)
return itertools.takewhile(tpred, fragiter)
return fragiter
|
a1ab1245a6cb450cdb363a7029147501adf913db
| 3,643,660
|
from typing import Generator
import os
def evaluate_gpt_with_distgen(settings,
archive_path=None,
merit_f=None,
gpt_input_file=None,
distgen_input_file=None,
workdir=None,
use_tempdir=True,
gpt_bin='$GPT_BIN',
timeout=2500,
auto_phase=False,
verbose=False,
gpt_verbose=False,
asci2gdf_bin='$ASCI2GDF_BIN',
kill_msgs=DEFAULT_KILL_MSGS
):
"""
Simple evaluate GPT.
Similar to run_astra_with_distgen, but returns a flat dict of outputs.
Will raise an exception if there is an error.
"""
G = run_gpt_with_distgen(settings=settings,
gpt_input_file=gpt_input_file,
distgen_input_file=distgen_input_file,
workdir=workdir,
use_tempdir=use_tempdir,
gpt_bin=gpt_bin,
timeout=timeout,
auto_phase=auto_phase,
verbose=verbose,
gpt_verbose=gpt_verbose,
asci2gdf_bin=asci2gdf_bin,
kill_msgs=kill_msgs)
if merit_f:
merit_f = tools.get_function(merit_f)
output = merit_f(G)
else:
output = default_gpt_merit(G)
if output['error']:
        raise ValueError('error occurred!')
#Recreate Generator object for fingerprint, proper archiving
# TODO: make this cleaner
gen = Generator()
gen.input = G.distgen_input
fingerprint = fingerprint_gpt_with_distgen(G, gen)
output['fingerprint'] = fingerprint
if archive_path:
path = tools.full_path(archive_path)
assert os.path.exists(path), f'archive path does not exist: {path}'
archive_file = os.path.join(path, fingerprint+'.h5')
output['archive'] = archive_file
# Call the composite archive method
archive_gpt_with_distgen(G, gen, archive_file=archive_file)
return output
|
ed229084f43cc8143538d1e76ea1a5e4e4f220eb
| 3,643,661
|
from bst import BST
def bst_right_imbalanced():
"""Bst that extends right."""
test_bst = BST((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
return test_bst
|
4cdb45770634c389831057832b33755fe0a8db23
| 3,643,662
|
import time
from functools import wraps
def retry(exception_to_check, tries=4, delay=0.5, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
Args:
exception_to_check (Exception): the exception to check.
may be a tuple of exceptions to check
tries (int): number of times to try (not retry) before giving up
delay (float, int): initial delay between retries in seconds
backoff (int): backoff multiplier e.g. value of 2 will double the delay
each retry
logger (logging.Logger): logger to use. If None, print
"""
def deco_retry(func):
@wraps(func)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return func(*args, **kwargs)
except exception_to_check as exc:
msg = "%s, Retrying in %s seconds..." % (str(exc), mdelay)
if logger:
logger.warning(msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return func(*args, **kwargs)
return f_retry # true decorator
return deco_retry
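A usage sketch with a hypothetical flaky function (the exception type and timings are illustrative):
@retry(ConnectionError, tries=3, delay=0.1, backoff=2)
def fetch_status():
    # pretend this sometimes raises ConnectionError; it will be retried
    # twice, with delays of 0.1s and 0.2s, before the final attempt
    ...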
|
8e607104abf1cd5165199b7792f6955084e674cd
| 3,643,663
|
def HESSIAN_DIAG(fn):
"""Generates a function which computes per-argument partial Hessians."""
def h_fn(*args, **kwargs):
args = (args,) if not isinstance(args, (tuple, list)) else tuple(args)
ret = [
jaxm.hessian(
lambda arg: fn(*args[:i], arg, *args[i + 1 :], **kwargs)
)(arg)
for (i, arg) in enumerate(args)
]
return ret
return h_fn
|
01075519f7c3ae052a553bd3911e0447fa8da6ce
| 3,643,664
|
from scipy.spatial import cKDTree
import numpy
def match_xy(x1, y1, x2, y2, neighbors=1):
"""Match x1 & y1 to x2 & y2, neighbors nearest neighbors.
Finds the neighbors nearest neighbors to each point in x2, y2 among
all x1, y1."""
vec1 = numpy.array([x1, y1]).T
vec2 = numpy.array([x2, y2]).T
kdt = cKDTree(vec1)
dist, idx = kdt.query(vec2, neighbors)
m1 = idx.ravel()
m2 = numpy.repeat(numpy.arange(len(vec2), dtype='i4'), neighbors)
dist = dist.ravel()
m = m1 < len(x1) # possible if fewer than neighbors elements in x1.
return m1[m], m2[m], dist[m]
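# Hedged usage sketch (not part of the original snippet): matches two query
# points against three reference points; the coordinates are illustrative.
m1_demo, m2_demo, dist_demo = match_xy(
    numpy.array([0.0, 1.0, 2.0]), numpy.array([0.0, 1.0, 2.0]),
    numpy.array([0.1, 1.9]), numpy.array([0.1, 2.1]), neighbors=1)
print(m1_demo, m2_demo, dist_demo)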
|
cd360ee6fc0ec83fad565313f6cbb0e8a4292ca2
| 3,643,665
|
from flask import Flask
def make_doc():
""" Only used for sphinx documentation """
doc_app = Flask(__name__)
doc_app.register_blueprint(blueprint())
return doc_app
|
beff9149ceffb04f80071f6a595ef13e72ebc838
| 3,643,666
|
def logout(request):
"""Logs out the user"""
user_logout(request)
return redirect(auth_views.login)
|
739ef6b3b4daded0af786f8261072e05e8bba273
| 3,643,667
|
import os
import tempfile
import pydot
from networkx.drawing import nx_pydot
def to_pydot(obj):
"""Specify either of the following options: a dot string (filename or text),
a networkx graph, a pydot graph, an igraph graph, or a callable function.
The function will be called with a filename to write its dot output to."""
if isinstance(obj, pydot.Graph):
return obj
elif isinstance(obj, str):
if os.path.isfile(obj):
return pydot.graph_from_dot_file(obj)[0]
else:
return pydot.graph_from_dot_data(obj)[0]
elif is_networkx(obj):
return nx_pydot.to_pydot(obj)
elif is_igraph(obj):
with tempfile.NamedTemporaryFile(mode='w+') as f:
obj.write_dot(f.name)
return pydot.graph_from_dot_file(f.name)[0]
elif callable(obj):
with tempfile.NamedTemporaryFile(mode='w+') as f:
obj(f.name)
return pydot.graph_from_dot_file(f.name)[0]
elif hasattr(obj, 'to_dot') and callable(obj.to_dot):
return to_pydot(obj.to_dot())
else:
raise TypeError("Can't convert to pydot")
|
26ec25b22b415ba4a4f65172bdc47bf3eb4f7bc7
| 3,643,668
|
import math
def workout_train_chunk_length(inp_len: int,
resampling_factor: int = 1,
num_encoders: int = 5,
kernel: int = 8,
stride: int = 2) -> int:
"""
Given inp_len, return the chunk size for training
"""
out_len = inp_len * resampling_factor
for _ in range(num_encoders):
out_len = math.ceil((out_len - kernel) / stride) + 1
for _ in range(num_encoders):
out_len = (out_len - 1) * stride + kernel
return math.ceil(out_len / resampling_factor)
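# Hedged usage sketch (not part of the original snippet): the input lengths
# below are illustrative assumptions; the calls show how the strided
# encoder/decoder round trip inflates the training chunk size.
print(workout_train_chunk_length(16000))
print(workout_train_chunk_length(44100, resampling_factor=2))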
|
a7e7f42aa9670f1bda98c588e50052db0f4eb90f
| 3,643,669
|
def asin(e):
"""
:rtype: Column
"""
return col(Asin(parse(e)))
|
7c7fb32e84d7a9af74bc64eed2f111fd2030a499
| 3,643,670
|
import subprocess
def gs_exists(gs_url):
"""Check if gs_url points to a valid file we can access"""
# If gs_url is not accessible, the response could be one of:
# 1. "You aren't authorized to read ..."
# 2. "No URLs matched: ..."
# and it would have a non-0 status, which would be raised.
#
# Otherwise, it would return a bunch of information about the file,
# one of them being "Creation time".
try:
res = run_cmd(f"gsutil stat {gs_url}")
return "Creation time:" in res.stdout.decode("utf-8")
except subprocess.CalledProcessError:
return False
|
bfec2639d98f9c99107951ca3e48bdfc1c6ea545
| 3,643,671
|
def ft32m3(ft3):
"""ft^3 -> m^3"""
return 0.028316847*ft3
|
74f55f722c7e90be3fa2fc1f79f506c44bc6e9bc
| 3,643,672
|
def get_audience(request):
"""
Uses Django settings to format the audience.
To figure out the audience to use, it does this:
1. If settings.DEBUG is True and settings.SITE_URL is not set or
empty, then the domain on the request will be used.
This is *not* secure!
2. Otherwise, settings.SITE_URL is checked for the request
domain and an ImproperlyConfigured error is raised if it
is not found.
Examples of settings.SITE_URL::
SITE_URL = 'http://127.0.0.1:8001'
SITE_URL = 'https://example.com'
SITE_URL = 'http://example.com'
SITE_URL = (
'http://127.0.0.1:8001',
'https://example.com',
'http://example.com'
)
"""
req_proto = 'https://' if request.is_secure() else 'http://'
req_domain = request.get_host()
req_url = '%s%s' % (req_proto, req_domain)
site_url = getattr(settings, 'SITE_URL', None)
if not site_url:
if settings.DEBUG:
return req_url
else:
raise ImproperlyConfigured('`SITE_URL` must be set. See '
'documentation for django-browserid')
if isinstance(site_url, str):
site_url = [site_url]
try:
url_iterator = iter(site_url)
except TypeError:
raise ImproperlyConfigured('`SITE_URL` is not a string or an iterable')
if req_url not in url_iterator:
raise ImproperlyConfigured('request `{0}`, was not found in SITE_URL `{1}`'
.format(req_url, site_url))
return req_url
|
f5321a1ecb80c2aa3b7b16979429355841eee30a
| 3,643,673
|
def max_shading_elevation(total_collector_geometry, tracker_distance,
relative_slope):
"""Calculate the maximum elevation angle for which shading can occur.
Parameters
----------
total_collector_geometry: :py:class:`Shapely Polygon <Polygon>`
Polygon corresponding to the total collector area.
tracker_distance: array-like
Distances between neighboring trackers and the reference tracker.
relative_slope: array-like
Slope between neighboring trackers and reference tracker. A positive
slope means neighboring collector is higher than reference collector.
Returns
-------
max_shading_elevation: float
The highest solar elevation angle for which shading can occur for a
given field layout and collector geometry [degrees]
Note
----
The maximum shading elevation angle is calculated for all neighboring
trackers using the bounding box geometry and the bounding circle. For
rectangular collectors (as approximated when using the bounding box), the
maximum shading elevation occurs when one of the upper corners of the
projected shading geometry and the lower corner of the reference collector
intersect. For circular collectors (as approximated by the bounding
circle), the maximum elevation occurs when the projected shadow is directly
below the reference collector and the two circles are tangent to each other.
The maximum elevation is calculated using both the bounding box and the
bounding circle, and the minimum of these two elevations is returned. For
rectangular and circular collectors, the maximum elevation is exact,
whereas for other geometries, the returned elevation is a conservative
estimate.
"""
# Calculate extent of box bounding the total collector geometry
x_min, y_min, x_max, y_max = total_collector_geometry.bounds
# Collector dimensions
x_dim = x_max - x_min
y_dim = y_max - y_min
delta_gamma_rad = np.arcsin(x_dim / tracker_distance)
# Calculate max elevation based on the bounding box (rectangular)
max_elevations_rectangular = np.rad2deg(np.arcsin(
y_dim * np.cos(np.deg2rad(relative_slope)) /
(tracker_distance * np.cos(delta_gamma_rad)))) + relative_slope
# Calculate max elevations using the minimum bounding diameter (circular)
D_min = _calculate_min_tracker_spacing(total_collector_geometry)
max_elevations_circular = np.rad2deg(np.arcsin(
(D_min * np.cos(np.deg2rad(relative_slope)))/tracker_distance)) \
+ relative_slope
# Compute max elevation
max_elevation = np.nanmin([np.nanmax(max_elevations_rectangular),
np.nanmax(max_elevations_circular)])
return max_elevation
|
f3e623607ae1c2576fa375146f4acc6186189d8c
| 3,643,674
|
def tensor_scatter_add(input_x, indices, updates):
"""
Creates a new tensor by adding the values from the positions in `input_x` indicated by
`indices`, with values from `updates`. When multiple values are given for the same
index, the updated result will be the sum of all values. This operation is almost
equivalent to using ScatterNdAdd, except that the updates are applied on output `Tensor`
instead of input `Parameter`.
The last axis of `indices` is the depth of each index vectors. For each index vector,
there must be a corresponding value in `updates`. The shape of `updates` should be
equal to the shape of `input_x[indices]`. For more details, see use cases.
Note:
If some values of the `indices` are out of bound, instead of raising an index error,
the corresponding `updates` will not be updated to `input_x`.
Args:
- **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
- **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
The rank must be at least 2.
- **updates** (Tensor) - The tensor to update the input tensor; it has the same data type as `input_x`,
and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
Returns:
Tensor, has the same shape and type as `input_x`.
Raises:
TypeError: If dtype of `indices` is neither int32 nor int64.
ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, nn
>>> from mindspore import ops
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
>>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
>>> output = ops.tensor_scatter_add(input_x, indices, updates)
>>> print(output)
[[ 3.1 0.3 3.6]
[ 0.4 0.5 -3.2]]
"""
return tensor_scatter_add_(input_x, indices, updates)
|
38707efab3d2f947cbc44dacb6427281d3b652cb
| 3,643,675
|
def test100():
"""
CIFAR-100 test set creator.
It returns a reader creator, each sample in the reader is image pixels in
[0, 1] and label in [0, 99].
:return: Test reader creator.
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
'test')
|
f43e27a7ce1ec40dfc50d513de5406b2683a566b
| 3,643,676
|
def _calculate_target_matrix_dimension(m, kernel, paddings, strides):
"""
Calculate the target matrix dimension.
Parameters
----------
m: ndarray
2d Matrix
kernel: ndarray
2d Convolution kernel
paddings: tuple
Number of padding cells in (row, column) applied on one side.
If you pad 2 on the left and 2 on the right, specify 2.
strides: tuple
Step size in (row, column)
Returns
-------
out: tuple
Tuple containing (number of rows, number of columns)
Raises
------
ValueError
If kernel size is greater than m in any axis after padding
"""
source_height = m.shape[0]
source_width = m.shape[1]
padding_row = paddings[0]
padding_column = paddings[1]
kernel_height = kernel.shape[0]
kernel_width = kernel.shape[1]
if kernel_height > (source_height + padding_row) or kernel_width > (source_width + padding_column):
raise ValueError("Kernel size is larger than the matrix")
row_stride = strides[0]
col_stride = strides[1]
# (source_height - kernel_height)/strides[0] is how many steps you can go down.
# + 1 to include the start position.
target_height = int((source_height + padding_row - kernel_height) / row_stride) + 1
target_width = int((source_width + padding_column - kernel_width) / col_stride) + 1
return (target_height, target_width)
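# Hedged usage sketch (not part of the original snippet): a 5x5 input with a
# 3x3 kernel, no padding and unit strides should give a 3x3 output map.
import numpy as np
m_demo = np.zeros((5, 5))
kernel_demo = np.ones((3, 3))
print(_calculate_target_matrix_dimension(m_demo, kernel_demo, paddings=(0, 0), strides=(1, 1)))  # -> (3, 3)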
|
77b5cabd7101b957a27fc422d1ed1715525400a0
| 3,643,677
|
def any_email():
"""
Return random email
>>> import re
>>> result = any_email()
>>> type(result)
<type 'str'>
>>> re.match(r"(?:^|\s)[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", result, re.IGNORECASE) is not None
True
"""
return "%s@%s.%s" % (any_string(max_length=10),
any_string(max_length=10),
any_string(min_length=2, max_length=3))
|
8575d02d3c9a777bc2cf27f1344676cad5514d5e
| 3,643,678
|
import numpy as np
def transform_pts_base_to_stitched_im(pts):
"""Project 3D points in base frame to the stitched image
Args:
pts (np.array[3, N]): points (x, y, z)
Returns:
pts_im (np.array[2, N])
inbound_mask (np.array[N])
"""
im_size = (480, 3760)
# to image coordinate
pts_rect = pts[[1, 2, 0], :]
pts_rect[:2, :] *= -1
# to pixel
horizontal_theta = np.arctan2(pts_rect[0], pts_rect[2])
horizontal_percent = horizontal_theta / (2 * np.pi) + 0.5
x = im_size[1] * horizontal_percent
y = (
485.78 * pts_rect[1] / pts_rect[2] * np.cos(horizontal_theta)
+ 0.4375 * im_size[0]
)
# horizontal_theta = np.arctan(pts_rect[0, :] / pts_rect[2, :])
# horizontal_theta += (pts_rect[2, :] < 0) * np.pi
# horizontal_percent = horizontal_theta / (2 * np.pi)
# x = ((horizontal_percent * im_size[1]) + 1880) % im_size[1]
# y = (
# 485.78 * (pts_rect[1, :] / ((1 / np.cos(horizontal_theta)) * pts_rect[2, :]))
# ) + (0.4375 * im_size[0])
# x is always in bound by cylindrical parametrization
# y is always at the lower half of the image, since laser is lower than the camera
# thus only one boundary needs to be checked
inbound_mask = y < im_size[0]
return np.stack((x, y), axis=0).astype(np.int32), inbound_mask
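# Hedged usage sketch (not part of the original snippet): projects two points
# given in the base frame; the coordinates are illustrative assumptions.
pts_demo = np.array([[2.0, 1.5],    # x (forward)
                     [0.5, -0.3],   # y (left)
                     [-0.2, -0.1]]) # z (up)
pts_im_demo, inbound_demo = transform_pts_base_to_stitched_im(pts_demo)
print(pts_im_demo)
print(inbound_demo)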
|
c6397451e458af086fe316c6933ca27641daac26
| 3,643,679
|
def get_equal_static_values(*args):
"""get_equal_static_values(FileConstHandle input, FileConstHandle out) -> bool"""
return _RMF.get_equal_static_values(*args)
|
8f929f0eae16e620b5025ad34b4437d836d8d671
| 3,643,680
|
import numpy as np
def quaternion_to_rotation_matrix(quaternion):
"""
This converts a quaternion representation of on orientation to
a rotation matrix. The input is a 4-component numpy array in
the order [w, x, y, z], and the output is a 3x3 matrix stored
as a 2D numpy array. We follow the approach in
"3D Math Primer for Graphics and Game Development" by
Dunn and Parberry.
"""
w = quaternion[0]
x = quaternion[1]
y = quaternion[2]
z = quaternion[3]
R = np.empty((3, 3), dtype=np.float64)
R[0][0] = 1.0 - 2.0*y**2 - 2.0*z**2
R[0][1] = 2.0*x*y + 2.0*w*z
R[0][2] = 2.0*x*z - 2.0*w*y
R[1][0] = 2.0*x*y - 2.0*w*z
R[1][1] = 1.0 - 2.0*x**2 - 2.0*z**2
R[1][2] = 2.0*y*z + 2.0*w*x
R[2][0] = 2.0*x*z + 2.0*w*y
R[2][1] = 2.0*y*z - 2.0*w*x
R[2][2] = 1.0 - 2.0*x**2 - 2.0*y**2
return R
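# Hedged usage sketch (not part of the original snippet): the identity
# quaternion [1, 0, 0, 0] maps to the 3x3 identity matrix under either
# row- or column-vector convention, so it is a safe sanity check.
print(quaternion_to_rotation_matrix(np.array([1.0, 0.0, 0.0, 0.0])))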
|
95a8dd9d0a9510710e7b6ed676a5f03e26b2da96
| 3,643,681
|
import h5py
import numpy as np
import nilearn as nl
import nilearn.image  # make nl.image available
def load_subject(filename: str,
mask_niimg):
"""
Load a subject saved in .mat format with
the version 7.3 flag. Return the subject
niimg, using a mask niimg as a template
for nifti headers.
Args:
filename <str> the .mat filename for the subject data
mask_niimg niimg object the mask niimg object used for nifti headers
"""
subject_data = None
with h5py.File(filename, 'r') as f:
subject_data = f['SM_feature'][()]
# It's necessary to reorient the axes, since h5py flips axis order
subject_data = np.moveaxis(subject_data, [0, 1, 2, 3], [3, 2, 1, 0])
subject_niimg = nl.image.new_img_like(
mask_niimg, subject_data, affine=mask_niimg.affine, copy_header=True)
return subject_niimg
|
e3cdb751cebd7407b694555adfb21e7a6a224c50
| 3,643,682
|
import os
import glob
def get_data_monash(directory):
"""
Get the monash data in a dictionary
"""
# Generate the wildcard for the models
wildcard = os.path.join(directory, "*")
model_files = glob.glob(wildcard)
all_models = {}
for model_file in model_files:
# First extract the filename
file_name = os.path.split(model_file)[-1].split(".")[0]
# Now for each line in the model file, retrieve the abundances
with open(model_file, "r") as fread:
# There are several models in each file, so just
# look for each and save it
for line in fread:
# Find initial mass
if "Initial mass" in line:
# Get label
lnlst = line.split()
# Initial mass
ini_mass = lnlst[4][0:4]
label = "monash_m" + ini_mass + "_"
# Mix
mix = lnlst[13][0:8]
label += "mix_" + mix + "_"
# Ov
if "_ov" in line:
ov = lnlst[16]
label += "N_ov_" + ov + "_"
# Rest of the label
label += file_name.split("_")[-1]
# Now model
if "Final abundances" in line:
fread.readline() # Skip header
# Save elements
elems = {}
for line in fread:
if "#" in line:
break
# Add element to the list
lnlst = line.split()
name = lnlst[0].capitalize()
if name == "Fe":
feH = float(lnlst[3])
else:
name += "/Fe"
elems[name] = float(lnlst[4])
# Store
all_models[label] = elems
all_models[label]["Fe/H"] = feH
return all_models
|
c427d49a925cdd97ddaf6731e799c90192b5ea33
| 3,643,683
|
def pretty_duration(seconds):
"""Return a human-readable string for the specified duration"""
if seconds < 2:
return '%d second' % seconds
elif seconds < 120:
return '%d seconds' % seconds
elif seconds < 7200:
return '%d minutes' % (seconds // 60)
elif seconds < 48 * 3600:
return '%d hours' % (seconds // 3600)
else:
return '%d days' % (seconds // (24 * 3600))
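# Hedged usage sketch (not part of the original snippet): exercises each
# duration bucket handled above; the sample values are illustrative.
for _seconds in (1, 45, 600, 7200, 3 * 24 * 3600):
    print(_seconds, "->", pretty_duration(_seconds))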
|
8e34addedeeb98e1e028fa9374fcc8c4f134a9f7
| 3,643,684
|
import bokeh_catplot
def plot_ecdf(tidy_data, cats, val, title, width=550, conf_int=False):
"""
Plots an ECDF of tidy data.
tidy_data: Set of tidy data.
cats: Categories to plot
val: The value to plot
title: Title of plot
width: width of plot
conf_int: Whether or not to bootstrap a CI.
"""
p = bokeh_catplot.ecdf(
data = tidy_data,
cats = cats,
val = val,
title = title,
width = width,
conf_int = conf_int,
)
return p
|
11ef82111ad300826f47f6ce91ea588911a790c8
| 3,643,685
|
from operator import concat
def upsert(left, right, inclusion=None, exclusion=None):
"""Upserts the specified left collection with the specified right collection by overriding the
left values with the right values that have the same indices and concatenating the right values
to the left values that have different indices on the common keys that are in the specified
inclusive list and are not in the specified exclusive list."""
right = collection_to_common_type(right, left, inclusion=inclusion, exclusion=exclusion)
left = update(left, include_index(right, left))
return concat(left, exclude_index(right, left))
|
b4754c01ff521b892107afd8dd12b015bd4e293a
| 3,643,686
|
from collections import Counter
def train(training_data):
"""Trains the model on a given data set.
Parameters
----------
training_data
Returns
-------
"""
counts = Counter(training_data)
model = {}
# sort counts by lowest occurrences, up to most frequent.
# this allows higher frequencies to overwrite related
# values in the model
for pair, _ in counts.most_common()[:-len(counts)-1:-1]:
word, tag = pair
model[word] = tag
return model
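# Hedged usage sketch (not part of the original snippet): the tiny tagged
# corpus is an illustrative assumption; because most_common() is walked from
# least to most frequent, the more frequent (word, tag) pair wins ties.
toy_corpus = [("the", "DET"), ("dog", "NOUN"), ("barks", "VERB"),
              ("the", "DET"), ("the", "NOUN")]
print(train(toy_corpus))  # e.g. {'the': 'DET', 'dog': 'NOUN', 'barks': 'VERB'}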
|
328901b090392097d22b21a948691787e0128d48
| 3,643,687
|
import datetime
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['cache'] = 86400
desc['description'] = """This chart totals the number of distinct calendar
days per month that a given present weather condition is reported within
the METAR data feed. The calendar day is computed for the local time zone
of the reporting station.
<p>The reporting of present weather codes within METARs has changed over
the years and there is some non-standard nomenclature used by some sites.
The thunder (TS) reports are delineated into three categories here to
hopefully allow more accurate statistics.
<ul>
<li><strong>All Thunder Reports (TS)</strong> includes any
<code>TS</code> mention in any present weather code</li>
<li><strong>Thunder in Vicinity (VCTS)</strong> includes any
<code>VCTS</code> mention in any present weather code, for example,
<code>VCTSRA</code> would match.</li>
<li><strong>Thunder Reports (excluding VCTS)</strong> includes most
<code>TS</code> mentions, but not any including <code>VC</code></li>
</ul>
"""
desc['arguments'] = [
dict(type='zstation', name='zstation', default='DSM',
label='Select Station:', network='IA_ASOS'),
dict(type='year', name="year", label='Year to Highlight:',
default=datetime.date.today().year, min=1973),
dict(type='select', name='var', default='FG',
label='Present Weather Option:', options=PDICT),
]
return desc
|
0927644a801a2829a97fbc6e78ef70fff1e8edfe
| 3,643,688
|
import logging
import datetime
import time
def check_cortex(ioc, ioc_type, object_id, is_mail=False, cortex_expiration_days=30):
"""Run all available analyzer for ioc.
arguments:
- ioc: value/path of item we need to check on cortex
- ioc_type: type of the ioc (generic_relation and cortex datatype)
- object_id: item to attach report to
- is_mail: ioc is a mail [mail datatype is for addresses and file is for mail]
"""
_, _, cortex_api = get_info(mail=False)
# Mail object is file in cortex
# need to save mail object analyzer as mail_obj to discriminate them
filter_type = ioc_type if not is_mail else "mail_obj"
analyzers = Analyzer.objects.filter(
disabled=False, supported_types__contains=[filter_type]
).order_by("-priority")
# Full mail only on premise
if ioc_type == "file":
analyzers = analyzers.filter(onpremise=True)
if is_mail is True:
content_type = Mail
else:
content_type = Attachment
elif ioc_type == "mail":
content_type = Address
elif ioc_type == "url":
content_type = Url
elif ioc_type == "domain":
content_type = Domain
elif ioc_type == "ip":
content_type = Ip
elif ioc_type == "hash":
content_type = Attachment
else:
logging.error("Wrong ioc_type type {}".format(ioc_type))
return
old_reports = Report.objects.filter(
content_type=ContentType.objects.get_for_model(content_type),
object_id=object_id,
success=True,
date__gte=datetime.datetime.today()
- datetime.timedelta(days=cortex_expiration_days),
)
try:
db_object = content_type.objects.get(pk=object_id)
except Exception:
logging.error("CORTEX {} {} {} {}".format(ioc, ioc_type, object_id, is_mail))
return
for analyzer in analyzers:
# Check if item was already been processed
for report in old_reports:
if report.analyzer == analyzer:
if "malicious" in report.taxonomies:
db_object.tags.add(
"{}: malicious".format(analyzer.name),
tag_kwargs={"color": "#FF0000"},
)
db_object.taxonomy = 4
db_object.save()
elif "suspicious" in report.taxonomies:
db_object.tags.add(
"{}: suspicious".format(analyzer.name),
tag_kwargs={"color": "#C15808"},
)
db_object.taxonomy = max(3, db_object.taxonomy)
db_object.save()
elif "safe" in report.taxonomies:
db_object.tags.add(
"{}: safe".format(analyzer.name),
tag_kwargs={"color": "#00FF00"},
)
db_object.taxonomy = max(2, db_object.taxonomy)
db_object.save()
elif "info" in report.taxonomies:
db_object.tags.add(
"{}: info".format(analyzer.name),
tag_kwargs={"color": "#00B0FF"},
)
db_object.taxonomy = max(1, db_object.taxonomy)
db_object.save()
continue
# If not rerun the analyzer
try:
job = cortex_api.analyzers.run_by_name(
analyzer.name,
{"data": ioc, "dataType": ioc_type, "tlp": 1},
force=1,
)
while job.status not in ["Success", "Failure"]:
time.sleep(10)
job = cortex_api.jobs.get_report(job.id)
if job.status == "Success":
response = job.json()
try:
taxonomies = glom(
response, ("report.summary.taxonomies", ["level"])
)
except PathAccessError:
taxonomies = None
report = Report(
response=response,
content_object=db_object,
analyzer=analyzer,
taxonomies=taxonomies,
success=True,
)
report.save()
if "malicious" in taxonomies:
db_object.tags.add(
"{}: malicious".format(analyzer.name),
tag_kwargs={"color": "#FF0000"},
)
db_object.taxonomy = 4
db_object.save()
elif "suspicious" in taxonomies:
db_object.tags.add(
"{}: suspicious".format(analyzer.name),
tag_kwargs={"color": "#C15808"},
)
db_object.taxonomy = max(3, db_object.taxonomy)
db_object.save()
elif "safe" in taxonomies:
db_object.tags.add(
"{}: safe".format(analyzer.name),
tag_kwargs={"color": "#00FF00"},
)
db_object.taxonomy = max(2, db_object.taxonomy)
db_object.save()
elif "info" in taxonomies:
db_object.tags.add(
"{}: info".format(analyzer.name),
tag_kwargs={"color": "#00B0FF"},
)
db_object.taxonomy = max(1, db_object.taxonomy)
db_object.save()
elif job.status == "Failure":
report = Report(
content_object=db_object,
analyzer=analyzer,
success=False,
)
report.save()
except Exception as excp:
logging.error(
"ERROR running analyzer {} for {}: {}".format(analyzer.name, ioc, excp)
)
return True
|
c647ab09bd97dda75dc4073634ac4a68cbf8613a
| 3,643,689
|
import numpy as _np
def _env_corr_same(wxy, Xa, Ya, sign=-1, log=True, x_ind=None, y_ind=None):
"""
The cSPoC objective function with same filters for both data sets:
the correlation of amplitude envelopes
Additionally, it returns the gradients of the objective function
with respect to each of the filter coefficients.
Notes:
------
The input datasets Xa and Ya are the analytic representations of the
original datasets X and Y, hence they must be complex arrays.
Xa and Ya can be either 2d numpy arrays of shape
(channels x datapoints) or 3d array of shape
(channels x datapoints x trials).
For 3d arrays the average envelope in each trial is calculated if x_ind
(or y_ind, respectively) is None. If they are set, the difference of
the instantaneous amplitude envelope at x_ind/y_ind and the average
envelope is calculated for each trial.
If log == True, then the log transform is taken before the average
inside the trial
Input:
------
-- wxy is the array of shared filter coefficients for x and y
-- Xa - numpy array - complex analytic representation of X
Xa is the first Hilbert-transformed dataset of shape px x N (x tr),
where px is the number of sensors, N the number of datapoints, tr
the number of trials
-- Ya is the second Hilbert-transformed dataset of shape py x N (x tr)
-- sign {-1, 1, 0} - the correlation coefficient is multiplied with this
number. If the result of this function is minimized,
-1 should be used to find maximum correlation and 1
should be used to find maximal anti-correlation; with 0
the absolute correlation is returned. Defaults to -1
-- log {True, False} - compute the correlation between the log-
transformed envelopes, if datasets come in
epochs, then the log is taken before averaging
inside the epochs
-- x_ind int - the time index (-Xa.shape[1] <= x_ind < Xa.shape[1]),
where the difference of the instantaneous envelope and
the average envelope is determined for Xa
-- y_ind int - the time index (-Ya.shape[1] <= y_ind < Ya.shape[1]),
where the difference of the instantaneous envelope and
the average envelope is determined for Ya
Output:
-------
-- c - float - the correlation coefficient of the amplitude envelopes
of X and Y multiplied by the value of \"sign\"
-- c_der - numpy array - the gradient of c with respect to each of the
coefficients in wxy
"""
assert isinstance(Xa, _np.ndarray), "Xa must be numpy array"
assert _np.iscomplexobj(Xa), "Xa must be a complex-type numpy array" +\
", i.e. the analytic representaion of X"
assert (Xa.ndim ==2 or Xa.ndim==3), "Xa must be 2D or 3D numpy array"
assert isinstance(Ya, _np.ndarray), "Ya must be numpy array"
assert _np.iscomplexobj(Ya), "Ya must be a complex-type numpy array" +\
", i.e. the analytic representation of Y"
assert (Ya.ndim ==2 or Ya.ndim==3), "Ya must be 2D or 3D numpy array"
assert Xa.shape[-1] == Ya.shape[-1], "Size of last dimension in Xa " +\
"Ya must agree"
p1 = Xa.shape[0]
p2 = Ya.shape[0]
assert p1 == p2, 'Dimensionality of Xa and Ya must agree for cSPoC' +\
' with same filters'
assert len(wxy) == p1, "Length of wxy must equal the" + \
" number of variables in Xa and Ya"
assert isinstance(log, bool), "\"log\" must be a boolean (True or False)"
assert sign in [-1, 1, 0], "\"sign\" must be -1, 1, or 0"
if x_ind != None:
assert Xa.ndim == 3, "If x_ind is set, Xa must be 3d array!"
assert isinstance(x_ind, int), "x_ind must be integer!"
assert ((x_ind >= -Xa.shape[1]) and
(x_ind < Xa.shape[1])), "x_ind must match the range of " +\
"Xa.shape[1]"
if y_ind != None:
assert Ya.ndim == 3, "If y_ind is set, Ya must be 3d array!"
assert isinstance(y_ind, int), "y_ind must be integer!"
assert ((y_ind >= -Ya.shape[1]) and
(y_ind < Ya.shape[1])), "y_ind must match the range of " +\
"Ya.shape[1]"
# filter signal spatially
Xa_filt = _np.tensordot(wxy, Xa, axes=(0,0))
Ya_filt = _np.tensordot(wxy, Ya, axes=(0,0))
# get envelope of filtered signal
x_env = _np.abs(Xa_filt)
y_env = _np.abs(Ya_filt)
# get derivatives of envelopes
envx_derwx = ((Xa_filt.real * Xa.real +
Xa_filt.imag * Xa.imag) / x_env)
envy_derwy = ((Ya_filt.real * Ya.real +
Ya_filt.imag * Ya.imag) / y_env)
if log:
envx_derwx = envx_derwx / x_env
envy_derwy = envy_derwy / y_env
x_env = _np.log(x_env)
y_env = _np.log(y_env)
if ((Xa.ndim == 3) and (x_ind != None)):
envx_derwx = envx_derwx[:,x_ind] - envx_derwx.mean(1)
x_env = x_env[x_ind] - x_env.mean(0)
elif Xa.ndim == 3:
envx_derwx = envx_derwx.mean(1)
x_env = x_env.mean(0)
if ((Ya.ndim == 3) and (y_ind != None)):
envy_derwy = envy_derwy[:,y_ind] - envy_derwy.mean(1)
y_env = y_env[y_ind] - y_env.mean(0)
elif Ya.ndim == 3:
envy_derwy = envy_derwy.mean(1)
y_env = y_env.mean(0)
# remove mean of envelopes and derivatives
x_env = x_env - x_env.mean()
y_env = y_env - y_env.mean()
envx_derwx = envx_derwx - envx_derwx.mean(1)[:,_np.newaxis]
envy_derwy = envy_derwy - envy_derwy.mean(1)[:,_np.newaxis]
# numerator of correlation
num = _np.mean(x_env * y_env)
# derivative of numerator
num_d = _np.mean(envx_derwx*y_env + x_env*envy_derwy,1)
# denominator of correlation
denom = _np.sqrt(_np.mean(x_env**2) * _np.mean(y_env**2))
# derivative of denominator
denom_d = (
(_np.mean(x_env*envx_derwx,1)*_np.mean(y_env**2) +
_np.mean(x_env**2)*_np.mean(y_env*envy_derwy,1)
) /
_np.sqrt(_np.mean(x_env**2) * _np.mean(y_env**2)))
#final correlation
corr = num / denom
#final derivative
corr_d = (num_d*denom - num*denom_d) / denom**2
if sign == 0:
return _np.sign(corr)*corr, _np.sign(corr)*corr_d
else:
return sign*corr, sign*corr_d
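# Hedged usage sketch (not part of the original snippet): random complex
# arrays stand in for Hilbert-transformed (analytic) data; the shapes and
# seed are illustrative assumptions.
_rng = _np.random.RandomState(0)
_p, _N = 4, 1000
_Xa = _rng.randn(_p, _N) + 1j * _rng.randn(_p, _N)
_Ya = _rng.randn(_p, _N) + 1j * _rng.randn(_p, _N)
_wxy = _rng.randn(_p)
_corr, _corr_d = _env_corr_same(_wxy, _Xa, _Ya, sign=-1, log=True)
print(_corr, _corr_d.shape)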
|
2004e3ab1f9c81466e8dc946b04baf5a692b169d
| 3,643,690
|
def create_resource():
"""Hosts resource factory method"""
deserializer = HostDeserializer()
serializer = HostSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
|
ed49ae9fecce67fcd3c4fa1a2eac469eb97e239b
| 3,643,691
|
def get_ref_kmer(ref_seq, ref_name, k_len):
""" Load reference kmers. """
ref_mer = []
ref_set = set()
for i in range(len(ref_seq) - k_len + 1):
kmer = ref_seq[i:(i + k_len)]
if kmer in ref_set:
raise ValueError(
"%s found multiple times in reference %s, at pos. %d" % (
kmer, ref_name, i)
)
ref_mer.append(kmer)
ref_set.add(kmer)
return ref_mer
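# Hedged usage sketch (not part of the original snippet): a short reference
# sequence with no repeated 3-mers; the sequence is an illustrative value.
print(get_ref_kmer("ACGTAC", "toy_ref", 3))  # ['ACG', 'CGT', 'GTA', 'TAC']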
|
72b75dccfba122a986d50e144dea62bfafe0fb50
| 3,643,692
|
def get_distutils_build_or_install_option(option):
""" Returns the value of the given distutils build or install option.
Parameters
----------
option : str
The name of the option
Returns
-------
val : str or None
The value of the given distutils build or install option. If the
option is not set, returns None.
"""
return get_distutils_option(option, ['build', 'build_ext', 'build_clib',
'install'])
|
7f0d72e1c30c752761eb8c7f8f0ffeb875183d4d
| 3,643,693
|
def CMDset_close(parser, args):
"""Closes the issue."""
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist(auth_config=auth_config)
# Ensure there actually is an issue to close.
cl.GetDescription()
cl.CloseIssue()
return 0
|
6711ba947e9d839217a96568a0c07d1103646030
| 3,643,694
|
def gchip(k_k, b_b, c_c):
"""gchip(k_k, b_b, c_c)"""
yout = b_b*c_c*nu_f(1, b_b, k_k)**((c_c+1)/2)*\
cos((c_c+1)*atan(b_b*k_k))
return yout
|
81fa674a2fb03875e39f2968986104da06a5ea44
| 3,643,695
|
def create_component(ctx: NVPContext):
"""Create an instance of the component"""
return ProcessUtils(ctx)
|
ec9d4539583dbdeaf1c4f5d8fce337077d249ab2
| 3,643,696
|
import ipaddress
def is_valid_ip(ip: str) -> bool:
"""
Args:
ip: IP address
Returns: True if the string represents an IPv4 or an IPv6 address, false otherwise.
"""
try:
ipaddress.IPv4Address(ip)
return True
except ValueError:
try:
ipaddress.IPv6Address(ip)
return True
except ValueError:
return False
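# Hedged usage sketch (not part of the original snippet): one IPv4 address,
# one IPv6 address and one malformed string.
for _candidate in ("192.168.0.1", "2001:db8::1", "not-an-ip"):
    print(_candidate, is_valid_ip(_candidate))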
|
aa1d3b19828dd8c3dceaaa8d9d1017cc16c1f73b
| 3,643,697
|
import numpy as np
def complete_tree(leaves):
"""
Complete a tree defined by its leaves.
Parameters:
----------
leaves : np.array(dtype=np.int64)
Returns:
--------
np.array(dtype=np.int64)
"""
tree_set = _complete_tree(leaves)
return np.fromiter(tree_set, dtype=np.int64)
|
4bfc1ae01efd9595ef875613bd00d76f7c485421
| 3,643,698
|
import torch
def pick_best_batch_size_for_gpu():
"""
Tries to pick a batch size that will fit in your GPU. These sizes aren't guaranteed to work, but they should give
you a good shot.
"""
# torch.cuda.mem_get_info() returns (free, total) bytes; the heuristic below
# keys off the second value, i.e. total device memory.
free, available = torch.cuda.mem_get_info()
availableGb = available / (1024 ** 3)
if availableGb > 14:
return 16
elif availableGb > 10:
return 8
elif availableGb > 7:
return 4
return 1
|
31d970697b417b40f8ef5b41fdeacc0e378543a0
| 3,643,699
|