| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
import pexpect
def spawn_shell(shell_cmd):
"""Spawn a shell process with the provided command line. Returns the Pexpect object."""
return pexpect.spawn(shell_cmd[0], shell_cmd[1:], env=build_shell_env())
|
07de3ae221b427baddb0b47f1d52e3eae91035e7
| 3,646,200
|
from collections import OrderedDict
import numpy as np
import scipy.ndimage as nd
import sep
from grizli import utils
from tqdm import tqdm
def analyze_image(data, err, seg, tab, athresh=3.,
robust=False, allow_recenter=False,
prefix='', suffix='', grow=1,
subtract_background=False, include_empty=False,
pad=0, dilate=0, make_image_cols=True):
"""
SEP/SExtractor analysis on arbitrary image
Parameters
----------
data : array
Image array
err : array
RMS error array
seg : array
Segmentation array
tab : `~astropy.table.Table`
Table output from `sep.extract` where `id` corresponds to segments in
`seg`. Requires at least columns of
        ``id, xmin, xmax, ymin, ymax``, and ``x, y, flag`` if you want to use the
        `robust` estimators
athresh : float
Analysis threshold
prefix, suffix : str
Prefix and suffix to add to output table column names
Returns
-------
tab : `~astropy.table.Table`
Table with columns
``id, x, y, x2, y2, xy, a, b, theta``
``flux, background, peak, xpeak, ypeak, npix``
"""
yp, xp = np.indices(data.shape) - 0.5*(grow == 2)
# Output data
new = OrderedDict()
idcol = choose_column(tab, ['id','number'])
ids = tab[idcol]
new[idcol] = tab[idcol]
for k in ['x','y','x2','y2','xy','a','b','theta','peak','flux','background']:
if k in tab.colnames:
new[k] = tab[k].copy()
else:
new[k] = np.zeros(len(tab), dtype=np.float32)
for k in ['xpeak','ypeak','npix','flag']:
if k in tab.colnames:
new[k] = tab[k].copy()
else:
new[k] = np.zeros(len(tab), dtype=int)
for id_i in tqdm(ids):
ix = np.where(tab[idcol] == id_i)[0][0]
xmin = tab['xmin'][ix]-1-pad
ymin = tab['ymin'][ix]-1-pad
slx = slice(xmin, tab['xmax'][ix]+pad+2)
sly = slice(ymin, tab['ymax'][ix]+pad+2)
seg_sl = seg[sly, slx] == id_i
if include_empty:
seg_sl |= seg[sly, slx] == 0
if dilate > 0:
seg_sl = nd.binary_dilation(seg_sl, iterations=dilate)
if seg_sl.sum() == 0:
new['flag'][ix] |= 1
continue
if grow > 1:
sh = seg_sl.shape
seg_gr = np.zeros((sh[0]*grow, sh[1]*grow), dtype=bool)
for i in range(grow):
for j in range(grow):
seg_gr[i::grow, j::grow] |= seg_sl
seg_sl = seg_gr
xmin = xmin*grow
ymin = ymin*grow
slx = slice(xmin, (tab['xmax'][ix]+2+pad)*grow)
sly = slice(ymin, (tab['ymax'][ix]+2+pad)*grow)
if subtract_background:
if subtract_background == 2:
# Linear model
x = xp[sly, slx] - xmin
y = yp[sly, slx] - ymin
A = np.array([x[~seg_sl]*0.+1, x[~seg_sl], y[~seg_sl]])
b = data[sly,slx][~seg_sl]
lsq = np.linalg.lstsq(A.T, b)
back_level = lsq[0][0]
A = np.array([x[seg_sl]*0.+1, x[seg_sl], y[seg_sl]]).T
back_xy = A.dot(lsq[0])
else:
# Median
back_level = np.median(data[sly, slx][~seg_sl])
back_xy = back_level
else:
back_level = 0.
back_xy = back_level
dval = data[sly, slx][seg_sl] - back_xy
ival = err[sly, slx][seg_sl]
rv = dval.sum()
imax = np.argmax(dval)
peak = dval[imax]
x = xp[sly, slx][seg_sl] - xmin
y = yp[sly, slx][seg_sl] - ymin
xpeak = x[imax] + xmin
ypeak = y[imax] + ymin
thresh_sl = (dval > athresh*ival) & (ival >= 0)
new['npix'][ix] = thresh_sl.sum()
new['background'][ix] = back_level
if new['npix'][ix] == 0:
new['flag'][ix] |= 2
new['x'][ix] = np.nan
new['y'][ix] = np.nan
new['xpeak'][ix] = xpeak
new['ypeak'][ix] = ypeak
new['peak'][ix] = peak
new['flux'][ix] = rv
new['x2'][ix] = np.nan
new['y2'][ix] = np.nan
            new['xy'][ix] = np.nan
new['a'][ix] = np.nan
new['b'][ix] = np.nan
new['theta'][ix] = np.nan
continue
cval = dval[thresh_sl]
rv = cval.sum()
x = x[thresh_sl]
y = y[thresh_sl]
mx = (x*cval).sum()
my = (y*cval).sum()
mx2 = (x*x*cval).sum()
my2 = (y*y*cval).sum()
mxy = (x*y*cval).sum()
xm = mx/rv
ym = my/rv
xm2 = mx2/rv - xm**2
ym2 = my2/rv - ym**2
xym = mxy/rv - xm*ym
if robust:
if 'flag' in tab.colnames:
flag = tab['flag'][ix] & sep.OBJ_MERGED
else:
flag = False
if flag | (robust > 1):
if allow_recenter:
xn = xm
yn = ym
else:
xn = tab['x'][ix]-xmin
yn = tab['y'][ix]-ymin
xm2 = mx2 / rv + xn*xn - 2*xm*xn
ym2 = my2 / rv + yn*yn - 2*ym*yn
xym = mxy / rv + xn*yn - xm*yn - xn*ym
xm = xn
ym = yn
temp2 = xm2*ym2-xym*xym
if temp2 < 0.00694:
xm2 += 0.0833333
ym2 += 0.0833333
            temp2 = xm2*ym2-xym*xym
temp = xm2 - ym2
if np.abs(temp) > 0:
theta = np.clip(np.arctan2(2.0*xym, temp)/2.,
-np.pi/2.+1.e-5, np.pi/2.-1.e-5)
else:
theta = np.pi/4
        temp = np.sqrt(0.25*temp*temp+xym*xym)
        pmy2 = pmx2 = 0.5*(xm2+ym2)
pmx2 += temp
pmy2 -= temp
amaj = np.sqrt(pmx2)
amin = np.sqrt(pmy2)
new['x'][ix] = xm+xmin
new['y'][ix] = ym+ymin
new['xpeak'][ix] = xpeak
new['ypeak'][ix] = ypeak
new['peak'][ix] = peak
new['flux'][ix] = rv
new['x2'][ix] = xm2
new['y2'][ix] = ym2
        new['xy'][ix] = xym
new['a'][ix] = amaj
new['b'][ix] = amin
new['theta'][ix] = theta
new['flag'] |= ((~np.isfinite(new['a'])) | (new['a'] <= 0))*4
new['flag'] |= ((~np.isfinite(new['b'])) | (new['b'] <= 0))*8
newt = utils.GTable()
for k in new:
newt[f'{prefix}{k}{suffix}'] = new[k]
if make_image_cols:
newt['a_image'] = newt['a']
newt['b_image'] = newt['b']
newt['theta_image'] = newt['theta']
newt['x_image'] = newt['x']+1
newt['y_image'] = newt['y']+1
return newt
|
b90920ac00a995fde90c43b07cae45a752786c15
| 3,646,201
|
import re
import random
import time
import requests
import os
def qr(tag):
"""
called by an AJAX request for cipherwallet QR code
this action is typically invoked by your web page containing the form, thru the code
in cipherwallet.js, to obtain the image with the QR code to display
it will return the image itself, with an 'image/png' content type, so you can use
the URL to this page as a 'src=...' attribute for the <img> tag
"""
# default timeout values, do not modify because they must stay in sync with the API
DEFAULT_TTL = {
OP_SIGNUP: 120,
OP_LOGIN: 60,
OP_CHECKOUT: 300,
OP_REGISTRATION: 30,
}
    # create a unique session identifier, 8 random characters, and postfix it with the QR code tag
# the qr code tag is useful to distinguish multiple QR codes on the same page
if re.compile("[a-zA-Z0-9.:_-]+").match(tag) is None:
raise CipherwalletError(400, "Bad request")
cw_session = "".join(random.choice(ALPHABET) for _ in range(8)) + "-" + tag
# get the user data request template; templates for each type of request are pre-formatted
# and stored in the constants file, in the qr_requests variable
try:
rq_def = qr_requests[tag]
except Exception:
raise CipherwalletError(501, "Not implemented")
# set the time-to-live of the cipherwallet session in the temporary storage
cw_session_ttl = rq_def.get('qr_ttl', DEFAULT_TTL[rq_def['operation']])
if tmp_datastore.cw_session_data(cw_session, 'qr_expires', 1 + cw_session_ttl + int(time.time())) is None:
raise CipherwalletError(500, "Internal server error")
# for registration QR code requests, we also save the current user ID in the short term storage
if rq_def['operation'] == OP_REGISTRATION:
uid = hooks.get_user_id_for_current_session() # you MUST implement this function in hooks.py
if uid is None:
raise CipherwalletError(401, "Unauthorized")
else:
            tmp_datastore.cw_session_data(cw_session, 'user_id', uid)
# prepare request to the API
method = "POST";
resource = "/{0}/{1}.png".format(tag, cw_session)
request_params = {}
if rq_def.get('qr_ttl'): request_params['ttl'] = rq_def['qr_ttl']
if rq_def.get('callback_url'): request_params['push_url'] = rq_def['callback_url']
if rq_def['operation'] not in [ OP_LOGIN, OP_REGISTRATION, ]:
display = rq_def.get('display')
if hasattr(display, '__call__'):
request_params['display'] = display()
elif type(display) == type(""):
request_params['display'] = rq_def['display']
# should do the same thing for the service params
# create CQR headers and the query string
api_rq_headers = cqr.auth(
CUSTOMER_ID, API_SECRET, method, resource, request_params or "", H_METHOD
)
# some extra headers we need
    api_rq_headers['Content-Type'] = "application/x-www-form-urlencoded"
#api_rq_headers['Content-Length'] = len(request_params);
# get the QR image from the API and send it right back to the browser
api_rp = requests.post(API_URL + resource, headers=api_rq_headers, data=request_params)
content = api_rp.content if api_rp.status_code == 200 \
        else open(os.path.dirname(os.path.realpath(__file__)) + "/1x1.png", "rb").read()
return content, cw_session
|
ed70f32df3a06fd6dc890a1ba324783f273c8e38
| 3,646,202
|
import pandas as pd
def get_experiment_table(faultgroup, faultname, tablename):
    """
    Get any table from a fault group
    """
node = faultgroup._f_get_child(faultname)
table = node._f_get_child(tablename)
return pd.DataFrame(table.read())
|
16c78e6925d3da227402e2e17e00bac38ff72ab7
| 3,646,203
|
import numpy as np
import numpy.random as ra
def kjunSeedList(baseSeed, n):
    """
    Generates n seeds.
    Due to the way the seeds are generated, do not use a value that is too large.
    """
    assert n <= 100000
    rs = ra.RandomState(baseSeed)
    randVals = rs.randint(np.iinfo(np.uint32).max+1, size=n)
    return randVals
|
6d98adfba2917ede64d0572c21d6fe1041327241
| 3,646,204
|
import PIL.ImageFilter
def filter_sharpen(image):
"""Apply a sharpening filter kernel to the image.
This is the same as using PIL's ``PIL.ImageFilter.SHARPEN`` kernel.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: no
* ``float64``: no
* ``float128``: no
* ``bool``: no
Parameters
----------
image : ndarray
The image to modify.
Returns
-------
ndarray
Sharpened image.
"""
return _filter_by_kernel(image, PIL.ImageFilter.SHARPEN)
|
28b83153dc8931430e22f63f889cf195f01f80da
| 3,646,205
|
async def zha_client(hass, config_entry, zha_gateway, hass_ws_client):
"""Test zha switch platform."""
# load the ZHA API
async_load_api(hass)
# create zigpy device
await async_init_zigpy_device(
hass,
[general.OnOff.cluster_id, general.Basic.cluster_id],
[],
None,
zha_gateway,
)
await async_init_zigpy_device(
hass,
[general.OnOff.cluster_id, general.Basic.cluster_id, general.Groups.cluster_id],
[],
zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT,
zha_gateway,
manufacturer="FakeGroupManufacturer",
model="FakeGroupModel",
ieee="01:2d:6f:00:0a:90:69:e8",
)
# load up switch domain
await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
await hass.async_block_till_done()
await hass.config_entries.async_forward_entry_setup(config_entry, light_domain)
await hass.async_block_till_done()
return await hass_ws_client(hass)
|
ba659195dc2e3d8d3510c25edcf4850a740483c1
| 3,646,206
|
import random
def summary_selector(summary_models=None):
"""
    Creates a function that takes as input a dict of summaries:
    {'T5': [str] summary_generated_by_T5, ..., 'KW': [str] summary_generated_by_KW}
    and returns the summary generated by a model chosen at random from summary_models.
    If summary_models is None (or empty), no summary is used and an empty string is returned.
:param summary_models: list of str(SummarizerModel)
:return: function [dict] -> [str]
"""
if summary_models is None or len(summary_models) == 0 or \
(len(summary_models) == 1 and summary_models[0] == ""):
return lambda x: ""
summary_model = random.choice(summary_models)
return lambda summaries_dict: summaries_dict[summary_model]
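# Hedged usage sketch: the model names and summary texts below are illustrative, not part
# of the original code. Note that the model is chosen once, when the selector is created.
selector = summary_selector(['T5', 'KW'])
summaries = {'T5': 'summary generated by T5', 'KW': 'summary generated by KW'}
print(selector(summaries))                          # summary of the randomly chosen model
print(summary_selector(None)({'T5': 'ignored'}))    # '' when no models are given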
|
b8a2336546324d39ff87ff5b59f4f1174e5dd54c
| 3,646,207
|
import collections
import datetime
from natsort import natsorted
def handle_collectd(root_dir):
"""Generate figure for each plugin for each hoster."""
result = collections.defaultdict(lambda: collections.defaultdict(dict))
for host in natsorted(root_dir.iterdir()):
for plugin in natsorted(host.iterdir()):
stats_list = natsorted(
[fname for fname in plugin.iterdir() if fname.suffix == ".rrd"]
)
title = plugin.name
result[host.name][plugin.name] = {
"daily": rrd2svg(
stats_list,
f"{title} - by day",
start_time=datetime.datetime.now() - datetime.timedelta(days=1),
),
"monthly": rrd2svg(
stats_list,
f"{title} - by month",
start_time=datetime.datetime.now() - datetime.timedelta(weeks=4),
),
}
if len(result[host.name]) > 20:
break
return result
|
48eb4f2ad5976d51fbe1904219e90620aea1a82c
| 3,646,208
|
def create_policy_case_enforcement(repository_id, blocking, enabled,
organization=None, project=None, detect=None):
"""Create case enforcement policy.
"""
organization, project = resolve_instance_and_project(
detect=detect, organization=organization, project=project)
policy_client = get_policy_client(organization)
configuration = create_configuration_object(repository_id, None, blocking, enabled,
'40e92b44-2fe1-4dd6-b3d8-74a9c21d0c6e',
['enforceConsistentCase'],
['true'])
return policy_client.create_policy_configuration(configuration=configuration, project=project)
|
60864cd51472029991a4bb783a39007ea42e4b58
| 3,646,209
|
def svn_fs_new(*args):
"""svn_fs_new(apr_hash_t fs_config, apr_pool_t pool) -> svn_fs_t"""
    return _fs.svn_fs_new(*args)
|
6ade0887b16e522d47d70c974ccecf8f8bec1403
| 3,646,210
|
import os
import numpy as np
from PIL import Image
from torchvision import transforms
def cam_pred(prefix, data_dir):
    """
    Run class-activation-map (CAM) prediction over the contrast dataset and return the
    flattened heatmap scores together with the ground-truth values.
    """
groundtruth_dict = read(os.path.join('../data/contrast_dataset', 'groundtruth.txt'))
cam = CAM(model=load_pretrained_model(prefix, 'resnet'))
if data_dir == '../data/split_contrast_dataset':
normalize = transforms.Normalize(mean=[0.7432, 0.661, 0.6283],
std=[0.0344, 0.0364, 0.0413])
print('load custom-defined skin dataset successfully!!!')
else:
        raise ValueError('unsupported data_dir: %s' % data_dir)
preprocess = transforms.Compose([
transforms.ToTensor(),
normalize,
])
pred_results = []
y_true = []
idx = 0
for phase in ['train', 'val']:
path = os.path.join(data_dir, phase)
for name in os.listdir(path):
if 'lesion' not in name:
continue
abs_path = os.path.join(path, name)
img_pil = Image.open(abs_path)
img_tensor = preprocess(img_pil).unsqueeze(0)
heatmap = cam(img_tensor)
idx += 1
if idx % 50 == 0:
print('[%d/%d]' % (idx, len(os.listdir(path))))
heatmap = np.float32(heatmap) / 255
pred_results.extend(heatmap.reshape(-1))
y_true.extend(get_true(groundtruth_dict, name).reshape(-1).tolist())
print(idx)
return pred_results, y_true
|
262cd00733047d2f61fff2fe440b6e81548ed533
| 3,646,211
|
def least_similar(sen, voting_dict):
"""
Find senator with voting record least similar, excluding the senator passed
:param sen: senator last name
:param voting_dict: dictionary of voting record by last name
:return: senator last name with least similar record, in case of a tie chooses first alphabetically
>>> voting_dict = create_voting_dict(list(open('voting_record_dump109.txt')))
>>> least_similar('Mikulski', voting_dict)
'Inhofe'
>>> least_similar('Santorum', voting_dict) # 2.12.5
'Feingold'
"""
return specifier_similar(sen, voting_dict, '<')['sen']
|
8bcc8cde75e9ce060f852c0e7e03756d279491f0
| 3,646,212
|
import logging
from requests.exceptions import SSLError, ConnectionError as ReqConnectionError
from retrying import retry
def _send_req(wait_sec, url, req_gen, retry_result_code=None):
""" Helper function to send requests and retry when the endpoint is not ready.
Args:
wait_sec: int, max time to wait and retry in seconds.
url: str, url to send the request, used only for logging.
req_gen: lambda, no parameter function to generate requests.Request for the
function to send to the endpoint.
retry_result_code: int (optional), status code to match or retry the request.
Returns:
requests.Response
"""
def retry_on_error(e):
return isinstance(e, (SSLError, ReqConnectionError))
# generates function to see if the request needs to be retried.
# if param `code` is None, will not retry and directly pass back the response.
# Otherwise will retry if status code is not matched.
def retry_on_result_func(code):
if code is None:
return lambda _: False
return lambda resp: not resp or resp.status_code != code
@retry(stop_max_delay=wait_sec * 1000, wait_fixed=10 * 1000,
retry_on_exception=retry_on_error,
retry_on_result=retry_on_result_func(retry_result_code))
def _send(url, req_gen):
resp = None
logging.info("sending request to %s", url)
try:
resp = req_gen()
except Exception as e:
logging.warning("%s: request with error: %s", url, e)
raise e
return resp
return _send(url, req_gen)
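# Hedged usage sketch: the URL below is a placeholder and `requests` is assumed to be
# available; this polls the endpoint for up to 60 seconds until it answers with HTTP 200.
import requests

health_url = "http://localhost:8080/healthz"  # hypothetical endpoint
response = _send_req(60, health_url,
                     lambda: requests.get(health_url, timeout=5),
                     retry_result_code=200)
print(response.status_code)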
|
d6856bf241f857f3acd8768fd71d058d4c94baaa
| 3,646,213
|
def load_file(path, types = None):
"""
    Load the file at path if its format is in the types list
    ----
    :param path: file path
    :param types: list of allowed file types, such as ['txt', 'xlsx']; if None, all files are loaded; files whose type is not in the list are skipped
    :return: a list [path, data]
    """
    ext = path.split(".")[-1]
    if types is not None:
if ext not in types: # filter this file
return None
if ext == "txt":
return [path, __load_txt(path)]
else:
print("pyftools: format", ext, "not support!")
return None
|
dab404cf2399e3b87d23babb1a09be2b94c3d924
| 3,646,214
|
import numpy
def get_dense_labels_map(values, idx_dtype='uint32'):
"""
convert unique values into dense int labels [0..n_uniques]
:param array values: (n,) dtype array
:param dtype? idx_dtype: (default: 'uint32')
:returns: tuple(
labels2values: (n_uniques,) dtype array,
values2labels: HashMap(dtype->int),
)
"""
# get unique values
unique_values = unique(values)
# build labels from 0 to n_uniques
labels = numpy.arange(unique_values.shape[0], dtype=idx_dtype)
# build small hashmap with just the unique items
values2labels = Hashmap(unique_values, labels)
return unique_values, values2labels
|
cd9f2884e26fa22785e24598f0f485d2931427d8
| 3,646,215
|
def delete_debug_file_from_orchestrator(
self,
filename: str,
) -> bool:
"""Delete debug file from Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - debugFiles
- POST
- /debugFiles/delete
:param filename: Name of debug file to delete from Orchestrator
:type filename: str
:return: Returns True/False based on successful call
:rtype: bool
"""
data = {"fileName": filename}
return self._post(
"/debugFiles/delete",
data=data,
return_type="bool",
)
|
3357fc26e48771a716445b38c1fcc5c592c39cf9
| 3,646,216
|
from typing import Match
def _replace_fun_unescape(m: Match[str]) -> str:
""" Decode single hex/unicode escapes found in regex matches.
Supports single hex/unicode escapes of the form ``'\\xYY'``,
``'\\uYYYY'``, and ``'\\UYYYYYYYY'`` where Y is a hex digit. Only
decodes if there is an odd number of backslashes.
.. versionadded:: 0.2
Parameters
----------
m : regex match
Returns
-------
c : str
The unescaped character.
"""
slsh = b'\\'.decode('ascii')
s = m.group(0)
count = s.count(slsh)
if count % 2 == 0:
return s
else:
c = chr(int(s[(count + 1):], base=16))
return slsh * (count - 1) + c
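# Hedged usage sketch: the regex below is an assumed caller-side pattern (a run of
# backslashes followed by a \xYY, \uYYYY, or \UYYYYYYYY escape); the original calling
# code is not shown in this snippet.
import re

_escape_re = re.compile(r'\\+(?:x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8})')
print(_escape_re.sub(_replace_fun_unescape, r'caf\xe9'))   # odd backslash count -> 'café'
print(_escape_re.sub(_replace_fun_unescape, r'a\\xe9'))    # even backslash count -> left as-is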
|
3fdb275e3c15697e5302a6576b4d7149016299c0
| 3,646,217
|
def predict_next_location(game_data, ship_name):
"""
Predict the next location of a space ship.
Parameters
----------
game_data: data of the game (dic).
    ship_name: name of the spaceship whose next location is predicted (str).
Return
------
    predicted_location : predicted location of the spaceship (tuple(int, int)).
Version
-------
Specification: Nicolas Van Bossuyt (v1. 19/03/17).
Implementation: Nicolas Van Bossuyt (v1. 19/03/17).
Bayron Mahy (v2. 22/03/17).
"""
ship_location = game_data['ships'][ship_name]['location']
ship_facing = game_data['ships'][ship_name]['facing']
ship_speed = game_data['ships'][ship_name]['speed']
return next_location(ship_location, ship_facing, ship_speed, game_data['board_size'])
|
996b58e0ac8d8754a49020e0e5df830fa472be99
| 3,646,218
|
from pathlib import Path
import os
def get_absolute_filename(user_inputted_filename: str) -> Path:
"""Clean up user inputted filename path, wraps os.path.abspath, returns Path object"""
filename_location = Path(os.path.abspath(user_inputted_filename))
return filename_location
|
d8d704f4adcaa443658789c6d178b3ddb05552d0
| 3,646,219
|
def check_answer(guess, a_follower, b_follower):
"""Chcek if the user guessed the correct option"""
if a_follower > b_follower:
return guess == "a"
else:
return guess == "b"
|
acd1e78026f89dd1482f4471916472d35edf68a7
| 3,646,220
|
import argparse
def command_line_arg_parser():
"""
Command line argument parser. Encrypts by default. Decrypts when --decrypt flag is passed in.
"""
parser = argparse.ArgumentParser(description='Parses input args')
parser.add_argument('input_file', type=str,
help='Path to input file location')
parser.add_argument('output_file', type=str, default='./output_data',
help='Path to output file location')
parser.add_argument('key_file', type=str,
help='Path to public or private key file')
    parser.add_argument('--decrypt', dest='decrypt', action='store_true',
                        help='Decrypt the input file instead of encrypting it (requires a private key file)')
return parser
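# Hedged usage sketch with placeholder file paths; parse_args() consumes an explicit
# argument list here so the example does not depend on sys.argv.
parser = command_line_arg_parser()
args = parser.parse_args(['plain.txt', 'cipher.bin', 'public.pem'])
print(args.input_file, args.output_file, args.key_file, args.decrypt)   # ... False
print(parser.parse_args(['cipher.bin', 'plain.txt', 'private.pem', '--decrypt']).decrypt)  # True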
|
c1e9305e2967368fb36ad8fcdbb654ef04f8d3bf
| 3,646,221
|
def respond(variables, Body=None, Html=None, **kwd):
"""
Does the grunt work of cooking up a MailResponse that's based
    on a template. The only difference between the lamson.mail.MailResponse
    class and this (apart from variables passed to a template) is that
    instead of giving actual Body or Html parameters with contents,
you give the name of a template to render. The kwd variables are
the remaining keyword arguments to MailResponse of From/To/Subject.
For example, to render a template for the body and a .html for the Html
attachment, and to indicate the From/To/Subject do this:
msg = view.respond(locals(), Body='template.txt',
Html='template.html',
From='test@test.com',
To='receiver@test.com',
Subject='Test body from "%(dude)s".')
In this case you're using locals() to gather the variables needed for
the 'template.txt' and 'template.html' templates. Each template is
setup to be a text/plain or text/html attachment. The From/To/Subject
are setup as needed. Finally, the locals() are also available as
simple Python keyword templates in the From/To/Subject so you can pass
in variables to modify those when needed (as in the %(dude)s in Subject).
"""
assert Body or Html, "You need to give either the Body or Html template of the mail."
for key in kwd:
kwd[key] = kwd[key] % variables
msg = mail.MailResponse(**kwd)
if Body:
msg.Body = render(variables, Body)
if Html:
msg.Html = render(variables, Html)
return msg
|
214513edf420dc629603cc98d1728dec8c81aee9
| 3,646,222
|
from typing import Dict
def canonical_for_code_system(jcs: Dict) -> str:
"""get the canonical URL for a code system entry from the art decor json. Prefer FHIR URIs over the generic OID URI.
Args:
jcs (Dict): the dictionary describing the code system
Returns:
str: the canonical URL
"""
if "canonicalUriR4" in jcs:
return jcs["canonicalUriR4"]
else:
return jcs["canonicalUri"]
|
f111a4cb65fa75799e799f0b088180ef94b71cc8
| 3,646,223
|
def correspdesc_source(data):
"""
extract @source from TEI elements <correspDesc>
"""
correspdesc_data = correspdesc(data)
try:
return [cd.attrib["source"].replace("#", "") for cd in correspdesc_data]
except KeyError:
pass
try:
return [cd.attrib[ns_cs("source")].replace("#", "") for cd in correspdesc_data]
except KeyError:
pass
return []
|
18a2fe1d0daf0f383c8b8295105ad0027b626f31
| 3,646,224
|
def leaders(Z, T):
"""
(L, M) = leaders(Z, T):
For each flat cluster j of the k flat clusters represented in the
n-sized flat cluster assignment vector T, this function finds the
lowest cluster node i in the linkage tree Z such that:
* leaf descendents belong only to flat cluster j (i.e. T[p]==j
for all p in S(i) where S(i) is the set of leaf ids of leaf
nodes descendent with cluster node i)
* there does not exist a leaf that is not descendent with i
that also belongs to cluster j (i.e. T[q]!=j for all q not in S(i)).
If this condition is violated, T is not a valid cluster assignment
vector, and an exception will be thrown.
Two k-sized numpy vectors are returned, L and M. L[j]=i is the linkage
cluster node id that is the leader of flat cluster with id M[j]. If
i < n, i corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
"""
Z = np.asarray(Z)
T = np.asarray(T)
if type(T) != _array_type or T.dtype != np.int:
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype=np.int32)
M = np.zeros((kk,), dtype=np.int32)
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _cluster_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError('T is not a valid assignment vector. Error found when examining linkage node %d (< 2n-1).' % s)
return (L, M)
|
7b72b33b87e454138c144a791612d7af8422b0a6
| 3,646,225
|
from typing import List
def create_unique_views(rows: list, fields: List[str]):
"""Create views for each class objects, default id should be a whole row"""
views = {}
for r in rows:
values = [r[cname] for cname in fields]
if any(isinstance(x, list) for x in values):
if all(isinstance(x, list) for x in values) and len({len(x) for x in values}) == 1:
# all its value is in a list
for j in range(len(values[0])):
key = ",".join(str(values[i][j]) for i in range(len(values)))
views[key] = [values[i][j] for i in range(len(values))]
else:
# assert False
key = ",".join((str(x) for x in values))
views[key] = values
else:
key = ",".join((str(x) for x in values))
views[key] = values
views = [{cname: r[i] for i, cname in enumerate(fields)} for r in views.values()]
return views
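# Hedged usage sketch: the rows and field names are illustrative. Scalar values are
# deduplicated by their joined key; parallel lists of equal length yield one view per index.
rows = [
    {"city": "Oslo", "country": "Norway"},
    {"city": "Oslo", "country": "Norway"},                      # duplicate key, collapsed
    {"city": ["Bergen", "Tromso"], "country": ["Norway", "Norway"]},
]
print(create_unique_views(rows, ["city", "country"]))
# [{'city': 'Oslo', 'country': 'Norway'},
#  {'city': 'Bergen', 'country': 'Norway'},
#  {'city': 'Tromso', 'country': 'Norway'}]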
|
24b311c8b013f742e69e7067c1f1bafe0044c940
| 3,646,226
|
from typing import List
import torch
def check_shape_function(invocations: List[Invocation]):
"""Decorator that automatically tests a shape function.
The shape function, which is expected to be named systematically with
`〇` instead of `.`, is tested against the corresponding op in
`torch.ops.*` function using the given invocations.
"""
def decorator(f):
# `torch.ops.*` functions are overloaded already, so we don't need
# to pass in the overload name.
ns, unqual = f.__name__.split("〇")[:2]
op = getattr(getattr(torch.ops, ns), unqual)
for invocation in invocations:
shape_fn_error, op_error = None, None
try:
result_shapes = _normalize_multiple_results_to_list(f(
*invocation.to_shape_function_args(),
**invocation.kwargs))
except Exception as e:
shape_fn_error = f"{e}"
try:
golden_results = _normalize_multiple_results_to_list(op(
*invocation.to_real_op_args(),
**invocation.kwargs))
except Exception as e:
op_error = f"{e}"
def report(error_message: str):
raise ValueError(f"For shape function {f.__name__!r} with invocation {invocation}: {error_message}")
# Check for error behavior.
if invocation.is_expected_to_raise_exception():
if shape_fn_error is None and op_error is None:
report(f"Expected to raise an exception, but neither shape function nor op raised an exception")
if shape_fn_error is None:
report(f"Op raised error {op_error!r}, but shape function did not.")
if op_error is None:
report(f"Shape function raised error {shape_fn_error!r}, but op did not.")
else:
if shape_fn_error is not None and op_error is not None:
report(f"Both shape function and op raised errors, but were not expected to. Shape function raised error {shape_fn_error!r} and op raised error {op_error!r}.")
if shape_fn_error is not None:
report(f"Shape function raised error {shape_fn_error!r} but op did not raise any error.")
if op_error is not None:
report(f"Op raised error {op_error!r} but shape function did not raise any error.")
if shape_fn_error is not None or op_error is not None:
# If both raised errors, then that is good -- the shape function
# and the real op should agree on the erroneous cases.
# The exact error message might differ though.
if shape_fn_error is not None and op_error is not None:
continue
# Check for matching results.
if len(result_shapes) != len(golden_results):
report(f"Expected {len(golden_results)} result shapes, got {len(result_shapes)}")
for result_shape, golden_result in zip(result_shapes, golden_results):
for dimension_size, golden_dimension_size in zip(result_shape, golden_result.shape):
if dimension_size != golden_dimension_size:
report(f"Expected result shape {golden_result.shape}, got {result_shape}")
return f
return decorator
|
be237f2209f1e2007b53a1ecbe0c277bf2b37fe7
| 3,646,227
|
def _prepare_images(ghi, clearsky, daytime, interval):
"""Prepare data as images.
Performs pre-processing steps on `ghi` and `clearsky` before
returning images for use in the shadow detection algorithm.
Parameters
----------
ghi : Series
Measured GHI. [W/m^2]
clearsky : Series
Expected clearsky GHI. [W/m^2]
daytime : Series
Boolean series with True for daytime and False for night.
interval : int
Time between data points in `ghi`. [minutes]
Returns
-------
ghi_image : np.ndarray
Image form of `ghi`
clearsky_image : np.ndarray
Image form of `clearsky`
clouds_image : np.ndarray
Image of the cloudy periods in `ghi`
image_times : pandas.DatetimeIndex
Index for the data included in the returned images. Leading
and trailing days with incomplete data are not included in the
image, these times are needed to build a Series from the image
later on.
"""
# Fill missing times by interpolation. Missing data at the
# beginning or end of the series is not filled in, and will be
# excluded from the images used for shadow detection.
image_width = 1440 // interval
ghi = ghi.interpolate(limit_area='inside')
# drop incomplete days.
ghi = ghi[ghi.resample('D').transform('count') == image_width]
image_times = ghi.index
ghi_image = _to_image(ghi.to_numpy(), image_width)
scaled_ghi = (ghi * 1000) / np.max(_smooth(ghi_image))
scaled_clearsky = (clearsky * 1000) / clearsky.max()
scaled_clearsky = scaled_clearsky.reindex_like(scaled_ghi)
daytime = daytime.reindex_like(scaled_ghi)
# Detect clouds.
window_size = 50 // interval
clouds = _detect_clouds(scaled_ghi, scaled_clearsky, window_size)
cloud_mask = _to_image(clouds.to_numpy(), image_width)
# Interpolate across days (i.e. along columns) to remove clouds
# replace clouds with nans
#
    # This could probably be done directly with scipy.interpolate.interp1d,
# but the easiest approach is to turn the image into a dataframe and
# interpolate along the columns.
cloudless_image = ghi_image.copy()
cloudless_image[cloud_mask] = np.nan
clouds_image = ghi_image.copy()
clouds_image[~cloud_mask] = np.nan
ghi_image = pd.DataFrame(cloudless_image).interpolate(
axis=0,
limit_direction='both'
).to_numpy()
# set night to nan
ghi_image[~_to_image(daytime.to_numpy(), image_width)] = np.nan
return (
ghi_image,
_to_image(scaled_clearsky.to_numpy(), image_width),
clouds_image,
image_times
)
|
9433cce0ccb9dae5e5b364fce42f8ed391adf239
| 3,646,228
|
import numpy
def interleaved_code(modes: int) -> BinaryCode:
""" Linear code that reorders orbitals from even-odd to up-then-down.
In up-then-down convention, one can append two instances of the same
code 'c' in order to have two symmetric subcodes that are symmetric for
spin-up and -down modes: ' c + c '.
In even-odd, one can concatenate with the interleaved_code
to have the same result:' interleaved_code * (c + c)'.
This code changes the order of modes from (0, 1 , 2, ... , modes-1 )
    to (0, modes/2, 1, modes/2+1, ... , modes-1, modes/2 - 1).
n_qubits = n_modes.
Args: modes (int): number of modes, must be even
Returns (BinaryCode): code that interleaves orbitals
"""
if modes % 2 == 1:
raise ValueError('number of modes must be even')
else:
mtx = numpy.zeros((modes, modes), dtype=int)
for index in numpy.arange(modes // 2, dtype=int):
mtx[index, 2 * index] = 1
mtx[modes // 2 + index, 2 * index + 1] = 1
return BinaryCode(mtx, linearize_decoder(mtx.transpose()))
|
e9b178165c8fe1e33d880dee056a3e397fa90bce
| 3,646,229
|
from sklearn.neighbors import NearestNeighbors
def nearest_neighbors(data, args):
"""
最近邻
"""
nbrs = NearestNeighbors(**args)
nbrs.fit(data)
# 计算测试数据对应的最近邻下标和距离
# distances, indices = nbrs.kneighbors(test_data)
return nbrs
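# Hedged usage sketch: random 2-D points and the parameter dict below are illustrative.
import numpy as np

points = np.random.RandomState(0).rand(100, 2)
model = nearest_neighbors(points, {"n_neighbors": 3})
distances, indices = model.kneighbors(points[:5])   # neighbours of the first five points
print(indices.shape)   # (5, 3)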
|
d91014d082f7a15a26e453d32272381b7578c9de
| 3,646,230
|
import numpy as np
def gradient(v, surf):
    """
    :param v: vector of x, y, z coordinates
    :param surf: which implicit surface is being used to approximate the structure of this phase
    :return: The gradient vector (which is normal to the surface at v)
    """
x = v[0]
y = v[1]
z = v[2]
if surf == 'Ia3d' or surf == 'gyroid' or surf == 'ia3d':
a = np.cos(x)*np.cos(y) - np.sin(x)*np.sin(z)
b = -np.sin(y)*np.sin(x) + np.cos(y)*np.cos(z)
c = -np.sin(y)*np.sin(z) + np.cos(z)*np.cos(x)
elif surf == 'Pn3m' or surf == 'pn3m':
a = np.cos(x)*np.sin(y)*np.sin(z) + np.cos(x)*np.cos(y)*np.cos(z) - np.sin(x)*np.sin(y)*np.cos(z) - np.sin(x)*np.cos(y)*np.sin(z)
b = np.sin(x)*np.cos(y)*np.sin(z) - np.sin(x)*np.sin(y)*np.cos(z) + np.cos(x)*np.cos(y)*np.cos(z) - np.cos(x)*np.sin(y)*np.sin(z)
c = np.sin(x)*np.sin(y)*np.cos(z) - np.sin(x)*np.cos(y)*np.sin(z) - np.cos(x)*np.sin(y)*np.sin(z) + np.cos(x)*np.cos(y)*np.cos(z)
elif surf == 'sphere':
a = 2*x
b = 2*y
c = 2*z
return np.array([a, b, c])
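# Hedged usage sketch: evaluate the surface normal of the gyroid at an arbitrary point
# (the point itself is illustrative).
import numpy as np

point = np.array([0.1, 0.2, 0.3])
normal = gradient(point, 'gyroid')
print(normal / np.linalg.norm(normal))   # unit normal to the implicit surface at `point`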
|
2105c491f122508531816d15146801b0dd1c9b75
| 3,646,231
|
def authenticated_client(client, user):
"""
"""
client.post(
'/login',
data={'username': user.username, 'password': 'secret'},
follow_redirects=True,
)
return client
|
5f96ef56179848f7d348ffda67fc08cfccb080ed
| 3,646,232
|
def viterbi(obs, states, start_p, trans_p, emit_p):
"""
請參考李航書中的算法10.5(維特比算法)
HMM共有五個參數,分別是觀察值集合(句子本身, obs),
狀態值集合(all_states, 即trans_p.keys()),
初始機率(start_p),狀態轉移機率矩陣(trans_p),發射機率矩陣(emit_p)
此處的states是為char_state_tab_P,
這是一個用來查詢漢字可能狀態的字典
此處沿用李航書中的符號,令T=len(obs),令N=len(trans_p.keys())
"""
"""
維特比算法第1步:初始化
"""
#V:李航書中的delta,在時刻t狀態為i的所有路徑中之機率最大值
V = [{}] # tabular
#李航書中的Psi,T乘N維的矩陣
#表示在時刻t狀態為i的所有單個路徑(i_1, i_2, ..., i_t-1, i)中概率最大的路徑的第t-1個結點
mem_path = [{}]
#共256種狀態,所謂"狀態"是:"分詞標籤(BMES)及詞性(v, n, nr, d, ...)的組合"
all_states = trans_p.keys()
#obs[0]表示句子的第一個字
#states.get(obs[0], all_states)表示該字可能是由哪些狀態發射出來的
for y in states.get(obs[0], all_states): # init
#在時間點0,狀態y的log機率為:
#一開始在y的log機率加上在狀態y發射obs[0]觀察值的log機率
V[0][y] = start_p[y] + emit_p[y].get(obs[0], MIN_FLOAT)
#時間點0在狀態y,則前一個時間點會在哪個狀態
mem_path[0][y] = ''
"""
維特比算法第2步:遞推
"""
#obs: 觀察值序列
for t in xrange(1, len(obs)):
V.append({})
mem_path.append({})
#prev_states = get_top_states(V[t-1])
#mem_path[t - 1].keys(): 前一個時間點在什麼狀態,這裡以x代表
#只有在len(trans_p[x])>0(即x有可能轉移到其它狀態)的情況下,prev_states才保留x
prev_states = [
x for x in mem_path[t - 1].keys() if len(trans_p[x]) > 0]
#前一個狀態是x(prev_states中的各狀態),那麼現在可能在什麼狀態(y)
prev_states_expect_next = set(
(y for x in prev_states for y in trans_p[x].keys()))
#set(states.get(obs[t], all_states)):句子的第t個字可能在什麼狀態
#prev_states_expect_next:由前一個字推斷,當前的字可能在什麼狀態
#obs_states:以上兩者的交集
obs_states = set(
states.get(obs[t], all_states)) & prev_states_expect_next
#如果交集為空,則依次選取prev_states_expect_next或all_states
if not obs_states:
obs_states = prev_states_expect_next if prev_states_expect_next else all_states
        for y in obs_states:
            # Equation 10.45 in Li Hang's book
            # y0 is the state at the previous time step
            # the argument of max is a sequence of tuples: [(prob_1, state_1), (prob_2, state_2), ...]
            # V[t - 1][y0]: log probability of being in state y0 at time t-1
            # trans_p[y0].get(y, MIN_INF): log probability of the transition from y0 to y
            # emit_p[y].get(obs[t], MIN_FLOAT): log probability of emitting obs[t] from state y
            # their sum is the log probability of reaching state y at time t via state y0
            prob, state = max((V[t - 1][y0] + trans_p[y0].get(y, MIN_INF) +
                               emit_p[y].get(obs[t], MIN_FLOAT), y0) for y0 in prev_states)
            # record the maximum in V and mem_path
            V[t][y] = prob
            # if we are in state y at time t, then time t-1 was most likely in `state`
            mem_path[t][y] = state
"""
維特比算法第3步:終止
"""
#mem_path[-1].keys():最後一個時間點可能在哪些狀態
#V[-1][y]:最後一個時間點在狀態y的機率
#把mem_path[-1]及V[-1]打包成一個list of tuple
last = [(V[-1][y], y) for y in mem_path[-1].keys()]
# if len(last)==0:
# print obs
#最後一個時間點最有可能在狀態state,其機率為prob
#在jieba/finalseg/__init__.py的viterbi函數中有限制句子末字的分詞標籤需為E或S
#這裡怎麼沒做這個限制?
prob, state = max(last)
"""
維特比算法第4步:最優路徑回溯
"""
route = [None] * len(obs)
i = len(obs) - 1
while i >= 0:
route[i] = state
#時間點i在狀態state,則前一個時間點最有可能在狀態mem_path[i][state]
state = mem_path[i][state]
i -= 1
return (prob, route)
|
42f3037042114c0b4e56053ac6dfc6bd77423d39
| 3,646,233
|
def add_merge_variants_arguments(parser):
"""
    Add arguments to a parser for sub-command "merge_variants"
    :param parser: argparse object
:return:
"""
parser.add_argument(
"-vp",
"--vcf_pepper",
type=str,
required=True,
help="Path to VCF file from PEPPER SNP."
)
parser.add_argument(
"-vd",
"--vcf_deepvariant",
type=str,
required=True,
help="Path to VCF file from DeepVariant."
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
required=True,
help="Path to output directory."
)
return parser
|
5fd6bc936ba1d17ea86a49499e1f6b816fb0a389
| 3,646,234
|
import graph_scheduler
import types
def set_time_scale_alias(name: str, target: TimeScale):
"""Sets an alias named **name** of TimeScale **target**
Args:
name (str): name of the alias
target (TimeScale): TimeScale that **name** will refer to
"""
name_aliased_time_scales = list(filter(
lambda e: _time_scale_aliases[e] == name,
_time_scale_aliases
))
if len(name_aliased_time_scales) > 0:
raise ValueError(f"'{name}' is already aliased to {name_aliased_time_scales[0]}")
try:
target = getattr(TimeScale, target)
except TypeError:
pass
except AttributeError as e:
raise ValueError(f'Invalid TimeScale {target}') from e
_time_scale_aliases[target] = name
setattr(TimeScale, name, target)
def getter(self):
return getattr(self, _time_scale_to_attr_str(target))
def setter(self, value):
setattr(self, _time_scale_to_attr_str(target), value)
prop = property(getter).setter(setter)
setattr(Time, name.lower(), prop)
setattr(SimpleTime, name.lower(), prop)
# alias name in style of a class name
new_class_segment_name = _time_scale_to_class_str(name)
for cls_name, cls in graph_scheduler.__dict__.copy().items():
# make aliases of conditions that contain a TimeScale name (e.g. AtEnvironmentStateUpdate)
target_class_segment_name = _time_scale_to_class_str(target)
if isinstance(cls, (type, types.ModuleType)):
if isinstance(cls, types.ModuleType):
try:
if _alias_docs_warning_str not in cls.__doc__:
cls.__doc__ = f'{_alias_docs_warning_str}{cls.__doc__}'
except TypeError:
pass
_multi_substitute_docstring(
cls,
{
target.name: name,
target_class_segment_name: new_class_segment_name,
}
)
if target_class_segment_name in cls_name:
new_cls_name = cls_name.replace(
target_class_segment_name,
new_class_segment_name
)
setattr(graph_scheduler.condition, new_cls_name, cls)
setattr(graph_scheduler, new_cls_name, cls)
graph_scheduler.condition.__all__.append(new_cls_name)
graph_scheduler.__all__.append(new_cls_name)
|
889f2b70735a11ce8330e58b7294d3d115334d5f
| 3,646,235
|
import cv2
import numpy as np
def find_bounding_boxes(img):
"""
Find bounding boxes for blobs in the picture
:param img - numpy array 1xWxH, values 0 to 1
:return: bounding boxes of blobs [x0, y0, x1, y1]
"""
img = util.torch_to_cv(img)
img = np.round(img)
img = img.astype(np.uint8)
contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # params copied from tutorial
bounding_boxes = []
for c in contours:
x, y, w, h = cv2.boundingRect(c)
bounding_boxes.append([x, y, x + w, y + h])
return bounding_boxes
|
7b7de4d163b18b099721c39b69721e477c473c16
| 3,646,236
|
from typing import Optional
from datetime import date, datetime
from itertools import cycle, islice
def resolve_day(day: str, next_week: Optional[bool] = False) -> int:
"""Resolves day to index value."""
week = ['monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday']
today = datetime.now()
today_idx = date.weekday(today)
day_idx = week.index(day)
temp_list = list(islice(cycle(week), today_idx, 2 * today_idx + day_idx))
if next_week:
return len(temp_list) - 1
else:
return temp_list.index(day)
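# Hedged usage sketch: the returned offset depends on today's weekday, so no fixed
# output is shown; 'friday' is an illustrative argument.
print(resolve_day('friday'))                   # days from today until the coming Friday
print(resolve_day('friday', next_week=True))   # offset derived from the longer cycled slice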
|
d09aba564f0293ac8b92699427199998bf7e869f
| 3,646,237
|
def write_image(image,
path,
bit_depth='float32',
method='OpenImageIO',
**kwargs):
"""
Writes given image at given path using given method.
Parameters
----------
image : array_like
Image data.
path : unicode
Image path.
bit_depth : unicode, optional
**{'float32', 'uint8', 'uint16', 'float16'}**,
Bit depth to write the image at, for the *Imageio* method, the image
data is converted with :func:`colour.io.convert_bit_depth` definition
prior to writing the image.
method : unicode, optional
**{'OpenImageIO', 'Imageio'}**,
Write method, i.e. the image library used for writing images.
Other Parameters
----------------
attributes : array_like, optional
{:func:`colour.io.write_image_OpenImageIO`},
An array of :class:`colour.io.ImageAttribute_Specification` class
instances used to set attributes of the image.
Returns
-------
bool
Definition success.
Notes
-----
- If the given method is *OpenImageIO* but the library is not available
writing will be performed by *Imageio*.
- If the given method is *Imageio*, ``kwargs`` is passed directly to the
wrapped definition.
Examples
--------
Basic image writing:
>>> import os
>>> import colour
>>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
... 'CMS_Test_Pattern.exr')
>>> image = read_image(path) # doctest: +SKIP
>>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
... 'CMSTestPattern.tif')
>>> write_image(image, path) # doctest: +SKIP
True
Advanced image writing while setting attributes using *OpenImageIO*:
>>> compression = ImageAttribute_Specification('Compression', 'none')
>>> write_image(image, path, bit_depth='uint8', attributes=[compression])
... # doctest: +SKIP
True
"""
method = validate_method(method, WRITE_IMAGE_METHODS)
if method == 'openimageio': # pragma: no cover
if not is_openimageio_installed():
usage_warning(
'"OpenImageIO" related API features are not available, '
'switching to "Imageio"!')
method = 'Imageio'
function = WRITE_IMAGE_METHODS[method]
if method == 'openimageio': # pragma: no cover
kwargs = filter_kwargs(function, **kwargs)
return function(image, path, bit_depth, **kwargs)
|
754bfa4ffbc14fd7f8123c2cab754d0f3814ed05
| 3,646,238
|
import logging
import pandas as pd
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
def get_gsheet_data():
"""
    Gets all of the data in the specified Google Sheet.
"""
# Get Credentials from JSON
logging.info('Attempting to read values to Google Sheet.')
creds = ServiceAccountCredentials.from_json_keyfile_name('TrackCompounds-1306f02bc0b1.json', SCOPES)
logging.info('Authorizing Google API credentials.')
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,
range=READ_RANGE).execute()
data = result.get('values')
# Turn data into a DataFrame
df = pd.DataFrame(data[1:], columns=data[0])
logging.info('Successfully read G-Sheet data into a DataFrame.')
return df
|
872671a1bc9b17fec5e6db3fb4a7172567da4eff
| 3,646,239
|
def name_tensor(keras_tensor, name):
"""
Add a layer with this ``name`` that does nothing.
    Useful to mark a tensor.
"""
return Activation('linear', name=name)(keras_tensor)
|
9ac83e8974efa2e48ab14d150a5da47ac7c23fb5
| 3,646,240
|
import operator
def pluck(ind, seqs, default=no_default):
""" plucks an element or several elements from each item in a sequence.
``pluck`` maps ``itertoolz.get`` over a sequence and returns one or more
elements of each item in the sequence.
This is equivalent to running `map(curried.get(ind), seqs)`
``ind`` can be either a single string/index or a sequence of
strings/indices.
``seqs`` should be sequence containing sequences or dicts.
e.g.
>>> data = [{'id': 1, 'name': 'Cheese'}, {'id': 2, 'name': 'Pies'}]
>>> list(pluck('name', data))
['Cheese', 'Pies']
>>> list(pluck([0, 1], [[1, 2, 3], [4, 5, 7]]))
[(1, 2), (4, 5)]
See Also:
get
map
"""
if default is no_default:
if isinstance(ind, list):
return map(operator.itemgetter(*ind), seqs)
return map(operator.itemgetter(ind), seqs)
elif isinstance(ind, list):
return (tuple(_get(item, seq, default) for item in ind)
for seq in seqs)
return (_get(ind, seq, default) for seq in seqs)
|
9bb31f94115eec0ba231c3c2bf9c067a52efca52
| 3,646,241
|
import networkx as nx
def shape_metrics(model):
    """
Calculates three different shape metrics of the current graph of the model.
Shape metrics: 1. Density 2. Variance of nodal degree 3. Centrality
The calculations are mainly based on the degree statistics of the current
graph
For more information one is referred to the article 'Geographical
influences of an emerging network of gang rivalries'
(Rachel A. Hegemann et al., 2011)
Input:
model = Model object
Output:
Tuple containing the three shape metrics in the order described above.
"""
# Determine total degree, average degree, max degree and density graph
degrees = [degree[1] for degree in model.gr.degree]
total_degree = sum(degrees)
ave_degree = total_degree / model.config.total_gangs
max_degree = max(degrees)
graph_density = nx.density(model.gr)
# Determine variance of nodal degree and centrality
variance_degree, centrality = 0, 0
for degree in degrees:
variance_degree += ((degree - ave_degree) * (degree - ave_degree))
centrality += max_degree - degree
    # Normalize variance of nodal degree and centrality
variance_degree /= model.config.total_gangs
centrality /= ((model.config.total_gangs - 1) *
(model.config.total_gangs - 2))
    # Returns a tuple containing the three statistics
return graph_density, variance_degree, centrality
|
003e72145ade222a7ec995eae77f6028527e8ba9
| 3,646,242
|
def calculate_actual_sensitivity_to_removal(jac, weights, moments_cov, params_cov):
"""calculate the actual sensitivity to removal.
The sensitivity measure is calculated for each parameter wrt each moment.
It answers the following question: How much precision would be lost if the kth
moment was excluded from the estimation if "weights" is used as weighting
matrix?
Args:
        jac (np.ndarray or pandas.DataFrame): The jacobian of simulated moments with
            respect to params.
weights (np.ndarray or pandas.DataFrame): The weighting matrix used for
msm estimation.
moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the
empirical moments.
params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the
parameter estimates.
Returns:
np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)
"""
m4 = []
_jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments(
jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov
)
for k in range(len(_weights)):
weight_tilde_k = np.copy(_weights)
weight_tilde_k[k, :] = 0
weight_tilde_k[:, k] = 0
sigma_tilde_k = cov_robust(_jac, weight_tilde_k, _moments_cov)
m4k = sigma_tilde_k - _params_cov
m4k = m4k.diagonal()
m4.append(m4k)
m4 = np.array(m4).T
params_variances = np.diagonal(_params_cov)
e4 = m4 / params_variances.reshape(-1, 1)
if names:
e4 = pd.DataFrame(e4, index=names.get("params"), columns=names.get("moments"))
return e4
|
30f51ecc2c53126b6e46f5301bef857d104381cf
| 3,646,243
|
def escape_html(text: str) -> str:
"""Replaces all angle brackets with HTML entities."""
    return text.replace('<', '&lt;').replace('>', '&gt;')
|
f853bcb3a69b8c87eb3d4bcea5bbca66376c7db4
| 3,646,244
|
import random
def pptest(n):
"""
Simple implementation of Miller-Rabin test for
determining probable primehood.
"""
bases = [random.randrange(2,50000) for _ in range(90)]
    # if any of the bases is a factor, n is composite and we're done
if n<=1: return 0
for b in bases:
if n%b==0: return 0
tests,s = 0,0
m = n-1
# turning (n-1) into (2**s) * m
while not m&1: # while m is even
m >>= 1
s += 1
for b in bases:
tests += 1
isprob = algP(m,s,b,n)
if not isprob: break
if isprob: return (1-(1./(4**tests)))
else: return 0
|
3a74cfebb6b14659a34ab0b6c761efd16d2736fa
| 3,646,245
|
def schedule_conv2d_NCHWc(outs):
"""Schedule for conv2d_NCHW[x]c
Parameters
----------
outs : Array of Tensor
The computation graph description of conv2d_NCHWc
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
|
a24cb4f6e1dd3d8891bc82df75f53c8afe709727
| 3,646,246
|
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t, W_dash_b2_d_t,
theta_ex_d_Ave_d, L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の消費電力量 (1)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯量 (L/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
W_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯量 (L/h)
theta_ex_d_Ave_d: 日平均外気温度 (℃)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機の消費電力量 (kWh/h)
"""
# 待機時及び水栓給湯時の補機による消費電力量 (2)
E_E_hs_aux1_d_t = get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t,
theta_ex_d_Ave_d)
# 湯はり時の補機による消費電力量 (3)
E_E_hs_aux2_d_t = calc_E_E_hs_aux2_d_t(W_dash_b2_d_t)
# 保温時の補機による消費電力量 (4)
E_E_hs_aux3_d_t = calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t)
# 1日当たりの給湯機の消費電力量 (1)
E_E_hs_d_t = E_E_hs_aux1_d_t + E_E_hs_aux2_d_t + E_E_hs_aux3_d_t
return E_E_hs_d_t
|
00cf40b221d2a24081d9c362fb5e8474057ddb93
| 3,646,247
|
import functools
def keras_quantile_loss(q):
"""Return keras loss for quantile `q`."""
func = functools.partial(_tilted_loss_scalar, q)
    func.__name__ = f'quantile loss, q={q}'
return func
|
173a9410c2994bd02e5845a85cc2050489ce2d12
| 3,646,248
|
from typing import Dict
import os
import json
def _load_template_file() -> Dict:
"""
Read and validate the registration definition template file, located in the
same directory as this source file
Returns
-------
Dict
Contents of the registration definition template file JSON, converted to
a Python dictionary
"""
src_path: str = f"{SCRIPT_PATH}/{TEMPLATE_FILENAME}"
if not os.path.exists(src_path):
print("Error: Cannot find registration definition template file")
with open(src_path) as registration_definition_template_file:
try:
data = registration_definition_template_file.read()
except:
print("Error: Cannot read registration definition template file")
return None
# Reconstruct the dict object
readin = json.loads(data)
if type(readin) != dict:
print("Error: Corrupted registration definition template file")
return None
return readin
|
db3d3d8af9d78e8949dfb7428e45cf926346b7d0
| 3,646,249
|
from typing import Mapping
from typing import Union
def _reactions_table(reaction: reaction_pb2.Reaction, dataset_id: str) -> Mapping[str, Union[str, bytes, None]]:
"""Adds a Reaction to the 'reactions' table.
Args:
reaction: Reaction proto.
dataset_id: Dataset ID.
Returns:
Dict mapping string column names to values.
"""
values = {
"dataset_id": dataset_id,
"reaction_id": reaction.reaction_id,
"serialized": reaction.SerializeToString().hex(),
}
try:
reaction_smiles = message_helpers.get_reaction_smiles(reaction, generate_if_missing=True)
# Control for REACTION_CXSMILES.
values["reaction_smiles"] = reaction_smiles.split()[0]
except ValueError:
values["reaction_smiles"] = None
if reaction.provenance.doi:
values["doi"] = reaction.provenance.doi
else:
values["doi"] = None
return values
|
b09df06a13d1f1d42ab22da1c6bcc00c48c2e81d
| 3,646,250
|
from typing import List
from typing import Dict
from typing import Any
import time
import json
import dateparser
def get_incidents_for_alert(**kwargs) -> list:
"""
Return List of incidents for alert.
:param kwargs: Contains all required arguments.
:return: Incident List for alert.
"""
incidents: List[Dict[str, Any]] = []
headers = {
'X-FeApi-Token': kwargs['client'].get_api_token(),
'Accept': CONTENT_TYPE_JSON
}
params = {
'start_time': time.strftime(API_SUPPORT_DATE_FORMAT, time.localtime(kwargs['start_time'])),
'duration': '48_hours'
}
if kwargs['malware_type']:
params['malware_type'] = kwargs['malware_type']
# http call
resp = kwargs['client'].http_request(method="GET", url_suffix=URL_SUFFIX['GET_ALERTS'], params=params,
headers=headers)
total_records = resp.get('alertsCount', 0)
if total_records > 0:
if kwargs['replace_alert_url']:
replace_alert_url_key_domain_to_instance_url(resp.get('alert', []), kwargs['instance_url'])
count = kwargs['fetch_count']
for alert in resp.get('alert', []):
# set incident
context_alert = remove_empty_entities(alert)
context_alert['incidentType'] = ALERT_INCIDENT_TYPE
if count >= kwargs['fetch_limit']:
break
occurred_date = dateparser.parse(context_alert.get('occurred', ''))
assert occurred_date is not None
incident = {
'name': context_alert.get('name', ''),
'occurred': occurred_date.strftime(
DATE_FORMAT_WITH_MICROSECOND),
'rawJSON': json.dumps(context_alert)
}
if not kwargs['is_test'] and alert.get('uuid', '') and kwargs['fetch_artifacts']:
set_attachment_file(client=kwargs['client'], incident=incident, uuid=alert.get('uuid', ''),
headers=headers)
remove_nulls_from_dictionary(incident)
incidents.append(incident)
count += 1
return incidents
|
48d2519d5e5aa25d6b0fc6a6e2c959489e861e1c
| 3,646,251
|
def pbar(*args, **kwargs):
"""
Progress bar.
This function is an alias of :func:`dh.thirdparty.tqdm.tqdm()`.
"""
return dh.thirdparty.tqdm.tqdm(*args, **kwargs)
|
3de7101becc015e402aa067c676104f34679e549
| 3,646,252
|
import torch
def calc_driver_mask(n_nodes, driver_nodes: set, device='cpu', dtype=torch.float):
"""
Calculates a binary vector mask over graph nodes with unit value on the drive indeces.
    :param n_nodes: number of nodes in the graph
    :param driver_nodes: driver node indices.
:param device: the device of the `torch.Tensor`
:param dtype: the data type of the `torch.Tensor`
:return: the driver mask vector.
"""
driver_mask = torch.zeros(n_nodes, device=device, dtype=dtype)
driver_mask[list(driver_nodes)] = 1
return driver_mask
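# Hedged usage sketch: a 5-node graph where nodes 0 and 3 are driver nodes.
mask = calc_driver_mask(5, {0, 3})
print(mask)   # tensor([1., 0., 0., 1., 0.])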
|
2d2a08a86629ece190062f68dd25fc450d0fd84e
| 3,646,253
|
from typing import List
def all_fermions(fields: List[Field]) -> bool:
"""Checks if all fields are fermions."""
boolean = True
for f in fields:
boolean = boolean and f.is_fermion
return boolean
|
eb54d5ad5b3667e67634b06d2943e2d14c8a0c61
| 3,646,254
|
def open_file(name):
"""
Return an open file object.
"""
return open(name, 'r')
|
8921ee51e31ac6c64d9d9094cedf57502a2aa436
| 3,646,255
|
import math
def _bit_length(n):
"""Return the number of bits necessary to store the number in binary."""
try:
return n.bit_length()
except AttributeError: # pragma: no cover (Python 2.6 only)
return int(math.log(n, 2)) + 1
|
bea6cb359c7b5454bdbb1a6c29396689035592d7
| 3,646,256
|
import numpy as np
def read_dwd_percentile_old(filename):
"""
Read data from .txt file into Iris cube
:param str filename: file to process
:returns: cube
"""
# use header to hard code the final array shapes
longitudes = np.arange(-179.5, 180.5, 1.)
latitudes = np.arange(89.5, -90.5, -1.)
data = np.ma.zeros((latitudes.shape[0], longitudes.shape[0]))
    # read in the data
indata = np.genfromtxt(filename, dtype=(float))
this_lat = []
tl = 0
# process each row, append until have complete latitude band
for row in indata:
this_lat += [row]
if len(this_lat) == longitudes.shape[0]:
# copy into final array and reset
data[tl, :] = this_lat
tl += 1
this_lat = []
# mask the missing values
data = np.ma.masked_where(data <= -999.000, data)
cube = utils.make_iris_cube_2d(data, latitudes, longitudes, "R90p", "%")
return cube
|
4d8366606c4e00eb43aa2c6a50a735617c7ca242
| 3,646,257
|
import base64
def media_post():
"""API call to store new media on the BiBli"""
data = request.get_json()
fname = "%s/%s" % (MUSIC_DIR, data["name"])
with open(fname, "wb") as file:
        file.write(base64.b64decode(data["b64"]))
audiofile = MP3(fname)
track = {"file": data["name"], "title": "", "artist": "?"}
tags = audiofile.tags
if tags:
track["artist"] = tags["artist"][0] if "artist" in tags else "?"
track["title"] = tags["title"][0] if "title" in tags else None
if audiofile.info:
seconds = int(audiofile.info.length)
        minutes = seconds // 60
seconds = seconds % 60
track["duration"] = "%s:%02d" % (minutes, seconds)
# make sure there's a title
if not track["title"]:
track["title"] = fname.replace(".mp3", "")
return jsonify({"music": track})
|
ff2aa7df2cdc6ea9bf3d657c7bb675e824639107
| 3,646,258
|
from pathlib import Path
def obtain_stores_path(options, ensure_existence=True) -> Path:
"""
Gets the store path if present in options or asks the user to input it
if not present between parsed_args.
:param options: the parsed arguments
:param ensure_existence: whether abort if the path does not exist
:return: the store path
"""
path = Path(get_option_or_default(options, Options.STORE_PATH, DEFAULT_SECRETS_PATH))
if ensure_existence and not path.exists():
abort(f"Error: path does not exist ({path})")
return path
|
8bb3ff96cdc57f85058ad7cd3c96552462b8de9f
| 3,646,259
|
import os
import pickle
from fnmatch import fnmatch
import pandas as pd
def extract_res_from_files(exp_dir_base):
"""Takes a directory (or directories) and searches recursively for
subdirs that have a test train and settings file
(meaning a complete experiment was conducted).
Returns:
A list of dictionaries where each element in the list
is an experiment and the dictionary has the following form:
data_dict = {"train_df": df1, "test_df":df2,
"settings":settings, "path": path}
"""
if isinstance(exp_dir_base, str):
exp_dirs = [exp_dir_base]
elif isinstance(exp_dir_base, list):
exp_dirs = exp_dir_base
else:
raise ValueError("exp_dir_base must be a string or a list")
TEST = "test.csv"
TRAIN = "train.csv"
SETTINGS = "settings.txt"
results = []
for exp_dir_base in exp_dirs:
for path, subdirs, files in os.walk(exp_dir_base):
test, train, settings = None, None, None
for name in files:
if fnmatch(name, TEST):
test = os.path.join(path, name)
elif fnmatch(name, TRAIN):
train = os.path.join(path, name)
elif fnmatch(name, SETTINGS):
settings = os.path.join(path, name)
if settings and not test and not train:
test, train = [], []
for name in files:
if fnmatch(name, "*test.csv"):
test.append(os.path.join(path, name))
elif fnmatch(name, "*train.csv"):
train.append(os.path.join(path, name))
if test and train and settings:
if isinstance(test, list):
dftest = []
for fp in test:
dftest.append(pd.read_csv(fp))
dftrain = []
for fp in train:
dftrain.append(pd.read_csv(fp))
else:
dftest = pd.read_csv(test)
dftrain = pd.read_csv(train)
with open(settings, "rb") as f:
settings = pickle.load(f)
model_data = get_model_specific_data(settings, path)
DA_data, mean_DF, last_df = get_DA_info(path, "mse_DA")
data_dict = {"train_df": dftrain,
"test_df":dftest,
"test_DA_df_final": last_df,
"DA_mean_DF": mean_DF,
"settings":settings,
"path": path,
"model_data": model_data,}
results.append(data_dict)
print("{} experiments conducted".format(len(results)))
sort_res = sorted(results, key = lambda x: x['path'])
return sort_res
|
167124abb8d4ab22abe4237cc6ccbf691b6eab90
| 3,646,260
|
import numpy as np
def compute_roc(distrib_noise, distrib_signal):
    """compute ROC given the two distributions
assuming the distributions are the output of np.histogram
example:
dist_l, _ = np.histogram(acts_l, bins=n_bins, range=histrange)
dist_r, _ = np.histogram(acts_r, bins=n_bins, range=histrange)
tprs, fprs = compute_roc(dist_l, dist_r)
Parameters
----------
distrib_noise : 1d array
the noise distribution
distrib_signal : 1d array
the noise+signal distribution
Returns
-------
1d array, 1d array
the roc curve: true positive rate, and false positive rate
"""
# assert len(distrib_noise) == len(distrib_signal)
# assert np.sum(distrib_noise) == np.sum(distrib_signal)
n_pts = len(distrib_noise)
tpr, fpr = np.zeros(n_pts), np.zeros(n_pts)
# slide the decision boundary from left to right
for b in range(n_pts):
fn, tp = np.sum(distrib_signal[:b]), np.sum(distrib_signal[b:])
tn, fp = np.sum(distrib_noise[:b]), np.sum(distrib_noise[b:])
# calculate TP rate and FP rate
tpr[b] = tp / (tp + fn)
fpr[b] = fp / (tn + fp)
return tpr, fpr
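# Minimal usage sketch (not from the original source): build two histograms on a
# shared binning and feed them to compute_roc. Sample sizes and ranges are
# arbitrary illustration values; numpy is assumed to be imported as np.
rng = np.random.default_rng(0)
acts_noise = rng.normal(0.0, 1.0, size=1000)
acts_signal = rng.normal(1.0, 1.0, size=1000)
dist_noise, _ = np.histogram(acts_noise, bins=50, range=(-4, 5))
dist_signal, _ = np.histogram(acts_signal, bins=50, range=(-4, 5))
tprs, fprs = compute_roc(dist_noise, dist_signal)
# Area under the curve; fprs runs from 1 down to 0, so reverse before integrating.
auc = np.trapz(tprs[::-1], fprs[::-1])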
|
d9a970435fd7b0dc79cfb4eca24a6e6779ce9300
| 3,646,261
|
from moviepy.editor import CompositeVideoClip
from moviepy.video.fx.all import resize
def zoom(clip, screensize, show_full_height=False):
"""Zooms preferably image clip for clip duration a little
To make slideshow more movable
Parameters
---------
clip
ImageClip on which to work with duration
screensize
Wanted (width, height) tuple
show_full_height
        Should this image be shown in full height. This is useful when 4:3
images are shown in 16:9 video and need to be shown in full.
Otherwise they are shown in full width and top and bottom is cut off.
Returns
------
VideoClip in desired size
"""
    # We need to resize tall images differently
if clip.h > clip.w or show_full_height:
clip_resized = (clip.fx(resize, width=screensize[0]*2)
.fx(resize, lambda t : 1+0.02*t)
.set_position(('center', 'center'))
)
clip_composited = CompositeVideoClip([clip_resized]) \
.fx(resize, height=screensize[1])
else:
clip_resized = (clip.fx(resize, height=screensize[1]*2)
.fx(resize, lambda t : 1+0.02*t)
.set_position(('center', 'center'))
)
clip_composited = CompositeVideoClip([clip_resized]) \
.fx(resize, width=screensize[0])
vid = CompositeVideoClip([clip_composited.set_position(('center', 'center'))],
size=screensize)
return vid
|
668ef2b598432fc18510a5a69b73e400eec42b17
| 3,646,262
|
import typing
def create(
host_address: str,
topics: typing.Sequence[str]) -> Subscriber:
"""
Create a subscriber.
    :param host_address: The address of the notify server.
:param topics: The topics to subscribe to.
:return: A Subscriber instance.
"""
return Subscriber(create_subscriber(host_address, topics))
|
40221a3be496528115afdc2eda063a006e08aadd
| 3,646,263
|
def solution(n):
"""
Return the product of a,b,c which are Pythagorean Triplet that satisfies
the following:
1. a < b < c
2. a**2 + b**2 = c**2
3. a + b + c = 1000
>>> solution(1000)
31875000
"""
product = -1
d = 0
for a in range(1, n // 3):
"""Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
"""
b = (n * n - 2 * a * n) // (2 * n - 2 * a)
c = n - a - b
if c * c == (a * a + b * b):
d = a * b * c
if d >= product:
product = d
return product
|
a0bf0f0bde50f536f6c91f2b52571be38e494cea
| 3,646,264
|
import sys
def python2_binary():
"""Tries to find a python 2 executable."""
# Using [0] instead of .major here to support Python 2.6.
if sys.version_info[0] == 2:
return sys.executable or "python"
else:
return "python2"
|
054870972e68aa5b9e8609d93b8a82ad5e95d634
| 3,646,265
|
def with_metaclass(meta, *bases):
"""A Python 2/3 compatible way of declaring a metaclass.
Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2
/_compat.py>`_ via `python-future <http://python-future.org>`_. License:
BSD.
Use it like this::
class MyClass(with_metaclass(MyMetaClass, BaseClass)):
pass
"""
class _Metaclass(meta):
"""Inner class"""
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, attrs):
if this_bases is None:
return type.__new__(cls, name, (), attrs)
return meta(name, bases, attrs)
return _Metaclass(str('temporary_class'), None, {})
|
0fe8e95fe29821e4cda8b66ff54ddd1b73e51243
| 3,646,266
|
import numpy as np
def energy(particles):
"""total kinetic energy up to a constant multiplier"""
return np.sum([particle.size ** 2 * np.linalg.norm(particle.speed) ** 2 for particle in particles])
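# Minimal usage sketch (not from the original source): energy() only needs objects
# exposing `size` and a `speed` vector, so a hypothetical namedtuple stands in here.
from collections import namedtuple

Particle = namedtuple("Particle", ["size", "speed"])
demo_particles = [Particle(1.0, np.array([1.0, 0.0])),
                  Particle(2.0, np.array([0.0, 3.0]))]
# 1**2 * 1**2 + 2**2 * 3**2 == 37.0
demo_energy = energy(demo_particles)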
|
29cae5c46d053f6fa558ba7a839d8b647c86d236
| 3,646,267
|
def post_adaptation_non_linear_response_compression_matrix(P_2, a, b):
"""
Returns the post adaptation non linear response compression matrix.
Parameters
----------
P_2 : numeric or array_like
Point :math:`P_2`.
a : numeric or array_like
Opponent colour dimension :math:`a`.
b : numeric or array_like
Opponent colour dimension :math:`b`.
Returns
-------
ndarray
Points :math:`P`.
Examples
--------
>>> P_2 = 24.2372054671
>>> a = -0.000624112068243
>>> b = -0.000506270106773
>>> post_adaptation_non_linear_response_compression_matrix(P_2, a, b)
... # doctest: +ELLIPSIS
array([ 7.9463202..., 7.9471152..., 7.9489959...])
"""
P_2 = as_float_array(P_2)
a = as_float_array(a)
b = as_float_array(b)
R_a = (460 * P_2 + 451 * a + 288 * b) / 1403
G_a = (460 * P_2 - 891 * a - 261 * b) / 1403
B_a = (460 * P_2 - 220 * a - 6300 * b) / 1403
RGB_a = tstack([R_a, G_a, B_a])
return RGB_a
|
6b7f8bcc62142e99c63c0e7a9b073e25f3c36e8c
| 3,646,268
|
import numpy as np
def forward(network, x):
    """
    Converts the input signal into the network's output.
    Args:
        network: Dict holding the network weights and biases
        x: Input array
    Returns:
        Output signal
    """
w1, w2, w3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['B1'], network['B2'], network['B3']
    # Layer 1
a1 = np.dot(x, w1) + b1
z1 = sigmoid(a1)
    # Layer 2
a2 = np.dot(z1, w2) + b2
z2 = sigmoid(a2)
    # Layer 3
a3 = np.dot(z2, w3) + b3
y = identity(a3)
return y
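# Minimal usage sketch (not from the original source): sigmoid and identity are
# helpers assumed by forward() but not shown above, so simple stand-ins are
# defined here; the 2-3-2-1 weights are random placeholders.
def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def identity(a):
    return a

rng = np.random.default_rng(0)
demo_network = {
    'W1': rng.normal(size=(2, 3)), 'B1': np.zeros(3),
    'W2': rng.normal(size=(3, 2)), 'B2': np.zeros(2),
    'W3': rng.normal(size=(2, 1)), 'B3': np.zeros(1),
}
demo_y = forward(demo_network, np.array([1.0, 0.5]))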
|
93c79a049e4c45f31a502aa81f840e48ff41d229
| 3,646,269
|
from typing import List
def join_with_and(words: List[str]) -> str:
"""Joins list of strings with "and" between the last two."""
if len(words) > 2:
return ", ".join(words[:-1]) + ", and " + words[-1]
elif len(words) == 2:
return " and ".join(words)
elif len(words) == 1:
return words[0]
else:
return ""
|
ecb2c1fa060657f2ea4173c4382a81c9b42beeb9
| 3,646,270
|
from nipype.interfaces.base import Bunch
def condition_generator(single_sub_data, params_name, duration = 2):
"""Build a bunch to show the relationship between each onset and parameter
    Build a bunch used to construct the design matrix for the next analysis step. This bunch describes the
    relationship between each onset and its parameters.
Args:
single_sub_data: A pandas DataFrame which contains data for one subject.
It must contains the information about run, onsets, and parameters.
        params_name: A list of names of the parameters you want to analyse.
The order of the names will be inherited to the design matrix next.
duration: The duration of a TR.
Returns:
        subject_info: A list of Bunch objects which can be resolved by the SpecifySPMModel interface in nipype.
"""
run_num = set(single_sub_data.run)
subject_info = []
for i in run_num:
tmp_table = single_sub_data[single_sub_data.run == i]
tmp_onset = tmp_table.onset.values.tolist()
pmod_names = []
pmod_params = []
pmod_poly = []
for param in params_name:
pmod_params.append(tmp_table[param].values.tolist())
pmod_names.append(param)
pmod_poly.append(1)
tmp_Bunch = Bunch(conditions=["trial_onset_run"+str(i)], onsets=[tmp_onset], durations=[[duration]],
pmod=[Bunch(name = pmod_names, poly = pmod_poly, param = pmod_params)])
subject_info.append(tmp_Bunch)
return subject_info
|
6a4743043a49b6a1703c3b42840256a58e07f3bd
| 3,646,271
|
from typing import Dict
from typing import Iterable
from pathlib import Path
def create_pkg(
meta: Dict, fpaths: Iterable[_res_t], basepath: _path_t = "", infer=True
):
"""Create a datapackage from metadata and resources.
If ``resources`` point to files that exist, their schema are inferred and
added to the package. If ``basepath`` is a non empty string, it is treated
as the parent directory, and all resource file paths are checked relative
to it.
Parameters
----------
meta : Dict
A dictionary with package metadata.
fpaths : Iterable[Union[str, Path, Dict]]
An iterator over different resources. Resources are paths to files,
relative to ``basepath``.
basepath : str (default: empty string)
Directory where the package files are located
infer : bool (default: True)
Whether to infer resource schema
Returns
-------
Package
A datapackage with inferred schema for all the package resources
"""
# for an interesting discussion on type hints with unions, see:
# https://stackoverflow.com/q/60235477/289784
# TODO: filter out and handle non-tabular (custom) data
existing = glom(meta.get("resources", []), Iter("path").map(Path).all())
basepath = basepath if basepath else getattr(meta, "basepath", basepath)
pkg = Package(resolve_licenses(meta), basepath=str(basepath))
def keep(res: _path_t) -> bool:
if Path(res) in existing:
return False
full_path = Path(basepath) / res
if not full_path.exists():
logger.warning(f"{full_path}: skipped, doesn't exist")
return False
return True
for res in fpaths:
spec = res if isinstance(res, dict) else {"path": res}
if not keep(spec["path"]):
continue
# NOTE: noop when Resource
_res = resource_(spec, basepath=basepath, infer=infer)
pkg.add_resource(_res)
return _ensure_posix(pkg)
|
a668e4f1eacbf14d39165f9dbd4af1b283078ef1
| 3,646,272
|
from typing import Callable
from typing import Any
def one_hot(
encoding_size: int, mapping_fn: Callable[[Any], int] = None, dtype="bool"
) -> DatasetTransformFn:
"""Transform data into a one-hot encoded label.
Arguments:
encoding_size {int} -- The size of the encoding
mapping_fn {Callable[[Any], int]} -- A function transforming the input data to an integer label. If not specified, labels are automatically inferred from the data.
Returns:
DatasetTransformFn -- A function to be passed to the Dataset.transform()
"""
mem, maxcount = {}, -1
def auto_label(x: Any) -> int:
nonlocal mem, maxcount, encoding_size
h = hash(str(x))
        if h not in mem:
maxcount += 1
if maxcount >= encoding_size:
raise ValueError(
"More unique labels found than were specified by the encoding size ({} given)".format(
encoding_size
)
)
mem[h] = maxcount
return mem[h]
label_fn = mapping_fn or auto_label
def encode(x):
nonlocal encoding_size, dtype, label_fn
o = np.zeros(encoding_size, dtype=dtype)
o[label_fn(x)] = True
return o
return _dataset_element_transforming(fn=encode)
|
48758666885969c10b5e6ef46f2d392cd06800a2
| 3,646,273
|
def is_url_relative(url):
"""
True if a URL is relative, False otherwise.
"""
return url[0] == "/" and url[1] != "/"
|
91e1cb756a4554973e53fd1f607515577bc63294
| 3,646,274
|
import numpy as np
def distance_matrix(lats, lons):
"""Compute distance matrix using great-circle distance formula
https://en.wikipedia.org/wiki/Great-circle_distance#Formulae
Parameters
----------
lats : array
Latitudes
lons : array
Longitudes
Returns
-------
dists : matrix
Entry `(i, j)` shows the great-circle distance between
point `i` and `j`, i.e. distance between `(lats[i], lons[i])`
and `(lats[j], lons[j])`.
"""
R = 6372795.477598
lats = np.array(lats)
lons = np.array(lons)
assert len(lats) == len(lons), "lats and lons should be of the same size"
assert not any(np.isnan(lats)), "nan in lats"
assert not any(np.isnan(lons)), "nan in lons"
# convert degree to radian
lats = lats * np.pi / 180.0
lons = lons * np.pi / 180.0
sins = np.sin(lats)
sin_matrix = sins.reshape(-1, 1) @ sins.reshape(1, -1)
coss = np.cos(lats)
cos_matrix = coss.reshape(-1, 1) @ coss.reshape(1, -1)
lons_matrix = lons * np.ones((len(lons), len(lons)))
lons_diff = lons_matrix - lons_matrix.T
lons_diff = np.cos(lons_diff)
# TODO: make this function more efficient
dists = R * np.arccos(sin_matrix + cos_matrix * lons_diff)
dists[np.isnan(dists)] = 0
return dists
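# Minimal usage sketch (not from the original source): approximate coordinates for
# London, Paris and Berlin; distances come back in metres since R is in metres.
demo_lats = [51.5074, 48.8566, 52.5200]
demo_lons = [-0.1278, 2.3522, 13.4050]
demo_dists = distance_matrix(demo_lats, demo_lons)
# demo_dists[0, 1] is roughly 3.4e5 (London-Paris); the diagonal is ~0.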
|
a322306d13f2e15b60a61eb1fcff95c71f005d07
| 3,646,275
|
import numpy as np
def _split_link_ends(link_ends):
"""
Examples
--------
>>> from landlab.grid.unstructured.links import _split_link_ends
>>> _split_link_ends(((0, 1, 2), (3, 4, 5)))
(array([0, 1, 2]), array([3, 4, 5]))
>>> _split_link_ends([(0, 3), (1, 4), (2, 5)])
(array([0, 1, 2]), array([3, 4, 5]))
>>> _split_link_ends((0, 3))
(array([0]), array([3]))
"""
    links = np.array(list(link_ends), ndmin=2, dtype=int)
if len(links) != 2:
links = links.transpose()
if links.size == 0:
        return (np.array([], dtype=int), np.array([], dtype=int))
else:
return links[0], links[1]
|
3aee58b5e4e928d45a33026c0b9e554c859d0d6f
| 3,646,276
|
from heapq import heappop, heappush
def dijkstra(vertex_count: int, source: int, edges):
"""Uses Dijkstra's algorithm to find the shortest path in a graph.
Args:
vertex_count: The number of vertices.
source : Vertex number (0-indexed).
edges : List of (cost, edge) (0-indexed).
Returns:
costs : List of the shortest distance.
parents: List of parent vertices.
Landau notation: O(|Edges|log|Vertices|).
See:
https://atcoder.jp/contests/abc191/submissions/19964078
https://atcoder.jp/contests/abc191/submissions/19966232
"""
hq = [(0, source)] # weight, vertex number (0-indexed)
costs = [float("inf") for _ in range(vertex_count)]
costs[source] = 0
pending = -1
parents = [pending for _ in range(vertex_count)]
while hq:
cost, vertex = heappop(hq)
if cost > costs[vertex]:
continue
for weight, edge in edges[vertex]:
new_cost = cost + weight
if new_cost < costs[edge]:
costs[edge] = new_cost
parents[edge] = vertex
heappush(hq, (new_cost, edge))
return costs, parents
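# Minimal usage sketch (not from the original source): a directed graph with four
# vertices where edges[v] lists (weight, neighbour) pairs, matching the loop above.
demo_edges = [
    [(1, 1), (4, 2)],  # from vertex 0
    [(2, 2), (6, 3)],  # from vertex 1
    [(3, 3)],          # from vertex 2
    [],                # from vertex 3
]
demo_costs, demo_parents = dijkstra(4, 0, demo_edges)
# demo_costs == [0, 1, 3, 6]; demo_parents == [-1, 0, 1, 2]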
|
d33f8dc28bf07154ffd7582a5bdd7161e195f331
| 3,646,277
|
import os
import secrets
from PIL import Image
def save_picture(form_picture):
"""
function for saving the path to the profile picture
"""
app = create_app(config_name=os.getenv('APP_SETTINGS'))
    # random hex to be used in storing the file name to avoid clashes
random_hex = secrets.token_hex(8)
    # splitext splits the filename from the file extension
    _, pic_ext = os.path.splitext(form_picture.filename)
    # pic_fn = picture filename, a concatenation of the hex name and the file extension
pic_fn = random_hex + pic_ext
# path to picture from the root to the profile_pics folder
pic_path = os.path.join(app.root_path, 'static/profile_pics', pic_fn)
output_size = (512, 512)
img = Image.open(form_picture)
img.thumbnail(output_size)
img.save(pic_path) # save the picture path to the file system
return pic_fn
|
5c725a06508d4d4316b85305ae37f30d80a90d28
| 3,646,278
|
def plot_labels(labels, lattice=None, coords_are_cartesian=False, ax=None, **kwargs):
"""
Adds labels to a matplotlib Axes
Args:
labels: dict containing the label as a key and the coordinates as value.
lattice: Lattice object used to convert from reciprocal to Cartesian coordinates
coords_are_cartesian: Set to True if you are providing.
coordinates in Cartesian coordinates. Defaults to False.
Requires lattice if False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'text'. Color defaults to blue
and size to 25.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "b"
if "size" not in kwargs:
kwargs["size"] = 25
for k, coords in labels.items():
label = k
if k.startswith("\\") or k.find("_") != -1:
label = "$" + k + "$"
off = 0.01
if coords_are_cartesian:
coords = np.array(coords)
else:
if lattice is None:
raise ValueError("coords_are_cartesian False requires the lattice")
coords = lattice.get_cartesian_coords(coords)
ax.text(*(coords + off), s=label, **kwargs)
return fig, ax
|
b0172061e043fcaef38d2503be67333862da3acf
| 3,646,279
|
def contains(poly0, poly1):
""" Does poly0 contain poly1?
As an initial implementation, returns True if any vertex of poly1 is within
poly0.
"""
# check for bounding box overlap
bb0 = (min(p[0] for p in poly0), min(p[1] for p in poly0),
max(p[0] for p in poly0), max(p[1] for p in poly0))
bb1 = (min(p[0] for p in poly1), min(p[1] for p in poly1),
max(p[0] for p in poly1), max(p[1] for p in poly1))
if ((bb0[0] > bb1[2])
or (bb0[2] < bb1[0])
or (bb0[1] > bb1[3])
or (bb0[3] < bb1[1])):
return False
# check each vertex
def _isleft(p, p0, p1):
return ((p1[0]-p0[0])*(p[1]-p0[1]) - (p[0]-p0[0])*(p1[1]-p0[1])) > 0
for p in poly1:
wn = 0
for i in range(len(poly0)-1):
p0 = poly0[i]
p1 = poly0[i+1]
if p0[1] <= p[1] < p1[1]: # upward crossing
if _isleft(p, p0, p1):
wn += 1
elif p0[1] >= p[1] > p1[1]:
if not _isleft(p, p0, p1):
wn -= 1
if wn != 0:
return True
return False
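# Minimal usage sketch (not from the original source): a closed outer square and a
# smaller square inside it. Polygons are given closed (first point repeated last),
# as the edge loop above expects.
demo_outer = [(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)]
demo_inner = [(2, 2), (4, 2), (4, 4), (2, 4), (2, 2)]
assert contains(demo_outer, demo_inner)
assert not contains(demo_inner, demo_outer)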
|
26ea4bd17a55ed05afa049a9aaab5237f0965674
| 3,646,280
|
import os
import json
def get_ids_in_annotations(scene, frame, quality):
"""
Returns a set of all ids found in annotations.
"""
annotations_path = os.path.join(scene, '%sPose3d_stage1' % quality,
'body3DScene_%s.json' % frame)
if not os.path.exists(annotations_path):
return set()
with open(annotations_path, 'r') as f:
annots = json.load(f)
return set([b['id'] for b in annots['bodies']])
|
9e754af7ec397e9a36151b7f49f69d2de6ca0128
| 3,646,281
|
def new_halberd(game_state):
"""
    A composite component representing a Halberd item.
"""
c = Composite()
set_item_components(c, game_state)
set_melee_weapon_component(c)
c.set_child(Description("Halberd",
"A long stick with a with an axe-head at one end."
"It's a useful weapon when you want to keep danger at bay."))
c.set_child(GraphicChar(None, colors.GRAY, icon.HALBERD))
c.set_child(DataPoint(DataTypes.WEIGHT, 8))
c.set_child(accuracy_item_stat(10))
c.set_child(damage_item_stat(1, 5))
c.set_child(CritChanceBonusEffect(0.1))
c.set_child(crit_multiplier_item_stat(2))
c.set_child(DefenciveAttackEffect(0.75))
c.set_child(OffenciveAttackEffect(0.20))
return c
|
1e6ccdce08a5e4e26c6dc8d09db38ef4b6d7b2f0
| 3,646,282
|
from typing import List
def get_regional_services(service_list: List[AWSService] = None) -> List[AWSService]:
"""List all services which are tied to specific regions."""
services = service_list or get_services()
return [s for s in services if s.is_regional]
|
d856acfc24430102ccb72a76eedbc47ace842894
| 3,646,283
|
import os
import errno
def _ReapUntilProcessExits(monitored_pid):
"""Reap processes until |monitored_pid| exits, then return its exit status.
This will also reap any other processes ready to be reaped immediately after
|monitored_pid| is reaped.
"""
pid_status = None
options = 0
while True:
try:
(pid, status, _) = os.wait3(options)
# Capture status of monitored_pid so we can return it.
if pid == monitored_pid:
pid_status = status
# Switch to nohang so we can churn through the zombies w/out getting
# stuck on live orphaned processes.
options = os.WNOHANG
# There may be some more child processes still running, but none of them
# have exited/finished. Don't wait for those as we'll throw an error in
# the caller.
if pid_status is not None and pid == 0 and status == 0:
break
except OSError as e:
if e.errno == errno.ECHILD:
break
elif e.errno != errno.EINTR:
raise
return pid_status
|
ef540cc60634fe13ddc58b434f7fabe01109ddda
| 3,646,284
|
import pandas as pd
def f_setup_config(v_config_filename):
"""This function read the configuration file"""
df_conf_file = pd.read_csv(v_config_filename, delimiter="|", header=0)
api_key = df_conf_file[df_conf_file.CONFIG_VAR == 'API_KEY']['VALUE'].values[0]
data_dir = df_conf_file[df_conf_file.CONFIG_VAR == 'DATA_DIR']['VALUE'].values[0]
json_log_dir = df_conf_file[df_conf_file.CONFIG_VAR == 'JSON_DIR']['VALUE'].values[0]
gcs_bucket = df_conf_file[df_conf_file.CONFIG_VAR == 'GCP_BUCKET']['VALUE'].values[0]
# gcs_service_account_key = df_conf_file[df_conf_file.CONFIG_VAR == 'GCP_SERVICE_ACOUNT_KEY']['VALUE'].values[0]
# aws_key = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_ACCESS_KEY']['VALUE'].values[0]
# aws_secret_key = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_SECRET_ASSES_KEY']['VALUE'].values[0]
aws_s3 = df_conf_file[df_conf_file.CONFIG_VAR == 'AWS_S3_BUCKET']['VALUE'].values[0]
export_csv = df_conf_file[df_conf_file.CONFIG_VAR == 'EXPORT_CSV']['VALUE'].values[0]
cleanup_days = df_conf_file[df_conf_file.CONFIG_VAR == 'CLEANUP_DAYS']['VALUE'].values[0]
# return api_key, gcs_bucket, gcs_service_account_key, aws_key, aws_secret_key, \
# aws_s3, data_dir, json_log_dir, export_csv, cleanup_days
return api_key, gcs_bucket, aws_s3, data_dir, json_log_dir, export_csv, cleanup_days
|
b2e9a8e822a2c582549055184cc8096f174fdb3b
| 3,646,285
|
from django.contrib.auth import get_user_model
def choose_username(email):
"""
Chooses a unique username for the provided user.
Sets the username to the email parameter umodified if
possible, otherwise adds a numerical suffix to the email.
"""
def get_suffix(number):
return "" if number == 1 else "_"+str(number).zfill(3)
user_model = get_user_model()
num = 1
while user_model.objects.filter(username=email+get_suffix(num)).exists():
num += 1
return email + get_suffix(num)
|
594c060df6df5c89c7c08a2e3979960866cc5688
| 3,646,286
|
def lms2rgb(image):
"""
Convert an array of pixels from the LMS colorspace to the RGB colorspace. This function assumes that
each pixel in an array of LMS values.
:param image: An np.ndarray containing the image data
:return: An np.ndarray containing the transformed image data
"""
return np.clip(apply_matrix_to_image(lms_matrix_inverse, image), 0.0, 1.0)
|
736d7101a4c4256725fd4f09c6a453c418c1ae81
| 3,646,287
|
def __apply_to_property_set (f, property_set):
""" Transform property_set by applying f to each component property.
"""
properties = feature.split (property_set)
return '/'.join (f (properties))
|
5091065f90b602a775c24eca9e2ab3bc6861e0c8
| 3,646,288
|
from typing import Dict
import tarfile
import os
def _zip_index(job_context: Dict) -> Dict:
"""Zips the index directory into a single .tar.gz file.
This makes uploading and retrieving the index easier since it will
only be a single file along with compressing the size of the file
during storage.
"""
temp_post_path = job_context["gtf_file"].get_temp_post_path(job_context["job_dir_prefix"])
try:
with tarfile.open(temp_post_path, "w:gz") as tar:
tar.add(job_context["output_dir"],
arcname=os.path.basename(job_context["output_dir"]))
    except Exception:
logger.exception("Exception caught while zipping index directory %s",
temp_post_path,
processor_job=job_context["job_id"],
batch=job_context["batches"][0].id)
job_context["gtf_file"].remove_temp_directory(job_context["job_dir_prefix"])
failure_template = "Exception caught while zipping index directory {}"
job_context["job"].failure_reason = failure_template.format(temp_post_path)
job_context["success"] = False
return job_context
job_context["files_to_upload"] = [job_context["gtf_file"]]
job_context["success"] = True
return job_context
|
675061fe65c97ddabb159cd4d960c8cdafca27a3
| 3,646,289
|
def _return_feature_statistics(feature_number: int, feature_value: float, names: list):
"""
Arguments:
feature_number (int) -- number of the feature
feature_value (float) -- value of the feature (used to compute color)
names (list) -- list of feature names
    Returns:
        percentile score (int), hex color (str), and the feature category from feature_cat_dict
    """
percentile_score = int(
stats.percentileofscore(TRAIN_DATA.T[feature_number], feature_value)
)
color = matplotlib.colors.to_hex(MAPPABLE.to_rgba(percentile_score))
# ToDo: Maybe not only return the category but also the color which we used in the article
return percentile_score, color, feature_cat_dict[names[feature_number]]
|
9cc678c1180e6533bd803b00388db84d085030b3
| 3,646,290
|
def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):
"""
Efficiency for operation of a SOFC (based on LHV of NG) including all auxiliary losses
Valid for Q_load in range of 1-10 [kW_el]
Modeled after:
- **Approach A (NREL Approach)**:
http://energy.gov/eere/fuelcells/distributedstationary-fuel-cell-systems
and
NREL : p.5 of [M. Zolot et al., 2004]_
- **Approach B (Empiric Approach)**: [Iain Staffell]_
:type Q_load_W : float
:param Q_load_W: Load at each time step
:type Q_design_W : float
:param Q_design_W: Design Load of FC
:type phi_threshold : float
:param phi_threshold: where Maximum Efficiency is reached, used for Approach A
:type approach_call : string
    :param approach_call: choose "A" or "B": A = NREL-Approach, B = Empiric Approach
:rtype eta_el : float
:returns eta_el: electric efficiency of FC (Lower Heating Value), in abs. numbers
:rtype Q_fuel : float
:returns Q_fuel: Heat demand from fuel (in Watt)
..[M. Zolot et al., 2004] M. Zolot et al., Analysis of Fuel Cell Hybridization and Implications for Energy Storage
Devices, NREL, 4th International Advanced Automotive Battery.
http://www.nrel.gov/vehiclesandfuels/energystorage/pdfs/36169.pdf
..[Iain Staffell, 2009] Iain Staffell, For Domestic Heat and Power: Are They Worth It?, PhD Thesis, Birmingham:
University of Birmingham. http://etheses.bham.ac.uk/641/1/Staffell10PhD.pdf
"""
phi = 0.0
## Approach A - NREL Approach
if approach_call == "A":
phi = float(Q_load_W) / float(Q_design_W)
eta_max = 0.425 # from energy.gov
if phi >= phi_threshold: # from NREL-Shape
eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)
if phi < phi_threshold:
if phi <= 118 / 520.0 * phi_threshold:
eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))
if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:
eta_el = eta_max * 2 / 3.0 + \
eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))
if phi > 0.5 * phi_threshold and phi < phi_threshold:
eta_el = eta_max * (2 / 3.0 + 0.25) + \
1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))
eta_therm_max = 0.45 # constant, after energy.gov
if phi < phi_threshold:
eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)
else:
eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))
## Approach B - Empiric Approach
if approach_call == "B":
if Q_design_W > 0:
phi = float(Q_load_W) / float(Q_design_W)
else:
phi = 0
eta_el_max = 0.39
eta_therm_max = 0.58 # * 1.11 as this source gives eff. of HHV
eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4
eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2
eta_el = eta_el_max * eta_el_score
eta_therm = eta_therm_max * eta_therm_score
if phi < 0.2:
eta_el = 0
return eta_el, eta_therm
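# Minimal usage sketch (not from the original source): evaluate the empiric
# approach at half load; the numbers are illustrative only.
demo_eta_el, demo_eta_therm = calc_eta_FC(Q_load_W=5000.0, Q_design_W=10000.0,
                                          phi_threshold=0.5, approach_call="B")
# At phi = 0.5 this gives roughly eta_el ~ 0.35 and eta_therm ~ 0.53.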
|
0cd14d976d773dc34d7ea96e80db4267e33aac1f
| 3,646,291
|
from typing import Tuple
def erdos_renyi(
num_genes: int, prob_conn: float, spec_rad: float = 0.8
) -> Tuple[np.ndarray, float]:
"""Initialize an Erdos Renyi network as in Sun–Taylor–Bollt 2015.
If the spectral radius is positive, the matrix is normalized
to a spectral radius of spec_rad and the scale shows the
normalization. If the spectral radius is zero, the returned
matrix will have entries of 0, 1, and -1, and the scale is set
to zero.
Args:
num_genes: Number of genes/nodes.
prob_conn: Probability of connection.
spec_rad: The desired spectral radius.
Returns:
Adjacency matrix and its scale.
"""
signed_edges = erdos_renyi_ternary(num_genes, prob_conn)
return scale_by_spec_rad(signed_edges, spec_rad)
|
87e29376ec79ea9198bb3c668fdc31fc61216a26
| 3,646,292
|
import _ctypes
def IMG_LoadTextureTyped_RW(renderer, src, freesrc, type):
"""Loads an image file from a file object to a texture as a specific format.
This function allows you to explicitly specify the format type of the image
to load. The different possible format strings are listed in the
documentation for :func:`IMG_LoadTyped_RW`.
See :func:`IMG_LoadTexture` for more information.
Args:
renderer (:obj:`SDL_Renderer`): The SDL rendering context with which to
create the texture.
src (:obj:`SDL_RWops`): The file object from which to load the image.
freesrc (int): If non-zero, the input file object will be closed and
freed after it has been read.
type (bytes): A bytestring indicating the image format with which the
file object should be loaded.
Returns:
POINTER(:obj:`SDL_Texture`): A pointer to the new texture containing
the image, or a null pointer if there was an error.
"""
return _ctypes["IMG_LoadTextureTyped_RW"](renderer, src, freesrc, type)
|
ef9f963e71b7419ec790bd3fdb06eb470d30972b
| 3,646,293
|
import numpy as np
def soft_l1(z: np.ndarray, f_scale):
"""
rho(z) = 2 * ((1 + z)**0.5 - 1)
The smooth approximation of l1 (absolute value) loss. Usually a good choice for robust least squares.
:param z: z = f(x)**2
:param f_scale: rho_(f**2) = C**2 * rho(f**2 / C**2), where C is f_scale
:return:
"""
loss = np.empty((3, z.shape[0]), dtype=np.float64)
c2 = f_scale * f_scale
ic2 = 1.0 / c2
z = ic2 * z
sqrt_1pz = np.sqrt(z + 1)
loss[0, :] = c2 * 2 * (sqrt_1pz - 1)
loss[1, :] = 1 / sqrt_1pz
loss[2, :] = -ic2 * 0.5 * np.power(loss[1, :], 3)
return loss
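# Minimal usage sketch (not from the original source): z holds squared residuals
# f(x)**2; row 0 of the result is rho(z), rows 1 and 2 are its first and second
# derivatives with respect to z.
demo_z = np.array([0.0, 1.0, 100.0])
demo_loss = soft_l1(demo_z, f_scale=1.0)
# demo_loss[0] == [0.0, 2*(sqrt(2) - 1), 2*(sqrt(101) - 1)]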
|
95813cd59c99ab94e6b4693237dc85f5b7d31b14
| 3,646,294
|
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
    :param float hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 = image.width - 1, image.height - 1
p4 = image.width - 1, 0
if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
        # It is a full tile, so return a hit box covering the whole image
p1 = (-image.width / 2, -image.height / 2)
p2 = (image.width / 2, -image.height / 2)
p3 = (image.width / 2, image.height / 2)
p4 = (-image.width / 2, image.height / 2)
return p1, p2, p3, p4
# Get the bounding box
logo_bb = pymunk.BB(-1, -1, image.width, image.height)
# Set of lines that trace the image
line_set = pymunk.autogeometry.PolylineSet()
# How often to sample?
downres = 1
horizontal_samples = int(image.width / downres)
vertical_samples = int(image.height / downres)
# Run the trace
# Get back one or more sets of lines covering stuff.
line_sets = pymunk.autogeometry.march_soft(
logo_bb,
horizontal_samples, vertical_samples,
99,
sample_func)
if len(line_sets) == 0:
return []
selected_line_set = line_sets[0]
selected_range = None
    if len(line_sets) > 1:
        # We have more than one line set. Try and find one that covers most of
        # the sprite.
        for line in line_sets:
min_x = None
min_y = None
max_x = None
max_y = None
for point in line:
if min_x is None or point.x < min_x:
min_x = point.x
if max_x is None or point.x > max_x:
max_x = point.x
if min_y is None or point.y < min_y:
min_y = point.y
if max_y is None or point.y > max_y:
max_y = point.y
if min_x is None or max_x is None or min_y is None or max_y is None:
raise ValueError("No points in bounding box.")
my_range = max_x - min_x + max_y + min_y
if selected_range is None or my_range > selected_range:
selected_range = my_range
selected_line_set = line
# Reduce number of vertices
# original_points = len(selected_line_set)
selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,
hit_box_detail)
# downsampled_points = len(selected_line_set)
    # Convert to normal points, offset so 0,0 is center, flip the y
hh = image.height / 2
hw = image.width / 2
points = []
for vec2 in selected_line_set:
point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)
points.append(point)
if len(points) > 1 and points[0] == points[-1]:
points.pop()
# print(f"{sprite.texture.name} Line-sets={len(line_set)}, Original points={original_points}, Downsampled points={downsampled_points}")
return points
|
dd74e18fac1fe96728837ce8af62c38461592baa
| 3,646,295
|
async def open_local_endpoint(
host="0.0.0.0", port=0, *, queue_size=None, **kwargs
):
"""Open and return a local datagram endpoint.
An optional queue size argument can be provided.
Extra keyword arguments are forwarded to `loop.create_datagram_endpoint`.
"""
return await open_datagram_endpoint(
host,
port,
remote=False,
endpoint_factory=lambda: LocalEndpoint(queue_size),
**kwargs
)
|
a3b03408bbe35972b0588912a0628df2be9cddc5
| 3,646,296
|
from typing import Union
def parse_bool(value: Union[str, bool]) -> bool:
"""Parse a string value into a boolean.
Uses the sets ``CONSIDERED_TRUE`` and ``CONSIDERED_FALSE`` to determine the boolean value of the string.
Args:
value (Union[str, bool]): the string to parse (is converted to lowercase and stripped of surrounding whitespace)
Raises:
ValueError: if the string cannot reliably be determined true or false
Returns:
bool: the parsed result
"""
if value is True or value is False:
return value
val = value.strip().lower()
if val in CONSIDERED_TRUE:
return True
if val in CONSIDERED_FALSE:
return False
raise ValueError(f"Value {value} is not compatible with boolean!")
|
86bb61b82eb71627f3563584779f3a17ea1bc8b7
| 3,646,297
|
from datetime import datetime, timezone
import traceback
import requests
def send_rocketchat_notification(text: str, exc_info: Exception) -> dict:
""" Sends message with specified text to configured Rocketchat channel.
We don't want this method to raise any exceptions, as we don't want to
unintentionally break any kind of error management flow. (We only use
rocket chat notification when something goes wrong).
If you want to know if this method worked or not, you'll have to inspect
the response.
"""
full_message = f"{datetime.now(tz=timezone.utc).isoformat()}\n{text}\n\
{config.get('HOSTNAME')}: {exc_info}\n\
{traceback.format_exception(etype=type(exc_info),value=exc_info,tb=exc_info.__traceback__)}"
result = None
try:
response = requests.post(
config.get('ROCKET_URL_POST_MESSAGE'),
headers={
'X-Auth-Token': config.get('ROCKET_AUTH_TOKEN'),
'X-User-Id': config.get('ROCKET_USER_ID'),
'Content-Type': 'application/json'
},
json={
'channel': config.get('ROCKET_CHANNEL'),
'text': full_message
}
)
result = response.json()
except Exception as exception: # pylint: disable=broad-except
# not doing exc_info=exception - as this causes a lot of noise, and we're more interested
# in the main code!
logger.error('failed to send rocket chat notification %s', exception)
return result
|
c466621cfd8ead8f6773bc1c461fb779c0374937
| 3,646,298
|
def get_number_of_forms_all_domains_in_couch():
"""
Return number of non-error, non-log forms total across all domains
specifically as stored in couch.
(Can't rewrite to pull from ES or SQL; this function is used as a point
of comparison between row counts in other stores.)
"""
all_forms = (
XFormInstance.get_db().view('couchforms/by_xmlns').one()
or {'value': 0}
)['value']
device_logs = (
XFormInstance.get_db().view('couchforms/by_xmlns',
key=DEVICE_LOG_XMLNS).one()
or {'value': 0}
)['value']
return all_forms - device_logs
|
a30f5e6410dc3b91c3a962169fad20e2a8d4a8fb
| 3,646,299
|