def linspace(
start,
stop,
num=50,
endpoint=True,
retstep=False,
dtype=None,
split=None,
device=None,
comm=None,
):
"""
Returns num evenly spaced samples, calculated over the interval [start, stop]. The endpoint of the interval can
optionally be excluded.
Parameters
----------
start: scalar, scalar-convertible
        The starting value of the sample interval; may be a sequence if convertible to a scalar
stop: scalar, scalar-convertible
        The end value of the sample interval, unless endpoint is set to False. In that case, the sequence consists of all but the
last of num + 1 evenly spaced samples, so that stop is excluded. Note that the step size changes when endpoint
is False.
num: int, optional
Number of samples to generate, defaults to 50. Must be non-negative.
endpoint: bool, optional
If True, stop is the last sample, otherwise, it is not included. Defaults to True.
retstep: bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype: dtype, optional
The type of the output array.
split: int, optional
The axis along which the array is split and distributed, defaults to None (no distribution).
device : str, ht.Device or None, optional
Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device).
comm: Communication, optional
Handle to the nodes holding distributed parts or copies of this tensor.
Returns
-------
samples: ht.DNDarray
There are num equally spaced samples in the closed interval [start, stop] or the half-open interval
[start, stop) (depending on whether endpoint is True or False).
step: float, optional
Size of spacing between samples, only returned if retstep is True.
Examples
--------
>>> ht.linspace(2.0, 3.0, num=5)
tensor([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> ht.linspace(2.0, 3.0, num=5, endpoint=False)
tensor([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> ht.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
"""
# sanitize input parameters
start = float(start)
stop = float(stop)
num = int(num)
if num <= 0:
raise ValueError(
"number of samples 'num' must be non-negative integer, but was {}".format(num)
)
step = (stop - start) / max(1, num - 1 if endpoint else num)
# sanitize device and comm
device = devices.sanitize_device(device)
comm = sanitize_comm(comm)
# infer local and global shapes
gshape = (num,)
split = sanitize_axis(gshape, split)
offset, lshape, _ = comm.chunk(gshape, split)
balanced = True
# compose the local tensor
start += offset * step
stop = start + lshape[0] * step - step
data = torch.linspace(start, stop, lshape[0], device=device.torch_device)
if dtype is not None:
data = data.type(types.canonical_heat_type(dtype).torch_type())
# construct the resulting global tensor
ht_tensor = dndarray.DNDarray(
data, gshape, types.canonical_heat_type(data.dtype), split, device, comm, balanced
)
if retstep:
return ht_tensor, step
return ht_tensor
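
# Illustrative sketch (not part of the original source): how each process's local
# chunk of the distributed linspace follows from the global parameters above. The
# even split below stands in for comm.chunk(); `local_linspace_chunk`, `rank` and
# `n_procs` are made-up names.
import torch

def local_linspace_chunk(start, stop, num, rank, n_procs, endpoint=True):
    step = (stop - start) / max(1, num - 1 if endpoint else num)
    counts = [num // n_procs + (1 if r < num % n_procs else 0) for r in range(n_procs)]
    offset = sum(counts[:rank])
    lshape = counts[rank]
    local_start = start + offset * step
    local_stop = local_start + lshape * step - step
    return torch.linspace(local_start, local_stop, lshape)

# Concatenating the per-rank chunks reproduces the global result:
chunks = [local_linspace_chunk(2.0, 3.0, 5, r, 2) for r in range(2)]
assert torch.allclose(torch.cat(chunks), torch.linspace(2.0, 3.0, 5))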
| 19,500
|
def _h1_cmp_chi2_ ( h1 ,
h2 ,
density = False ) :
"""Compare histograms by chi2
>>> h1 = ... ## the first histo
>>> h2 = ... ## the second histo (or function or anything else)
>>> chi2ndf , probability = h1.cmp_chi2 ( h2 )
"""
assert isinstance ( h1 , ROOT.TH1 ) and 1 == h1.dim () , \
"cmp_dist: invalid type of h1 %s/%s" % ( h1 , type ( h1 ) )
if isinstance ( h2 , ROOT.TH1 ) :
        assert 1 == h2.dim () , "cmp_chi2: invalid type of h2 %s/%s" % ( h2 , type ( h2 ) )
if density :
h1_ = h1.density() if hasattr ( h1 , 'density' ) else h1
h2_ = h2.density() if hasattr ( h2 , 'density' ) else h2
cmp = _h1_cmp_chi2_ ( h1_ , h2_ , density = False )
if h1_ is not h1 : del h1_
if h2_ is not h2 : del h2_
return cmp
chi2 = 0.0
ndf = 0
for i , x , v1 in h1.items() :
v2 = h2 ( x.value() )
chi2 += v1.chi2 ( v2 )
ndf += 1
c2ndf = chi2/ndf
return c2ndf, ROOT.TMath.Prob ( chi2 , ndf )
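
# Illustrative sketch (not part of the original source, no ROOT/ostap needed): the
# same chi2/ndf comparison on plain arrays of bin contents, using only the first
# histogram's bin errors. ROOT.TMath.Prob(chi2, ndf) is the upper-tail chi2
# probability, i.e. scipy's sf(). Names below are made up.
import numpy as np
from scipy.stats import chi2 as chi2_dist

def cmp_chi2_plain(values1, errors1, values2):
    values1, errors1, values2 = map(np.asarray, (values1, errors1, values2))
    chi2 = np.sum((values1 - values2) ** 2 / errors1 ** 2)
    ndf = len(values1)
    return chi2 / ndf, chi2_dist.sf(chi2, ndf)

c2ndf, prob = cmp_chi2_plain([10.0, 12.0, 9.0], [3.2, 3.5, 3.0], [11.0, 11.5, 9.5])
print(c2ndf, prob)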
| 19,501
|
def _questionnaire_metric(name, col):
"""Returns a metrics SQL aggregation tuple for the given key/column."""
return _SqlAggregation(
name,
"""
SELECT {col}, COUNT(*)
FROM participant_summary
WHERE {summary_filter_sql}
GROUP BY 1;
""".format(
col=col, summary_filter_sql=_SUMMARY_FILTER_SQL
),
lambda v: QuestionnaireStatus.lookup_by_number(v).name,
None,
)
| 19,502
|
def xyzToAtomsPositions(xyzFileOrStr):
"""
Returns atom positions (order) given a molecule in an xyz format.
Inchi-based algorithm.
Use this function to set the atoms positions in a reference
molecule. The idea is to assign the positions once and to never
change them again.
Arguments:
----------
xyzFileOrStr : str
input xyz molecule (either file path or xyz string)
Returns:
----------
atomsPositions: dict
dictionary whose keys correspond to atoms positions in xyz
file and values to the newly assigned positions
"""
# get inchi with extra auxiliary log
if ioutils.fileExists(xyzFileOrStr): xyzFileOrStr= ioutils.readFile(xyzFileOrStr)
xyzFileOrStr = xyzToIntertialFrame(xyzFileOrStr)
    # swap all hydrogens with a heavy atom, here I picked Cl, but any other halogen atom
    # should also work. this atom swap is to force inchi to consider all the atoms in its
    # connectivity algorithm. note that atoms from the first group (e.g. Na, Li) won't work
    # as they produce solids and thus the inchi string is significantly changed
xyzFileOrStr = '\n'.join([xyz_line.replace('H','Cl') for xyz_line in xyzFileOrStr.split('\n')])
inchiWithAux = obconverter.obConvert(inputMol=xyzFileOrStr,inputMolFormat='xyz',
outputMolFormat='inchi', options=['-xa'])
inchi, inchiAux = inchiWithAux.split('\n')
# find connectivity info in the inchi string - used to detect the
# presence of heavy atoms.
atomsInchiConnectivity = re.search(r'/c(\d+?\*)?(.*?)(?=/|$)',inchi)
# read the mapping between heavy atoms (+ lone hydrogens) in xyz and inchi
# from the auxiliary log
atomsInchiAuxMap = re.search(r'/N:(.*?)(?=/|$)',inchiAux)
atomsInchiAuxEquivMap = re.search(r'/E:(.*?)(?=/|$)',inchiAux)
# create the rdkit mol object
rdkitMolFromMol = xyzconverters.xyzToMolToRdkitMol(xyzFileOrStr, removeHs=False)
numAtoms = rdkitMolFromMol.GetNumAtoms()
# initialise the atoms position dict
atomsPositions = {k:None for k in range(numAtoms)}
nextAtomId = 0
mol_frags = rdkitmolutils.rdkitMolToMolFrags(rdkitMolFromMol)
if mol_frags:
print(f'Warning: Provided xyz file contains {len(mol_frags)} molecular fragments.')
#return atomsPositions
# get the atoms based on the inchi connectivity info
if atomsInchiConnectivity is not None:
# process the atomsInchiAuxMap and extract the atoms mapping
atomsInchiAuxMap= atomsInchiAuxMap.groups()[0] \
.replace('/','').replace(';',',').split(',')
atomsInchiMatch = {int(atomId)-1: i
for i, atomId in enumerate(atomsInchiAuxMap)}
atomsInchiMatchList = list(map(lambda x: int(x)-1, atomsInchiAuxMap))
if atomsInchiMatch:
# now disambiguate any equivalent atoms
if atomsInchiAuxEquivMap:
atomsInchiAuxEquivMap= atomsInchiAuxEquivMap.groups()[0] \
.replace('/','').replace(')(','#').replace(')','') \
.replace('(','').split('#')
for i in range(len(atomsInchiAuxEquivMap)):
atomsInchiAuxEquivMap[i] = list(map(lambda x: int(x)-1, atomsInchiAuxEquivMap[i].split(',')))
atomsInchiAuxEquivMap[i] = list(map(lambda x: atomsInchiMatchList[x], atomsInchiAuxEquivMap[i]))
for equivAtomsList in atomsInchiAuxEquivMap:
atomsXYZ = rdkitmolutils.getAtomsXYZs(rdkitMolFromMol, equivAtomsList)
atomsXYZ = (atomsXYZ * 1e7).astype(int)
atomsX = atomsXYZ[:,0].tolist()
atomsY = atomsXYZ[:,1].tolist()
atomsZ = atomsXYZ[:,2].tolist()
_atomsDist = rdkitmolutils.rdkitSumAllAtomsDistFromAtoms(rdkitMolFromMol, equivAtomsList)
_atomsDist = [int(dist * 1e7) for dist in _atomsDist]
# use four invariants to disambiguate atoms
equivAtomsOrder = np.lexsort((atomsZ,atomsY,atomsX,_atomsDist)).tolist()
currentAtomsOrder = sorted([atomsInchiMatch[equivAtomId] for equivAtomId in equivAtomsList])
for equivAtomPos in equivAtomsOrder:
atomsInchiMatch[equivAtomsList[equivAtomPos]] = currentAtomsOrder.pop(0)
# add the atoms positions to the overall atomsPosition dictionary
atomsPositions = {**atomsPositions, **atomsInchiMatch}
nextAtomId = len(atomsInchiMatch)
    # assign positions to any atoms that are left
if nextAtomId < numAtoms:
loneAtomsIds = [atomId
for atomId, refId in atomsPositions.items()
if refId is None]
loneAtomsMap = {}
atomsXYZ = rdkitmolutils.getAtomsXYZs(rdkitMolFromMol, loneAtomsIds)
atomsXYZ = (atomsXYZ * 1e7).astype(int)
atomsX = atomsXYZ[:,0].tolist()
atomsY = atomsXYZ[:,1].tolist()
atomsZ = atomsXYZ[:,2].tolist()
_atomsDist = rdkitmolutils.rdkitSumAllAtomsDistFromAtoms(rdkitMolFromMol, loneAtomsIds)
_atomsDist = [int(dist * 1e7) for dist in _atomsDist]
loneAtomsOrder = np.lexsort((atomsZ,atomsY,atomsX,_atomsDist)).tolist()
for loneAtomPos in loneAtomsOrder:
loneAtomsMap[loneAtomsIds[loneAtomPos]] = nextAtomId
nextAtomId += 1
# add the remaining positions to the overall atoms positions
atomsPositions = {**atomsPositions, **loneAtomsMap}
# check for duplicate and None values at the end
hasDuplicates = len(atomsPositions.values()) > len(set(atomsPositions.values()))
hasNones = None in atomsPositions.values()
if hasDuplicates or hasNones:
        print('Error: atom canonical positions algorithm has failed.')
atomsPositions= {}
return atomsPositions
| 19,503
|
def read_LFW(fname):
    """ read LFW dataset annotation information with names and labels... """
    # dir = os.getcwd()
    # os.chdir(dirname)
    if not os.path.exists(fname):
        raise ValueError('LFW file: ' + fname + ' does not exist')
    lines = open(fname).readlines()
    print(lines)
    for l in lines:
        print(l)
| 19,504
|
async def modify_video_favorite_list(
media_id: int,
title: str,
introduction: str = '',
private: bool = False,
credential: Credential = None):
"""
修改视频收藏夹信息。
Args:
media_id (int) : 收藏夹 ID.
title (str) : 收藏夹名。
introduction (str, optional) : 收藏夹简介. Defaults to ''.
private (bool, optional) : 是否为私有. Defaults to False.
credential (Credential, optional): Credential. Defaults to None.
Returns:
dict: API 调用结果。
"""
if credential is None:
credential = Credential()
credential.raise_for_no_sessdata()
credential.raise_for_no_bili_jct()
api = API["operate"]["modify"]
data = {
"title": title,
"intro": introduction,
"privacy": 1 if private else 0,
"cover": "",
"media_id": media_id
}
return await request("POST", api["url"], data=data, credential=credential)
| 19,505
|
def get_salesforce_log_files():
"""Helper function to get a list available log files"""
return {
"totalSize": 2,
"done": True,
"records": [
{
"attributes": {
"type": "EventLogFile",
"url": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ"
},
"Id": "0ATD000000001bROAQ",
"EventType": "API",
"LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ/LogFile",
"LogDate": "2014-03-14T00:00:00.000+0000",
"LogFileLength": 2692.0
},
{
"attributes": {
"type": "EventLogFile",
"url": "/services/data/v32.0/sobjects/EventLogFile/0ATD000000001SdOAI"
},
"Id": "0ATD000000001SdOAI",
"EventType": "API",
"LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001SdOAI/LogFile",
"LogDate": "2014-03-13T00:00:00.000+0000",
"LogFileLength": 1345.0
}
]
}
| 19,506
|
def remove_true_false_edges(dict_snapshots, dict_weights, index):
"""
Remove chosen true edges from the graph so the embedding could be calculated without them.
:param dict_snapshots: Dict where keys are times and values are a list of edges for each time stamp.
:param dict_weights: Dict where keys are times and values are list of weights for each edge in the time stamp, order
corresponds to the order of edges in dict_snapshots.
    :param index: Index of the pivot time: snapshots strictly before the pivot index form the train set and are kept;
                  snapshots from the pivot index onwards form the test set and are removed.
:return: Updated dict_snapshots and dict_weights.
"""
times = list(dict_snapshots.keys())
mapping = {i: times[i] for i in range(len(times))}
keys = list(mapping.keys())
for key in keys:
if key < index:
continue
else:
del dict_snapshots[mapping[key]]
del dict_weights[mapping[key]]
return dict_snapshots, dict_weights
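
# Minimal usage sketch with made-up data (not part of the original source):
# snapshots at positions >= index are dropped from both dictionaries.
snapshots = {"t0": [(1, 2)], "t1": [(2, 3)], "t2": [(3, 4)]}
weights = {"t0": [1.0], "t1": [0.5], "t2": [2.0]}
train_snapshots, train_weights = remove_true_false_edges(snapshots, weights, index=2)
print(sorted(train_snapshots))  # ['t0', 't1'] -- 't2' onwards removed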
| 19,507
|
def _make_mutable(obj):
"""
Context manager that makes the tree mutable.
"""
with _MUTABLE_CONTEXT.update(prev_mutable={}):
try:
apply(_make_mutable_fn, obj, inplace=True)
yield
finally:
apply(_revert_mutable_fn, obj, inplace=True)
| 19,508
|
def addFavoriteDir(name:str, directory:str, type:str=None, icon:str=None, tooltip:str=None, key:str=None):
"""
addFavoriteDir(name, directory, type, icon, tooltip, key) -> None.
Add a path to the file choosers favorite directory list. The path name can contain environment variables which will be expanded when the user clicks the favourites button
@param name: Favourite path entry ('Home', 'Desktop', etc.).
@param directory: FileChooser will change to this directory path.
@param type: Optional bitwise OR combination of nuke.IMAGE, nuke.SCRIPT, nuke.FONT or nuke.GEO.
@param icon: Optional filename of an image to use as an icon.
@param tooltip: Optional short text to explain the path and the meaning of the name.
@param key: Optional shortcut key.
@return: None.
"""
return None
| 19,509
|
def save(data):
"""Save cleanup annotations."""
data_and_frames = data.split("_")
data = data_and_frames[0]
frames = data_and_frames[1]
if len(data) == 1:
removed = []
else:
removed = [int(f) for f in data[1:].split(':')]
frames = [int(f) for f in frames[:].split(':')]
#fname = APP.basedir + '/' + APP.dlist[APP.targetid] + '/planttag.npz'
fname = APP.basedir + '/' + APP.dlist[APP.targetid] + '/' + APP.tag_name + '.npz'
if len(removed) == 0: # Before: if len(removed) == 0
idx = np.zeros((np.amax(APP.lbls) + 1,), APP.lbls.dtype)
_id = 1
for i in range(1, len(idx)):
if i not in removed:
idx[i] = _id
_id = _id + 1
lbls = idx[APP.lbls]
else:
lbls = APP.lbls
for j in range(len(removed)):
rem = removed[j]
frame = frames[j]
# Remove that label from the frame onwards:
if APP.tag_type == "deletion-onwards":
lbls[:,:,frame:][lbls[:,:,frame:] == rem] = 0
elif APP.tag_type == "deletion-upto":
lbls[:,:,:frame][lbls[:,:,:frame] == rem] = 0
elif APP.tag_type == "deletion-single":
lbls[:,:,frame][lbls[:,:,frame] == rem] = 0
#
tag = [-1]*lbls.max()
for i in range(len(removed)):
tag[removed[i]] = frames[i]
npz = {'removed': np.asarray(removed, np.int16), 'labels': lbls, "frames": np.asarray(frames, np.int16), \
APP.tag_name: tag}
np.savez_compressed(fname, **npz)
return ' '
| 19,510
|
def search(search_term):
"""Searches the FRED database using a user provided
search term."""
continue_search = True
page_num = 1
while continue_search:
complete_search_term = ' '.join(search_term)
metadata = api.search_fred(complete_search_term, page=page_num)
data = metadata['data']
current_page = metadata['current_page']
total_pages = metadata['total_pages']
click.clear()
printable_table= table.make_table(data)
click.echo(printable_table)
click.echo()
click.echo(f'Page: {current_page} / {total_pages} | next page (n), prev page (b), exit (e) ')
click.echo()
character_pressed = click.getchar()
if character_pressed == 'n' and current_page != total_pages:
page_num += 1
elif character_pressed == 'b' and current_page != 1:
page_num -= 1
elif character_pressed == 'e':
continue_search = False
else:
print('Incorrect input, please try again. Press any key to try again.')
click.pause()
pass
| 19,511
|
def download_data(date_string):
"""Download weekly prices in XLS and save them to file"""
main_url = 'http://fcainfoweb.nic.in/PMSver2/Reports/Report_Menu_web.aspx'
params = 'MainContent_ToolkitScriptManager1_HiddenField=%3B%3BAjaxControlToolkit%2C+Version%3D4.1.51116.0%2C+Culture%3Dneutral%2C+PublicKeyToken%3D28f01b0e84b6d53e%3Aen-US%3Afd384f95-1b49-47cf-9b47-2fa2a921a36a%3A475a4ef5%3Aaddc6819%3A5546a2b%3Ad2e10b12%3Aeffe2a26%3A37e2e5c9%3A5a682656%3Ac7029a2%3Ae9e598a9&__EVENTTARGET=&__EVENTARGUMENT=&__LASTFOCUS=&__VIEWSTATE=nAMJC3oD4TO5c%2B7jRiFrnarRMv05i2lEZsHM0VriP9iU1WnwdzPV8exn2HaN0Pdpqabt5BHGcBqsu1HG28ilmfBCvWehOqVKrbzbTksxY9OriToU7o5%2Fg0Rxp8sEPjxUFYjwo10BjIRiBuuu80dXQR3a023BYcloCxn0OeYH1ceGHo%2BteEphPeiVlgJ3UgGb7D1IB9VToTL3JZ%2Bi8CSwOcwfCZWVcQv8e0JJ5Ylocffk0MtEfgkhxop4ViipoLcy5dplKkzNdskRsgep%2FmvnsU6opOnepjIO0oYberxVoyEjM2zcdggVqXIfmNy%2F1EtcsO9HVGn0cqeVWgYtT3sPR35sQZQMsZjT9bSxXX%2BDlTmTF%2B6rv7ZdQu9OXpwz4Ta9lpAAcZfcU2J2ozk%2FsyDjeVEkkhyJyjmV7tOO4jiKJJzWpE6E9Tf5bs7fSFUzJgOl%2F5F7iOJg0S3pisif1F1a%2B1qVg7uud5p%2F8HatGeDd53eaDPci1yAVGuviyb1fn4KTyubqUbGNK9mQYRIuiMRjwaWBcKdZdLk4z1u1POSm5to%3D&__VIEWSTATEENCRYPTED=&__EVENTVALIDATION=jFcAc4ikcRQ1k1z9MQ6d0udWfcwWaQtx9e3Gx0d7tlPQMpNoCBZmVmk0O5%2FUl5FmUkP2a7%2FQKdKnB8XaqaFUgPgTZ0sZlbpTzTenH%2Fnp4iywH8oi3jGUGMcORoGXaTgF7%2B3t5QIsK4VfiI20cik3DQSGE8P7uhGrccO%2BluXGZWVuopXv40JTT2nExb0ix4gmAcYL6tdryuw61vvqjHkxo04hMKrAoUMTVxjaUyOpeguI0BZdYWk46943BzFetIxjYK%2F4QhYGJrMbdz%2FM%2FfeEajod34m2dqISVmhCEa%2Fu2N8jgqTcsHqDLCwhaNoMiZDA2yW1Yzcli4mAQMGcPqy%2FZd8Ta7ZajpdPlupVtFNK%2BWXrlY54irp8MKRl1IsPyT3y&ctl00%24MainContent%24Ddl_Rpt_type=#{TXTPRICE}&ctl00%24MainContent%24Rbl_Rpt_type=Price+report&ctl00%24MainContent%24Ddl_Rpt_Option0=Daily+Prices&ctl00%24MainContent%24Txt_FrmDate=#{TXTDATE}&ctl00%24MainContent%24btn_getdata1=Get+Data'
params = params.replace('#{TXTDATE}', date_string)
params = params.replace('#{TXTPRICE}', price_type)
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', 'ASP.NET_SessionId=' + cookie))
req = opener.open(main_url, params)
result_html = req.read()
save_downloaded_data(date_string, result_html)
| 19,512
|
def list_runs_in_swestore(path, pattern=RUN_RE, no_ext=False):
"""
    Will list runs that exist in swestore
    :param str path: swestore path to list runs
    :param str pattern: regex pattern for runs
    :param bool no_ext: if True, strip file extensions from the returned run names
"""
try:
status = check_call(['icd', path])
proc = Popen(['ils'], stdout=PIPE)
contents = [c.strip() for c in proc.stdout.readlines()]
runs = [r for r in contents if re.match(pattern, r)]
if no_ext:
runs = [r.split('.')[0] for r in runs]
return runs
except CalledProcessError:
return []
| 19,513
|
def linear_regression(
XL: ArrayLike, YP: ArrayLike, Q: ArrayLike
) -> LinearRegressionResult:
"""Efficient linear regression estimation for multiple covariate sets
Parameters
----------
XL
[array-like, shape: (M, N)]
"Loop" covariates for which N separate regressions will be run
YP
[array-like, shape: (M, O)]
Continuous traits that have had core covariates eliminated through orthogonal projection.
Q
[array-like, shape: (M, P)]
Orthonormal matrix computed by applying QR factorization to covariate matrix
Returns
-------
Dataclass containing:
beta : [array-like, shape: (N, O)]
Beta values associated with each loop covariate and outcome
t_value : [array-like, shape: (N, O)]
T statistics for each beta
p_value : [array-like, shape: (N, O)]
P values as float in [0, 1]
"""
if set([x.ndim for x in [XL, YP, Q]]) != {2}:
raise ValueError("All arguments must be 2D")
n_core_covar, n_loop_covar, n_obs, n_outcome = (
Q.shape[1],
XL.shape[1],
YP.shape[0],
YP.shape[1],
)
dof = n_obs - n_core_covar - 1
if dof < 1:
raise ValueError(
"Number of observations (N) too small to calculate sampling statistics. "
"N must be greater than number of core covariates (C) plus one. "
f"Arguments provided: N={n_obs}, C={n_core_covar}."
)
# Apply orthogonal projection to eliminate core covariates
# Note: QR factorization or SVD should be used here to find
# what are effectively OLS residuals rather than matrix inverse
# to avoid need for MxM array; additionally, dask.lstsq fails
# with numpy arrays
LS = Q @ (Q.T @ XL)
assert XL.shape == LS.shape
XLP = XL - LS
assert XLP.shape == (n_obs, n_loop_covar)
# Estimate coefficients for each loop covariate
# Note: A key assumption here is that 0-mean residuals
# from projection require no extra terms in variance
# estimate for loop covariates (columns of G), which is
# only true when an intercept is present.
XLPS = (XLP ** 2).sum(axis=0, keepdims=True).T
assert XLPS.shape == (n_loop_covar, 1)
B = (XLP.T @ YP) / XLPS
assert B.shape == (n_loop_covar, n_outcome)
# Compute residuals for each loop covariate and outcome separately
YR = YP[:, np.newaxis, :] - XLP[..., np.newaxis] * B[np.newaxis, ...]
assert YR.shape == (n_obs, n_loop_covar, n_outcome)
RSS = (YR ** 2).sum(axis=0)
assert RSS.shape == (n_loop_covar, n_outcome)
# Get t-statistics for coefficient estimates
T = B / np.sqrt(RSS / dof / XLPS)
assert T.shape == (n_loop_covar, n_outcome)
# Match to p-values
# Note: t dist not implemented in Dask so this must be delayed,
# see https://github.com/dask/dask/issues/6857
P = da.map_blocks(
lambda t: 2 * stats.distributions.t.sf(np.abs(t), dof),
map_blocks_asnumpy(T),
dtype="float64",
)
assert P.shape == (n_loop_covar, n_outcome)
P = np.asarray(P, like=T)
return LinearRegressionResult(beta=B, t_value=T, p_value=P)
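
# Numpy-only sketch with made-up data (not part of the original source): by the
# Frisch-Waugh-Lovell argument, the projection shortcut above gives the same beta
# for a loop covariate as an ordinary least-squares fit that includes the core
# covariates explicitly. All names below are illustrative.
import numpy as np

rng = np.random.default_rng(0)
n_obs, n_core, n_loop, n_out = 50, 3, 4, 2
X_core = np.c_[np.ones(n_obs), rng.normal(size=(n_obs, n_core - 1))]  # intercept + core covariates
XL = rng.normal(size=(n_obs, n_loop))
Y = rng.normal(size=(n_obs, n_out))

Q, _ = np.linalg.qr(X_core)
YP = Y - Q @ (Q.T @ Y)     # residualize traits against core covariates
XLP = XL - Q @ (Q.T @ XL)  # residualize loop covariates
B = (XLP.T @ YP) / (XLP ** 2).sum(axis=0, keepdims=True).T

# Reference: full OLS of Y[:, 0] on [X_core, XL[:, 0]]; the last coefficient is the beta
ref, *_ = np.linalg.lstsq(np.c_[X_core, XL[:, [0]]], Y[:, 0], rcond=None)
assert np.isclose(B[0, 0], ref[-1])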
| 19,514
|
def wait_for_visible_link(driver):
""" Wait for any link of interest to be visible. """
wait_time = 0
while wait_time < MAX_WAIT_SECONDS:
for link in LINKS:
if is_link_visible(driver, link):
return
time.sleep(1)
wait_time += 1
raise TimeoutException
| 19,515
|
def scrape(domains, links):
"""
Scrape the given list of links and pickle the output to a file containing a
dict of <category, dict of link, document> pairs and a dict of {link,
set(links)}.
args:
domains - list of base urls
links - list of page paths
"""
pool = Pool(10)
results = pool.map(call_scraper, zip(domains, links))
categories = {}
link_edges = {}
for index, result in enumerate(results):
documents, edges = result
        link_edges = {**link_edges, **edges}
categories[links[index]] = documents
# save the data so that we don't have
# to put a bunch of load on wikipedia's servers if we run again.
import pickle
pickle.dump(categories, open(PATH + "categories.pickle", "wb"))
pickle.dump(link_edges, open(PATH + "link_edges.pickle", "wb"))
| 19,516
|
def orthogonal_decomposition(C, tr_error, l_exp):
"""
Orthogonal decomposition of the covariance matrix to determine the meaningful directions
:param C: covariance matrix
:param tr_error: allowed truncation error
:param l_exp: expansion order
:return: transformation matrix Wy, number of terms N_t and meaningful directions k
"""
# eigenvalues and eigenvectors
v, w = np.linalg.eig(C)
v_sum = np.sum(v)
err_v = 1
k = 0 # meaningful directions
while err_v > tr_error:
err_v = 1 - v[k] / v_sum
k += 1
N_t = int(math.factorial(l_exp + k) / (math.factorial(k) * math.factorial(l_exp))) # number of terms
Wy = w[:,:k] # and for now, do not define Wz
return Wy, N_t, k
| 19,517
|
def get_data():
"""Reads the current state of the world"""
server = http.client.HTTPConnection(URL)
server.request('GET','/data')
response = server.getresponse()
if (response.status == 200):
data = response.read()
response.close()
return json.loads(data.decode())
else:
return UnexpectedResponse(response)
| 19,518
|
def chi_angles(filepath, model_id=0):
"""Calculate chi angles for a given file in the PDB format.
:param filepath: Path to the PDB file.
:param model_id: Model to be used for chi calculation.
:return: A list composed by a list of chi1, a list of chi2, etc.
"""
torsions_list = _sidechain_torsions(filepath, model_id)
chis = [item[2] for item in torsions_list]
return list(zip(*chis))
| 19,519
|
def callback(ch, method, properties, body):
"""Callback that has the message that was received"""
vol_prefix = os.getenv('VOL_PREFIX', '')
workers = load_workers()
d = setup_docker()
pipeline = json.loads(body.decode('utf-8'))
worker_found = False
status = {}
extra_workers = {}
for worker in workers['workers']:
file_path = pipeline['file_path']
if 'id' in pipeline and (('results' in pipeline and pipeline['results']['tool'] in worker['inputs']) or ('file_type' in pipeline and pipeline['file_type'] in worker['inputs'])):
uid = str(uuid.uuid4()).split('-')[-1]
name = worker['name'] + '_' + uid
image = worker['image']
ports = None
if 'version' in worker:
image += ':' + worker['version']
command = []
if 'command' in worker:
command = worker['command']
command.append(file_path)
environment = pipeline
if 'environment' in worker:
environment.update(worker['environment'])
if 'rabbit' not in pipeline:
pipeline['rabbit'] = 'true'
if 'ports' in worker:
ports = worker['ports']
keep_images = os.getenv('KEEPIMAGES', '0')
remove = True
if keep_images == '1':
remove = False
try:
d.containers.run(image=image,
name=name,
network=worker['stage'],
volumes={
vol_prefix + '/opt/poseidon_files': {'bind': '/files', 'mode': 'rw'}},
environment=environment,
remove=remove,
command=command,
ports=ports,
detach=True)
print(' [Create container] %s UTC %r:%r:%r:%r' % (str(datetime.datetime.utcnow()),
method.routing_key,
pipeline['id'],
image,
pipeline))
status[worker['name']] = json.dumps(
{'state': 'In progress', 'timestamp': str(datetime.datetime.utcnow())})
worker_found = True
except Exception as e: # pragma: no cover
print('failed: {0}'.format(str(e)))
status[worker['name']] = json.dumps(
{'state': 'Error', 'timestamp': str(datetime.datetime.utcnow())})
else:
extra_workers[worker['name']] = json.dumps(
{'state': 'Queued', 'timestamp': str(datetime.datetime.utcnow())})
if 'id' in pipeline and 'results' in pipeline and pipeline['type'] == 'data':
print(' [Data] %s UTC %r:%r:%r' % (str(datetime.datetime.utcnow()),
method.routing_key,
pipeline['id'],
pipeline['results']))
status[pipeline['results']['tool']] = json.dumps(
{'state': 'In progress', 'timestamp': str(datetime.datetime.utcnow())})
elif 'id' in pipeline and 'results' in pipeline and pipeline['type'] == 'metadata':
if 'data' in pipeline and pipeline['data'] != '':
print(' [Metadata] %s UTC %r:%r:%r' % (str(datetime.datetime.utcnow()),
method.routing_key,
pipeline['id'],
pipeline['results']))
status[pipeline['results']['tool']] = json.dumps(
{'state': 'In progress', 'timestamp': str(datetime.datetime.utcnow())})
else:
print(' [Finished] %s UTC %r:%r' % (str(datetime.datetime.utcnow()),
method.routing_key,
pipeline))
status[pipeline['results']['tool']] = json.dumps(
{'state': 'Complete', 'timestamp': str(datetime.datetime.utcnow())})
elif not worker_found:
print(' [X no match] %s UTC %r:%r' % (str(datetime.datetime.utcnow()),
method.routing_key,
pipeline))
ch.basic_ack(delivery_tag=method.delivery_tag)
# store state of status in redis
r = setup_redis()
print('redis: {0}'.format(status))
if r:
try:
r.hmset('status', status)
statuses = r.hgetall('status')
for s in statuses:
statuses[s] = json.loads(statuses[s])
for worker in extra_workers:
if worker not in statuses:
status[worker] = extra_workers[worker]
r.hmset('status', status)
r.close()
except Exception as e: # pragma: no cover
print('Failed to update Redis because: {0}'.format(str(e)))
| 19,520
|
def surface_sphere(radius):
"""
"""
phi, theta = np.mgrid[0.0:np.pi:100j, 0.0:2.0*np.pi:100j]
x_blank_sphere = radius*np.sin(phi)*np.cos(theta)
y_blank_sphere = radius*np.sin(phi)*np.sin(theta)
z_blank_sphere = radius*np.cos(phi)
sphere_surface = np.array(([x_blank_sphere,
y_blank_sphere,
z_blank_sphere]))
return sphere_surface
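
# Minimal usage sketch (assumes numpy imported as np, as in the snippet above):
# the returned array stacks the x, y and z grids of a 100x100 spherical mesh,
# ready for e.g. mplot3d's plot_surface.
surface = surface_sphere(2.0)
x, y, z = surface
print(surface.shape)                         # (3, 100, 100)
print(np.allclose(x**2 + y**2 + z**2, 4.0))  # all points lie on the radius-2 sphere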
| 19,521
|
def p_domain(p):
"""domain : domain_def requirements_def types_def predicates_def action_def
| domain_def requirements_def types_def predicates_def function_def action_def"""
if len(p) == 6:
p[0] = Domain(p[1], p[2], p[3], p[4], p[5])
else:
p[0] = Domain(p[1], p[2], p[3], p[4], p[6], p[5])
| 19,522
|
def test_create_layout():
"""Checks if axes for regular and detailed plots were created
properly.
Summary
-------
We pass different stages and plot levels to pipeline.create_layout,
and check the resulting axes.
Expected
--------
- pipeline.create_layout(1, 0) should not return axes.
- pipeline.create_layout(3, 2) should return all seven axes
(ax_main, ax_bin, ax_poi, ax_structure, ax_signal, ax_fourier, ax_tags).
- pipeline.create_layout(3, 1) should return a list with three axes and
four None.
"""
axes = pipeline.create_layout(1, 0)
assert axes is None
axes = pipeline.create_layout(3, 2)
for ax in axes:
assert ax
axes = pipeline.create_layout(3, 1)
for ax in axes[:3]:
assert ax
for ax in axes[3:]:
assert ax is None
| 19,523
|
def schema_upgrades():
"""schema upgrade migrations go here."""
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('build', 'git_tag', new_column_name='vcs_ref')
op.add_column('build', sa.Column('ver', sa.String(), nullable=False, server_default='0.0.0.0'))
op.alter_column('build', 'ver', server_default=None)
# ### end Alembic commands ###
| 19,524
|
def format_bucket_objects_listing(bucket_objects):
"""Returns a formated list of buckets.
Args:
buckets (list): A list of buckets objects.
Returns:
The formated list as string
"""
import re
import math
out = ""
i = 1
for o in bucket_objects:
        # Shorten to 65 chars max, remove linebreaks
name = re.sub(r'[\n\r]', ' ',
o.name[:63] + '..'
if len(o.name) > 65
else o.name)
size = sizeof_fmt(o.size)
time = f"{o.time_modified:%Y-%m-%d %H:%M}" \
if o.time_modified is not None else ""
out += (f"{i:>4} {name:65} {size:8} {time:16}\n")
i += 1
return out
| 19,525
|
def load_white_list() -> None:
"""Loads the whitelist."""
with OpenJson(PLAYERS_DATA_PATH + "whitelist.json") as whitelist_file:
data = whitelist_file.load()
for account_id in data:
CacheData.whitelist.append(account_id)
| 19,526
|
def test_decode_ssdp_packet_v6():
"""Test SSDP response decoding."""
msg = (
b"HTTP/1.1 200 OK\r\n"
b"Cache-Control: max-age=1900\r\n"
b"Location: http://[fe80::2]:80/RootDevice.xml\r\n"
b"Server: UPnP/1.0 UPnP/1.0 UPnP-Device-Host/1.0\r\n"
b"ST:urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1\r\n"
b"USN: uuid:...::WANCommonInterfaceConfig:1\r\n"
b"EXT:\r\n\r\n"
)
request_line, headers = decode_ssdp_packet(msg, ("fe80::1", 123, 0, 3))
assert request_line == "HTTP/1.1 200 OK"
assert headers == {
"cache-control": "max-age=1900",
"location": "http://[fe80::2%3]:80/RootDevice.xml",
"server": "UPnP/1.0 UPnP/1.0 UPnP-Device-Host/1.0",
"st": "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1",
"usn": "uuid:...::WANCommonInterfaceConfig:1",
"ext": "",
"_location_original": "http://[fe80::2]:80/RootDevice.xml",
"_host": "fe80::1%3",
"_port": 123,
"_udn": "uuid:...",
"_timestamp": ANY,
}
| 19,527
|
def charts(chart_type, cmid, start_date, end_date=None):
"""
Get the given type of charts for the artist.
https://api.chartmetric.com/api/artist/:id/:type/charts
**Parameters**
- `chart_type`: string type of charts to pull, choose from
'spotify_viral_daily', 'spotify_viral_weekly',
'spotify_top_daily', 'spotify_top_weekly',
'applemusic_top', 'applemusic_daily',
'applemusic_albums', 'itunes_top',
'itunes_albums', 'shazam', 'beatport'
- `cmid`: string or int Chartmetric artist ID
    - `start_date`: string of start date in ISO format
- `end_date`: string of end date in ISO format
**Returns**
A list of dictionaries of specific type of charts for the given artist.
"""
urlhandle = f"/artist/{cmid}/{chart_type}/charts"
params = {
"since": start_date,
"until": end_date if end_date else utilities.strDateToday(),
}
data = utilities.RequestData(urlhandle, params)
return utilities.RequestGet(data)["data"]
| 19,528
|
def session_setup(request):
"""
Auto session resource fixture
"""
pass
| 19,529
|
def test_condition_one_condition_pair():
"""
GIVEN PolicyStatementCondition object.
WHEN Created PolicyStatementCondition object with 'condition_operator','condition_statements attributes.
THEN Object created with desired attributes.
"""
cond_statement = PolicyStatementCondition(
condition_operator='StringEquals',
condition_statements={'ec2:Vpc': 'arn:aws:ec2:region:account:vpc/vpc-11223344556677889'}
)
desired_result = {
'StringEquals': {
'ec2:Vpc': 'arn:aws:ec2:region:account:vpc/vpc-11223344556677889'
}
}
assert cond_statement.get_condition() == desired_result
| 19,530
|
def config():
"""Do various things related to config."""
| 19,531
|
def fake_traceback(exc_value, tb, filename, lineno):
"""Produce a new traceback object that looks like it came from the
template source instead of the compiled code. The filename, line
number, and location name will point to the template, and the local
variables will be the current template context.
:param exc_value: The original exception to be re-raised to create
the new traceback.
:param tb: The original traceback to get the local variables and
code info from.
:param filename: The template filename.
:param lineno: The line number in the template source.
"""
if tb is not None:
# Replace the real locals with the context that would be
# available at that point in the template.
locals = get_template_locals(tb.tb_frame.f_locals)
locals.pop("__jinja_exception__", None)
else:
locals = {}
globals = {
"__name__": filename,
"__file__": filename,
"__jinja_exception__": exc_value,
}
# Raise an exception at the correct line number.
code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
# Build a new code object that points to the template file and
# replaces the location with a block name.
try:
location = "template"
if tb is not None:
function = tb.tb_frame.f_code.co_name
if function == "root":
location = "top-level template code"
elif function.startswith("block_"):
location = 'block "%s"' % function[6:]
# Collect arguments for the new code object. CodeType only
# accepts positional arguments, and arguments were inserted in
# new Python versions.
code_args = []
for attr in (
"argcount",
"posonlyargcount", # Python 3.8
"kwonlyargcount", # Python 3
"nlocals",
"stacksize",
"flags",
"code", # codestring
"consts", # constants
"names",
"varnames",
("filename", filename),
("name", location),
"firstlineno",
"lnotab",
"freevars",
"cellvars",
):
if isinstance(attr, tuple):
# Replace with given value.
code_args.append(attr[1])
continue
try:
# Copy original value if it exists.
code_args.append(getattr(code, "co_" + attr))
except AttributeError:
# Some arguments were added later.
continue
code = CodeType(*code_args)
except Exception:
# Some environments such as Google App Engine don't support
# modifying code objects.
pass
# Execute the new code, which is guaranteed to raise, and return
# the new traceback without this frame.
try:
exec(code, globals, locals)
except BaseException:
return sys.exc_info()[2].tb_next
| 19,532
|
def search_candidates(api_key, active_status="true"):
"""
https://api.open.fec.gov/developers#/candidate/get_candidates_
"""
query = """https://api.open.fec.gov/v1/candidates/?sort=name&sort_hide_null=false&is_active_candidate={active_status}&sort_null_only=false&sort_nulls_last=false&page=1&per_page=20&api_key={api_key}""".format(
api_key=api_key,
active_status=active_status
)
return get_response(
query=query
)
| 19,533
|
def load_dat(file_name):
"""
carga el fichero dat (Matlab) especificado y lo
devuelve en un array de numpy
"""
data = loadmat(file_name)
y = data['y']
X = data['X']
ytest = data['ytest']
Xtest = data['Xtest']
yval = data['yval']
Xval = data['Xval']
return X,y,Xtest,ytest,Xval,yval
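
# Usage sketch (not part of the original source): write a tiny MATLAB file with
# scipy and read it back. 'demo_data.mat' and the zero arrays are made up.
import numpy as np
from scipy.io import savemat

demo = {k: np.zeros((3, 2)) for k in ('X', 'y', 'Xtest', 'ytest', 'Xval', 'yval')}
savemat('demo_data.mat', demo)
X, y, Xtest, ytest, Xval, yval = load_dat('demo_data.mat')
print(X.shape, Xval.shape, Xtest.shape)  # (3, 2) three times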
| 19,534
|
def get_specific_pos_value(img, pos):
"""
Parameters
----------
img : ndarray
image data.
pos : list
        pos[0] is horizontal coordinate, pos[1] is vertical coordinate.
"""
return img[pos[1], pos[0]]
| 19,535
|
def postorder(dirUp, catchment, node, catch, dirDown):
    """
    Run a postorder tree traversal of the upstream drainage network.
    :param dirUp: for each node, the list of nodes draining directly into it
    :param catchment: per-node catchment labels, updated in place
    :param node: node to start the traversal from
    :param catch: catchment id assigned to every node visited upstream of node
    :param dirDown: list of visited nodes in postorder, appended to in place
    :return: None; catchment and dirDown are updated in place
    """
    # visit every upstream neighbour recursively, label it and record it in postorder
    for child in dirUp[node]:
        postorder(dirUp, catchment, child, catch, dirDown)
        catchment[child] = catch
        dirDown.append(child)
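
# Minimal usage sketch on a toy river network (made-up data): dirUp maps each
# node to the list of nodes draining into it; node 0 is the outlet.
dirUp = {0: [1, 2], 1: [], 2: [3], 3: []}
catchment = {n: 0 for n in dirUp}
dirDown = []
postorder(dirUp, catchment, 0, catch=7, dirDown=dirDown)
print(dirDown)    # [1, 3, 2] -- upstream nodes visited in postorder
print(catchment)  # every visited upstream node labelled with catchment 7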
| 19,536
|
def transpose_tokens(
cards: List[MTGJSONCard]
) -> Tuple[List[MTGJSONCard], List[Dict[str, Any]]]:
"""
Sometimes, tokens slip through and need to be transplanted
back into their appropriate array. This method will allow
us to pluck the tokens out and return them home.
:param cards: Cards+Tokens to iterate
:return: Cards, Tokens as two separate lists
"""
# Order matters with these, as if you do cards first
# it will shadow the tokens lookup
# Single faced tokens are easy
tokens = [
scryfall.download(scryfall.SCRYFALL_API_CARD + card.get("scryfallId"))
for card in cards
if card.get("layout") in ["token", "emblem"]
]
# Do not duplicate double faced tokens
done_tokens: Set[str] = set()
for card in cards:
if (
card.get("layout") == "double_faced_token"
and card.get("scryfallId") not in done_tokens
):
tokens.append(
scryfall.download(scryfall.SCRYFALL_API_CARD + card.get("scryfallId"))
)
done_tokens.add(card.get("scryfallId"))
# Remaining cards, without any kind of token
cards = [
card
for card in cards
if card.get("layout") not in ["token", "double_faced_token", "emblem"]
]
return cards, tokens
| 19,537
|
def test_jdbc_query_executor_failed_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for failed-query event type.
Pipeline will try to insert records into a non-existing table and the query would fail.
Event records are verified for failed-query event type.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= wiretap
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA),
stop_after_first_batch=True)
invalid_table = "INVALID_TABLE"
query_str = f"INSERT INTO {invalid_table} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
wiretap = pipeline_builder.add_wiretap()
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= wiretap.destination
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
event_records = wiretap.output_records
assert len(event_records) == 3
assert 'failed-query' == event_records[0].header['values']['sdc.event.type']
assert 'failed-query' == event_records[1].header['values']['sdc.event.type']
assert 'failed-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == []
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
| 19,538
|
def from_local(local_dt, timezone=None):
"""Converts the given local datetime to a universal datetime."""
if not isinstance(local_dt, datetime.datetime):
raise TypeError('Expected a datetime object')
if timezone is None:
a = arrow.get(local_dt)
else:
a = arrow.get(local_dt, timezone)
return a.to('UTC').naive
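
# Usage sketch (requires the arrow package, as used above): convert a Paris
# wall-clock time to naive UTC.
import datetime

local = datetime.datetime(2021, 7, 1, 12, 0)
print(from_local(local, 'Europe/Paris'))  # 2021-07-01 10:00:00 (CEST is UTC+2)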
| 19,539
|
def mrefresh_to_relurl(content):
"""Get a relative url from the contents of a metarefresh tag"""
urlstart = re.compile('.*URL=')
_, url = content.split(';')
url = urlstart.sub('', url)
return url
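
# Usage sketch (made-up content attribute of a <meta http-equiv="refresh"> tag):
print(mrefresh_to_relurl('0; URL=/accounts/login/?next=/home'))  # '/accounts/login/?next=/home'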
| 19,540
|
def mock_mikrotik_api():
"""Mock an api."""
with patch("librouteros.connect"):
yield
| 19,541
|
def simclr_loss_func(
z1: torch.Tensor,
z2: torch.Tensor,
temperature: float = 0.1,
extra_pos_mask=None,
) -> torch.Tensor:
"""Computes SimCLR's loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
temperature (float): temperature factor for the loss. Defaults to 0.1.
extra_pos_mask (Optional[torch.Tensor]): boolean mask containing extra positives other
than normal across-view positives. Defaults to None.
Returns:
torch.Tensor: SimCLR loss.
"""
device = z1.device
b = z1.size(0)
z = torch.cat((z1, z2), dim=0)
z = F.normalize(z, dim=-1)
logits = torch.einsum("if, jf -> ij", z, z) / temperature
logits_max, _ = torch.max(logits, dim=1, keepdim=True)
logits = logits - logits_max.detach()
# positive mask are matches i, j (i from aug1, j from aug2), where i == j and matches j, i
pos_mask = torch.zeros((2 * b, 2 * b), dtype=torch.bool, device=device)
pos_mask[:, b:].fill_diagonal_(True)
pos_mask[b:, :].fill_diagonal_(True)
# if we have extra "positives"
if extra_pos_mask is not None:
pos_mask = torch.bitwise_or(pos_mask, extra_pos_mask)
# all matches excluding the main diagonal
logit_mask = torch.ones_like(pos_mask, device=device).fill_diagonal_(0)
exp_logits = torch.exp(logits) * logit_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positives
mean_log_prob_pos = (pos_mask * log_prob).sum(1) / pos_mask.sum(1)
# loss
loss = -mean_log_prob_pos.mean()
return loss
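
# Minimal usage sketch (assumes torch and F are imported as in the snippet above):
# projected features from two augmented views of the same 8 images; the loss is a
# plain scalar tensor.
z1 = torch.randn(8, 128)
z2 = torch.randn(8, 128)
loss = simclr_loss_func(z1, z2, temperature=0.2)
print(loss.item())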
| 19,542
|
def find_node_types(G, edge_type):
"""
:param G: NetworkX graph.
:param edge_type: Edge type.
:return: Node types that correspond to the edge type.
"""
for e in G.edges:
if G[e[0]][e[1]][e[2]]['type'] == edge_type:
u, v = e[0], e[1]
break
utype = G.nodes[u]['type']
vtype = G.nodes[v]['type']
try:
if int(utype) > int(vtype):
return utype, vtype
else:
return vtype, utype
    except (ValueError, TypeError):
return utype, vtype
| 19,543
|
def distance_point_point(p1, p2):
"""Calculates the euclidian distance between two points or sets of points
>>> distance_point_point(np.array([1, 0]), np.array([0, 1]))
1.4142135623730951
>>> distance_point_point(np.array([[1, 1], [0, 0]]), np.array([0, 1]))
array([1., 1.])
>>> distance_point_point(np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, -3]]))
array([1., 3.])
"""
return scipy.spatial.minkowski_distance(p1, p2)
| 19,544
|
def plotInfluential ( InferenceObject ):
"""Diagnostic plot for detecting influential observations
Determining influential observations follows a different logic for bootstrap
and for bayes inference. A block is labelled an influential observation if
the fit for a dataset without that point is significantly different from the
fit including that point. For BootstrapInference objects, this is quantified
using a normed distance of the maximum likelihood fit including the block and
withouth that block. This distance is normed in the following way: If the
maximum likelihood fit for the reduced dataset remains inside the 95% confidence
limits of the maximum likelihood fit for the full dataset, the influence
    value is below 1. Thus, influence values larger than 1 indicate a problem with
the data set. For BayesInference objects, the influence of a block is simply
    quantified as the Kullback-Leibler divergence of the posterior for the full
data set from the posterior for the reduced data set.
:Parameters:
*InferenceObject* :
Data set for which the influential observations are to be plotted
"""
maxinfl = N.argmax(InferenceObject.infl)
    ind = list ( range ( InferenceObject.data.shape[0] ) )
ind.pop(maxinfl)
# influencedDataset = psignidata.BootstrapInference( InferenceObject.data[ind,:],
# sample=False, **(InferenceObject.model))
# influencedDataset = psignidata.BayesInference ( InferenceObject.data[ind,:], **(InferenceObject.model) )
est = interface.mapestimate ( InferenceObject.data[ind,:], start=InferenceObject.estimate, **(InferenceObject.model) )[0]
x = N.mgrid[InferenceObject.data[:,0].min():InferenceObject.data[:,0].max():100j]
influencedPMF = interface.diagnostics ( x, est,
nafc = InferenceObject.model["nafc"],
sigmoid = InferenceObject.model["sigmoid"],
core = InferenceObject.model["core"] )
p.figure ( figsize=(6,8) )
# ax = p.axes ( (0.0,.5,.9,.5) )
ax = prepare_axes ( p.subplot ( 2,1,1 ) )
ax.set_ylabel ( r"$\Psi(x)$" )
if InferenceObject.__repr__().split()[1] in ["BayesInference","ASIRInference"]:
InferenceObject.drawposteriorexamples ( ax=ax )
plotPMF ( InferenceObject, ax=ax, showaxes=True, showdesc=False, color="b", linewidth=2 )
ax.plot ( [InferenceObject.data[maxinfl,0]], [InferenceObject.data[maxinfl,1].astype("d")/InferenceObject.data[maxinfl,2]],
'rx', markersize=20, markeredgewidth=5 )
# ax = plotPMF ( influencedDataset, ax=ax, showdesc=False, showaxes=True, color="r", markertype=([(0,0)],0), linewidth=2 )[-1]
ax.plot ( x, influencedPMF, color="r", linewidth=2 )
xl = list(ax.get_xlim ())
# ax = p.axes ( (0.0, 0., .9, .5) )
ax = p.subplot ( 2,1,2, sharex=ax )
if InferenceObject.__repr__().split()[1] == "BootstrapInference":
ax.plot ( [InferenceObject.data[:,0].min(),InferenceObject.data[:,0].max()], [1,1], 'k:' )
yname = "Influence"
else:
yname = "D_KL( full || reduced )"
ax.plot ( InferenceObject.data[:,0], InferenceObject.infl, 'bo' )
ax.set_xlim(xl)
drawaxes ( ax, ax.get_xticks(), "%g", ax.get_yticks(), "%g", r"stimulus intensity $x$", yname )
| 19,545
|
def switch(
confs=None, remain=False, all_checked=False, _default=None, **kwargs
):
"""
Execute first statement among conf where task result is True.
If remain, process all statements conf starting from the first checked
conf.
:param confs: task confs to check. Each one may contain a task action at
the key 'action' in conf.
:type confs: str or dict or list
:param bool remain: if True, execute all remaining actions after the
first checked condition.
:param bool all_checked: execute all statements where conditions are
checked.
:param _default: default task to process if others have not been checked.
:type _default: str or dict
:return: statement result or list of statement results if remain.
:rtype: list or object
"""
# init result
result = [] if remain else None
# check if remain and one task has already been checked.
remaining = False
if confs is not None:
if isinstance(confs, string_types) or isinstance(confs, dict):
confs = [confs]
for conf in confs:
# check if task has to be checked or not
check = remaining
if not check:
# try to check current conf
check = run(conf=conf, **kwargs)
# if task is checked or remaining
if check:
if STATEMENT in conf: # if statements exist, run them
statement = conf[STATEMENT]
statement_result = run(statement, **kwargs)
# save result
if not remain: # if not remain, result is statement_result
result = statement_result
else: # else, add statement_result to result
result.append(statement_result)
# if remain
if remain:
# change of remaining status
if not remaining:
remaining = True
elif all_checked:
pass
else: # leave execution if one statement has been executed
break
# process _default statement if necessary
if _default is not None and (remaining or (not result) or all_checked):
last_result = run(_default, **kwargs)
if not remain:
result = last_result
else:
result.append(last_result)
return result
| 19,546
|
def makeProcesses(nChildren):
"""
Create and start all the worker processes
"""
global taskQueue,resultsQueue,workers
    if nChildren < 0:
        print('makeProcesses: ', nChildren, ' is too small')
        return False
    if nChildren > 3:
        print('makeProcesses: ', nChildren, ' is too large')
        return False
# Create a task queue for each worker to receive the image segment
taskQueue = []
for k in range(nChildren):
taskQueue.append(Queue())
resultsQueue = Queue() # Single results queue
#Create and start the workers
workers = []
for k in range(nChildren):
p = Process(target=worker, args=(k,taskQueue[k],resultsQueue))
workers.append(p)
for p in workers:
p.start()
time.sleep(2)
return True
| 19,547
|
def create_moleculenet_model(model_name):
"""Create a model.
Parameters
----------
model_name : str
Name for the model.
Returns
-------
Created model
"""
for func in [create_bace_model, create_bbbp_model, create_clintox_model, create_esol_model,
create_freesolv_model, create_hiv_model, create_lipophilicity_model,
create_muv_model, create_pcba_model, create_sider_model, create_tox21_model,
create_toxcast_model]:
model = func(model_name)
if model is not None:
return model
return None
| 19,548
|
def detect_daml_lf_dir(paths: "Collection[str]") -> "Optional[str]":
"""
Find the biggest Daml-LF v1 version in the set of file names from a Protobuf archive, and return
the path that contains the associated files (with a trailing slash).
If there is ever a Daml-LF 2, then this logic will need to be revisited; however, when that
happens, there are likely to be even larger changes required so we won't worry about this too
much right now.
:param paths: The paths in a Protobuf zipfile to examine.
:return: The root directory of a target Daml-LF protobuf version, stripped of a prefix.
>>> detect_daml_lf_dir([
... "protos-1.15.0/com/daml/daml_lf_1_10/something.proto",
... "protos-1.15.0/com/daml/daml_lf_1_9/something.proto",
... "protos-1.15.0/com/daml/daml_lf_dev/something.proto",
... "protos-1.15.0/com/daml/daml_lf_1_what/something.proto",
... ])
'com/daml/daml_lf_1_10/'
"""
daml_lf_prefix = "com/daml/daml_lf_1_"
minor_versions = set() # type: Set[int]
for p in paths:
_, _, truncated_path = p.partition("/")
if truncated_path.startswith(daml_lf_prefix):
version_str, _, _ = truncated_path[len(daml_lf_prefix) :].partition("/")
try:
minor_versions.add(int(version_str))
except ValueError:
# skip over unrecognized directory names
pass
if minor_versions:
return f"{daml_lf_prefix}{max(minor_versions)}/"
else:
return None
| 19,549
|
def random_mini_batches(X, Y, mini_batch_size):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (m, n_H, n_W, c)
Y -- true "label" vector of shape (m, num_classes)
mini_batch_size -- size of mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
# Extract the input data shapes.
m = X.shape[0]
num_classes = Y.shape[1]
# Instantiate an empty list to hold mini batch X-Y tuples with size batch_size.
mini_batches = []
# Shuffle X and Y.
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation, :, :, :]
shuffled_Y = Y[permutation, :]
# Divide (shuffled_X, shuffled_Y) into batches minus the end case.
num_complete_minibatches = m // mini_batch_size
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[ k*mini_batch_size:(k+1)*mini_batch_size, :,:,:]
mini_batch_Y = shuffled_Y[ k*mini_batch_size:(k+1)*mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handle the end case if the last mini-batch < mini_batch_size.
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[ num_complete_minibatches*mini_batch_size: , :,:,:]
mini_batch_Y = shuffled_Y[ num_complete_minibatches*mini_batch_size: , :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
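
# Usage sketch with dummy image data (assumes numpy imported as np, as above):
# 130 samples split into batches of 64 gives two full batches plus a final batch of 2.
X = np.random.randn(130, 28, 28, 3)
Y = np.eye(10)[np.random.randint(0, 10, 130)]  # one-hot labels, shape (130, 10)
batches = random_mini_batches(X, Y, mini_batch_size=64)
print([b[0].shape[0] for b in batches])  # [64, 64, 2]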
| 19,550
|
def classifyContent(text):
"""
Uses the NLP provider's SDK to perform a content classification operation.
Arguments:
text {String} -- Text to be analyzed.
"""
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT,
language='en')
try:
response = client.classify_text(document=document)
values = []
for category in response.categories:
values.append({
"category": category.name,
"confidence": category.confidence
})
return(Classiciation(values, ""))
except Exception as e:
return Classiciation([], str(e.args))
| 19,551
|
def print_matrix(matrice: np.ndarray):
"""Stampa a video della Mappa"""
str_ = ""
for i in range(len(matrice)):
for j in range(len(matrice)):
if matrice[i][j] == 0:
str_ += Back.BLACK + " "
elif matrice[i][j] == 1:
str_ += Back.WHITE + " "
elif matrice[i][j] == 2:
str_ += Back.RED + " "
elif matrice[i][j] == 3:
str_ += Back.BLUE + " "
elif matrice[i][j] == 4:
str_ += Back.GREEN + " "
str_ += Back.BLACK + "\n"
print(str_)
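
# Usage sketch (requires colorama's Back and numpy as used by the snippet above);
# cell values map to colours: 0=black, 1=white, 2=red, 3=blue, 4=green.
demo = np.array([[0, 1, 2],
                 [3, 4, 0],
                 [1, 2, 3]])
print_matrix(demo)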
| 19,552
|
def print_instance_summary(instance, use_color='auto'):
""" Print summary info line for the supplied instance """
colorize_ = partial(colorize, use_color=use_color)
name = colorize_(instance.name, "yellow")
instance_type = instance.extra['gonzo_size']
if instance.state == NodeState.RUNNING:
status_colour = "green"
else:
status_colour = "red"
instance_status = NodeState.tostring(instance.state)
status = colorize_(instance_status, status_colour)
if 'owner' in instance.extra['gonzo_tags']:
owner = instance.extra['gonzo_tags']['owner']
else:
owner = "---"
uptime = format_uptime(instance.extra['gonzo_created_time'])
uptime = colorize_(uptime, "blue")
availability_zone = instance.extra['gonzo_az']
result_list = [
name,
instance_type,
status,
owner,
uptime,
availability_zone,
]
return result_list
| 19,553
|
def cli(input_file=None, part=None):
"""
CLI entry point
"""
result = process(input_file, part)
print(result)
| 19,554
|
def test_local_time_string_raises_v_020_010():
"""local_time_string() with bad t_format type raises SimplEthError"""
bad_format_type = 100
with pytest.raises(SimplEthError) as excp:
Convert().local_time_string(bad_format_type)
assert excp.value.code == 'V-020-010'
| 19,555
|
def generate_blob_sas_token(blob, container, blob_service, permission=BlobPermissions.READ):
"""Generate a blob URL with SAS token."""
sas_token = blob_service.generate_blob_shared_access_signature(
container, blob.name,
permission=permission,
start=datetime.datetime.utcnow() - datetime.timedelta(minutes=15),
expiry=datetime.datetime.utcnow() + datetime.timedelta(days=FileUtils.SAS_EXPIRY_DAYS))
return blob_service.make_blob_url(container, quote(blob.name.encode('utf-8')), sas_token=sas_token)
| 19,556
|
def rewrite_complex_signature(function, signature: Sequence[tf.TensorSpec]):
"""Compatibility layer for testing complex numbers."""
if not all([spec.dtype.is_complex for spec in signature]):
raise NotImplementedError("Signatures with mixed complex and non-complex "
"tensor specs are not supported.")
# Rewrite the signature, replacing all complex tensors with pairs of real
# and imaginary tensors.
real_imag_signature = []
for spec in signature:
new_dtype = tf.float32 if spec.dtype.size == 8 else tf.float64
real_imag_signature.append(tf.TensorSpec(spec.shape, new_dtype))
real_imag_signature.append(tf.TensorSpec(spec.shape, new_dtype))
return _complex_wrapper(function), real_imag_signature
| 19,557
|
def get_args(argv: list):
"""gets the args and dictionarize them"""
if len(argv) not in [5,7]:
Errors.args_error()
data = {}
# getting the type of the title
if "-" in argv[1]:
data["type"] = "series" if argv[1] == "-s" else "movie" if argv[1] == "-m" else None
else:
Errors.args_error()
# getting the title itself
data["title"] = argv[2]
data["format"] = argv[3]
data["language"] = argv[4]
if data["type"] == "series":
if len(argv) != 7:
Errors.args_error()
try:
data["season"] = int(argv[5])
data["episode"] = int(argv[6])
        except ValueError:
Errors.args_error()
return data
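
# Usage sketch mirroring a command line like (program name "subtool" is a placeholder):
#   subtool -s "Breaking Bad" srt en 2 5
print(get_args(["subtool", "-s", "Breaking Bad", "srt", "en", "2", "5"]))
# {'type': 'series', 'title': 'Breaking Bad', 'format': 'srt', 'language': 'en', 'season': 2, 'episode': 5}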
| 19,558
|
def random(
reason: str,
current_dir: Path = Path("."),
) -> None:
"""Roll the dice and email the result."""
logger = getLogger(__name__)
config = MailsConfig.from_global_paths(GlobalPaths.from_defaults(current_dir))
names = list(config.to)
name = choice(names)
logger.info(f"I've drawn {name} randomly from {', '.join(names)}")
sendgrid_email = Mail(
from_email=config.mail,
to_emails=list(config.to.values()),
subject=f"[deckz random] {reason}: {name} got picked",
plain_text_content="See subject :)",
)
client = SendGridAPIClient(config.api_key)
client.send(sendgrid_email)
| 19,559
|
def fund_with_erc20(
to_fund_address, erc20_token_contract, ether_amount=0.1, account=None
):
"""Send a specified amount of an ERC20 token to an address.
Args:
to_fund_address (address): Address to send to the tokens to.
erc20_token_contract (Contract): Contract of the ERC20 token.
ether_amount (float, optional): Amount to be sent, in ETHER. Defaults to 0.1.
account (address, optional): Account from which to send the transaction. Defaults to None.
Returns:
TransactionReceipt
"""
account = account if account else get_account()
print(
f"Funding {to_fund_address} with {ether_amount} {erc20_token_contract.symbol()}..."
)
tx = erc20_token_contract.transfer(
to_fund_address,
Web3.toWei(ether_amount, "ether"),
{"from": account},
)
tx.wait(1)
print(
f"Funded {to_fund_address} with {ether_amount} {erc20_token_contract.symbol()}."
)
return tx
| 19,560
|
def test_tensor_from_cache_empty(tensor_key):
"""Test get works returns None if tensor key is not in the db."""
db = TensorDB()
cached_nparray = db.get_tensor_from_cache(tensor_key)
assert cached_nparray is None
| 19,561
|
async def test_form(opp):
"""Test we get the form."""
await setup.async_setup_component(opp, "persistent_notification", {})
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
c4_account = _get_mock_c4_account()
c4_director = _get_mock_c4_director()
with patch(
"openpeerpower.components.control4.config_flow.C4Account",
return_value=c4_account,
), patch(
"openpeerpower.components.control4.config_flow.C4Director",
return_value=c4_director,
), patch(
"openpeerpower.components.control4.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
await opp.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "control4_model_00AA00AA00AA"
assert result2["data"] == {
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
"controller_unique_id": "control4_model_00AA00AA00AA",
}
assert len(mock_setup_entry.mock_calls) == 1
| 19,562
|
def energy_adj_ground_to_sig_end(ds):
"""
Waveform energy from the ground peak. We calculated senergy_whrc as the energy of the waveform (in digital counts) from the ground peak
to the signal end multiplied by two. Ground peak defined as whichever of the two lowest peaks has greater amplitude. We then applied the
following linear transformation in order to calculate on the same scale as data published by Margolis et al. (2015)
senergy = -4.397006 + 0.006208 * senergy_whrc
"""
from carbonplan_trace.v1.glas_preprocess import select_valid_area # avoid circular import
path = 'gs://carbonplan-climatetrace/inputs/volt_table.csv'
volt_table = pd.read_csv(path)
volt_to_digital_count = volt_table.set_index('volt_value')['ind'].to_dict()
wf_in_digital_count = xr.apply_ufunc(
volt_to_digital_count.__getitem__,
ds.rec_wf.astype(float).round(6).fillna(-0.195279),
vectorize=True,
dask='parallelized',
output_dtypes=[int],
)
ds = get_dist_metric_value(ds, metric='adj_ground_peak_dist_actual_wf')
# the processed wf is from sig beg to sig end, select adj ground peak to sig end instead
ground_energy = select_valid_area(
bins=ds.rec_wf_sample_dist,
wf=wf_in_digital_count,
signal_begin_dist=ds.adj_ground_peak_dist_actual_wf,
signal_end_dist=ds.sig_end_dist,
)
# make sure dimensions matches up
dims = ds.processed_wf.dims
ground_energy = ground_energy.transpose(dims[0], dims[1])
senergy_whrc = ground_energy.sum(dim="rec_bin") * 2
return -4.397006 + 0.006208 * senergy_whrc
| 19,563
|
def profile(args, model, model_info, device):
"""
Profile.
:param model:
:param model_info:
:return:
"""
import copy
from torch.profiler import profile, record_function, ProfilerActivity
model = copy.deepcopy(model)
model = model.to(device)
model.eval()
inputs = tuple(
torch.ones((args.batch_size,) + model_info["input_shapes"][k][1:], dtype=torch.float32).to(
device
)
for k in model_info["input_names"]
)
for x in inputs:
print(x.shape, x.device)
def trace_handler(p):
output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=50)
print(output)
p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json")
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
schedule=torch.profiler.schedule(wait=2, warmup=2, active=6, repeat=2),
on_trace_ready=trace_handler,
) as p:
for idx in range(100):
model(*inputs)
p.step()
| 19,564
|
def get_previous_version(versions: dict, app: str) -> str:
"""Looks in the app's .version_history to retrieve the prior version"""
try:
with open(f"{app}/.version_history", "r") as fh:
lines = [line.strip() for line in fh]
except FileNotFoundError:
logging.warning(f"No .version_history for {app}")
return ""
if versions[app] != lines[-1]:
logging.warning(
f"Mismatch in data:\n\tCurrent version is {versions[app]}"
f" but most recent line in .version_history is {lines[-1]}"
)
return ""
elif len(lines) < 2:
logging.warning("No prior version recorded")
return ""
return lines[-2]
| 19,565
|
def validate_marktlokations_id(self, marktlokations_id_attribute, value):
"""
A validator for marktlokations IDs
"""
if not value:
raise ValueError("The marktlokations_id must not be empty.")
if not _malo_id_pattern.match(value):
raise ValueError(f"The {marktlokations_id_attribute.name} '{value}' does not match {_malo_id_pattern.pattern}")
expected_checksum = _get_malo_id_checksum(value)
actual_checksum = value[10:11]
if expected_checksum != actual_checksum:
# pylint: disable=line-too-long
raise ValueError(
f"The {marktlokations_id_attribute.name} '{value}' has checksum '{actual_checksum}' but '{expected_checksum}' was expected."
)
| 19,566
|
def healthcheck() -> bool:
"""FastAPI server healthcheck."""
return True
| 19,567
|
def test_unsigned_short_enumeration004_1782_unsigned_short_enumeration004_1782_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : facet=enumeration and value=0 1 234
and document value=0
"""
assert_bindings(
schema="msData/datatypes/Facets/unsignedShort/unsignedShort_enumeration004.xsd",
instance="msData/datatypes/Facets/unsignedShort/unsignedShort_enumeration004.xml",
class_name="Test",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 19,568
|
def copy_media_files(from_dir, to_dir, exclude=None, dirty=False):
"""
Recursively copy all files except markdown and exclude[ed] files into another directory.
`exclude` accepts a list of Unix shell-style wildcards (`['*.py', '*.pyc']`).
Note that `exclude` only operates on file names, not directories.
"""
for (source_dir, dirnames, filenames) in os.walk(from_dir, followlinks=True):
relative_path = os.path.relpath(source_dir, from_dir)
output_dir = os.path.normpath(os.path.join(to_dir, relative_path))
# Filter file names using Unix pattern matching
# Always filter file names starting with a '.'
exclude_patterns = ['.*']
exclude_patterns.extend(exclude or [])
for pattern in exclude_patterns:
filenames = [f for f in filenames if not fnmatch.fnmatch(f, pattern)]
# Filter the dirnames that start with a '.' and update the list in
# place to prevent us walking these.
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
for filename in filenames:
if not is_markdown_file(filename):
source_path = os.path.join(source_dir, filename)
output_path = os.path.join(output_dir, filename)
# Do not copy when using --dirty if the file has not been modified
if dirty and (modified_time(source_path) < modified_time(output_path)):
continue
copy_file(source_path, output_path)
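# Minimal usage sketch for copy_media_files (added for illustration; the paths and patterns
# are hypothetical): copy everything except markdown files, Python sources/bytecode and
# dotfiles from 'docs' into 'site'.
copy_media_files("docs", "site", exclude=["*.py", "*.pyc"])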
| 19,569
|
def frames_to_masks(
nproc: int,
out_paths: List[str],
colors_list: List[List[np.ndarray]],
poly2ds_list: List[List[List[Poly2D]]],
with_instances: bool = True,
) -> None:
"""Execute the mask conversion in parallel."""
with Pool(nproc) as pool:
pool.starmap(
partial(frame_to_mask, with_instances=with_instances),
tqdm(
zip(out_paths, colors_list, poly2ds_list),
total=len(out_paths),
),
)
| 19,570
|
def test_branched_history_with_mergepoint(pytester):
"""Branched history can be navigated, when there's a mergepoint present."""
run_pytest(pytester, passed=5)
| 19,571
|
def ToHexStr(num):
"""
将返回的错误码转换为十六进制显示
:param num: 错误码 字符串
:return: 十六进制字符串
"""
chaDic = {10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f'}
hexStr = ""
if num < 0:
num = num + 2**32
while num >= 16:
digit = num % 16
hexStr = chaDic.get(digit, str(digit)) + hexStr
num //= 16
hexStr = chaDic.get(num, str(num)) + hexStr
return hexStr
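# Minimal usage sketch for ToHexStr (added for illustration, not part of the original source):
# negative codes are wrapped to their unsigned 32-bit representation.
assert ToHexStr(255) == 'ff'
assert ToHexStr(-1) == 'ffffffff'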
| 19,572
|
def getSBMLFromBiomodelsURN(urn):
""" Get SBML string from given BioModels URN.
Searches for a BioModels identifier in the given urn and retrieves the SBML from biomodels.
For example:
urn:miriam:biomodels.db:BIOMD0000000003.xml
Handles redirects of the download page.
:param urn:
:return: SBML string for given model urn
"""
if ":" not in urn:
raise ValueError("The URN", urn, "is not in the correct format: it must be divided by colons in a format such as 'urn:miriam:biomodels.db:BIOMD0000000003.xml'.")
core = urn.split(":")[-1].split(".")[0]
url = "https://www.ebi.ac.uk/biomodels/model/download/" + core + "?filename="+ core + "_url.xml"
response = requests.get(url, allow_redirects=True)
response.raise_for_status()
sbml = response.content
# bytes array in py3
try:
sbml_str = str(sbml.decode("utf-8"))
    except UnicodeDecodeError:
sbml_str = str(sbml)
return sbml_str
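# Minimal usage sketch for getSBMLFromBiomodelsURN (added for illustration; performs a live
# HTTP request to EBI BioModels, using the example URN from the docstring):
sbml_string = getSBMLFromBiomodelsURN("urn:miriam:biomodels.db:BIOMD0000000003.xml")
print(sbml_string[:200])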
| 19,573
|
def dataclass_validate(instance: Any) -> None:
"""Ensure values in a dataclass are correct types.
Note that this will always fail if a dataclass contains field types
not supported by this module.
"""
_dataclass_validate(instance, dataclasses.asdict(instance))
| 19,574
|
def draw(k, n):
"""
Select k things from a pool of n without replacement.
"""
# At k == n/4, an extra 0.15*k draws are needed to get k unique draws
if k > n/4:
result = rng.permutation(n)[:k]
else:
s = set()
result = np.empty(k, 'i')
for i in range(k):
p = rng.randint(n)
while p in s:
p = rng.randint(n)
s.add(p)
result[i] = p
return result
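# Minimal usage sketch for draw (added for illustration; assumes the module defines `rng`
# as a numpy random generator, e.g. rng = numpy.random.RandomState(0), since draw relies on
# rng.permutation and rng.randint):
picks = draw(3, 10)
assert len(set(int(p) for p in picks)) == 3 and all(0 <= p < 10 for p in picks)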
| 19,575
|
def print_version(args):
"""Print the version (short or long)"""
# Long version
if len(args) > 0 and args[0] == '--full':
apk_version = dtfglobals.get_generic_global(
dtfglobals.CONFIG_SECTION_CLIENT, 'apk_version')
bundle_version = dtfglobals.get_generic_global(
dtfglobals.CONFIG_SECTION_BINDINGS, 'version')
python_version = constants.VERSION
print("Python Version: %s" % python_version)
print("dtfClient Version: %s" % apk_version)
print("Bindings Version Date: %s" % bundle_version)
else:
print(constants.VERSION)
return 0
| 19,576
|
async def test_get_start_entries_by_race_id(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
race: dict,
start_entry: dict,
) -> None:
"""Should return OK and a valid json body."""
START_ENTRY_ID = start_entry["id"]
mocker.patch(
"race_service.adapters.start_entries_adapter.StartEntriesAdapter.get_start_entries_by_race_id",
return_value=[start_entry],
)
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://users.example.com:8081/authorize", status=204)
resp = await client.get(f'races/{race["id"]}/start-entries')
assert resp.status == 200
assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
start_entries = await resp.json()
assert type(start_entries) is list
assert len(start_entries) > 0
assert START_ENTRY_ID == start_entries[0]["id"]
| 19,577
|
def naturalTimeDifference(value):
"""
Finds the difference between the datetime value given and now()
and returns appropriate humanize form
"""
from datetime import datetime
if isinstance(value, datetime):
delta = datetime.now() - value
if delta.days > 6:
return value.strftime("%b %d") # May 15
if delta.days > 1:
return value.strftime("%A") # Wednesday
elif delta.days == 1:
return 'yesterday' # yesterday
elif delta.seconds > 3600:
if delta.seconds < 7200:
return '1 hour ago'
else:
                return str(delta.seconds // 3600) + ' hours ago'  # 3 hours ago
elif delta.seconds > 60:
if delta.seconds < 120:
return '1 minute ago'
else:
                return str(delta.seconds // 60) + ' minutes ago'  # 29 minutes ago
elif delta.seconds > 10:
return str(delta.seconds) + ' seconds ago' # 15 seconds ago
else:
return 'a moment ago' # a moment ago
return defaultfilters.date(value)
else:
return str(value)
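# Minimal usage sketch for naturalTimeDifference (added for illustration; assumes the
# integer-division fix above so whole numbers are printed):
from datetime import datetime, timedelta
print(naturalTimeDifference(datetime.now() - timedelta(minutes=29)))  # 29 minutes ago
print(naturalTimeDifference(datetime.now() - timedelta(hours=3)))     # 3 hours ago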
| 19,578
|
def test_specific_location(client, user, db_setup, tag_data):
"""specific_location is an optional field. If it is included in the
posted data, it will be correctly associated with the recovery
object in the database.
"""
report = Report.objects.get(reported_by__first_name="Homer")
url = reverse("tfat:create_recovery", kwargs={"report_id": report.id})
specific_location = "Right here. Exactly here."
tag_data["specific_location"] = specific_location
client.login(username=user.email, password="Abcd1234")
response = client.post(url, tag_data, follow=True)
assert response.status_code == 200
recoveries = Recovery.objects.all()
assert len(recoveries) == 1
assert recoveries[0].specific_location == specific_location
| 19,579
|
def raises_Invalid(function):
"""A decorator that asserts that the decorated function raises
dictization_functions.Invalid.
Usage:
@raises_Invalid
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
"""
def call_and_assert(*args, **kwargs):
with pytest.raises(df.Invalid):
function(*args, **kwargs)
return call_and_assert
| 19,580
|
def load_dataset(dataset_path: Path) -> [Instruction]:
"""Returns the program as a list of alu instructions."""
with open_utf8(dataset_path) as file:
program = []
for line in file:
if len(line.strip()) > 0:
instruction = line.strip().split(" ")
if len(instruction) == 2:
instruction.append(None) # No b value
else:
try: # B instruction is constant.
instruction[2] = int(instruction[2])
except ValueError:
pass # B instruction is string reference.
program.append(
Instruction(
func=getattr(alu, instruction[0]),
a=instruction[1],
b=instruction[2],
)
)
return program
| 19,581
|
def test_document_delete1(flask_client, user8, dev_app):
"""document() DELETE should delete one item."""
doc = user8.documents.first()
access_token = api.create_token(user8, dev_app.client_id)
response = flask_client.delete(
"/api/documents/" + str(doc.id), headers={"authorization": "Bearer " + access_token}
)
json_data = response.get_json()
# have to do it this way because session is no longer available
docs = models.Documents.query.filter_by(user_id=user8.id).all()
assert (
response.status_code == 200 and json_data["message"] == "Item deleted." and len(docs) == 3
)
| 19,582
|
def process_info(args):
"""
Process a single json file
"""
fname, opts = args
with open(fname, 'r') as f:
ann = json.load(f)
f.close()
examples = []
skipped_instances = 0
for instance in ann:
components = instance['components']
if len(components[0]['poly']) < 3:
continue
        if 'class_filter' in opts and instance['label'] not in opts['class_filter']:
continue
# if instance['image_url'].find('Bhoomi') == -1:
# continue
candidates = [c for c in components]
instance['components'] = candidates
if candidates:
examples.append(instance)
return examples, skipped_instances
| 19,583
|
def test_uncharge():
"""All charges should be neutralized."""
assert _uncharge_smiles("CNCC([O-])C[O-]") == "CNCC(O)CO"
| 19,584
|
def logout():
    """
    Log the user out
    :return:
    """
    # pop removes an entry from the session (a dict); it returns the removed value,
    # or None if the key does not exist
    session.pop('user_id', None)
    session.pop('mobile', None)
    session.pop('nick_name', None)
    # The is_admin value must also be cleared; otherwise, after an admin logs out and a
    # regular user logs in, the admin backend would still be accessible
    session.pop('is_admin', None)
    return jsonify(errno=RET.OK, errmsg="退出成功")
| 19,585
|
def test_dun_depth_method_on_single_bst(bst):
"""Test the _depth method on single bst."""
assert bst._depth(bst.root) == 1
| 19,586
|
def lift_to_dimension(A,dim):
"""
Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
Assumes a numpy array as input
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""
current_dim = len(A.shape)
if current_dim>dim:
raise ValueError('Can only add dimensions, but not remove them')
if current_dim==dim:
return A
else:
return A.reshape([1]*(dim-current_dim)+list(A.shape))
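# Minimal usage sketch for lift_to_dimension (added for illustration): dummy leading axes
# are prepended until the requested dimension is reached.
import numpy as np
a = np.ones((3, 4))
assert lift_to_dimension(a, 4).shape == (1, 1, 3, 4)
assert lift_to_dimension(a, 2) is a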
| 19,587
|
def search(keyword=None):
"""
Display search results in JSON format
Parameters
----------
keyword : str
Search keyword. Default None
"""
return get_json(False, keyword)
| 19,588
|
def min_offerings(heights: List[int]) -> int:
"""
Get the max increasing sequence on the left and the right side of current index,
leading upto the current index.
current index's value would be the max of both + 1.
"""
length = len(heights)
if length < 2:
return length
left_inc = [0] * length
right_inc = [0] * length
for index in range(1, length):
if heights[index] > heights[index - 1]:
left_inc[index] = left_inc[index - 1] + 1
if heights[length - 1 - index] > heights[length - index]:
right_inc[length - 1 - index] = right_inc[length - index] + 1
return sum(1 + max(left_inc[index], right_inc[index]) for index in range(length))
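# Minimal usage sketch for min_offerings (added for illustration): every position gets at
# least one offering, and a strictly taller position must get more than its shorter neighbour.
assert min_offerings([1, 2, 2]) == 4        # offerings 1, 2, 1
assert min_offerings([1, 2, 2, 3, 1]) == 7  # offerings 1, 2, 1, 2, 1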
| 19,589
|
def pretty(value, width=80, nl_width=80, sep='\n', **kw):
# type: (str, int, int, str, **Any) -> str
"""Format value for printing to console."""
if isinstance(value, dict):
return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:])
elif isinstance(value, tuple):
return '{}{}{}'.format(
sep, ' ' * 4, pformat(value, width=nl_width, **kw),
)
else:
return pformat(value, width=width, **kw)
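# Minimal usage sketch for pretty (added for illustration): dicts, tuples and other values
# take slightly different formatting paths.
print(pretty({'broker': 'amqp://', 'backend': 'redis://'}))
print(pretty((1, 2, 3)))
print(pretty('just a string'))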
| 19,590
|
def get_csv_file_path(file_name: str) -> str:
"""
Get absolute path to csv metrics file
Parameters
----------
file_name
Name of metrics file
Returns
-------
file_path
Full path to csv file
"""
return os.path.join(os.getcwd(), file_name)
| 19,591
|
def test_drop_mrel_column(pipeline, clean_db):
"""
Verify that we can't drop matrel columns
"""
pipeline.create_stream('mrel_drop_s', x='integer')
q = """
SELECT x, sum(x), avg(x), count(*) FROM mrel_drop_s GROUP BY x
"""
pipeline.create_cv('mrel_drop_cv', q)
for col in ('x', 'sum', 'avg', 'count'):
with pytest.raises(psycopg2.InternalError):
pipeline.execute('ALTER TABLE mrel_drop_cv_mrel DROP COLUMN %s' % col)
| 19,592
|
def failed(config: dict, app_logger: logger.Logger) -> bool:
"""
Set migration status to FAILED.
:param config: pymigrate configuration.
:param app_logger: pymigrate configured logger.
:return: True on success, False otherwise.
"""
app_logger.log_with_ts('Running status_failed action for migration {0}'.format(config['MIGRATION_ID']),
logger.Levels.DEBUG)
migrations_directory_path = os.path.join(os.pardir, config['PROJECT_DIR'] + '/' + config['MIGRATIONS_DIR'])
return migration.set_status_failed(config['MIGRATION_ID'], app_logger, migrations_directory_path)
| 19,593
|
async def test_face_event_call_no_confidence(mock_config, hass, aioclient_mock):
"""Set up and scan a picture and test faces from event."""
face_events = await setup_image_processing_face(hass)
aioclient_mock.get(get_url(hass), content=b"image")
common.async_scan(hass, entity_id="image_processing.demo_face")
await hass.async_block_till_done()
state = hass.states.get("image_processing.demo_face")
assert len(face_events) == 3
assert state.state == "4"
assert state.attributes["total_faces"] == 4
event_data = [
event.data for event in face_events if event.data.get("name") == "Hans"
]
assert len(event_data) == 1
assert event_data[0]["name"] == "Hans"
assert event_data[0]["confidence"] == 98.34
assert event_data[0]["gender"] == "male"
assert event_data[0]["entity_id"] == "image_processing.demo_face"
| 19,594
|
def niceNumber(v, maxdigit=6):
"""Nicely format a number, with a maximum of 6 digits."""
assert(maxdigit >= 0)
if maxdigit == 0:
return "%.0f" % v
fmt = '%%.%df' % maxdigit
s = fmt % v
if len(s) > maxdigit:
return s.rstrip("0").rstrip(".")
elif len(s) == 0:
return "0"
else:
return s
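# Minimal usage sketch for niceNumber (added for illustration): trailing zeros and a
# trailing decimal point are stripped.
assert niceNumber(2.0) == "2"
assert niceNumber(3.14159, maxdigit=4) == "3.1416"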
| 19,595
|
def query_incident(conditions: list, method=None, plan_status="A", multiple_fields=False):
    """
    Queries incidents in Resilient/CP4S
    :param conditions: list of conditions as [field_name, field_value, method], or a list of such condition lists if multiple_fields==True
    :param method: set all field conditions to this method (saves the user from typing it for each field)
    :param plan_status: "A" == Active, "C" == Closed
    :param multiple_fields: query more than one field
    """
def buildConditionDict(conditions, method=method):
return {
'field_name': conditions[0],
'value': conditions[1],
"method": method if method else conditions[2],
}
conditionList = []
query_uri = u"/incidents/query?return_level=normal"
    if not multiple_fields:
conditionList.append(buildConditionDict(conditions))
query_uri += u"&field_handle={}".format(conditions[0])
else:
for condition in conditions:
conditionList.append(buildConditionDict(condition))
query_uri += u"&field_handle={}".format(condition[0])
conditionList.append({
'field_name': 'plan_status',
'method': 'equals',
'value': plan_status
})
query = {
'filters': [{
'conditions': conditionList
}],
"sorts": [{
"field_name": "create_date",
"type": "desc"
}]
}
client = create_authenticated_client()
return client.post(query_uri, query)
| 19,596
|
def test_no_value():
"""When no value is given we should return Failure."""
test = [[], '-']
with pytest.raises(UnwrapFailedError):
apply_separator(*test).unwrap()
| 19,597
|
def get_banner():
"""Return a banner message for the interactive console."""
global _CONN
result = ''
# Note how we are connected
result += 'Connected to %s' % _CONN.url
if _CONN.creds is not None:
result += ' as %s' % _CONN.creds[0]
# Give hint about exiting. Most people exit with 'quit()' which will
# not return from the interact() method, and thus will not write
# the history.
result += '\nPress Ctrl-D to exit'
return result
| 19,598
|
def generate_fish(
n,
channel,
interaction,
lim_neighbors,
neighbor_weights=None,
fish_max_speeds=None,
clock_freqs=None,
verbose=False,
names=None
):
"""Generate some fish
Arguments:
n {int} -- Number of fish to generate
channel {Channel} -- Channel instance
interaction {Interaction} -- Interaction instance
lim_neighbors {list} -- Tuple of min and max neighbors
        neighbor_weights {float|list} -- Neighbor weight, or list of per-fish neighbor weights
fish_max_speeds {float|list} -- List of max speeds
clock_freqs {int|list} -- List of clock speeds
names {list} -- List of names for your fish
"""
if neighbor_weights is None:
neighbor_weights = [1.0] * n
elif not isinstance(neighbor_weights, list):
neighbor_weights = [neighbor_weights] * n
if fish_max_speeds is None:
fish_max_speeds = [1.0] * n
elif not isinstance(fish_max_speeds, list):
fish_max_speeds = [fish_max_speeds] * n
if clock_freqs is None:
clock_freqs = [1] * n
elif not isinstance(clock_freqs, list):
clock_freqs = [clock_freqs] * n
if names is None:
names = ['Unnamed'] * n
fish = []
for i in range(n):
fish.append(Fish(
id=i,
channel=channel,
interaction=interaction,
lim_neighbors=lim_neighbors,
neighbor_weight=neighbor_weights[i],
fish_max_speed=fish_max_speeds[i],
clock_freq=clock_freqs[i],
verbose=verbose,
name=names[i]
))
return fish
| 19,599
|