content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def main():
    """Command line execution of this building block. Please check the command line documentation."""
    # Fix: description previously read "teh DisGeNET" (user-visible typo).
    # The huge formatter width effectively disables argparse's help line-wrapping.
    parser = argparse.ArgumentParser(
        description='This class is a wrapper for an associations call of the DisGeNET database REST API.',
        formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')
    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--retrieve_by', required=True,
                               help='Retrieval factor necessary to define the search of the associations; gene, uniprot entry, disease, source, evidence by disease, evidence by gene available choices.')
    required_args.add_argument('--output_file_path', required=True,
                               help='Description for the output file path. Accepted formats: json, csv or html.')
    args = parser.parse_args()
    config = args.config if args.config else None
    properties = settings.ConfReader(config=config).get_prop_dic()
    # Specific call of each building block
    gda_disgenet(retrieve_by=args.retrieve_by,
                 output_file_path=args.output_file_path,
                 properties=properties)
def app():
    """Required by pytest-tornado's http_server fixture."""
    application = tornado.web.Application()
    return application
def is_bound_builtin_method(meth):
    """Helper returning True if meth is a bound built-in method"""
    # Must be a C-level (built-in) callable at all.
    if not inspect.isbuiltin(meth):
        return False
    owner = getattr(meth, '__self__', None)
    if owner is None:
        return False
    # Truthy (the owner's class) when meth is bound to a receiver object.
    return getattr(owner, '__class__', None)
def normalize(adj):
    """Row-normalize sparse matrix"""
    row_totals = np.array(adj.sum(1))
    inv_totals = np.power(row_totals, -1).flatten()
    # Rows summing to zero invert to inf; zero them out so they stay all-zero.
    inv_totals[np.isinf(inv_totals)] = 0.
    scaling = np.diag(inv_totals)
    return scaling.dot(adj)
def put_bucket_policy(Bucket=None, ConfirmRemoveSelfBucketAccess=None, Policy=None):
    """
    Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner\'s account in order to use this operation.
    If you don\'t have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you\'re not using an identity that belongs to the bucket owner\'s account, Amazon S3 returns a 405 Method Not Allowed error.
    For more information about bucket policies, see Using Bucket Policies and User Policies .
    The following operations are related to PutBucketPolicy :
    See also: AWS API Documentation
    Examples
    The following example sets a permission policy on a bucket.
    Expected Output:
    :example: response = client.put_bucket_policy(
        Bucket='string',
        ConfirmRemoveSelfBucketAccess=True|False,
        Policy='string'
    )
    :type Bucket: string
    :param Bucket: [REQUIRED]\nThe name of the bucket.\n
    :type ConfirmRemoveSelfBucketAccess: boolean
    :param ConfirmRemoveSelfBucketAccess: Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.
    :type Policy: string
    :param Policy: [REQUIRED]\nThe bucket policy as a JSON document.\n
    :return: response = client.put_bucket_policy(
        Bucket='examplebucket',
        Policy='{"Version": "2012-10-17", "Statement": [{ "Sid": "id-1","Effect": "Allow","Principal": {"AWS": "arn:aws:iam::123456789012:root"}, "Action": [ "s3:PutObject","s3:PutObjectAcl"], "Resource": ["arn:aws:s3:::acl3/*" ] } ]}',
    )
    print(response)
    :returns:
    Bucket (string) -- [REQUIRED]
    The name of the bucket.
    ConfirmRemoveSelfBucketAccess (boolean) -- Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.
    Policy (string) -- [REQUIRED]
    The bucket policy as a JSON document.
    """
    # Generated documentation stub: the body is intentionally empty; the real
    # call is dispatched by the boto3-style client machinery elsewhere.
    pass
def get_recommendation_and_prediction_from_text(input_text, num_feats=10):
    """
    Gets a score and recommendations that can be displayed in the Flask app
    :param input_text: input string
    :param num_feats: number of features to suggest recommendations for
    :return: current score along with recommendations
    """
    global MODEL
    features = get_features_from_input_text(input_text)
    # Probability of the positive class for this single sample.
    positive_score = MODEL.predict_proba([features])[0][1]
    print("explaining")
    explanation = EXPLAINER.explain_instance(
        features, MODEL.predict_proba, num_features=num_feats, labels=(1,)
    )
    print("explaining done")
    recommendations = get_recommendation_string_from_parsed_exps(
        parse_explanations(explanation.as_list())
    )
    template = """
Current score (0 is worst, 1 is best):
<br/>
%s
<br/>
<br/>
Recommendations (ordered by importance):
<br/>
<br/>
%s
"""
    return template % (positive_score, recommendations)
def set_title(title, uid='master'):
    """
    Sets a new title of the window
    """
    try:
        _webview_ready.wait(5)
        return gui.set_title(title, uid)
    except KeyError:
        # A window exists, but none is registered under this uid.
        raise Exception('Cannot call function: No webview exists with uid: {}'.format(uid))
    except NameError:
        # gui / _webview_ready are only bound once a window has been created.
        raise Exception('Create a web view window first, before invoking this function')
def FK42FK5MatrixOLDATTEMPT():
    """
    ----------------------------------------------------------------------
    Experimental.
    Create matrix to precess from an epoch in FK4 to an epoch in FK5
    So epoch1 is Besselian and epoch2 is Julian
    1) Do an epoch transformation in FK4 from input epoch to
    1984 January 1d 0h
    2) Apply a zero point correction for the right ascension
    w.r.t. B1950. The formula is:
    E = E0 + E1*(jd-jd1950)/Cb
    E0 = 0.525; E1 = 1.275 and Cb = the length of the tropical
    century (ES 3.59 p 182) = 36524.21987817305
    For the correction at 1984,1,1 the ES lists 0.06390s which is
    0.06390*15=0.9585"
    This function calculated E = 0.958494476885" which agrees with the
    literature.
    3) Transform in FK5 from 1984 January 1d 0h to epoch2
    Note that we do not use the adopted values for the precession angles,
    but use the Woolward and Clemence expressions to calculate the angles.
    These are one digit more accurate than the adopted values.
    ----------------------------------------------------------------------
    """
    # Step 1: epoch transformation from B1950 to 1984 Jan 1.0 within FK4.
    jd_1984 = JD(1984, 1, 1)
    bessel_1984 = JD2epochBessel(jd_1984)
    fk4_precession = BMatrixEpoch12Epoch2(1950.0, bessel_1984)
    # Step 2: equinox (zero point) correction in RA w.r.t. B1950.
    jd_1950 = epochBessel2JD(1950.0)
    tropical_century = 36524.21987817305  # length of the tropical century in days
    correction = 0.525 + 1.275 * (jd_1984 - jd_1950) / tropical_century
    correction /= 3600.0  # arcseconds -> degrees
    # Positive correction, so rotate about the z-axis in the negative sense.
    equinox_rotation = rotZ(-correction)
    # Step 3: epoch transformation from 1984 Jan 1.0 to J2000 within FK5.
    julian_1984 = JD2epochJulian(jd_1984)
    fk5_precession = JMatrixEpoch12Epoch2(julian_1984, 2000.0)
    return fk5_precession * equinox_rotation * fk4_precession
def _lg_undirected(G, selfloops=False, create_using=None):
    """Return the line graph L of the (multi)graph G.
    Edges in G appear as nodes in L, represented as sorted tuples of the form
    (u,v), or (u,v,key) if G is a multigraph. A node in L corresponding to
    the edge {u,v} is connected to every node corresponding to an edge that
    involves u or v.
    Parameters
    ----------
    G : graph
        An undirected graph or multigraph.
    selfloops : bool
        If `True`, then self-loops are included in the line graph. If `False`,
        they are excluded.
    create_using : None
        A graph instance used to populate the line graph.
    Notes
    -----
    The standard algorithm for line graphs of undirected graphs does not
    produce self-loops.
    """
    L = G.__class__() if create_using is None else create_using
    # Graph-specific accessors for edges and canonically ordered node labels.
    get_edges = _edge_func(G)
    sorted_node = _node_func(G)
    # shift 0 pairs each edge with itself too, yielding self-loops in L.
    shift = 0 if selfloops else 1
    edges = set()
    for u in G:
        # Every edge incident to u becomes a node of L, labelled canonically.
        nodes = [sorted_node(*e) for e in get_edges(u)]
        if len(nodes) == 1:
            # A lone incident edge yields an isolated node in L.
            L.add_node(nodes[0])
        # Connect every pair of edges sharing u. Storing canonical forms in a
        # set prevents double-adding, which matters for multigraphs.
        for idx, a in enumerate(nodes):
            edges.update(_sorted_edge(a, b) for b in nodes[idx + shift:])
    L.add_edges_from(edges)
    return L
def get_utxo_provider_client(utxo_provider, config_file):
    """
    Get or instantiate our blockchain UTXO provider's client.
    Return None if we were unable to connect
    """
    utxo_opts = default_utxo_provider_opts(utxo_provider, config_file)
    try:
        return connect_utxo_provider(utxo_opts)
    except Exception as e:
        # Fix: "except Exception, e" is Python-2-only syntax and is a
        # SyntaxError under Python 3 (the rest of this codebase uses f-strings).
        log.exception(e)
        return None
def metadata(
    sceneid: str,
    pmin: float = 2.0,
    pmax: float = 98.0,
    hist_options: Dict = None,
    **kwargs: Any,
) -> Dict:
    """
    Return band bounds and statistics.
    Attributes
    ----------
    sceneid : str
        CBERS sceneid.
    pmin : int, optional, (default: 2)
        Histogram minimum cut.
    pmax : int, optional, (default: 98)
        Histogram maximum cut.
    hist_options : dict, optional
        Options to forward to numpy.histogram function.
        e.g: {bins=20, range=(0, 1000)}
    kwargs : optional
        These are passed to 'rio_tiler.reader.preview'
    Returns
    -------
    out : dict
        Dictionary with bounds and bands statistics.
    """
    # Fix: a `{}` default is a shared mutable default argument; use None and
    # create a fresh dict per call instead.
    if hist_options is None:
        hist_options = {}
    scene_params = cbers_parser(sceneid)
    cbers_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
    bands = scene_params["bands"]
    addresses = [f"{cbers_prefix}_BAND{band}.tif" for band in bands]
    responses = reader.multi_metadata(
        addresses,
        indexes=[1],
        nodata=0,
        percentiles=(pmin, pmax),
        hist_options=hist_options,
        **kwargs,
    )
    info: Dict[str, Any] = dict(sceneid=sceneid)
    info["instrument"] = scene_params["instrument"]
    info["band_descriptions"] = [(ix + 1, b) for ix, b in enumerate(bands)]
    # Bounds are taken from the reference band's response only.
    info["bounds"] = [
        r["bounds"]
        for b, r in zip(bands, responses)
        if b == scene_params["reference_band"]
    ][0]
    info["statistics"] = {b: d["statistics"][1] for b, d in zip(bands, responses)}
    return info
def size_to_string(volume_size):
    # type: (int) -> str
    """
    Convert a volume size to string format to pass into Kubernetes.
    Args:
        volume_size: The size of the volume in bytes.
    Returns:
        The size of the volume in gigabytes as a passable string to Kubernetes.
    """
    # Pick the largest unit that fits; the shift converts bytes to that unit.
    if volume_size >= Gi:
        shift, suffix = 30, 'Gi'
    elif volume_size >= Mi:
        shift, suffix = 20, 'Mi'
    else:
        shift, suffix = 10, 'Ki'
    return str(volume_size >> shift) + suffix
def compute(model_path: Path = typer.Argument(..., exists=True, file_okay=True, dir_okay=False)):
    """Compute vertices/edge data from blender model."""
    model_path = Path(model_path)
    blender_script = Path(__file__).parent / "compute.py"
    assert blender_script.exists(), "Failed to find script path."
    # Output JSON sits next to the model, same stem.
    data_output = model_path.with_suffix(".json")
    threedframe.utils.exec_blender_script(Path(model_path), blender_script, data_output)
    print("[bold green]✔ Done!")
    print(f"[bold white]Data written to: [cyan]{data_output.absolute()}")
def argToDic(arg):
    """
    Converts a parameter sequence into a dict.
    Args:
        arg (string): specified simulation parameters."""
    params = {}
    tokens = arg.split("_")
    # A leading token without '=' names the simulation mode.
    if "=" in tokens[0]:
        params["mode"] = ""
    else:
        params["mode"] = tokens.pop(0)
    # Remaining tokens are key=value pairs such as "M=2".
    for token in tokens:
        parts = token.split("=")
        value = parseValue(parts[1])
        # Iteration-count parameters are forced to int.
        if "IT" in parts[0]:
            value = int(value)
        params[parts[0]] = value
    return params
def run_pack():
    """
    run package nuke project plugin
    :return:
    """
    # Show the packaging dialog modally.
    dialog = nuke2pack.PackageDialog()
    dialog.exec_()
def test_handler_review_submitted_with_mention(monkeypatch):
    """Smoke-test handler_review_submitted against sample webhook payloads."""
    header_path = SCRIPT_PATH.parent / "testdata/review-submitted-header.json"
    body_path = SCRIPT_PATH.parent / "testdata/review-submitted-body-with-mention.json"
    header = json.loads(header_path.read_text())
    body = json.loads(body_path.read_text())
    import github_webhook_lambda
    slack_mock = MagicMock()
    monkeypatch.setattr(github_webhook_lambda, "GITHUB_TO_SLACK", {"@smatsumt": "@smatsumt", "@smatsumt2": "@smatsumt2"})
    monkeypatch.setattr(github_webhook_lambda, "notify_slack", slack_mock)
    github_webhook_lambda.handler_review_submitted(header, body)
    args, kwargs = slack_mock.call_args
    assert args[0] == ':speech_balloon: <@smatsumt> <@smatsumt2>, *review commented* by skawagt in https://github.com/smatsumt/testrepo2/pull/2#pullrequestreview-394584166'
    assert kwargs["attach_message"] == "@smatsumt2 yappari comment"
def _installed_snpeff_genome(config_file, base_name):
    """Find the most recent installed genome for snpEff with the given name.
    """
    data_dir = _find_snpeff_datadir(config_file)
    pattern = os.path.join(data_dir, "%s*" % base_name)
    # Reverse sort so the lexicographically greatest (most recent) comes first.
    candidates = [d for d in sorted(glob.glob(pattern), reverse=True)
                  if os.path.isdir(d)]
    if not candidates:
        raise ValueError("No database found in %s for %s" % (data_dir, base_name))
    return os.path.split(candidates[0])[-1]
def geth2hforplayer(matches, name):
    """get all head-to-heads of the player"""
    # Keep only matches the player took part in.
    matches = matches[(matches['winner_name'] == name) | (matches['loser_name'] == name)]
    h2hs = {}
    for _, match in matches.iterrows():
        if match['winner_name'] == name:
            opponent, outcome = match['loser_name'], 'w'
        else:
            opponent, outcome = match['winner_name'], 'l'
        if opponent not in h2hs:
            h2hs[opponent] = {'w': 0, 'l': 0}
        h2hs[opponent][outcome] += 1
    # Flatten to [opponent, wins, losses] rows.
    records = [[opponent, tally['w'], tally['l']] for opponent, tally in h2hs.items()]
    if not records:
        return ''
    # Ascending by wins, then by losses.
    return sorted(records, key=lambda rec: (rec[1], rec[2]))
def test_best_codeblocks_have_no_lint_errors(lint_codeblock, best_codeblock):
    """Test that "best" codeblocks do not fail to lint."""
    outcome = lint_codeblock(best_codeblock)
    # On failure, show the linter output in the assertion message.
    assert outcome, outcome.output
def cli():
    """A unified CLI for the PHYLUCE software package."""
    # Intentionally empty body — presumably the top-level CLI entry point whose
    # subcommands are registered elsewhere (TODO confirm against the caller).
    pass
def backtracking_solver(
        starting_event: Event,
        **kwargs) -> FiniteSequence:
    """Compose a melodic sequence based upon the
    domain and constraints given.
    starting_event: Event dictate the starting pitch.
    All subsequent events will be of similar duration.
    constraints - list of constraint functions
    (see composerstoolkit.composers.constraints)
    heuristics - list of heuristics (weight maps)
    that can be used to provide a rough shape to the line
    (see composerstoolkit.composers.heuristics)
    n_events - the number of notes of the desired target
    sequence. (Default 1)
    """
    # Merge caller-supplied options over the defaults.
    opts = {
        "constraints": [],
        "heuristics": [],
        "n_events": 1
    }
    opts.update(kwargs)
    constraints = opts["constraints"]
    heuristics = opts["heuristics"]
    n_events = opts["n_events"]
    tick = 0  # index of the event currently being chosen (0 = starting event)
    seq = FiniteSequence([starting_event])
    use_weights = len(heuristics) > 0
    if n_events == 1:
        # Nothing to solve: the sequence is just the starting event.
        return FiniteSequence(seq)
    # The starting event itself must satisfy every constraint before we begin.
    results = set()
    for constraint in constraints:
        results.update([constraint(seq)])
    if results != {True}:
        raise InputViolatesConstraints("Unable to solve!")
    # Candidate pitch domain; rebuilt whenever we advance or backtrack.
    choices = list(range(NOTE_MIN, NOTE_MAX))
    dead_paths = []  # partial sequences proven unsolvable, never revisited
    while tick < n_events-1:
        if use_weights:
            # Start from uniform weights, then let each heuristic reshape them.
            weights= [1.0 for i in range(len(choices))]
            for heuristic in heuristics:
                weights = heuristic(tick, choices, weights)
        try:
            if use_weights:
                note = Event([random.choices(choices, weights)[0]], starting_event.duration)
            else:
                note = Event([random.choice(choices)], starting_event.duration)
        except IndexError:
            # this was thrown because we ran out of choices (we have reached a dead-end)
            # Record the dead end, drop the last event and step back one tick.
            dead_paths.append(seq[:])
            seq = seq[:-1]
            tick = tick -1
            choices = list(range(NOTE_MIN, NOTE_MAX))
            if tick == 0:
                # Backtracked all the way to the start: no solution exists.
                raise AllRoutesExhausted("Unable to solve!")
            continue
        # Evaluate the constraints against the sequence extended by the new note.
        context = FiniteSequence(seq.events[:])
        context.events.append(note)
        results = set()
        for constraint in constraints:
            results.update([constraint(context)])
        candidate = seq[:]
        candidate.events.append(note)
        if results == {True} and candidate not in dead_paths:
            # Accept the note, advance, and reset the pitch domain.
            seq.events.append(note)
            tick = tick + 1
            choices = list(range(NOTE_MIN, NOTE_MAX))
        else:
            #this choice was bad, so we must exclude it
            choices.remove(note.pitches[-1])
    return seq
def banner_print(msg, color='', width=60, file=sys.stdout):
    """Print the message as a banner with a fixed width.
    Args:
        msg: The message to print.
        color: Optional colorama color string to be applied to the message. You can
            concatenate colorama color strings together in order to get any set of
            effects you want.
        width: Total width for the resulting banner.
        file: A file object to which the banner text will be written. Intended for
            use with CLI output file objects like sys.stdout.
    Example:
        >>> banner_print('Foo Bar Baz')
        ======================== Foo Bar Baz =======================
    """
    # Padding = width minus the message and its two surrounding spaces;
    # the left side takes the extra '=' when the split is uneven.
    pad_total = width - len(msg) - 2
    lpad = '=' * int(math.ceil(pad_total / 2.0))
    rpad = '=' * int(math.floor(pad_total / 2.0))
    file.write('{color}{lpad} {msg} {rpad}{reset}\n'.format(
        color=color, lpad=lpad, msg=msg, rpad=rpad,
        reset=colorama.Style.RESET_ALL))
    file.flush()
def _save_conf_file(cfg):
    """
    Given a ConfigParser object, it saves its contents to the config
    file location, overwriting any previous contents
    """
    conf_path = _locate_conf_file()
    # Fix: the handle was previously left open if cfg.write() raised;
    # the context manager guarantees it is closed on every exit path.
    with open(conf_path, "w") as f:
        cfg.write(f)
    # Restrict to owner read/write, since the file tends to hold credentials.
    # Bitwise OR is the conventional way to combine mode flags (same value
    # as the original '+' here, but cannot double-count a repeated flag).
    os.chmod(conf_path, stat.S_IRUSR | stat.S_IWUSR)
def jdos(bs, f, i, occs, energies, kweights, gaussian_width, spin=Spin.up):
    """
    Args:
        bs: bandstructure object
        f: final band
        i: initial band
        occs: occupancies over all bands.
        energies: energy mesh (eV)
        kweights: k-point weights
        gaussian_width: width of gaussian plot.
        spin: Which spin channel to include.
    Returns:
        Cumulative JDOS value for a specific i->f transition, with consideration of
        partial occupancy and spin polarisation.
    """
    total = np.zeros(len(energies))
    for k in range(len(bs.bands[spin][i])):
        init_energy = bs.bands[spin][i][k]
        final_energy = bs.bands[spin][f][k]
        init_occ = occs[i][k]
        final_occ = occs[f][k]
        # Net transition weight at this k-point: occupied->empty minus the
        # reverse process, scaled by the k-point weight.
        factor = kweights[k] * (
            (init_occ * (1 - final_occ)) - (final_occ * (1 - init_occ))
        )
        total += factor * gaussian(
            energies, gaussian_width, center=final_energy - init_energy
        )
    return total
def findConstantMetrics(inpath):
    """
    Simple function that checks which metrics in a dictionary (read from a CSV) are constant and which change over time.
    As a reference, the first record read from the file is used
    :param inpath: The path to the CSV file that must be analyzed
    :return: The list of metrics (keys) that are constant in the file
    """
    # Fix: use a context manager so the handle is closed even if an
    # unexpected exception escapes the read loop (the original only closed
    # the file on the paths it anticipated).
    with open(inpath, 'r') as infile:
        reader = DictReader(infile)
        try:
            # The first record is the reference for "constant".
            metricSet = next(reader)
        except (StopIteration, IOError):
            # Empty or unreadable file: nothing is constant.
            return []
        line = metricSet
        while line is not None:
            # Drop every key whose value in this row differs from the reference.
            for k in [key for key in metricSet if line[key] != metricSet[key]]:
                metricSet.pop(k)
            try:
                line = next(reader)
            except (StopIteration, IOError):
                line = None
    return list(metricSet.keys())
def scoreGold(playerList, iconCount, highScore):
    """Update each players' score based on the amount of gold that they have collected.
    Args:
        playerList: A list of all PlayerSprite objects in the game.
        iconCount: A list of integers representing how many times each player has gained points from the
            scoreGold function this level.
        highScore: An integer showing the current high score.
    Returns:
        looping: A boolean indicating if scoreLevel should call this function again.
        iconCount: A list of integers representing how many times each player has gained points from the
            scoreGold function this level.
        scoreText: A list of the current scores for each of the players.
        iconCountText: A list of text objects representing each player's iconCount value.
        highScore: An integer showing the current high score.
    """
    scoreText = []
    iconCountText = []
    checkQuitGame()
    checkPauseGameWithInput(playerList)
    if any(player.goldCollectedCount > 0 for player in playerList):
        playSound("count_points.wav")
    # Each call awards 100 points per player until their iconCount catches up
    # with the amount of gold they collected this level.
    for num, player in enumerate(playerList):
        if player.goldCollectedCount > iconCount[num]:
            player.score += 100
            iconCount[num] += 1
        scoreText.append(c.FONT.render("{:06d}PTS.".format(player.score % 1000000), False, c.WHITE))
        iconCountText.append(c.FONT.render("+{:02d}".format(iconCount[num] % 100), False, c.WHITE))
    highScore = compareHighScore(playerList, highScore)
    # Keep looping until every player's iconCount matches their collected gold.
    looping = not all(iconCount[num] == player.goldCollectedCount
                      for num, player in enumerate(playerList))
    return looping, iconCount, scoreText, iconCountText, highScore
def make_clone(dst_res, settings, interp_soilthick, M_method, M_minmax, directory_in, directory_out, clonefile, logger):
    """
    Creates maps for wflow model (staticmaps).
    Parameters:
    dst_res : [float] resolution of output maps
    settings : [string] path to settings file
    interp_soilthick : [boolean] control for interpolation/filling of zeros in soil thickness map
    M_method : [int] method to create M, 0 = numpy linalg, 1 = scipy optimize, 2 = both (default)
    M_minmax : [int] value used to constrain M
    directory_in : [string] input path
    directory_out : [string] output path
    clonefile : [string] filename of (PCRaster) clone map file (by default wflow_dem.map)
    logger : [logging object] instance of Python logging module
    Output: map files
    """
    # The settings CSV drives the workflow: one row per parameter/raster group.
    settings = pd.read_csv(settings)
    clone_path = os.path.join(directory_out, clonefile)
    clone = rasterio.open(clone_path)
    clone_profile = clone.profile
    # SoilGrids standard depth layers (presumably cm) used when deriving M below.
    soilgrids_depths = np.array([0,5,15,30,60,100,200])
    for index, row in settings.iterrows():
        files = []
        files.extend((glob.glob(os.path.join(directory_in, row.folder_in, row.files))))
        for index, filepath in enumerate(files):
            logger.info('Reading ' + filepath)
            src = rasterio.open(filepath)
            src_profile = src.profile
            if clone.crs == None:
                # Fall back to WGS84 when the clone map carries no CRS.
                logger.warning('*** clone file ' + clone_path + ' without CRS, CRS set to EPSG:4326 ***')
                clone_profile['crs'] = rasterio.crs.CRS.from_epsg(4326)
            shapes = rasterio_mask_shapes(clone, dst_res, src_profile, clone_profile)
            logger.info('trim file ' + str(filepath))
            # Crop the source raster to the clone extent.
            out_grid, out_transform = mask(src, shapes, crop=True, all_touched=True)
            nx, ny = out_grid[0].shape[1], out_grid[0].shape[0]
            grid = out_grid[0] * float(row.mult_factor)
            if row.parameter == 'LAI':
                # LAI nodata cells become 0 rather than NaN.
                grid[np.where(grid == src_profile['nodata'] * float(row.mult_factor))] = 0.0
            if src_profile['nodata'] != None:
                grid[np.where(grid == src_profile['nodata'] * float(row.mult_factor))] = np.nan
                logger.info('fill nodata for parameter ' + row.parameter)
                grid = fill(grid)
            scr_file = os.path.basename(files[index])
            dst_tiff_file = scr_file
            # Derive the PCRaster output filename from the input filename pattern.
            if scr_file.startswith('KsatVer'):
                dst_map_file = '_'.join(scr_file.split('_')[0:2]) + '.map'
            elif scr_file.startswith('lambda'):
                dst_map_file = ('_'.join(scr_file.split('_')[0:2])).replace('lambda', 'c') + '.map'
            elif scr_file.startswith('LAI'):
                dst_map_file = scr_file.split('_')[0].ljust(8, '0') + '.' + scr_file.replace('.tif','').split('_')[-1].zfill(3)
            elif scr_file.startswith('GLOBCOVER'):
                dst_map_file = 'wflow_landuse.map'
            elif scr_file.startswith('RootingDepth'):
                dst_map_file = scr_file.replace('tif','') + 'map'
            else:
                dst_map_file = scr_file.split('_')[0] + '.map'
            # update the relevant parts of the profiles
            dst_profile = src.meta.copy()
            dst_profile.update({
                'transform': clone_profile['transform'],
                'crs': clone_profile['crs'],
                'dtype' : np.float32,
                'width': clone_profile['width'],
                'height': clone_profile['height']
            })
            src_profile.update({
                'transform' : out_transform,
                'width': nx,
                'height': ny
            })
            if row.scale_method == 'average':
                resample_method = rasterio.warp.Resampling.average
            if row.scale_method == 'mode':
                resample_method = rasterio.warp.Resampling.mode
            # Log-transform before resampling, back-transform afterwards.
            if row.conversion == 'log':
                grid = np.log(grid)
            logger.info('Resample '+ row.parameter + ' to resolution ' + str(dst_res))
            out = reproject_raster(grid, src_profile, dst_profile, resample_method, threads=4)
            if row.conversion == 'log':
                out = np.exp(out)
            if (row.parameter == 'soilthickness') and (interp_soilthick):
                logger.info('Interpolating/filling zeros for parameter ' + row.parameter)
                out = fill(out, out==0)
            # KsatHorFrac
            # KsatHorFrac is stored as a ratio: divide by the resampled KsatVer.
            if row.parameter == 'KsatHorFrac':
                KsatVer = out_grid[0]
                if src_profile['nodata'] != None:
                    KsatVer[np.where(KsatVer == src_profile['nodata'])] = np.nan
                    KsatVer = fill(KsatVer)
                if row.conversion == 'log':
                    out = out/np.exp(reproject_raster(np.log(KsatVer), src_profile, dst_profile, resample_method, threads=4))
                else:
                    out = out/reproject_raster(KsatVer, src_profile, dst_profile, resample_method, threads=4)
                dst_tiff_file = 'KsatHorFrac.tif'
                dst_map_file = 'KsatHorFrac.map'
            if row.parameter == 'lambda':
                # Brooks-Corey lambda converted to the wflow c parameter.
                logger.info('Convert '+ row.parameter + ' to parameter c')
                out = (3. + (2./out))
            path_tif = os.path.join(directory_out, dst_tiff_file)
            logger.info('write resampled '+ row.parameter + ' to file ' + dst_tiff_file)
            with rasterio.open(path_tif, 'w', **dst_profile) as dst:
                dst.write(out,1)
            path_map = os.path.join(directory_out, dst_map_file)
            logger.info('convert ' + dst_tiff_file + ' to PCRaster file ' + dst_map_file)
            gdal.Translate(path_map, path_tif, options = '-of PCRaster')
    # Derive parameter M from the KsatVer and theta layers written above.
    logger.info('calculating parameter M...')
    files = []
    files.extend((glob.glob(os.path.join(directory_out,"KsatVer*.tif"))))
    files.extend((glob.glob(os.path.join(directory_out,"theta*.tif"))))
    input_ksat = {}
    input_theta = {}
    for index, filepath in enumerate(files):
        inputfile = os.path.basename(filepath)
        logger.info('read file ' + inputfile )
        if inputfile.startswith('KsatVer'):
            input_ksat['_'.join(inputfile.split('_')[0:2])] = rasterio.open(filepath)
        elif inputfile.startswith('theta'):
            input_theta[inputfile.split('_')[0]] = rasterio.open(filepath)
    create_M(input_ksat,input_theta,soilgrids_depths,M_method,M_minmax,directory_out,logger)
    del input_ksat, input_theta, clone, src
    # Clean up intermediate GeoTIFFs and GDAL side-car files.
    for f in glob.glob(os.path.join(directory_out, "*.tif")):
        try:
            os.remove(f)
        except:
            logger.error('Could not remove ' + f)
    for f in glob.glob(os.path.join(directory_out,"*.aux.xml")):
        try:
            os.remove(f)
        except:
            logger.error('Could not remove ' + f)
    # Duplicate the depth-specific c maps under indexed names (c_0 ... c_3).
    c_parameter_files = ['c_5cm.map','c_15cm.map','c_60cm.map','c_200cm.map']
    for i,f in enumerate(c_parameter_files):
        try:
            shutil.copy(os.path.join(directory_out,f),os.path.join(directory_out,'c_'+ str(i) + '.map'))
        except:
            logger.error('Could not copy ' + f)
    # Move LAI maps into the clim/ subdirectory expected by wflow.
    clim_dir = os.path.join(directory_out, "clim")
    if not os.path.exists(clim_dir):
        os.mkdir(clim_dir)
    LAI_files = glob.glob(os.path.join(directory_out, 'LAI*'))
    for f in LAI_files:
        try:
            shutil.move(f, os.path.join(clim_dir,os.path.basename(f)))
        except:
            logger.error('Could not move ' + f)
    logger.info('Creating SoilMinThickness.map by copying and renaming SoilThickness.map')
    try:
        shutil.copy(os.path.join(directory_out,'SoilThickness.map'), os.path.join(directory_out,'SoilMinThickness.map'))
    except:
        logger.error('Could not copy and rename SoilThickness.map')
    logger.info('Creating RootingDepth.map by copying and renaming RootingDepth_d75_300x300m.map')
    try:
        shutil.copy(os.path.join(directory_out,'RootingDepth_d75_300x300m.map'), os.path.join(directory_out,'RootingDepth.map'))
    except:
        logger.error('Could not copy and rename RootingDepth.map')
def image_2d_transformer(pretrained=False, **kwargs):
    """
    Modified copy from timm.
    DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    # NOTE: dict(...) raises TypeError if kwargs repeats one of the fixed keys.
    vit_config = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_transformer_2d('vit_deit_base_patch16_384', pretrained=pretrained, **vit_config)
def removeAPIs():
    """
    This function will remove all created APIs from API Manager (apis in multiple tenants)
    :return: None
    """
    global tenant_config_details
    remove_count = 0
    # iterate for each API
    with open(abs_path + '/../../data/runtime_data/api_ids_multi_tenant.csv', 'r') as f:
        for api_id in csv.reader(f):
            tenant_conf = tenant_config_details.get(api_id[0])
            token = tenant_conf.get('create_token')
            deleted = request_methods.deleteAppAPI(
                gateway_protocol, gateway_host, gateway_servelet_port_https, publisher_api_endpoint,
                token, api_id[1])
            if not deleted:
                # One retry on failure before giving up on this API.
                logger.error("API removing Failed!. API id: {}, Tenant: {}. Retrying...".format(api_id[1], api_id[0]))
                deleted = request_methods.deleteAppAPI(
                    gateway_protocol, gateway_host, gateway_servelet_port_https, publisher_api_endpoint,
                    token, api_id[1])
            if deleted:
                logger.info("API removed successfully!. API id: {}, Tenant: {}".format(api_id[1], api_id[0]))
                remove_count += 1
            else:
                logger.error("API removing Failed!. API id: {}, Tenant: {}".format(api_id[1], api_id[0]))
    logger.info("API deletion process completed. Total {} APIs removed".format(str(remove_count)))
def addOnScriptSave(call, args=(), kwargs=None, nodeClass='Root'):
    """Add code to execute before a script is saved.

    Args:
        call: callable to register.
        args: positional arguments forwarded to the callable.
        kwargs: keyword arguments forwarded to the callable (default: none).
        nodeClass: node class the callback is registered against.
    """
    # Fix: a `{}` default is a shared mutable default argument; build a
    # fresh dict per call instead (callers passing a dict are unaffected).
    if kwargs is None:
        kwargs = {}
    _addCallback(onScriptSaves, call, args, kwargs, nodeClass)
def create_fourier_heatmap_from_error_matrix(
    error_matrix: torch.Tensor,
) -> torch.Tensor:
    """Create Fourier Heat Map from error matrix (about quadrant 1 and 4).
    Note:
        Fourier Heat Map is symmetric about the origin.
        So by performing an inversion operation about the origin, Fourier Heat Map is created from error matrix.
    Args:
        error_matrix (torch.Tensor): The size of error matrix should be (H, H/2+1). Here, H is height of image.
            This error matrix shoud be about quadrant 1 and 4.
    Returns:
        torch.Tensor (torch.Tensor): Fourier Heat Map created from error matrix.
    """
    assert len(error_matrix.size()) == 2
    assert error_matrix.size(0) == 2 * (error_matrix.size(1) - 1)
    # Right half: drop the first row and last column of the error matrix.
    right_half = error_matrix[1:, :-1]
    # Left half: point reflection of the right half through the origin.
    left_half = torch.flip(right_half, (0, 1))
    # Drop the duplicated centre column before stitching the halves together.
    return torch.cat([left_half[:, :-1], right_half], dim=1)
def get_cluster_credentials(DbUser=None, DbName=None, ClusterIdentifier=None, DurationSeconds=None, AutoCreate=None, DbGroups=None):
    """
    Returns a database user name and temporary password with temporary authorization to log in to an Amazon Redshift database. The action returns the database user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate is True . You can optionally specify one or more database user groups that the user will join at log in. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Generating IAM Database User Credentials in the Amazon Redshift Cluster Management Guide.
    The IAM user or role that executes GetClusterCredentials must have an IAM policy attached that allows the redshift:GetClusterCredentials action with access to the dbuser resource on the cluster. The user name specified for dbuser in the IAM policy and the user name specified for the DbUser parameter must match.
    If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup action with access to the listed dbgroups .
    In addition, if the AutoCreate parameter is set to True , then the policy must include the redshift:CreateClusterUser privilege.
    If the DbName parameter is specified, the IAM policy must allow access to the resource dbname for the specified database name.
    See also: AWS API Documentation
    :example: response = client.get_cluster_credentials(
        DbUser='string',
        DbName='string',
        ClusterIdentifier='string',
        DurationSeconds=123,
        AutoCreate=True|False,
        DbGroups=[
            'string',
        ]
    )
    :type DbUser: string
    :param DbUser: [REQUIRED]
    The name of a database user. If a user name matching DbUser exists in the database, the temporary user credentials have the same permissions as the existing user. If DbUser doesn't exist in the database and Autocreate is True , a new user is created using the value for DbUser with PUBLIC permissions. If a database user matching the value for DbUser doesn't exist and Autocreate is False , then the command succeeds but the connection attempt will fail because the user doesn't exist in the database.
    For more information, see CREATE USER in the Amazon Redshift Database Developer Guide.
    Constraints:
    Must be 1 to 128 alphanumeric characters or hyphens
    Must contain only lowercase letters.
    First character must be a letter.
    Must not contain a colon ( : ) or slash ( / ).
    Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
    :type DbName: string
    :param DbName: The name of a database that DbUser is authorized to log on to. If DbName is not specified, DbUser can log in to any existing database.
    Constraints:
    Must be 1 to 64 alphanumeric characters or hyphens
    Must contain only lowercase letters.
    Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]
    The unique identifier of the cluster that contains the database for which your are requesting credentials. This parameter is case sensitive.
    :type DurationSeconds: integer
    :param DurationSeconds: The number of seconds until the returned temporary password expires.
    Constraint: minimum 900, maximum 3600.
    Default: 900
    :type AutoCreate: boolean
    :param AutoCreate: Create a database user with the name specified for DbUser if one does not exist.
    :type DbGroups: list
    :param DbGroups: A list of the names of existing database groups that DbUser will join for the current session. If not specified, the new user is added only to PUBLIC.
    (string) --
    :rtype: dict
    :return: {
        'DbUser': 'string',
        'DbPassword': 'string',
        'Expiration': datetime(2015, 1, 1)
    }
    """
    # Auto-generated documentation stub: the real implementation is injected
    # at runtime by the AWS SDK client machinery.
    pass
def test_connection(client):
    """
    Check the `/api/ping` connectivity endpoint.

    :param client: cope app test client fixture
    :return: passes when the status code and payload match expectations
    """
    resp = client.get("/api/ping")
    assert resp.status_code == 200
    assert resp.json["Ping"] == "Pong"
def _sc_print_(sc):
    """Render a StatusCode as a colored string.
    >>> st = ...
    >>> print st
    """
    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
    ##
    from ostap.logger.logger import colored_string
    if sc.isSuccess():
        return colored_string('SUCCESS', WHITE, GREEN, True)
    if sc.isRecoverable():
        return colored_string('RECOVERABLE', RED, YELLOW, True)
    code = sc.getCode()
    if code != _FAILURE:
        # Generic failure carrying an explicit error code.
        return colored_string('FAILURE[%d]' % code, YELLOW, RED, True)
    return colored_string('FAILURE', YELLOW, RED, True)
def test_no_invalid_formats(locale):
    """
    For each locale, for each provider, search all the definitions of "formats"
    and make sure that all the providers in there (e.g. {{group}}) are valid
    and do not emit empty strings. Empty strings are allowed only if the group
    is not surrounded by spaces. This is a quick way to make sure that no
    string is generated with "double spaces", starting spaces or ending spaces.
    """
    # NOTE(review): relies on module-level PROVIDERS, find_group (a compiled
    # regex) and Factory defined elsewhere in this test module.
    randum = Factory.create(locale)
    errors = []
    for provider in PROVIDERS:
        if provider == "randum.providers":
            continue
        prov_cls, lang = Factory._get_provider_class(provider, locale)
        assert lang == locale
        attributes = set(dir(prov_cls))
        for attribute in attributes:
            # consider only the format attributes
            if not attribute.endswith("formats"):
                continue
            formats = getattr(prov_cls, attribute)
            # may be a function or some other bizarre types
            if not isinstance(formats, (list, tuple)):
                continue
            for format in formats:
                # search all the {{groups}} in the format
                for match in find_group.finditer(format):
                    group = match.group(1)
                    try:
                        attr = randum.format(group)
                    except AttributeError as e:
                        # Unknown group name: record and keep scanning.
                        errors.append(str(e))
                        continue
                    # touching = True if the group is touching sometime on at
                    # least one side, i.e. it's not surrounded by spaces
                    touching = False
                    if match.start() != 0 and format[match.start() - 1] != " ":
                        touching = True
                    if match.end() != len(format) and format[match.end()] != " ":
                        touching = True
                    # An empty expansion is only acceptable when the group
                    # touches neighbouring text (otherwise it leaves stray spaces).
                    if not attr and not touching:
                        errors.append(
                            "Attribute {{%s}} provided an invalid value in format '%s' from %s.%s.%s"
                            % (group, format, provider, locale, attribute),
                        )
    # group errors reporting all the ones from the same locale
    assert not errors, "Errors:\n - " + "\n - ".join(errors)
def get_state_name(state):
    """Maps a mongod node state id to a human readable string."""
    try:
        return REPLSET_MEMBER_STATES[state][0]
    except KeyError:
        # Unrecognized state id.
        return 'UNKNOWN'
def _get_platform_information():
"""Return a dictionary containing platform-specific information."""
import sysconfig
system_information = {"platform": sysconfig.get_platform()}
system_information.update({"python version": sys.version_info})
if sys.platform == "win32":
system_information.update({"binary path": sys.exec_prefix})
system_information.update(
{"main binary": str(Path(sys.exec_prefix).joinpath("python.exe"))})
system_information.update(
{"hidden console binary": str(Path(sys.exec_prefix).joinpath("pythonw.exe"))})
else:
system_information.update({"binary path": str(Path(sys.exec_prefix).joinpath(sys.exec_prefix).joinpath("bin"))})
system_information.update(
{"main binary": str(Path(sys.exec_prefix).joinpath("bin", "python"))})
system_information.update(
{"hidden console binary": str(Path(sys.exec_prefix).joinpath("bin", "python"))})
return system_information | 5,327,836 |
def compute_resilience(ugraph, attack_order):
    """
    Dispatch resilience computation to the union-find or BFS implementation,
    depending on the module-level USE_UF flag.
    :param ugraph:
    :param attack_order:
    :return:
    """
    implementation = (
        uf.compute_resilience_uf if USE_UF else bfs_visited.compute_resilience
    )
    return implementation(ugraph, attack_order)
def NumericalFlux(b, r, c):
    """Compute the flux by numerical integration of the surface integral.

    b: impact parameter (sign is ignored); r: occultor radius, must satisfy
    r <= 1; c: sequence of 4 limb-darkening coefficients (non-linear law).
    Returns the occulted flux normalized by the unocculted total.
    NOTE(review): tolerances of 1e-12 make this accurate but slow -- it reads
    like a reference implementation for validating analytic results.
    """
    # I'm only coding up a specific case here
    assert r <= 1, "Invalid range."
    if b < 0:
        b = np.abs(b)
    # No occ
    if b >= 1 + r:
        return 1
    # Get points of intersection
    if b > 1 - r:
        yi = (1. + b ** 2 - r ** 2) / (2. * b)
        xi = (1. / (2. * b)) * np.sqrt(4 * b ** 2 - (1 + b ** 2 - r ** 2) ** 2)
    else:
        yi = np.inf
        xi = r
    # Specific intensity map (non-linear limb-darkening law in mu).
    def I(y, x):
        mu = np.sqrt(1 - x ** 2 - y ** 2)
        return 1 - c[0] * (1 - mu ** 0.5) - c[1] * (1 - mu) - c[2] * (1 - mu ** 1.5) - c[3] * (1 - mu ** 2)
    # Total flux (integrate the upper half-disk and double, by symmetry).
    total, _ = dblquad(I, -1, 1, lambda x: 0, lambda x: np.sqrt(1 - x ** 2), epsabs=1e-12, epsrel=1e-12)
    total *= 2
    # Lower integration limit
    def y1(x):
        if yi <= b:
            # Lower occultor boundary
            return b - np.sqrt(r ** 2 - x ** 2)
        elif b <= 1 - r:
            # Lower occultor boundary
            return b - np.sqrt(r ** 2 - x ** 2)
        else:
            # Tricky: we need to do this in two parts
            return b - np.sqrt(r ** 2 - x ** 2)
    # Upper integration limit
    def y2(x):
        if yi <= b:
            # Upper occulted boundary
            return np.sqrt(1 - x ** 2)
        elif b <= 1 - r:
            # Upper occultor boundary
            return b + np.sqrt(r ** 2 - x ** 2)
        else:
            # Tricky: we need to do this in two parts
            return np.sqrt(1 - x ** 2)
    # Compute the total flux
    flux, _ = dblquad(I, -xi, xi, y1, y2, epsabs=1e-12, epsrel=1e-12)
    # Do we need to solve an additional integral?
    if not (yi <= b) and not (b <= 1 - r):
        def y1(x):
            return b - np.sqrt(r ** 2 - x ** 2)
        def y2(x):
            return b + np.sqrt(r ** 2 - x ** 2)
        additional_flux, _ = dblquad(I, -r, -xi, y1, y2,
                                     epsabs=1e-12, epsrel=1e-12)
        flux += 2 * additional_flux
    return (total - flux) / total
def build_volume_from(volume_from_spec):
    """
    volume_from can reference either a service or a container; resolve it to a
    "<container_id>:<mode>" string, creating a container for the service when
    none exists yet.
    """
    source = volume_from_spec.source
    mode = volume_from_spec.mode
    if isinstance(source, Service):
        existing = source.containers(stopped=True)
        if not existing:
            # No container yet: create one for the service.
            return "{}:{}".format(source.create_container().id, mode)
        return "{}:{}".format(existing[0].id, mode)
    elif isinstance(source, Container):
        return "{}:{}".format(source.id, mode)
def generate_peripheral(csr, name, **kwargs):
    """ Generates definition of a peripheral.
    Args:
        csr (dict): LiteX configuration
        name (string): name of the peripheral
        kwargs (dict): additional parameterss, including
                       'model' and 'properties'
    Returns:
        string: repl definition of the peripheral
    """
    descriptor = get_descriptor(csr, name)
    model = kwargs['model']
    # Prefer the CSR32-specific model when the bus uses 32-bit CSR data.
    if csr['constants']['config_csr_data_width'] == 32 and 'model_CSR32' in kwargs:
        model = kwargs['model_CSR32']
    lines = ['\n{}: {} @ {}\n'.format(
        kwargs.get('name', name),
        model,
        generate_sysbus_registration(descriptor))]
    ignored = kwargs.get('ignored_constants', ())
    for constant, val in descriptor['constants'].items():
        if constant in ignored:
            continue
        if constant == 'interrupt':
            lines.append('    -> cpu@{}\n'.format(val))
        else:
            lines.append('    {}: {}\n'.format(constant, val))
    for prop, val in kwargs.get('properties', {}).items():
        lines.append('    {}: {}\n'.format(prop, val(csr)))
    for prop, val in kwargs.get('interrupts', {}).items():
        lines.append('    {} -> {}\n'.format(prop, val()))
    return ''.join(lines)
def export_file(isamAppliance, instance_id, component_id, file_id, filepath, check_mode=False, force=False):
    """
    Exporting the transaction logging data file or rollover transaction logging data file for a component

    Fixes vs. previous revision:
    - removed the unreachable trailing ``return`` (both branches already return)
    - the "already exists" message is built once instead of twice
    """
    if os.path.exists(filepath):
        # Never overwrite a previously exported file.
        message = "File '{0}' already exists. Skipping export.".format(filepath)
        logger.info(message)
        return isamAppliance.create_return_object(warnings=[message])
    if check_mode is True:
        # Dry run: report that the export would happen.
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_get_file(
        "Exporting the transaction logging data file or rollover transaction logging data file for a component",
        "{0}/{1}/transaction_logging/{2}/translog_files/{3}?export".format(uri, instance_id, component_id, file_id),
        filepath
    )
def map_vocabulary(docs, vocabulary):
    """
    Map each token of each document to its vector id via the vocabulary and
    return the result as a numpy array.
    """
    rows = []
    for doc in docs:
        rows.append([vocabulary[word] for word in doc])
    return np.array(rows)
def export_to_jdbc(self, connection_url, table_name):
    """
    Write current frame to JDBC table
    Parameters
    ----------
    :param connection_url: (str) JDBC connection url to database server
    :param table_name: (str) JDBC table name
    Example
    -------
    <skip>
    >>> from sparktk import TkContext
    >>> tc=TkContext(sc)
    >>> data = [[1, 0.2, -2, 5], [2, 0.4, -1, 6], [3, 0.6, 0, 7], [4, 0.8, 1, 8]]
    >>> schema = [('a', int), ('b', float),('c', int) ,('d', int)]
    >>> my_frame = tc.frame.create(data, schema)
    <progress>
    </skip>
    connection_url : (string) : "jdbc:{database_type}://{host}/{database_name}
    Sample connection string for postgres
    ex: jdbc:postgresql://localhost/postgres [standard connection string to connect to default 'postgres' database]
    table_name: (string): table name. It will create new table with given name if it does not exists already.
    <skip>
    >>> my_frame.export_to_jdbc("jdbc:postgresql://localhost/postgres", "demo_test")
    <progress>
    </skip>
    Verify exported frame in postgres
    From bash shell
    $sudo -su postgres psql
    postgres=#\d
    You should see demo_test table.
    Run postgres=#select * from demo_test (to verify frame).
    Notes
    -----
    java.sql.SQLException: No suitable driver found for <jdbcUrl>
    If this error is encountered while running your application, then your JDBC library cannot be found by the node
    running the application. If you're running in Local mode, make sure that you have used the --driver-class-path
    parameter. If a Spark cluster is involved, make sure that each cluster member has a copy of library, and that
    each node of the cluster has been restarted since you modified the spark-defaults.conf file. See this
    [site](https://sparkour.urizone.net/recipes/using-jdbc/).
    Sparktk does not come with any JDBC drivers. A driver compatible with the JDBC data sink must be supplied when
    creating the TkContext instance:
    <skip>
    >>> tc = sparktk.TkContext(pyspark_submit_args='--jars myJDBCDriver.jar')
    </skip>
    """
    # Delegate directly to the Scala frame backend.
    self._scala.exportToJdbc(connection_url, table_name)
def mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
    """ MobileNet V3 Large (Minimalistic) 1.0 """
    # NOTE for train set drop_rate=0.2
    return _gen_mobilenet_v3(
        'mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
def setup(upgrade=False):
    """
    Setup virtualenv on the remote location

    :param upgrade: when True, requirements are (re)installed with upgrade.
    """
    venv_root = ctx('virtualenv.dirs.root')
    venv_name = ctx('virtualenv.name')
    venv_path = os.path.join(venv_root, venv_name)
    py = 'python{}'.format(ctx('python.version'))
    # Expose the computed virtualenv path on the fabric env for later tasks.
    env.venv_path = venv_path
    if not fabtools.deb.is_installed('python-virtualenv'):
        fabtools.deb.install('python-virtualenv')
    # Experimental
    require.python.virtualenv(venv_path, python_cmd=py, use_sudo=True)
    with _virtualenv(venv_path):
        require.python.pip()
        require.python.setuptools()
        execute(install_requirements, upgrade=upgrade)
    # /Experimental
    # NOTE(review): legacy manual-virtualenv code kept below for reference
    # while the fabtools-based path above is still marked "Experimental".
    # lib_root = os.path.join(venv_root, venv_name, 'lib')
    # if not files.exists(lib_root, use_sudo=True):
    #     print(cyan("Setuping virtualenv on {}".format(env.stage)))
    #     with cd(venv_root):
    #         sudo('virtualenv --python=python{version} {name}'.format(
    #             version=ctx('python.version'),
    #             name=ctx('virtualenv.name')))
    # pip('install -U setuptools pip')  # Just avoiding some headaches..
async def remove(req):
    """
    Remove a label.

    Looks up the label by the ``label_id`` path parameter and deletes it.

    :raises NotFound: when no label with the given id exists.
    :raises HTTPNoContent: on success -- aiohttp delivers the 204 response
        by raising it; this is the expected control flow, not an error.
    """
    label_id = int(req.match_info["label_id"])
    async with AsyncSession(req.app["pg"]) as session:
        result = await session.execute(select(Label).filter_by(id=label_id))
        label = result.scalar()
        if label is None:
            raise NotFound()
        await session.delete(label)
        await session.commit()
    raise HTTPNoContent
def create_from_triples(
    triples_file,
    out_file,
    relation_name = None,
    bidirectional = False,
    head_prefix = PREFIXES["default"],
    tail_prefix = PREFIXES["default"]
    ):
    """Method to create an ontology from a .tsv file with triples.
    :param triples_file: Path for the file containing the triples. This file must be a `.tsv` file and each row must be of the form (head, relation, tail). It is also supported `.tsv` files with rows of the form (head, tail); in that case the field `relation_name` must be specified.
    :type triples_file: str
    :param relation_name: Name for relation in case the `.tsv` input file has only two columns.
    :type relation_name: str
    :param bidirectional: If `True`, the triples will be considered undirected.
    :type bidirectional: bool
    :param out_file: Path for the output ontology. If `None` and an existing ontology is input, the existing ontology will be overwritten.
    :type out_file: str
    :param head_prefix: Prefix to be assigned to the head of each triple. Default is `http://default/mowl/`
    :type head_prefix: str
    :param tail_prefix: Prefix to be assigned to the tail of each triple. Default is `http://default/mowl/`
    :type tail_prefix: str
    """
    manager = OWLManager.createOWLOntologyManager()
    factory = manager.getOWLDataFactory()
    ont = manager.createOntology()
    with open(triples_file, "r") as f:
        for line in f:
            line = tuple(line.strip().split("\t"))
            if len(line) < 2 or len(line) > 3:
                raise ValueError(f"Expected number of elements in triple to be 2 or 3. Got {len(line)}")
            if len(line) == 2 and relation_name is None:
                raise ValueError("Found 2 elements in triple but the relation_name field is None")
            if len(line) == 2:
                head, tail = line
                rel = relation_name
            if len(line) == 3:
                head, rel, tail = line
            head = factory.getOWLClass(IRI.create(f"{head_prefix}{head}"))
            rel = factory.getOWLObjectProperty(IRI.create(f"{rel}"))
            tail = factory.getOWLClass(IRI.create(f"{tail_prefix}{tail}"))
            # Encode the triple as the axiom: head SubClassOf (rel some tail).
            axiom = factory.getOWLSubClassOfAxiom(
                head, factory.getOWLObjectSomeValuesFrom(
                    rel, tail))
            manager.addAxiom(ont, axiom)
            if bidirectional:
                # Undirected edge: also add tail SubClassOf (rel some head).
                axiom = factory.getOWLSubClassOfAxiom(
                    tail, factory.getOWLObjectSomeValuesFrom(
                        rel, head))
                manager.addAxiom(ont, axiom)
    manager.saveOntology(ont, IRI.create("file:" + os.path.abspath(out_file)))
def get_identity(user, identity_uuid):
    """
    Given the (request) user and an identity uuid,
    return None or an Active Identity
    """
    try:
        identities = get_identity_list(user)
        if not identities:
            raise CoreIdentity.DoesNotExist(
                "No identities found for user %s" % user.username)
        return identities.get(uuid=identity_uuid)
    except CoreIdentity.DoesNotExist:
        # Missing identity is an expected outcome; log and signal with None.
        logger.warn("Identity %s DoesNotExist" % identity_uuid)
        return None
def same_strange_looking_function(param1, callback_fn):
    """
    Apply ``callback_fn`` to the last element of ``param1`` and return the
    result. (Deliberately identical in behaviour to
    some_strange_looking_function, so both should hash the same.)
    """
    return callback_fn(param1[-1])
def put_output(dir_in, opts, Flowcell, Lane):
    """Move the demultiplexed FASTQ pair from *dir_in* into the Galaxy
    destinations given by ``opts.match1`` / ``opts.match2``.

    Falls back to the ``.gz``-suffixed file names when the uncompressed pair
    is absent. Returns 0 on success.

    Fix: the original called ``os.exists`` (which does not exist) instead of
    ``os.path.exists``, so the gzip fallback branch always raised
    AttributeError.
    """
    seq1_name = '%(code)s_%(Flowcell)s_s_%(lane)s_fastq.txt'%\
        ({'code': 'R1samplecode123','Flowcell':Flowcell, 'lane':Lane})
    seq2_name = '%(code)s_%(Flowcell)s_s_%(lane)s_fastq.txt'%\
        ({'code': 'R2samplecode123','Flowcell':Flowcell, 'lane':Lane})
    if not os.path.exists(os.path.join(dir_in, seq1_name)):
        # Uncompressed outputs missing: assume the gzip-compressed pair.
        seq1_name += ".gz"
        seq2_name += ".gz"
    shutil.move(os.path.join(dir_in, seq1_name), opts.match1)
    shutil.move(os.path.join(dir_in, seq2_name), opts.match2)
    return 0
def __extractFunction(text, jsDoc, classConstructor):
    """
    Extracts a function depending of its pattern:
    'function declaration':
        function <name>(<parameters>) {
            <realization>
        }[;]
    'named function expression':
        <variable> = function <name>(<parameters>) {
            <realization>
        }[;]
    'unnamed function expression'.
        <variable> = function(<parameters>) {
            <realization>
        }[;]
    'alias function':
        <variable> = <name>(<parameters>)[;]
    @param {string} text.
    @param {jsCodeParser.jsDoc.JsDoc} jsDoc.
    @param {(jsCodeParser.elements.Class|jsCodeParser.elements.Method)}
        classConstructor.
    @return {(jsCodeParser.elements.Class|jsCodeParser.elements.Method)}
        Element.
    """
    # Text between the first '(' and its matching ')': the parameter list.
    parameters = extractTextBetweenTokens(text, '(')
    if not parameters:
        return None
    end = text.find(parameters) + len(parameters)
    realization = text[end:].strip()
    # A '{' right after the parameters means a real function body; pull the
    # balanced-brace block and move `end` past it.
    if realization[0] == '{':
        realization = extractTextBetweenTokens(realization, '{')
        end = text.find(realization) + len(realization)
    # Include a trailing semicolon in the extracted code, if present.
    if end < len(text) and text[end] == ';':
        end += 1
    code = text[:end].strip()
    return classConstructor(code, jsDoc)
def vocabulary_size(tokens):
    """Return the vocabulary size: the number of distinct alphabetic tokens
    (per ``str.isalpha``). The count is case-sensitive. `tokens` is a list of
    token strings."""
    return len({token for token in tokens if token.isalpha()})
def max_iteration_for_analysis(query: Dict[str, Any],
                               db: cosem_db.MongoCosemDB,
                               check_evals_complete: bool = False,
                               conv_it: Optional[Tuple[int, int]] = None) -> Tuple[int, bool]:
    """
    Find the first iteration meeting the convergence criterion, as
    `convergence_iteration` does, but clamp the result to a minimum of 700k
    iterations when the criterion was met earlier. Pass `conv_it` (a previous
    output of `convergence_iteration`) to avoid recomputation.
    Args:
        query: Dictionary specifying which set of configuration to consider for the maximum iteration. This will
            typically contain keys for setups, label and crop.
        db: Database containing the evaluation results.
        conv_it: Output of `convergence_iteration` if already known. Otherwise, None and `convergence_iteration` will
            be called.
        check_evals_complete: Whether to first check whether the considered evaluations are consistent across the
            queries (i.e. same for all crops/labels/raw_datasets within one setup, at least to 500k, if above threshold
            by 500k at least to 700k). Should generally be set to True unless this has already been checked.
    Returns:
        The max iteration. If none of the results produce above threshold segmentations False is returned. If the
        convergence condition isn't met anywhere or not evaluated to at least 700k iterations.
    Raises:
        ValueError if no evaluations are found for given query.
    """
    if conv_it is not None:
        it, valid = conv_it
    else:
        it, valid = convergence_iteration(query, db, check_evals_complete=check_evals_complete)
    # valid == 2 marks a fully-validated result; otherwise enforce the 700k floor.
    if valid != 2:
        it = max(it, 700000)
    return it, bool(valid)
async def show_token(root: Root) -> None:
    """
    Print current authorization token.
    """
    token = await root.client.config.token()
    root.print(token, soft_wrap=True)
def getMatirces(Dynamics, Cost):
    """
    This functions takes the dynamics class as input and outputs the required
    matrices and cvxpy.variables to turn the covariance steering problem into a
    finite dimensional optimization problem.

    NOTE(review): the name keeps the historical "Matirces" typo since external
    callers reference it; renaming would break the interface.
    """
    Alist = Dynamics.Alist
    Blist = Dynamics.Blist
    Dlist = Dynamics.Dlist
    zlist = Dynamics.zlist
    sigmaWlist = Dynamics.sigmaWlist
    Rulist = Cost.Rulist
    Rvlist = Cost.Rvlist
    N = len(Alist)  # Problem horizon
    # State, control and disturbance-input dimensions read off the matrices.
    nx, nu, nv = Alist[0].shape[1], Blist[0].shape[1], Dlist[0].shape[1]
    # Set Constant Matirces:
    # Gamma stacks the state-transition matrices Phi(i, 0) for i = 0..N.
    Gamma = []
    for i in range(N+1):
        Gamma.append(Phi_func(Alist, i, 0))
    Gamma = np.vstack(Gamma)
    # Block lower-triangular convolution matrices mapping the input (Hu),
    # disturbance-input (Hv) and noise (Hw) sequences to the state sequence.
    block_Hu, block_Hv, block_Hw = [], [], []
    for i in range(N+1):
        row_Hu, row_Hv, row_Hw = [], [], []
        for j in range(N):
            if j < i:
                row_Hu.append(Phi_func(Alist, i, j) @ Blist[j])
                row_Hv.append(Phi_func(Alist, i, j) @ Dlist[j])
                row_Hw.append(Phi_func(Alist, i, j))
            else:
                # Causality: inputs at time j >= i cannot affect state i.
                row_Hu.append(np.zeros((nx, nu)))
                row_Hv.append(np.zeros((nx, nv)))
                row_Hw.append(np.zeros((nx, nx)))
        block_Hu.append(np.hstack(row_Hu))
        block_Hv.append(np.hstack(row_Hv))
        block_Hw.append(np.hstack(row_Hw))
    Hu, Hv, Hw = np.vstack(block_Hu), np.vstack(block_Hv), np.vstack(block_Hw)
    Z = np.vstack(zlist)
    # Block-diagonal process-noise covariance over the horizon.
    Wbig = np.zeros((nx*N, nx*N))
    for k in range(N):
        Wbig[k*nx:(k+1)*nx, k*nx:(k+1)*nx] = sigmaWlist[k]
    # Block-diagonal cost weights for the two input channels.
    Rubig = np.zeros((nu*N, nu*N))
    Rvbig = np.zeros((nv*N, nv*N))
    # set_trace()
    for k in range(N):
        Rubig[k*nu:(k+1)*nu, k*nu:(k+1)*nu] = Rulist[k]
        Rvbig[k*nv:(k+1)*nv, k*nv:(k+1)*nv] = Rvlist[k]
    return Gamma, Hu, Hv, Hw, Z, Wbig, Rubig, Rvbig
def get_transit_boundary_indices(time, transit_size):
    """ Determines transit boundaries from sorted time of transit cut out
    :param time (1D np.array) sorted times of transit cut out
    :param transit_size (float) size of the transit crop window in days
    :returns tuple:
        [0] list of transit start indices (int)
        [1] list of sequence lengths (int) of each transit
    """
    starts = [0]
    lengths = []
    for idx, t in enumerate(time):
        # A gap larger than the crop window closes the current transit.
        if t - time[starts[-1]] > transit_size:
            lengths.append(idx - starts[-1])
            starts.append(idx)
    # The final transit extends to the end of the array.
    lengths.append(len(time) - starts[-1])
    return starts, lengths
def example_3():
    """Load ~45K triples from GitHub into memory, copy them into a
    SQLiteLSM-backed ConjunctiveGraph, and time each phase.
    NOTE(review): performs network I/O and writes to a temp directory.
    """
    from urllib.error import HTTPError
    from time import time
    import gc
    def cleanup(path):
        # Clean up the temp folder to remove the BerkeleyDB database files...
        for f in os.listdir(path):
            os.unlink(path + "/" + f)
        os.rmdir(path)
    print(f"\n{80 * '*'}\nExample 3, loading 45K triples from GitHub into memory and then adding them to a\nSQLiteLSM-backed ConjunctiveGraph...\n")
    doacc_abox = "https://raw.githubusercontent.com/DOACC/individuals/master/cryptocurrency.nt"
    path = os.path.join(tempfile.gettempdir(), "doacc")
    store = plugin.get("SQLiteLSM", Store)(
        identifier=URIRef("rdflib_sqlitelsm_test")
    )
    # Create an in-memory Graph into which to load the data
    memgraph = Graph("Memory", URIRef("http://rdflib.net"))
    # Factor out any gc-related lags
    gcold = gc.isenabled()
    gc.collect()
    gc.disable()
    # Load memgraph with remote data
    #
    print("Downloading and parsing data\n")
    try:
        t0 = time()
        memgraph.parse(location=doacc_abox, format="nt")
        t1 = time()
    except HTTPError as e:
        # Download failed: tidy up and report the HTTP status.
        cleanup(path)
        return e.code, str(e), None
    print(f"Time taken to download and parse {len(memgraph)} triples to in-memory graph: {t1 - t0:.4f}s\n")
    if os.path.exists(path):
        cleanup(path)
    # Create ConjunctiveGraph with LSM-backed Stg
    sqlitelsmgraph = ConjunctiveGraph(store)
    sqlitelsmgraph.open(path, create=True)
    # Step through the memgraph triples, adding to the LSM-backed ConjunctiveGraph
    t0 = time()
    for triple in memgraph.triples((None, None, None)):
        sqlitelsmgraph.add(triple)
    t1 = time()
    # Check total and report time
    assert len(sqlitelsmgraph) == 44947, len(sqlitelsmgraph)
    print(f"Time to add {len(sqlitelsmgraph)} triples to LSM-backed graph: {t1 - t0:.4f}s\n")
    # Close the graphs
    memgraph.close()
    sqlitelsmgraph.close()
    # Re-open (with “create=False”) sqlitelsmgraph with saved store:
    t0 = time()
    sqlitelsmgraph.open(path, create=False)
    t1 = time()
    print(f"Time to load {len(sqlitelsmgraph)} triples from LSM-backed store: {t1 - t0:.4f}s\n")
    # Restore the garbage collector if it was enabled on entry.
    if gcold:
        gc.enable()
    print(f"Example 3 completed\n{80 * '*'}\n\n")
def decode_gbe_string(s):
    """Turn a gbe output string into a pandas DataFrame.

    Expects a header line of comma-separated ``tr_``-prefixed column names
    followed by one line of quoted, comma-joined value groups.
    """
    header, body = s.replace('","', ';').replace('"', '').split('\n')
    cells = [group.split(',') for group in body.split(';')][:-1]
    frame = pd.DataFrame(cells).transpose().ffill().iloc[:-1]
    frame.columns = [name.replace('tr_', '') for name in header.split(',')[:-1]]
    return frame
def get_scihub_namespaces(xml):
    """Take UTF-8 encoded XML bytes and return a dict mapping namespace
    prefixes to namespace URIs; the default namespace maps from '_default'."""
    pattern = r'\s+xmlns:?(\w*?)\s*=\s*[\'"](.*?)[\'"]'
    namespaces = {}
    for prefix, uri in re.findall(pattern, xml.decode('utf-8')):
        namespaces[prefix if prefix else '_default'] = uri
    return namespaces
def validate_args(args: Namespace) -> None:
    """
    Validate the command line arguments

    Parameters
    ----------
    args: :class:`~argparse.Namespace`
        Parsed command line arguments

    Raises
    ------
    :class:`~montreal_forced_aligner.exceptions.ArgumentError`
        If there is a problem with any arguments
    """
    # Normalise trailing path separators on both directory arguments.
    for attribute in ("output_directory", "corpus_directory"):
        cleaned = getattr(args, attribute).rstrip("/").rstrip("\\")
        setattr(args, attribute, cleaned)
    if not os.path.exists(args.corpus_directory):
        raise ArgumentError(f"Could not find the corpus directory {args.corpus_directory}.")
    if not os.path.isdir(args.corpus_directory):
        raise ArgumentError(
            f"The specified corpus directory ({args.corpus_directory}) is not a directory."
        )
    if args.corpus_directory == args.output_directory:
        raise ArgumentError("Corpus directory and output directory cannot be the same folder.")
def insert_df_to_table(df, table):
    """
    Using cursor.executemany() to insert a dataframe
    Modified from: https://stackoverflow.com/a/70409917/11163214

    NOTE(review): rows pass through set(), so duplicate rows are dropped and
    insertion order is not preserved -- confirm this is intentional.
    NOTE(review): the VALUES clause hardcodes three placeholders, so `df`
    must have exactly 3 columns.
    """
    conn = connect_to_postgres()
    cursor = conn.cursor()
    # Create a list of tuples from the dataframe values
    tuples = list(set([tuple(x) for x in df.to_numpy()]))
    # Comma-separated dataframe columns
    cols = ','.join(list(df.columns))
    # SQL query to execute
    # Table/column names are interpolated directly into the statement; values
    # themselves go through parameterized %s placeholders.
    query = "INSERT INTO %s(%s) VALUES(%%s,%%s,%%s)" % (
        table, cols)
    try:
        cursor.executemany(query, tuples)
        conn.commit()
    except (Exception, pg.DatabaseError) as error:
        print("Error: %s" % error)
        conn.rollback()
    finally:
        cursor.close()
        conn.close()
    return
def test_server_options(mocker, server_opt):
    """ Test that the internal numbers of homogeneous programs are stored.
    WARN: All in one test because it doesn't work when create_server is called twice.
    """
    # test attributes: a fresh options object starts empty.
    assert server_opt.parser is None
    assert server_opt.program_class == {}
    assert server_opt.process_groups == {}
    assert server_opt.procnumbers == {}
    # call realize
    server = create_server(mocker, server_opt, ProgramConfiguration)
    assert server.procnumbers == {'dummy': 0, 'dummy_0': 0, 'dummy_1': 1, 'dummy_2': 2, 'dumber_10': 0, 'dumber_11': 1,
                                  'dummy_ears_20': 0, 'dummy_ears_21': 1}
    # Flatten process_groups into names for comparison.
    expected_printable = {program_name: {group_name: [process.name for process in processes]}
                          for program_name, program_configs in server.process_groups.items()
                          for group_name, processes in program_configs.items()}
    assert expected_printable == {'dumber': {'dumber': ['dumber_10', 'dumber_11']},
                                  'dummies': {'dummy_group': ['dummy_0', 'dummy_1', 'dummy_2']},
                                  'dummy': {'dummy_group': ['dummy']},
                                  'dummy_ears': {'dummy_ears': ['dummy_ears_20', 'dummy_ears_21']}}
    assert server.program_class['dummy'] is ProcessConfig
    assert server.program_class['dummies'] is ProcessConfig
    assert server.program_class['dumber'] is FastCGIProcessConfig
    assert server.program_class['dummy_ears'] is EventListenerConfig
    # udpate procnums of a program
    assert server.update_numprocs('dummies', 1) == 'program:dummies'
    assert server.parser['program:dummies']['numprocs'] == '1'
    # reload programs
    result = server.reload_processes_from_section('program:dummies', 'dummy_group')
    expected_printable = [process.name for process in result]
    assert expected_printable == ['dummy_0']
    assert server.procnumbers == {'dummy': 0, 'dummy_0': 0, 'dumber_10': 0, 'dumber_11': 1,
                                  'dummy_ears_20': 0, 'dummy_ears_21': 1}
    # udpate procnums of a FastCGI program
    assert server.update_numprocs('dumber', 1) == 'fcgi-program:dumber'
    assert server.parser['fcgi-program:dumber']['numprocs'] == '1'
    # reload programs
    result = server.reload_processes_from_section('fcgi-program:dumber', 'dumber')
    expected_printable = [process.name for process in result]
    assert expected_printable == ['dumber_10']
    assert server.procnumbers == {'dummy': 0, 'dummy_0': 0, 'dumber_10': 0, 'dummy_ears_20': 0, 'dummy_ears_21': 1}
    # udpate procnums of an event listener
    assert server.update_numprocs('dummy_ears', 3) == 'eventlistener:dummy_ears'
    assert server.parser['eventlistener:dummy_ears']['numprocs'] == '3'
    # reload programs
    result = server.reload_processes_from_section('eventlistener:dummy_ears', 'dummy_ears')
    expected_printable = [process.name for process in result]
    assert expected_printable == ['dummy_ears_20', 'dummy_ears_21', 'dummy_ears_22']
    assert server.procnumbers == {'dummy': 0, 'dummy_0': 0, 'dumber_10': 0,
                                  'dummy_ears_20': 0, 'dummy_ears_21': 1, 'dummy_ears_22': 2}
def valid_verify_email(form, email):
    """Validate that the two email fields match and the address is unused.

    Args:
        form: WTForms form with ``email`` and ``email_verify`` fields.
        email: the field under validation (unused; kept for the WTForms
            validator signature).

    Raises:
        ValidationError: if the fields differ, an account already exists for
            the address, or the account lookup itself fails.

    Returns:
        True when the email passes all checks.
    """
    # Validation failures must propagate unchanged; previously a blanket
    # ``except`` re-wrapped them with a misleading "Email is wrong" message.
    if form.email.data != form.email_verify.data:
        raise ValidationError('Email address is not the same')
    try:
        existing = models.Account.pull_by_email(form.email.data)
    except Exception as e:
        # Only unexpected lookup failures get wrapped.
        raise ValidationError('Email is wrong check it again: ' + str(e))
    if existing is not None:
        raise ValidationError('An account already exists for that email address')
    return True  # | 5,327,863 |
def _computePolyVal(poly, value):
"""
Evaluates a polynomial at a specific value.
:param poly: a list of polynomial coefficients, (first item = highest degree to last item = constant term).
:param value: number used to evaluate poly
:return: a number, the evaluation of poly with value
"""
#return numpy.polyval(poly, value)
acc = 0
for c in poly:
acc = acc * value + c
return acc | 5,327,864 |
def schedule_job(
    client: scheduler_v1.CloudSchedulerClient,
    project_id: str,
    location_id: str,
    timezone: str,
    schedule: str,
    path: str,
) -> None:
    """Schedule a recurring GET of `path` via Google Cloud Scheduler.

    Args:
        client: Cloud Scheduler client used to create the job.
        project_id: GCP project that will own the job.
        location_id: GCP location (region) for the job.
        timezone: time zone in which `schedule` is interpreted.
        schedule: cron-style schedule expression.
        path: relative URI on the App Engine service to GET.
    """
    # Create a Job to schedule
    target = AppEngineHttpTarget(relative_uri=path, http_method="GET")
    job = Job(app_engine_http_target=target, schedule=schedule, time_zone=timezone)
    # Schedule the Job we just created
    parent = client.location_path(project_id, location_id)
    client.create_job(parent, job) | 5,327,865 |
def fix_e26(source):
    """Format block comments.

    Rewrites '#'-comment lines so they start with exactly '# ', skipping
    lines that live inside multiline strings or docstrings and leaving
    one-character comments (e.g. shebang-like '#!') alone.
    """
    if '#' not in source:
        # Optimization.
        return source
    # Lines inside (doc)strings must not be rewritten even if they start
    # with '#'.
    string_line_numbers = multiline_string_lines(source,
                                                 include_docstrings=True)
    fixed_lines = []
    sio = StringIO(source)
    for (line_number, line) in enumerate(sio.readlines(), start=1):
        if (line.lstrip().startswith('#') and
                line_number not in string_line_numbers):
            indentation = _get_indentation(line)
            line = line.lstrip()
            # Normalize beginning if not a shebang.
            if len(line) > 1:
                # Leave multiple spaces like '#  ' alone.
                if line.count('#') > 1 or line[1].isalnum():
                    line = '# ' + line.lstrip('# \t')
            fixed_lines.append(indentation + line)
        else:
            fixed_lines.append(line)
    return ''.join(fixed_lines) | 5,327,866 |
def graph_distance(tree, node1, node2=None):
    """ Return shortest distance from node1 to node2,
    or just update all node.distance shortest to node1 """
    # Reset per-node search state before the traversal.
    for node in tree.nodes():
        node.distance = inf
        node.back = None  # node backwards towards node1
    # NOTE(review): shortest-path correctness depends on Queue.pop being
    # FIFO (breadth-first) — confirm against the Queue implementation.
    fringe = Queue([node1])
    while fringe:
        node = fringe.pop()
        #print(f"looking at '{node}'")
        # node1 has no back-pointer, so it gets distance -1 + 1 == 0.
        previous_distance = node.back.distance if node.back else -1
        node.distance = previous_distance + 1
        if node == node2:
            break
        for neighbor in node.neighbors():
            # Only push neighbours that would get a shorter distance.
            if neighbor.distance > node.distance:
                fringe.push(neighbor)
                neighbor.back = node
    if node2:
        return node2.distance | 5,327,867 |
def trunicos(b):
    """Return a unit-distance embedding of the truncated icosahedron graph."""
    # NOTE(review): relies on project helpers star_radius, root (roots of
    # unity), cu and symmetrise; presumably cu(p, q) constructs a point at
    # unit distance — confirm against their definitions.
    p0 = star_radius(5)*root(1,20,1)
    p1 = p0 + root(1,20,1)
    p2 = mpc(b, 0.5)
    p3 = cu(p2, p1)
    p4 = cu(p3, p1*root(1,5,-1))
    p5 = cu(p4, p2*root(1,5,-1))
    # Second tuple element: residual constraints that must vanish for a
    # valid embedding under D5 symmetry.
    return (symmetrise((p0, p1, p2, p3, p4, p5), "D5"),
            [abs(p5 - root(1,5,-1)*conj(p5)) - 1]) | 5,327,868 |
def histogram2d(
    x1: torch.Tensor, x2: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10
) -> torch.Tensor:
    """Estimate the joint (2d) histogram of two input tensors.

    The estimate uses kernel density estimation, which requires a bandwidth
    (smoothing) parameter.

    Args:
        x1 (torch.Tensor): first input tensor with shape :math:`(B, D1)`.
        x2 (torch.Tensor): second input tensor with shape :math:`(B, D2)`.
        bins (torch.Tensor): the number of bins to use :math:`(N_{bins})`.
        bandwidth (torch.Tensor): Gaussian smoothing factor with shape [1].
        epsilon (float): a scalar, for numerical stability. Default: 1e-10.

    Returns:
        torch.Tensor: computed histogram of shape :math:`(B, N_{bins}, N_{bins})`.

    Examples:
        >>> x1 = torch.rand(2, 32)
        >>> x2 = torch.rand(2, 32)
        >>> bins = torch.torch.linspace(0, 255, 128)
        >>> hist = histogram2d(x1, x2, bins, bandwidth=torch.tensor(0.9))
        >>> hist.shape
        torch.Size([2, 128, 128])
    """
    # Only the per-variable kernel responses are needed for the joint
    # estimate; the marginal pdfs themselves are discarded.
    _, kernels_x1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)
    _, kernels_x2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)
    return joint_pdf(kernels_x1, kernels_x2)  # | 5,327,869 |
def test_bond_explicit_ji(pcff):
    """Bond parameters must not depend on the order of the two atom types."""
    atom_i = "c"
    atom_j = "h"
    ptype_ij, key_ij, form_ij, params_ij = pcff.bond_parameters(atom_i, atom_j)
    ptype_ji, key_ji, form_ji, params_ji = pcff.bond_parameters(atom_j, atom_i)
    # The reversed lookup still resolves to the explicit ("c", "h") entry...
    assert ptype_ji == "explicit"
    assert key_ji == ("c", "h")
    # ...and yields exactly the same parameter set.
    assert params_ij == params_ji  # | 5,327,870 |
def empty_iterator() -> typing.Iterator:
    """
    Return an empty iterator.

    :return: an iterator

    :Example:

    >>> from flpy.iterators import empty_iterator, It
    >>> It(empty_iterator()).collect()
    ItA<[]>
    """
    # A generator function with an unreachable ``yield`` yields nothing,
    # exactly like ``yield from ()``.
    return
    yield  # | 5,327,871 |
def update(Q, target_Q, policy, target_policy, opt_Q, opt_policy,
           samples, gamma=0.99):
    """Update a Q-function and a policy.

    One actor-critic step on a minibatch: the critic Q is regressed towards
    the bootstrapped target built from the *target* networks, then the actor
    is updated to maximize Q(s, policy(s)).

    Args:
        Q, target_Q: online / target critic networks.
        policy, target_policy: online / target actor networks.
        opt_Q, opt_policy: optimizers attached to Q and policy.
        samples: sequence of (obs, action, reward, done, obs_next) tuples.
        gamma: discount factor for the bootstrapped target.
    """
    xp = Q.xp
    obs = xp.asarray([sample[0] for sample in samples], dtype=np.float32)
    action = xp.asarray([sample[1] for sample in samples], dtype=np.float32)
    reward = xp.asarray([sample[2] for sample in samples], dtype=np.float32)
    done = xp.asarray([sample[3] for sample in samples], dtype=np.float32)
    obs_next = xp.asarray([sample[4] for sample in samples], dtype=np.float32)
    def update_Q():
        # Predicted values: Q(s,a)
        y = F.squeeze(Q(obs, action), axis=1)
        # Target values: r + gamma * Q(s,policy(s))
        with chainer.no_backprop_mode():
            next_q = F.squeeze(target_Q(obs_next, target_policy(obs_next)),
                               axis=1)
            # (1 - done) zeroes the bootstrap term on terminal transitions.
            target = reward + gamma * (1 - done) * next_q
        loss = F.mean_squared_error(y, target)
        Q.cleargrads()
        loss.backward()
        opt_Q.update()
    def update_policy():
        # Maximize Q(s,policy(s))
        q = Q(obs, policy(obs))
        q = q[:] # Avoid https://github.com/chainer/chainer/issues/2744
        loss = - F.mean(q)
        policy.cleargrads()
        loss.backward()
        opt_policy.update()
    update_Q()
    update_policy() | 5,327,872 |
def test_profile_increment() -> None:
    """Test the profile_increment method."""
    # Custom profile-properties enum is loaded from its dotted path.
    m = MixpanelTrack(
        settings={
            "mixpanel.profile_properties": "pyramid_mixpanel.tests.test_track.FooProfileProperties"
        },
        distinct_id="foo",
    )
    m.profile_increment(props={FooProfileProperties.foo: 1})
    # The mocked consumer records the raw people/Engage message: an `$add`
    # increment of the "Foo" property for distinct_id "foo".
    assert m.mocked_messages == [
        {
            "endpoint": "people",
            "msg": {
                "$distinct_id": "foo",
                "$add": {"Foo": 1},
            },
        }
    ] | 5,327,873 |
def test_plot_generic_string_argument():
    """Generic string keyword arguments are emitted as quoted gnuplot options.

    (No real use-case seen beyond title, which is escaped in other ways.)
    """
    # `test_arg__s="2"`: suffix `__s` form of the string-argument marker.
    with autogpy.Figure("test_plot", file_identifier="figtest") as fig:
        fig.plot(XX_test_linspace, test_arg__s="2")
    fcontent = fig.get_gnuplot_file_content()
    assert 'test_arg "2"' in fcontent
    # Note that here we pass the arg as int.
    # It should be cast to a string in the process (prefix `s__` form).
    with autogpy.Figure("test_plot", file_identifier="figtest") as fig:
        fig.plot(XX_test_linspace, s__test_arg=2)
    fcontent = fig.get_gnuplot_file_content()
    assert 'test_arg "2"' in fcontent | 5,327,874 |
def error_heatmap(alloc_df, actual_df, demand_columns, region_col="pca", error_metric="r2", leap_exception=False):
    """
    Create heatmap of 365X24 dimension to visualize the annual hourly error.
    Uses the output of `allocate_and_aggregate` function as input along with
    actual demand data to plot the annual hourly errors as a heatmap on a 365X24
    grid.
    Args:
        alloc_df (pandas.DataFrame): A dataframe with the `region` and a subset
            of `demand_columns`. Each column name in the `demand_columns` is
            typically an hourly datetime object refering to the time period of
            demand observed, but can be any other timeslice. The
            `demand_columns` contain the allocated demand as allocated by the
            `allocate_and_aggregate` function. Any columns not present in
            `demand_columns` will be imputed as np.nan.
        actual_df (pandas.DataFrame): A similar dataframe as actual_demand,
            but contains actual demand data, which is being compared against.
        region_col (str): The column_name which contains the unique ids for each
            of the regions.
        error_metric (str): Specifies the error metric to be observed in the
            heatmap. Possible error metrics available include: Mean Squared
            Error ('mse'), Mean Absolute Percentage Error ('mape%') and R2 value
            ('r2').
        leap_exception (bool): Specify if the year being analyzed is a leap year
            or not to account for February 29th.
    Returns:
        None: Displays the image
    """
    font = {'size': 12}
    mpl.rc('font', **font)
    # Only score the timeslices present in both frames.
    demand_columns = list(set(demand_columns)
                          .intersection(set(actual_df.columns)))
    columns_excepted = set(demand_columns).difference(set(alloc_df.columns))
    actual_df = actual_df.sort_values(
        region_col)[[region_col] + demand_columns]
    # Columns missing from the allocation are imputed as NaN so those cells
    # end up masked instead of erroring.
    for col in columns_excepted:
        alloc_df[col] = np.nan
    # Align allocation rows to actual_df's region order before comparing.
    alloc_df = actual_df[[region_col]].merge(
        alloc_df[[region_col] + demand_columns], how="left")
    # NOTE(review): np.empty leaves unassigned cells with arbitrary values;
    # cells never written below are masked only if they happen to be NaN —
    # confirm whether np.full(..., np.nan) was intended.
    hmap = np.empty((365 + int(leap_exception), 24))
    dofw_list = [None] * (365 + int(leap_exception))
    month_label_idx = []
    month_start_idx = []
    for col in demand_columns:
        # Row = day-of-year, column = hour; cell = error across regions.
        hmap[col.timetuple().tm_yday - 1, col.hour] = vec_error(np.array(alloc_df[col]),
                                                                np.array(
                                                                    actual_df[col]),
                                                                error_metric)
        dofw_list[col.timetuple().tm_yday - 1] = col.weekday()
        if col.day == 1:
            month_start_idx.append(
                (col.timetuple().tm_yday - 1, "-------------"))
        elif col.day == 15:
            month_label_idx.append(
                (col.timetuple().tm_yday, calendar.month_name[col.month] + ' '))
    monday_idx = [(i + 0.5, "(Mon)")
                  for i, v in enumerate(dofw_list) if v == 0]
    df_idx_label = (pd.DataFrame(list(set(month_start_idx))
                                 + list(set(month_label_idx))
                                 + list(set(monday_idx)), columns=['index', 'label'])
                    .sort_values("index"))
    yticks = df_idx_label["index"].tolist()
    yticklabels = df_idx_label["label"].tolist()
    mask = np.isnan(hmap)
    # fig, ax = plt.subplots(figsize=(6, 80))
    fig = plt.figure(figsize=(6, 80))
    ax = fig.add_subplot(111)
    hmap = sns.heatmap(hmap, ax=ax, mask=mask)
    hmap.set_yticks(yticks)
    hmap.set_yticklabels(
        yticklabels, rotation=0)
    hmap.set_xticks([tick + 0.5 for tick in [0, 4, 8, 12, 16, 20]])
    hmap.set_xticklabels([0, 4, 8, 12, 16, 20])
    plt.ylabel("Day of Year")
    plt.xlabel("Hour of Day (UTC Datetime)")
    plt.title(error_metric.upper())
    plt.show()
    mpl.rcdefaults() | 5,327,875 |
def webhooks_v2(request):
    """
    Handles all known webhooks from stripe, and calls signals.
    Plug in as you need.
    """
    # NOTE(review): the Stripe-Signature header is not verified here, so any
    # client that can reach this endpoint can forge events — confirm whether
    # verification happens upstream (e.g. in middleware).
    if request.method != "POST":
        return HttpResponse("Invalid Request.", status=400)
    event_json = json.loads(request.body)
    # Stripe event types use dots ("invoice.paid"); WEBHOOK_MAP keys use
    # underscores, hence the translation.
    event_key = event_json['type'].replace('.', '_')
    if event_key in WEBHOOK_MAP:
        WEBHOOK_MAP[event_key].send(sender=None, full_json=event_json)
    # Unknown event types are still acknowledged with 200.
    return HttpResponse(status=200) | 5,327,876 |
def nth(iterable, n, default=None):
    """Return the item at position ``n`` of ``iterable``.

    :param iterable: the iterable to pull the item from
    :param n: zero-based index of the wanted item. Must be >= 0
    :param default: value returned when the iterable is too short
    :return: the nth item, or ``default`` if ``n`` isn't a valid index
    """
    tail = islice(iterable, n, None)
    return next(tail, default)  # | 5,327,877 |
def _dict_items(typingctx, d):
    """Get dictionary iterator for .items()

    Numba typing/lowering intrinsic: constructs the items-iterable type for
    the typed dict `d` and returns the (signature, codegen) pair that the
    intrinsic machinery expects.
    """
    resty = types.DictItemsIterableType(d)
    sig = resty(d)
    codegen = _iterator_codegen(resty)
    return sig, codegen | 5,327,878 |
def send_messages(connection, topic, input):
    """Read messages from the input and send them to the AMQP queue.

    Loops until EOF on `input`, unpickling one message dict at a time and
    publishing it to `topic` on `connection`.

    SECURITY NOTE(review): pickle.load executes arbitrary code from the
    stream — only feed this trusted input.
    """
    while True:
        try:
            body = pickle.load(input)
        except EOFError:
            # Clean end of the pickled stream.
            break
        print('%s: %s' %
              (body.get('timestamp'),
               body.get('event_type', 'unknown event'),
               ))
        connection.topic_send(topic, body) | 5,327,879 |
def odd_desc(count):
    """Return a list of ``count`` descending odd numbers ending with 1.

    For e.g if count = 2, return a list of 2 odds [3, 1].
    """
    # The largest odd produced is 2*count - 1; step down by 2 until 1.
    return list(range(2 * count - 1, 0, -2))  # | 5,327,880 |
def validate_log_row(columns):
    """
    :param columns: (name, email, datetime) tuple
    """
    assert len(columns) == 3
    # Name and email must be strings; pick the right string type for the
    # running interpreter (``basestring`` on Python 2, ``str`` on Python 3).
    try:
        string_type = basestring
    except NameError:
        string_type = str
    for value in columns[:2]:
        assert isinstance(value, string_type)
    assert isinstance(columns[2], datetime)  # | 5,327,881 |
def retrieval_score(test_ratings: pd.DataFrame,
                    recommender,
                    remove_known_pos: bool = False,
                    metric: str = 'mrr') -> float:
    """
    Mean Average Precision / Mean Reciprocal Rank of first relevant item @ N

    Averages a per-user score over all recommender users that have relevant
    items in `test_ratings`.

    NOTE(review): the 'map' branch computes precision@N (hits / N), not a
    true average precision — confirm whether that is intended.
    """
    N = recommender.N
    user_scores = []
    relevant_items = get_relevant_items(test_ratings)
    for user in recommender.users:
        if user in relevant_items.keys():
            predicted_items = recommender.get_recommendations(user, remove_known_pos)
            # Drop the scores; keep only the ranked item ids.
            predicted_items = [item for item, _ in predicted_items]
            if metric == 'map':
                true_positives = np.intersect1d(relevant_items[user],
                                                predicted_items)
                score = len(true_positives) / N
            elif metric == 'mrr':
                score = np.mean([reciprocal_rank(item, predicted_items)
                                 for item in relevant_items[user]])
            else:
                raise ValueError(f"Unknown value {metric} for Argument `metric`")
            user_scores.append(score)
    return np.mean(user_scores) | 5,327,882 |
def test_dump_load_keras_model_with_dict(tmpdir, save_and_load):
    """Test whether tensorflow ser/de-ser work for models returning dictionaries"""
    class DummyModel(tf.keras.Model):
        def __init__(self):
            super().__init__()
        def _random_method(self):
            pass
        def call(self, in_):
            # Return the input under two dictionary keys.
            out = {}
            out["b1"], out["b2"] = in_, in_
            return out
    in_ = tf.ones((1, 3))
    model = DummyModel()
    # this line is very important or tensorflow cannot trace the graph of the module
    model(in_)
    loaded = save_and_load(model, str(tmpdir))
    out = loaded(in_)
    # The dict structure must survive the round-trip; untraced helper
    # methods (like _random_method) are expected to be dropped.
    assert set(out) == {"b1", "b2"}
    assert tf.is_tensor(out["b1"])
    assert tf.is_tensor(out["b2"])
    assert not hasattr(loaded, "_random_method") | 5,327,883 |
def stemmer_middle_high_german(text_l, rem_umlauts=True, exceptions=exc_dict):
    """Stem a Middle High German text.

    text_l: text in string format
    rem_umlauts: choose whether to remove umlauts from string
    exceptions: hard-coded dictionary for the cases the algorithm fails

    Returns the list of stemmed (or passed-through) tokens.
    """
    # Normalize text
    text_l = normalize_middle_high_german(
        text_l, to_lower_all=False, to_lower_beginning=True
    )
    # Tokenize text
    word_tokenizer = WordTokenizer("middle_high_german")
    text_l = word_tokenizer.tokenize(text_l)
    text = []
    for word in text_l:
        # Explicit membership test instead of the previous bare ``except:``,
        # which silently swallowed *every* error raised in the branch.
        if word in exceptions:
            # Hard-coded override for words the stemmer gets wrong.
            text.append(exceptions[word])
        elif word[0].isupper():
            # MHG only uses upper case for locations, people, etc. So any word
            # that starts with a capital letter while not being at the start of
            # a sentence will automatically be excluded.
            text.append(word)
        elif word in MHG_STOPS:
            text.append(word)  # Filter stop words
        else:
            text.append(stem_helper(word, rem_umlaut=rem_umlauts))
    return text  # | 5,327,884 |
def test_queue_trials(start_connected_emptyhead_cluster):
    """Tests explicit oversubscription for autoscaling.
    Tune oversubscribes a trial when `queue_trials=True`, but
    does not block other trials from running.
    """
    cluster = start_connected_emptyhead_cluster
    runner = TrialRunner()
    def create_trial(cpu, gpu=0):
        # Trial on the "__fake" trainable that stops after 3 iterations.
        kwargs = {
            "resources": Resources(cpu=cpu, gpu=gpu),
            "stopping_criterion": {
                "training_iteration": 3
            }
        }
        return Trial("__fake", **kwargs)
    runner.add_trial(create_trial(cpu=1))
    # Head node has no CPUs, so without queueing this step must fail.
    with pytest.raises(TuneError):
        runner.step()  # run 1
    del runner
    executor = RayTrialExecutor(queue_trials=True)
    runner = TrialRunner(trial_executor=executor)
    cluster.add_node(num_cpus=2)
    cluster.wait_for_nodes()
    cpu_only = create_trial(cpu=1)
    runner.add_trial(cpu_only)
    runner.step()  # add cpu_only trial
    gpu_trial = create_trial(cpu=1, gpu=1)
    runner.add_trial(gpu_trial)
    runner.step()  # queue gpu_trial
    # This tests that the cpu_only trial should bypass the queued trial.
    for i in range(3):
        runner.step()
    assert cpu_only.status == Trial.TERMINATED
    assert gpu_trial.status == Trial.RUNNING
    # Scale up
    cluster.add_node(num_cpus=1, num_gpus=1)
    cluster.wait_for_nodes()
    for i in range(3):
        runner.step()
    assert gpu_trial.status == Trial.TERMINATED | 5,327,885 |
def get_bboxes(outputs, proposals, num_proposals, num_classes,
               im_shape, im_scale, max_per_image=100, thresh=0.001, nms_thresh=0.4):
    """
    Returns bounding boxes for detected objects, organized by class.
    Transforms the proposals from the region proposal network to bounding box predictions
    using the bounding box regressions from the classification network:
    (1) Applying bounding box regressions to the region proposals.
    (2) For each class, take proposed boxes where the corresponding objectness score is greater
    then THRESH.
    (3) Apply non-maximum suppression across classes using NMS_THRESH
    (4) Limit the maximum number of detections over all classes to MAX_PER_IMAGE
    Arguments:
        outputs (list of tensors): Faster-RCNN model outputs
        proposals (Tensor): Proposed boxes from the model's proposalLayer
        num_proposals (int): Number of proposals
        num_classes (int): Number of classes
        im_shape (tuple): Shape of image
        im_scale (float): Scaling factor of image
        max_per_image (int): Maximum number of allowed detections per image. Default is 100.
                             None indicates no enforced maximum.
        thresh (float): Threshold for objectness score. Default is 0.001.
        nms_thresh (float): Threshold for non-maximum suppression. Default is 0.4.
    Returns:
        detections (list): List of bounding box detections, organized by class. Each element
                           contains a numpy array of bounding boxes for detected objects
                           of that class.
    """
    detections = [[] for _ in range(num_classes)]
    proposals = proposals.get()[:num_proposals, :] # remove padded proposals
    boxes = proposals[:, 1:5] / im_scale # scale back to real image space
    # obtain bounding box corrections from the frcn layers
    scores = outputs[2][0].get()[:, :num_proposals].T
    bbox_deltas = outputs[2][1].get()[:, :num_proposals].T
    # apply bounding box corrections to the region proposals
    pred_boxes = bbox_transform_inv(boxes, bbox_deltas)
    pred_boxes = clip_boxes(pred_boxes, im_shape)
    # Skip the background class, start processing from class 1
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > thresh)[0]
        # obtain class-specific boxes and scores
        cls_scores = scores[inds, j]
        cls_boxes = pred_boxes[inds, j * 4:(j + 1) * 4]
        # cls_dets columns: [x1, y1, x2, y2, score]
        cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)
        # apply non-max suppression
        keep = nms(cls_dets, nms_thresh)
        cls_dets = cls_dets[keep, :]
        # store results
        detections[j] = cls_dets
    # Limit to max_per_image detections *over all classes*
    if max_per_image is not None:
        # obtain flattened list of all image scores
        image_scores = np.hstack([detections[j][:, -1]
                                  for j in range(1, num_classes)])
        if len(image_scores) > max_per_image:
            # compute threshold needed to keep the top max_per_image
            image_thresh = np.sort(image_scores)[-max_per_image]
            # apply threshold
            for j in range(1, num_classes):
                keep = np.where(detections[j][:, -1] >= image_thresh)[0]
                detections[j] = detections[j][keep, :]
    return detections | 5,327,886 |
def air_transport_per_year_by_country(country):
    """Return the passengers-carried-per-year data for the given country.

    Args:
        country: World Bank country code used to filter the indicator rows.

    Returns:
        JSON response with (Year, Value) rows for indicator IS.AIR.PSGR.
    """
    # Parameterized query: `country` comes from the request, so interpolating
    # it with str.format allowed SQL injection. (`?` is the sqlite/DB-API
    # qmark paramstyle — adjust if get_db() uses a different driver.)
    cur = get_db().execute(
        'SELECT Year, Value FROM Indicators '
        'WHERE CountryCode=? AND IndicatorCode="IS.AIR.PSGR"',
        (country,))
    air_transport = cur.fetchall()
    cur.close()
    return jsonify(air_transport)  # | 5,327,887 |
def outside_range(number, min_range, max_range):
    """
    Return True if `number` lies OUTSIDE the inclusive range
    [`min_range`, `max_range`], False otherwise.

    (The previous docstring claimed the opposite — "between ... exclusive" —
    while the implementation has always tested for outside.)
    """
    return number < min_range or number > max_range  # | 5,327,888 |
def is_string_constant(node):
    """Checks whether the :code:`node` is a double-quoted string constant."""
    # Raw string for the regex pattern (the escaped form '\"' relied on an
    # incidental escape and is flagged by linters); semantics unchanged.
    return is_leaf(node) and re.match(r'^"[^"]*"$', node) is not None  # | 5,327,889 |
def test_show_dimensions(capsys, nc_dataset):
    """show_dimensions prints dimension string representation
    """
    nc_dataset.createDimension('foo', 42)
    nc_tools.show_dimensions(nc_dataset)
    # Capture stdout and check only the first printed line.
    out, err = capsys.readouterr()
    assert out.splitlines()[0] == (
        "<class 'netCDF4._netCDF4.Dimension'>: name = 'foo', size = 42") | 5,327,890 |
def is_list_type(t) -> bool:
    """
    Return True if ``t`` is ``List`` python type

    Accepts the builtin ``list``, a pyarrow list type, ``typing.List`` /
    ``list[...]`` generics, or a dict schema whose 'type' entry is itself a
    list type (checked recursively).
    """
    # print(t, getattr(t, '__origin__', None) is list)
    return t == list or is_pa_type(t, pa.types.is_list) or (
        hasattr(t, '__origin__') and t.__origin__ in (list, List)
    ) or (
        isinstance(t, dict) and is_list_type(t.get('type'))
    ) | 5,327,891 |
def get_measure_of_money_supply():
    """ Fetch China's money-supply statistics from Sina.

    Returns: the fetched data table. Data starts at 1978.1.

    Examples:
        .. code-block:: python

            >>> from finance_datareader_py.sina import get_measure_of_money_supply

            >>> df = get_measure_of_money_supply()
            >>> print(df.iloc[0][df.columns[0]])
            >>> print(df.index[-1])
            >>> print(df.columns)

            1776196.11
            1978.8
            Index(['货币和准货币(广义货币M2)(亿元)', '货币和准货币(广义货币M2)同比增长(%)', '货币(狭义货币M1)(亿元)',
            '货币(狭义货币M1)同比增长(%)', '流通中现金(M0)(亿元)', '流通中现金(M0)同比增长(%)', '活期存款(亿元)',
            '活期存款同比增长(%)', '准货币(亿元)', '准货币同比增长(%)', '定期存款(亿元)', '定期存款同比增长(%)',
            '储蓄存款(亿元)', '储蓄存款同比增长(%)', '其他存款(亿元)', '其他存款同比增长(%)'],
            dtype='object')
    """
    # One row per month from 1978 through the end of next year.
    num = (datetime.date.today().year + 1 - 1978) * 12
    return _get_mac_price(num=num, event=1, cate='fininfo', index='统计时间') | 5,327,892 |
def p_parametro(production):
    """parametro : tipo DOIS_PONTOS ID
    | parametro ABRE_COL FECHA_COL
    """
    # NOTE: the docstring above IS the PLY grammar rule — do not edit it.
    # Grammar action: builds the AST node for a parameter declaration.
    node = Parametro()
    first_name = production[1].id
    if first_name == "TIPO":
        # First alternative: type, colon token, identifier.
        node.insert_node_below(production[1])
        node.insert_node_below(Token(identifier=':'))
        node.insert_node_below(Id(identifier=production[3]))
    else:
        # Second alternative: append array brackets to an existing parameter.
        parametro = production[1]
        parametro.insert_node_below(Token(identifier='['))
        parametro.insert_node_below(Token(identifier=']'))
        node = parametro
    production[0] = node | 5,327,893 |
def getallbdays_ml():
    """Read all the saved files and parse all birthdays.

    Writes parsed rows to a CSV and unparseable lines to a failure file,
    then prints a success/failure summary.
    """
    failfile = u'./data-failedlines-v2.dat'
    csvfile = u'./data-wikibdays-occupations-v2.csv'
    fails = 0
    successes = 0
    # Days per month (February kept at 29 so leap-day birthdays are covered).
    # BUGFIX: the previous table listed June with 20 days, silently dropping
    # all June 21-30 birthdays.
    months = [31, 29, 31,  # January, February, March
              30, 31, 30,  # April, May, June
              31, 31, 30,  # July, August, September
              31, 30, 31]  # October, November, December
    # Context managers guarantee both output files are closed even if an
    # unexpected error escapes the per-line handling below.
    with codecs.open(failfile, 'w', 'utf-8') as failedlines, \
            codecs.open(csvfile, 'w', 'utf-8') as csvf:
        csvf.write(u'i_year,i_month,i_day,s_fullname,s_nationality,b_double_nationality,s_occupation\n')
        # This is super inefficient but we don't need concurrency right now.
        for month in range(1, len(months) + 1):
            for day in range(1, months[month - 1] + 1):
                for bday in getbdays(month, day):
                    try:
                        csvf.write(dicttocsv_ml(parseline_ml(bday, month, day)))
                        successes += 1
                    except TypeError:
                        failedlines.write(bday + ' None TypeError' + u'\n')
                        print((u"Failed: " + bday).encode('utf-8'), 'Type Error')
                        fails += 1
                    except ValueError:
                        failedlines.write(bday + ' ValueError' + u'\n')
                        print((u"Failed: " + bday).encode('utf-8'), 'Value Error')
                        fails += 1
                    except Exception:
                        # Still log-and-continue, but no longer a bare except
                        # that would also swallow KeyboardInterrupt/SystemExit.
                        failedlines.write(bday + ' Unknown Error' + u'\n')
                        print((u"Failed: " + bday).encode('utf-8'), 'Unknown Error')
                        fails += 1
    print(u"Success: {0:d}, Fails: {1:d}, Total: {2:d}".format(successes, fails, successes + fails))  # | 5,327,894 |
def create_all():
    """Create the complete TkinterPmwDemo.

    Blocks in root.mainloop() until the window is closed.
    """
    root = Tk()
    Pmw.initialise(root)
    root.title('Tkinter/Pmw Demo')
    # tk_strictMotif changes the file dialog in Tk's OpenFile etc.
    #root.tk_strictMotif(1)
    #Pmw.initialise(root,fontScheme='pmw1')
    #import scitools.misc; scitools.misc.fontscheme1(root)
    widget = TkinterPmwDemo(root)
    # this widget packs itself...
    root.mainloop() | 5,327,895 |
def _SparseMatrixAddGrad(op, grad):
    """Gradient for sparse_matrix_add op.

    Returns gradients only for the two CSR matrix inputs; the scalar inputs
    alpha and beta get None (see TODO below).
    """
    # input to sparse_matrix_add is (a, b, alpha, beta)
    # with a, b CSR and alpha beta scalars.
    # output is: alpha * a + beta * b
    # d(a*A + b*B)/dA . grad = a * grad
    # May have gotten the transposes wrong below.
    # d(a*A + b*B)/da . grad = tr(A' . grad)
    # For now, only implement gradients w.r.t. A and B.
    # TODO(ebrevdo): Implement reduce_sum for SparseMatrix so that we
    # can implement gradients w.r.t. a and b.
    (a_csr, b_csr, alpha, beta) = op.inputs
    # Prune the gradient to each input's sparsity pattern before scaling.
    return (sparse_csr_matrix_ops.sparse_matrix_mul(
        _PruneCSRMatrix(grad, a_csr), alpha),
            sparse_csr_matrix_ops.sparse_matrix_mul(
                _PruneCSRMatrix(grad, b_csr), beta), None, None) | 5,327,896 |
def download(
    filename,
    data,
    contentType=None,
    sessionId="current_session",
    pageId="current_page",
):
    """Downloads data from the gateway to a device running a session.

    Args:
        filename (str): Suggested name for the downloaded file.
        data (object): The data to be downloaded. May be a String, a
            byte[], or an InputStream. Strings will be written with in
            "utf-8" encoding.
        contentType (str): Value for the "Content-Type" header. Example:
            "text/plain; charset=utf-8". Optional.
        sessionId (str): Identifier of the Session to target. If omitted
            the current Session will be used automatically. When
            targeting a different session, then the pageId parameter
            must be included in the call. Optional.
        pageId (str): Identifier of the Page to target. If omitted, the
            current Page will be used automatically. Optional.
    """
    # Stub implementation: just echoes the arguments — presumably replaced
    # by the real gateway implementation at runtime (TODO confirm).
    builtins.print(filename, data, contentType, sessionId, pageId) | 5,327,897 |
def make_inverter_path(wire, inverted):
    """ Create site pip path through an inverter.

    Returns the list of (kind, ...) tuples describing the site-pip route
    for `wire`, routed through the inverter when `inverted` is truthy.
    """
    pip_name = '{}INV'.format(wire)
    if not inverted:
        return [('site_pip', pip_name, wire)]
    inverted_wire = '{}_B'.format(wire)
    return [('site_pip', pip_name, inverted_wire),
            ('inverter', pip_name)]  # | 5,327,898 |
def test_encodes_semantic_meaning():
    """
    Check if the distance between embeddings of similar sentences are smaller
    than dissimilar pair of sentences.
    """
    docs = DocumentArray(
        [
            Document(id="A", text="a furry animal that with a long tail"),
            Document(id="B", text="a domesticated mammal with four legs"),
            Document(id="C", text="a type of aircraft that uses rotating wings"),
            Document(id="D", text="flying vehicle that has fixed wings and engines"),
        ]
    )
    clip_text_encoder = CLIPTextEncoder()
    clip_text_encoder.encode(DocumentArray(docs), {})
    # assert semantic meaning is captured in the encoding
    docs.match(docs)
    # matches[0] is the document itself; matches[1] must be its semantic
    # pair (A<->B are animals, C<->D are aircraft).
    matches = ["B", "A", "D", "C"]
    for i, doc in enumerate(docs):
        assert doc.matches[1].id == matches[i] | 5,327,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.