content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def bbox_to_radec_grid(wcs, bbox, tol=1e-7):
    """
    Create an ra/dec grid aligned with pixels from a bounding box.

    Parameters
    ----------
    wcs : `lsst.afw.geom.SkyWcs`
        WCS object
    bbox : `lsst.geom.Box2I`
        Bounding box
    tol : `float`
        Tolerance for WCS grid approximation
    Returns
    -------
    xy : `numpy.ndarray`
        (nx*ny)x2 array with x ([:, 0]) and y ([:, 1]) pixel values
        of generated grid points
    radec : `numpy.ndarray`
        (nx*ny)x2 array with RA ([:, 0]) and Dec ([:, 1]) positions of
        generated grid points (in degrees)
    """
    trans = wcs.getTransform()
    mapping = trans.getMapping()
    # Forward-transform the full pixel grid of the bbox in one call; the
    # mapping returns a flat array of sky coordinates in radians.
    temp = mapping.tranGridForward([bbox.getBeginX(), bbox.getBeginY()],
                                   [bbox.getEndX() - 1, bbox.getEndY() - 1],
                                   tol,
                                   max([bbox.getEndX(), bbox.getEndY()]),
                                   bbox.getArea()).flatten()
    # First half of `temp` holds the RA values, second half the Dec values
    # (one entry per grid point); wrap RA into [0, 360) degrees.
    radec = np.zeros((temp.size // 2, 2))
    radec[:, 0] = np.rad2deg(temp[0: temp.size // 2]) % 360.0
    radec[:, 1] = np.rad2deg(temp[temp.size // 2:])
    # Matching pixel coordinates laid out row-major: x cycles fastest
    # (tile), y repeats per row (repeat), offset by the bbox origin.
    xy = np.zeros_like(radec, dtype=np.int32)
    xy[:, 0] = np.tile(np.arange(bbox.getDimensions().getX()),
                       bbox.getDimensions().getY()) + bbox.getBeginX()
    xy[:, 1] = np.repeat(np.arange(bbox.getDimensions().getY()),
                         bbox.getDimensions().getX()) + bbox.getBeginY()
    return xy, radec
def step2(x):
    """Convolution of three step functions: zero for x <= 0, x^2/2 above."""
    positive = x > 0
    result = np.zeros_like(x)
    result[positive] = 0.5 * np.square(x[positive])
    return result
from typing import Union
from datetime import datetime
from typing import List
from pathlib import Path
def evtx2json(input_path: str, shift: Union[str, datetime], multiprocess: bool = False, chunk_size: int = 500) -> List[dict]:
    """Convert Windows Eventlog to List[dict].
    Args:
        input_path (str): Input Eventlog file.
        shift (Union[str, datetime]): Timestamp shift.
            NOTE(review): not referenced in this function body — confirm
            whether it is consumed elsewhere or can be dropped.
        multiprocess (bool): Flag to run multiprocessing.
        chunk_size (int): Size of the chunk to be processed for each process.
    Returns:
        List[dict]: one dict per event record.
    Note:
        Since the content of the file is loaded into memory at once,
        it requires the same amount of memory as the file to be loaded.
    """
    evtx = Evtx2es(Path(input_path).resolve())
    # gen_records yields chunks (lists) of records; sum(..., list())
    # flattens them into a single list.
    records: List[dict] = sum(list(evtx.gen_records(multiprocess=multiprocess, chunk_size=chunk_size)), list())
    return records
import math
def multid_dilution_wrapper(inducers,constructs,fname,avoidedges=[],maxinducer=500,\
            wellvol=50,shuffle=False,wellorder="across",mypath=".",start=None):
    """this function contains some helpful pre-sets for doing multiple
    inducer sweeps in a 384 well plate.
    inducers:
        this is a list of lists that looks like this:
        [ ["name",[int(amount1),int(amount2)],["well",number of wells]]]
        "name"
            is the name of the inducer. for example, "ATC" or "ARA"
        amount1, amount2
            are the percent of the 'maxinducer' volume to add. for
            example, if you put '50', that means 50 percent of the max inducer volume
            (which is default 500), so the echo will add 250 nl of that inducer.
        "well" is the well where your inducers will start, in the source plate.
            You will need about 220 ul to populate an entire 384 well plate which does
            not fit into one source plate well. This is why we usually do many source
            plate wells. The first of these is denoted by "well", and the rest are placed
            vertically downwards in the plate. For example if you put "K1" here, you also
            need to put inducer into wells "L1","M1", and "N1", or more or less wells in
            a vertical column as determined by the number given by 'number of wells.'
    constructs:
        This is a list of strings that denote the names of the different constructs
        you are going to use. Each construct gets the same array of inducers.
    fname:
        this is the name of the file you want to make. Usually "date.csv". The program
        also makes "supp_<fname>" as a supplementary data file used with the biotek analysis
        package.
    avoidedges:
        a list of numbers indicating which wells to avoid. Use the two predefined lists
        called "edges" or "topbottom" to block out all edge wells or the top and bottom rows
    maxinducer:
        "100%" volume of inducer to add. Usually 500 nl so it's 100x diluted into the
        50 uL culture volume.
    wellvol:
        culture volume
    shuffle:
        True means that the wells which receive different inducer concentrations will
        be randomly distributed within the set of wells set aside for each construct.
    wellorder:
        "across" means lettered rows from left to right. A1 -> A2 -> A3 etc
        "down" means columns down (letters in order) then left to right. So, A1 -> B1
        ->->A2 -> B2 etc.
    """
    # NOTE(review): avoidedges=[] is a mutable default argument; it is only
    # compared against the module-level presets `edges`/`topbottom`, never
    # mutated, so it is harmless here.
    # Choose the usable plate region (and default starting well) based on
    # which edge wells are being avoided.
    if(avoidedges==edges):
        #print("edges")
        if(start==None):
            start="B2"
        rows=14
        cols=22
    elif(avoidedges==topbottom):
        if(start==None):
            start="B1"
        rows=14
        cols=24
    else:
        if(start==None):
            start="A1"
        rows=16
        cols=24
    #divideplate=int(rows/len(constructs))
    conwells=[]
    # Split the plate evenly between constructs, by rows ("across") or by
    # columns ("down"); record each construct's starting well name.
    if(wellorder=="across"):
        divideplate=math.ceil(rows/len(constructs))
        while(divideplate*len(constructs)>rows):
            divideplate-=1
        for i in range(len(constructs)):
            conwells+=[chr(ord(start[0])+divideplate*i)+start[1:]]
    else:
        divideplate=math.ceil(cols/len(constructs))
        while(divideplate*len(constructs)>cols):
            divideplate-=1
        for i in range(len(constructs)):
            conwells+=[start[0]+str(int(start[1:])+divideplate*i)]
    # Unpack the inducer spec: names, volumes (percent of maxinducer -> nl),
    # and the vertical run of source-plate wells for each inducer.
    indonly=[a[0] for a in inducers]
    volonly=[[int(b*maxinducer/100) for b in a[1]] for a in inducers]
    sourceonly=[]
    for src in [a[2] for a in inducers]:
        allwells=[]
        for i in range(src[1]):
            allwells+=[chr(ord(src[0][0])+i)+src[0][1:]]
        sourceonly+=[allwells]
    print("load inducers into: ")
    for ind,amts in zip(indonly,sourceonly):
        print("{} : {}".format(ind,str(amts)))
    print()
    # 50 ul per source well plus 10% dead volume.
    inducervol=50*len(sourceonly[0])*1.1
    print("make %02f ul of 100x each inducer"%(inducervol))
    print()
    print("constructs start at: ")
    for cname,cwell in zip(constructs,conwells):
        print("{} : {}".format(cname,str(cwell)))
    print("prepare {} ml of each construct for {} well volume".format(int(wellvol*1.1*384/len(constructs))/1000,wellvol))
    return multid_dilution(volonly,sourceonly,conwells,fname,avoidedges,constructs,indonly,shuffle,wellorder=wellorder,mypath=mypath)
def matyas(x: np.ndarray):
    """
    Matyas test function: 0.26*(x1^2 + x2^2) - 0.48*x1*x2.
    It has no local minima except the global one at (0, 0), and is usually
    evaluated on the square xi ∈ [-10, 10], for all i = 1, 2.
    :param x: 2-dimensional
    :return: float
    """
    assert x.shape[-1] == 2
    first, second = x.T[0], x.T[1]
    quadratic = 0.26 * (first ** 2 + second ** 2)
    cross = -0.48 * first * second
    return quadratic + cross
def flatten(array: list):
    """Converts a list of lists into a single list of x elements"""
    flat = []
    for sub in array:
        flat.extend(sub)
    return flat
def slice_sample(x_start, logpdf_target, D, num_samples=1, burn=1, lag=1,
        w=1.0, rng=None):
    """Slice samples from the univariate distribution logpdf_target.
    Parameters
    ----------
    x_start : float
        Initial point.
    logpdf_target : function(x)
        Evaluates the log pdf of target distribution at x.
    D : tuple<float, float>
        Support of target distribution.
    num_samples : int, optional
        Number of samples to return, default 1.
    burn : int, optional
        Number of samples to discard before any are collected, default 1.
    lag : int, optional
        Number of moves between successive samples, default 1.
    w : float, optional
        Step-out width passed to ``_find_slice_interval``, default 1.
    rng : np.random.RandomState, optional
        Source of random bits.
    Returns
    -------
    samples : list
        `num_samples` length list.
    metadata : dict
        Dictionary of intermediate slice sampling state, used for plotting.
    """
    if rng is None:
        rng = np.random.RandomState(0)
    # Bookkeeping for plotting: slice heights, interval seeds/endpoints,
    # rejected proposals, and accepted samples.
    M = {'u':[], 'r':[], 'a_out':[], 'b_out':[], 'x_proposal':[], 'samples':[]}
    x = x_start
    num_iters = 0
    while len(M['samples']) < num_samples:
        num_iters += 1
        # Auxiliary "height" under the density at x, drawn in log space:
        # log(uniform) + logpdf(x).
        u = np.log(rng.rand()) + logpdf_target(x)
        r = rng.rand()
        a, b, r, a_out, b_out = _find_slice_interval(
            logpdf_target, x, u, D, r, w=w)
        x_proposal = []
        # Shrinkage loop: propose uniformly on [a, b]; on rejection shrink
        # the interval toward x, until a point above the slice is found.
        while True:
            x_prime = rng.uniform(a, b)
            # NOTE(review): records the current point x rather than the
            # proposal x_prime — confirm this is the intended trace.
            x_proposal.append(x)
            if logpdf_target(x_prime) > u:
                x = x_prime
                break
            else:
                if x_prime > x:
                    b = x_prime
                else:
                    a = x_prime
        # Keep the state only after burn-in and at the thinning interval.
        if burn <= num_iters and num_iters%lag == 0:
            M['u'].append(u)
            M['r'].append(r)
            M['a_out'].append(a_out)
            M['b_out'].append(b_out)
            M['x_proposal'].append(x_proposal)
            M['samples'].append(x)
    return M['samples'], M
from typing import Dict
import os
def format_object_name(meta: Dict, object_name: str) -> str:
    """
    Build the prefixed object path for a granule.
    Parameters
    ----------
    meta: Dict
        Single Granule metadata JSON response from CMR
    object_name: str
        Name of object (ex. hdf file, xml file)
    Returns
    ----------
    str
        Object name for granule.
        If insufficient information is available, empty string is returned.
    """
    default_value = ""
    if meta is None:
        return default_value
    folder_prefix = ""
    try:
        folder_prefix = format_object_prefix(meta)
    except ValueError:
        # format_object_prefix could not derive a prefix from the metadata.
        return ''
    # NOTE(review): side effect — creates the prefix directory on the local
    # filesystem; confirm this is intended for a name-formatting helper.
    os.makedirs(folder_prefix, exist_ok=True)
    return f"{folder_prefix}/{object_name}"
def ShortName(url):
    """Returns a shortened version of a URL.

    The scheme is dropped and long paths are abbreviated with '..'.
    Falls back to '?.?.?' when the URL has no hostname.
    """
    parsed = urlparse.urlparse(url)
    path = parsed.path
    hostname = parsed.hostname if parsed.hostname else '?.?.?'
    if path == '' or path == '/':
        return hostname
    last_path = path.split('/')[-1]
    if len(last_path) < 10:
        if len(path) < 10:
            # Bug fix: `path` normally already begins with '/', so the old
            # `hostname + '/' + path` produced a double slash ("host//path").
            separator = '' if path.startswith('/') else '/'
            return hostname + separator + path
        else:
            return hostname + '/..' + path[-10:]
    else:
        return hostname + '/..' + last_path[:5]
def select_attachment(pattern, cursor):
    """Prompt the user for the attachment that matches the pattern.
    Args:
        This function takes the same arguments as the find_attachments
        function.
    Returns:
        A (parentItemID, path) pair, None if no matches were found.
    """
    # Bug fix: previously searched with the global ``args.path`` instead of
    # the ``pattern`` parameter, silently ignoring the caller's argument.
    attachments = find_attachments(pattern, cursor)
    num_attachments = len(attachments)
    if num_attachments == 0:
        return None
    elif num_attachments == 1:
        return attachments[0]
    else:
        print('Found several attachments that match the specified pattern:')
        selection = pyzottk.simple_menu([path for _, path in attachments])
        return attachments[selection]
def keyevent2tuple(event):
    """Convert a QKeyEvent instance into a plain, hashable tuple."""
    getters = (event.type, event.key, event.modifiers,
               event.text, event.isAutoRepeat, event.count)
    return tuple(get() for get in getters)
from typing import Union
import pathlib
def log_git(repo_path: Union[pathlib.Path, str], repo_name: str = None) -> dict:
    """
    Use python logging module to log git information
    Args:
        repo_path (Union[pathlib.Path, str]): path to repo or file inside repository (repository is recursively searched)
        repo_name (str): not used in this function body — presumably kept
            for interface compatibility; confirm with callers.
    Returns:
        dict: git information from ``get_repo_info``, or an empty dict when
        the repository could not be read.
    """
    try:
        git_info = get_repo_info(repo_path)
        return git_info
    except Exception:
        # Best effort: a missing or unreadable git repo must not abort the run.
        logger.error("Was not able to read git information, trying to continue without.")
        return {}
def datetime_to_absolute_validity(d, tzname='Unknown'):
    """Convert ``d`` to its integer representation.

    Returns a list of ints, one per field (year, month, day, hour, minute,
    second, timezone), each obtained by reversing the two characters of the
    field and parsing as hex — i.e. a nibble ("semi-octet") swap, presumably
    matching the GSM SMS absolute validity/timestamp format; confirm against
    the relevant spec.
    """
    n = d.strftime("%y %m %d %H %M %S %z").split(" ")
    # compute offset
    offset = FixedOffset.from_timezone(n[-1], tzname).offset
    # one unit is 15 minutes
    s = "%02d" % int(floor(offset.seconds / (60 * 15)))
    if offset.days < 0:
        # negative offset: set MSB to 1 to mark the sign
        s = "%02x" % ((int(s[0]) << 4) | int(s[1]) | 0x80)
    n[-1] = s
    # Reverse each two-character field before hex-parsing: the semi-octet swap.
    return [int(c[::-1], 16) for c in n]
def backward_committor(basis, weights, in_domain, guess, lag, test_basis=None):
    """Estimate the backward committor using DGA.
    Parameters
    ----------
    basis : list of (n_frames[i], n_basis) ndarray of float
        Basis for estimating the committor. Must be zero outside of the
        domain.
    weights : list of (n_frames[i],) ndarray of float
        Reweighting factor to the invariant distribution for each frame.
    in_domain : list of (n_frames[i],) ndarray of bool
        Whether each frame of the trajectories is in the domain.
    guess : list of (n_frames[i],) ndarray of float
        Guess for the committor. Must obey boundary conditions.
    lag : int
        DGA lag time in units of frames.
    test_basis : list of (n_frames[i], n_basis) ndarray of float, optional
        Test basis against which to minimize the error. Must have the
        same dimension as the basis used to estimate the committor.
        If None, use the basis that is used to estimate the committor.
    Returns
    -------
    list of (n_frames[i],) ndarray
        Estimate of the backward committor at each frame of the
        trajectory.
    """
    if test_basis is None:
        test_basis = basis
    # Accumulate the linear system a @ coeffs = b over all trajectories.
    a = 0.0
    b = 0.0
    for x, y, w, d, g in zip(test_basis, basis, weights, in_domain, guess):
        # The last `lag` frames must carry zero weight.
        assert np.all(w[-lag:] == 0.0)
        # Index of the frame `lag` steps earlier, clipped at the last exit
        # from the domain (backward_stop).
        iy = np.maximum(np.arange(len(d) - lag), backward_stop(d)[lag:])
        assert np.all(iy >= 0)
        a += (x[lag:].T * w[:-lag]) @ (y[iy] - y[lag:])
        b -= (x[lag:].T * w[:-lag]) @ (g[iy] - g[lag:])
    coeffs = linalg.solve(a, b)
    # Committor estimate = basis expansion plus the boundary-condition guess.
    return [y @ coeffs + g for y, g in zip(basis, guess)]
def get_language_from_request(request, current_page=None):
    """Pick the most appropriate language for *request*.

    Tries, in order: an explicit ``language`` request parameter, the
    request's ``LANGUAGE_CODE`` attribute, the first language available on
    *current_page*, and finally ``settings.CMS_DEFAULT_LANGUAGE``. The
    result is truncated to the two-letter code.
    """
    language = get_language_in_settings(request.REQUEST.get('language', None))
    if language is None:
        language = getattr(request, 'LANGUAGE_CODE', None)
    if language is None and current_page:
        # Last resort: use the first language available on the page.
        page_languages = current_page.get_languages()
        if len(page_languages) > 0:
            language = page_languages[0]
    if language is None:
        language = settings.CMS_DEFAULT_LANGUAGE
    return language[:2]
from typing import Tuple
import struct
def process_refund_contract_transaction(contract_transaction_bytes: bytes, delivery_time: int,
        funding_value: int, funding_output_script: P2MultiSig_Output, server_keys: ServerKeys,
        account_metadata: AccountMetadata, channel_row: ChannelRow) -> Tuple[bytes, bytes, bytes]:
    """
    The way a payment channel is set up, is by the client first getting the server to sign a
    version of the contract transaction that fully refunds the coins that the client is
    putting into the funding transaction.

    Args:
        contract_transaction_bytes: serialised contract (refund) transaction.
        delivery_time: reference time the transaction locktime is measured against.
        funding_value: value the client locks in the funding output.
        funding_output_script: the multi-signature funding output script.
        server_keys: server key material used to derive the refund key.
        account_metadata: client account data (provides the public key bytes).
        channel_row: current channel state row.
    Returns:
        Tuple of (client refund payment key bytes, funding input prev_hash,
        server refund signature bytes).
    Raises:
        - InvalidTransactionError
        - InvalidRefundInputScriptError
    """
    # NOTE(review): asserts are stripped under ``python -O``; confirm these
    # state checks are enforced elsewhere as well.
    assert channel_row.channel_state == ChannelState.PAYMENT_KEY_DISPENSED, "Invalid channel state"
    assert channel_row.payment_key_bytes is not None, "Missing 'payment_key_bytes' state"
    try:
        contract_transaction = Tx.from_bytes(contract_transaction_bytes)
    except struct.error:
        raise InvalidTransactionError("Contract transaction is corrupt")
    # The refund transaction should be spending the funding output that is locked in a
    # multi-signature contract between client and server. There is no reason for the refund
    # transaction to be spending anything else.
    if len(contract_transaction.inputs) != 1:
        raise InvalidTransactionError("Only the funding input should be present")
    tx_input = contract_transaction.inputs[0]
    # The refund transaction is the payment channel. It's sequence number will be incremented
    # in every payment update by the client. We expect it to start at 0 for the initial refund
    # transaction which does the full refund to the client.
    if tx_input.sequence != 0:
        raise InvalidTransactionError("The initial funding input nSequence value should be 0")
    # The refund transaction is for the client to close the payment channel and reclaim their
    # funds if the server is non-responsive. The life of the channel is limited to this time, and
    # it is expected that the server will close the channel and claim any funds they have been
    # paid before this channel expiry time is reached.
    lock_time_seconds = contract_transaction.locktime - delivery_time
    if lock_time_seconds < MINIMUM_REFUND_SECONDS or lock_time_seconds > MAXIMUM_REFUND_SECONDS:
        raise InvalidTransactionError(f"Locktime must be around {IDEAL_LOCKTIME_SECONDS} seconds")
    # It is expected that the client have locked the refund input to the refund output. This
    # means that there will be one output to match the input.
    if len(contract_transaction.outputs) != 1:
        raise InvalidTransactionError("Only one refund output should be present")
    # The staked contract funds by the client must be at least this minimum funding value.
    if funding_value < MINIMUM_FUNDING_VALUE:
        raise InvalidTransactionError(
            f"Channel funding value {funding_value} < {MINIMUM_FUNDING_VALUE}")
    # Verify that the funding value they gave us covers the refund value. We have no idea what
    # fee rate the client is paying, so we assume they can legitimately be paying no fee and
    # we do not impose any restrictions.
    if contract_transaction.outputs[0].value > funding_value:
        raise InvalidTransactionError("Refunded value higher than funded value")
    # This is the required key (and signature) ordering for client and server.
    client_key_index = 0
    server_key_index = 1
    server_payment_key = PublicKey.from_bytes(channel_row.payment_key_bytes)
    if funding_output_script.public_keys[server_key_index] != server_payment_key:
        raise InvalidTransactionError("Funding output script lacks server payment key")
    try:
        refund_script_data = list(tx_input.script_sig.ops())
    except TruncatedScriptError:
        raise InvalidRefundInputScriptError("Truncated refund input script")
    if len(refund_script_data) != 3:
        raise InvalidRefundInputScriptError("Invalid refund spend stack size")
    client_refund_signature_bytes = refund_script_data[client_key_index+1]
    if Signature.analyze_encoding(client_refund_signature_bytes) == 0:
        raise InvalidTransactionError("Invalid client refund signature")
    client_sighash = SigHash.from_sig_bytes(client_refund_signature_bytes)
    if client_sighash != SigHash(SigHash.SINGLE | SigHash.ANYONE_CAN_PAY | SigHash.FORKID):
        raise InvalidTransactionError("Invalid sighash for client refund signature")
    # Derive the server-side refund key for this channel and counter-sign the
    # funding input.
    refund_private_key = generate_payment_private_key(server_keys.identity_private_key,
        account_metadata.public_key_bytes, channel_row.payment_key_index)
    server_refund_signature_bytes = _sign_contract_transaction_input(contract_transaction,
        funding_output_script.to_script_bytes(), funding_value, refund_private_key)
    client_refund_payment_key_bytes = funding_output_script.public_keys[client_key_index].to_bytes()
    return client_refund_payment_key_bytes, tx_input.prev_hash, server_refund_signature_bytes
def _get_link_url(start_component_revision_dict, end_component_revision_dict):
    """Return link text given a start and end revision. This is used in cases
    when revision url is not available."""
    repo_url = start_component_revision_dict['url']
    if not repo_url:
        return None
    viewer = source_mapper.get_vcs_viewer_for_url(repo_url)
    if not viewer:
        # Unsupported VCS: nothing to link to.
        return None
    start_revision = _get_revision(start_component_revision_dict)
    end_revision = _get_revision(end_component_revision_dict)
    if start_revision == end_revision:
        return viewer.get_source_url_for_revision(start_revision)
    return viewer.get_source_url_for_revision_diff(
        start_revision, end_revision)
import subprocess
def blastall_available():
    """Returns True if blastall can be run, False otherwise."""
    # blastall exits non-zero when run with no arguments, so the return code
    # cannot be trusted (hence check=False); look for the banner instead.
    try:
        proc = subprocess.run(
            str(BLASTALL_DEFAULT),
            shell=False,
            check=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except OSError:
        # Executable missing or not runnable.
        return False
    return proc.stdout[1:9] == b"blastall"
import math
def get_inv_unit(block_index, diff):
    """
    given a block index and a 0-indexed layer in that block, returns a unit index.
    """
    # Cumulative unit offsets for bottleneck blocks 1..4.
    base_by_block = {1: 0, 2: 3, 3: 7, 4: 13}
    layer_offset = math.floor(abs(diff - 1) / 3)
    return base_by_block[block_index] + layer_offset
from typing import Union
import os
def find_image_any_format(filename: str, folder: str) -> Union[str, None]:
    """Takes a filename and returns an image of any of the supported formats.

    Tries every extension in ``IMAGES`` in order and returns the path of the
    first `<filename>.<format>` that exists in *folder*, or None.
    """
    for _format in IMAGES:
        # Bug fix: the filename argument was previously ignored (a literal
        # placeholder was used), so lookups could never match the request.
        image = f"{filename}.{_format}"
        image_path = IMAGE_SET.path(filename=image, folder=folder)
        if os.path.isfile(image_path):
            return image_path
    return None
def dummy_token(db, dummy_app_link):
    """Return a token for the dummy app/user."""
    token_string = TOKEN_PREFIX_OAUTH + generate_token()
    token = OAuthToken(access_token=token_string, app_user_link=dummy_app_link, scopes=['read:legacy_api', 'read:user'])
    # Keep the plaintext token on the instance for test assertions; the model
    # itself may only persist a derived form — confirm with the model.
    token._plaintext_token = token_string
    db.session.add(token)
    # Flush (not commit) so the token gets an id while the surrounding
    # fixture transaction can still be rolled back.
    db.session.flush()
    return token
import sys
import os
def get_update_packages():
    """
    Return a list of dict about package updates

    Each dict carries: name, security (bool), section, current_version,
    candidate_version and priority.
    """
    pkgs = []
    apt_pkg.init()
    # force apt to build its caches in memory for now to make sure
    # that there is no race when the pkgcache file gets re-generated
    apt_pkg.config.set("Dir::Cache::pkgcache","")
    try:
        cache = apt_pkg.Cache(apt.progress.base.OpProgress())
    except SystemError as e:
        sys.stderr.write("Error: Opening the cache (%s)" % e)
        sys.exit(-1)
    depcache = apt_pkg.DepCache(cache)
    # read the pin files
    depcache.read_pinfile()
    # read the synaptic pins too
    if os.path.exists(SYNAPTIC_PINFILE):
        depcache.read_pinfile(SYNAPTIC_PINFILE)
    # init the depcache
    depcache.init()
    try:
        saveDistUpgrade(cache,depcache)
    except SystemError as e:
        sys.stderr.write("Error: Marking the upgrade (%s)" % e)
        sys.exit(-1)
    # use assignment here since apt.Cache() doesn't provide a __exit__ method
    # on Ubuntu 12.04 it looks like
    # aptcache = apt.Cache()
    for pkg in cache.packages:
        # Only report packages marked for install or upgrade by the
        # simulated dist-upgrade above.
        if not (depcache.marked_install(pkg) or depcache.marked_upgrade(pkg)):
            continue
        inst_ver = pkg.current_ver
        cand_ver = depcache.get_candidate_ver(pkg)
        if cand_ver == inst_ver:
            continue
        record = {"name": pkg.name,
                  "security": isSecurityUpgrade(pkg, depcache),
                  "section": pkg.section,
                  "current_version": inst_ver.ver_str if inst_ver else '-',
                  "candidate_version": cand_ver.ver_str if cand_ver else '-',
                  "priority": cand_ver.priority_str}
        pkgs.append(record)
    return pkgs
import sys
def learn_conditional_density(model, xs, ys, ws=None, regularizer=None, val_frac=0.05, step=ss.Adam(a=1.e-4), minibatch=100, patience=20, monitor_every=1, logger=sys.stdout, rng=np.random):
    """
    Train model to learn the conditional density p(y|x).

    Parameters
    ----------
    model : conditional density model exposing `trn_loss`, `L` and `y`
    xs, ys : array-like inputs and targets; cast to float32
    ws : optional per-datapoint weights; if given, weighted SGD is used
    regularizer : optional term added to the training loss
    val_frac : fraction of the shuffled data held out for validation
    step : optimiser step rule.
        NOTE(review): `ss.Adam(a=1.e-4)` is a mutable default argument
        shared across calls — confirm the optimiser holds no state.
    minibatch, patience, monitor_every : training-loop settings
    logger : stream to write training progress to
    rng : source of randomness for the data shuffle

    Returns
    -------
    The trained model (trained in place).
    """
    xs = np.asarray(xs, np.float32)
    ys = np.asarray(ys, np.float32)
    n_data = xs.shape[0]
    assert ys.shape[0] == n_data, 'wrong sizes'
    # shuffle data, so that training and validation sets come from the same distribution
    idx = rng.permutation(n_data)
    xs = xs[idx]
    ys = ys[idx]
    # split data into training and validation sets
    n_trn = int(n_data - val_frac * n_data)
    xs_trn, xs_val = xs[:n_trn], xs[n_trn:]
    ys_trn, ys_val = ys[:n_trn], ys[n_trn:]
    if ws is None:
        # train model without weights
        trainer = trainers.SGD(
            model=model,
            trn_data=[xs_trn, ys_trn],
            trn_loss=model.trn_loss if regularizer is None else model.trn_loss + regularizer,
            trn_target=model.y,
            val_data=[xs_val, ys_val],
            val_loss=model.trn_loss,
            val_target=model.y,
            step=step
        )
        trainer.train(
            minibatch=minibatch,
            patience=patience,
            monitor_every=monitor_every,
            logger=logger
        )
    else:
        # prepare weights: shuffle and split with the same permutation as the data
        ws = np.asarray(ws, np.float32)
        assert ws.size == n_data, 'wrong sizes'
        ws = ws[idx]
        ws_trn, ws_val = ws[:n_trn], ws[n_trn:]
        # train model with weights
        trainer = trainers.WeightedSGD(
            model=model,
            trn_data=[xs_trn, ys_trn],
            trn_losses=-model.L,
            trn_weights=ws_trn,
            trn_reg=regularizer,
            trn_target=model.y,
            val_data=[xs_val, ys_val],
            val_losses=-model.L,
            val_weights=ws_val,
            val_target=model.y,
            step=step
        )
        trainer.train(
            minibatch=minibatch,
            patience=patience,
            monitor_every=monitor_every,
            logger=logger
        )
    return model
import subprocess
def _run_with_output(command, cwd="caffe/"):
""" Runs a comand, and displays the output as it is running.
Args:
command: The command to run.
cwd: Directory to run command in.
Returns:
The command output. """
popen = subprocess.Popen(command, stdout=subprocess.PIPE, cwd=cwd)
lines_iterator = iter(popen.stdout.readline, b"")
output = ""
for line in lines_iterator:
print(line)
output += line
return output | 4b0e5b73e6df9226841602048b8ca33ce4026a55 | 3,628,023 |
def powspec_highom(fs, mu, s, kp, km, vr, vt, tr):
    """Return the high-frequency behavior of the power spectrum.

    `fs` are the frequencies to evaluate at; the remaining arguments are
    model parameters (mu, s, kp, km, vr, vt, tr) — presumably drive, noise,
    two rate constants, reset/threshold values and a refractory time;
    confirm against the module that defines ``r0``.
    """
    # angular frequencies
    o = 2*pi*fs
    # Effective period: log drift term plus the refractory time tr.
    Td = np.log((mu + s - vr) / (mu + s - vt)) + tr
    Ppp = (kp*exp(-(kp+km)*tr)+km)/(kp+km)
    # r0(locals()) passes the full parameter set to the rate helper, so no
    # extra local variables may be introduced in this function.
    return r0(locals()) * (1.-Ppp*Ppp*np.exp(-2*kp*(Td-tr)))/(1+Ppp*Ppp*np.exp(-2*kp*(Td-tr))-2*Ppp*np.exp(-kp*(Td-tr))*np.cos(o*Td))
def bigger_price(limit: int, data: list) -> list:
    """
    TOP most expensive goods: return the `limit` most expensive entries.

    NOTE(review): `data` is consumed in place (entries are popped), and
    `find_max_price` is assumed to return an (index, price) pair — confirm.
    """
    result_list = []
    for i in range(limit):
        # Repeatedly extract the current maximum-priced entry.
        id_max_price, max_price = find_max_price(data)
        result_list.append(data.pop(id_max_price))
    return result_list
def makeBlock(data):
    """Wrap *data* in the currently applied style's block tags."""
    block_tags = appliedstyle['block']
    return "%s%s%s" % (block_tags[0], data, block_tags[1])
def by_key(dct, keys, fill_val=None):
    """ dictionary on a set of keys, filling missing entries
    """
    vals = values_by_key(dct, keys, fill_val=fill_val)
    return {key: val for key, val in zip(keys, vals)}
import random
def get_vivo_uri():
    """
    Find an unused VIVO URI with the specified VIVO_URI_PREFIX.

    Generates random candidate URIs of the form ``<prefix>n<digits>`` and
    queries VIVO until one with no triples asserted about it is found.
    """
    # Bug fix: the retry branch previously dropped the 'n' after the prefix,
    # so retries generated URIs in a different namespace than the first try.
    while True:
        test_uri = VIVO_URI_PREFIX + 'n' + str(random.randint(1, 9999999999))
        query = """
            SELECT COUNT(?z) WHERE {
            <""" + test_uri + """> ?y ?z
            }"""
        response = vivo_sparql_query(query)
        # A zero count means nothing is asserted about this URI yet.
        if int(response["results"]["bindings"][0]['.1']['value']) == 0:
            return test_uri
def imbothat(img, el):
    """
    Bottom-hat transform (MATLAB imbothat equivalent): the morphological
    closing of *img* with structuring element *el*, minus *img* itself.
    """
    closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, el)
    bothat = closed - img
    return bothat
import numpy
def _downsampling_base(
        primary_id_strings, storm_times_unix_sec, target_values, target_name,
        class_fraction_dict, test_mode=False):
    """Base for `downsample_for_training` and `downsample_for_non_training`.
    The procedure is described below.
    [1] Find all storm objects in the highest class (e.g., tornadic).  Call this
        set {s_highest}.
    [2] Find all storm cells with at least one object in {s_highest}.  Call this
        set {S_highest}.
    [3] Find all time steps with at least one storm cell in {S_highest}.  Call
        this set {t_highest}.
    [4] Randomly remove a large fraction of time steps NOT in {t_highest}.
    [5] Downsample remaining storm objects, leaving a prescribed fraction in
        each class (according to `class_fraction_dict`).
    N = number of storm objects before downsampling
    :param primary_id_strings: length-N list of primary storm IDs.
    :param storm_times_unix_sec: length-N numpy array of corresponding times.
    :param target_values: length-N numpy array of corresponding target values
        (integer class labels).
    :param target_name: Name of target variable (must be accepted by
        `target_val_utils.target_name_to_params`).
    :param class_fraction_dict: Dictionary, where each key is an integer class
        label (-2 for "dead storm") and the corresponding value is the
        sampling fraction.
    :param test_mode: Never mind.  Just leave this alone.
    :return: indices_to_keep: 1-D numpy array of indices to keep.
    """
    _report_class_fractions(target_values)
    error_checking.assert_is_boolean(test_mode)
    num_storm_objects = len(primary_id_strings)
    num_classes = target_val_utils.target_name_to_num_classes(
        target_name=target_name, include_dead_storms=False)
    # Step 1.
    print((
        'Finding storm objects in class {0:d} (the highest class), yielding set'
        ' {{s_highest}}...'
    ).format(num_classes - 1))
    highest_class_indices = numpy.where(target_values == num_classes - 1)[0]
    print('{{s_highest}} contains {0:d} of {1:d} storm objects.'.format(
        len(highest_class_indices), num_storm_objects
    ))
    # Step 2.
    print ('Finding storm cells with at least one object in {s_highest}, '
           'yielding set {S_highest}...')
    highest_class_indices = _find_storm_cells(
        object_id_strings=primary_id_strings,
        desired_cell_id_strings=
        [primary_id_strings[k] for k in highest_class_indices]
    )
    print('{{S_highest}} contains {0:d} of {1:d} storm objects.'.format(
        len(highest_class_indices), num_storm_objects)
    )
    # Step 3: times NOT covered by {S_highest} are candidates for removal.
    print ('Finding all time steps with at least one storm cell in '
           '{S_highest}, yielding set {t_highest}...')
    lower_class_times_unix_sec = (
        set(storm_times_unix_sec.tolist()) -
        set(storm_times_unix_sec[highest_class_indices].tolist())
    )
    lower_class_times_unix_sec = numpy.array(
        list(lower_class_times_unix_sec), dtype=int)
    # Step 4.
    print('Randomly removing {0:.1f}% of times not in {{t_highest}}...'.format(
        FRACTION_UNINTERESTING_TIMES_TO_OMIT * 100))
    this_num_times = int(numpy.round(
        FRACTION_UNINTERESTING_TIMES_TO_OMIT * len(lower_class_times_unix_sec)
    ))
    # In test mode the removal is deterministic (first `this_num_times`
    # entries) so unit tests can predict the result.
    if test_mode:
        times_to_remove_unix_sec = lower_class_times_unix_sec[:this_num_times]
    else:
        times_to_remove_unix_sec = numpy.random.choice(
            lower_class_times_unix_sec, size=this_num_times, replace=False)
    indices_to_keep = _find_uncovered_times(
        all_times_unix_sec=storm_times_unix_sec,
        covered_times_unix_sec=times_to_remove_unix_sec)
    _report_class_fractions(target_values[indices_to_keep])
    # Step 5.
    print('Downsampling storm objects from remaining times...')
    subindices_to_keep = dl_utils.sample_by_class(
        sampling_fraction_by_class_dict=class_fraction_dict,
        target_name=target_name, target_values=target_values[indices_to_keep],
        num_examples_total=LARGE_INTEGER, test_mode=test_mode)
    return indices_to_keep[subindices_to_keep]
def liste_erreur(estimation, sol):
    """Return the list of pointwise absolute errors of an ODE estimate.

    Parameters
    ----------
    estimation : (x, y) pair produced by the ODE solver
    sol : exact solution, as an (x, y) pair

    Raises
    ------
    ValueError
        If the estimate and the solution do not have the same length.
    """
    (x, y) = estimation
    sol = list(sol)
    exact_values = sol[1]
    if len(y) != len(exact_values):
        raise ValueError ("L'estimation et la solution ne sont pas de la même longueur.")
    return [abs(approx - exact) for approx, exact in zip(y, exact_values)]
def get_pods_amount(v1: CoreV1Api, namespace) -> int:
    """
    Count the pods currently present in *namespace*.
    :param v1: CoreV1Api
    :param namespace: namespace
    :return: int
    """
    pod_list = v1.list_namespaced_pod(namespace)
    if not pod_list.items:
        return 0
    return len(pod_list.items)
def create_pod(kube_host, kube_port, namespace, pod_name, image_name,
               container_port_list, cmd_list, arg_list):
    """Creates a Kubernetes Pod.

    Args:
        kube_host, kube_port: Kubernetes API server address.
        namespace: Namespace to create the Pod in.
        pod_name, image_name: Pod name and container image.
        container_port_list, cmd_list, arg_list: container configuration
            forwarded to `_make_pod_config`.
    Returns:
        The result of `_do_post` for the create request.

    Note that it is generally NOT considered a good practice to directly create
    Pods. Typically, the recommendation is to create 'Controllers' to create and
    manage Pods' lifecycle. Currently Kubernetes only supports 'Replication
    Controller' which creates a configurable number of 'identical Replicas' of
    Pods and automatically restarts any Pods in case of failures (for eg: Machine
    failures in Kubernetes). This makes it less flexible for our test use cases
    where we might want slightly different set of args to each Pod. Hence we
    directly create Pods and not care much about Kubernetes failures since those
    are very rare.
    """
    post_url = 'http://%s:%d/api/v1/namespaces/%s/pods' % (kube_host, kube_port,
                                                           namespace)
    request_body = _make_pod_config(pod_name, image_name, container_port_list,
                                    cmd_list, arg_list)
    return _do_post(post_url, 'Create Pod', request_body)
import os
import shutil
import zipfile
import glob
def detection(post_id):
    """Run image-forgery detection over an uploaded file.

    Extracts the images from the uploaded zip archive (or accepts a single
    supported image file), runs the clipping, copy-move and duplication
    detectors over them, writes annotated result images under
    ``media/results/<post_id % 10>/`` and persists the findings as
    ``Suspicious`` / ``SuspiciousDuplication`` records.

    Parameters
    ----------
    post_id : int
        Primary key of the uploaded ``File`` record.

    Returns
    -------
    tuple or int
        ``(number_of_images, number_of_duplicated_pairs)`` on success,
        or ``0`` when the upload is neither a zip nor a supported image.
    """
    file = File.objects.get(pk=post_id)
    # NOTE(review): uploads share output slots when their ids collide
    # modulo 10 -- presumably intentional; confirm.
    p_id = post_id % 10
    OUT_DIR = os.path.join(
        BASE_DIR,
        'media',
        'images',
        str(p_id))
    # Always start from a clean extraction directory for this slot.
    if os.path.exists(OUT_DIR):
        shutil.rmtree(OUT_DIR)
    os.makedirs(OUT_DIR)
    if file.zip.path.split('.')[-1] == 'zip':
        # Unpack the archive and collect every supported image inside it.
        with zipfile.ZipFile(os.path.join(BASE_DIR, file.zip.path)) as existing_zip:
            existing_zip.extractall(OUT_DIR)
        # TODO: fix
        images_path = glob.glob(os.path.join(OUT_DIR, '*', '*'))
        images_path = [p for p in images_path if p.split(
            '.')[-1] in ['jpg', 'png', 'tif', 'JPG', 'JPEG', 'TIF']]
        images_url = list(map(lambda x: x.split('media/')[-1], images_path))
        print(images_url)
    elif file.zip.path.split('.')[-1] in ['jpg', 'png', 'tif', 'JPG', 'JPEG', 'TIF']:
        # A single supported image was uploaded directly.
        images_path = [file.zip.path]
    else:
        # Unsupported upload type.
        return 0
    print('loaded')
    RESULT_DIR = os.path.join(
        BASE_DIR,
        'media',
        'results',
        str(p_id)
    )
    if os.path.exists(RESULT_DIR):
        shutil.rmtree(RESULT_DIR)
    os.makedirs(RESULT_DIR)
    suspicious_images = [
        SuspiciousImage(
            path,
            nfeatures=5000) for path in images_path]
    print(suspicious_images)
    len_sus = len(suspicious_images)
    # --- Detectors --- #
    detector_cl = Clipping(min_area=100)
    detector_cm = CopyMove(
        min_kp=20,
        min_match=20,
        min_key_ratio=0.75,
        flags=0)
    detector_du = Duplication(
        min_kp=20,
        min_match=20,
        min_key_ratio=0.75,
        flags=0)
    for img in suspicious_images:
        dsize = (img.w, img.h)
        # Clipping check #
        pred = detector_cl.detect(img)
        img.clipping = pred
        # Bug fix: compare predictions with '==' instead of identity 'is'
        # (which only works by accident for CPython's small-int cache).
        if pred == 1:
            ratio = detector_cl.ratio_
            img.area_ratio = ratio
            img.cl_img = cv.resize(detector_cl.image_, dsize=dsize)
        # Copy-move check #
        pred = detector_cm.detect(img)
        img.copymove = pred
        if pred == 1:
            ratio = detector_cm.mask_ratio_
            img.mask_ratio = ratio
            img.cm_img = cv.resize(detector_cm.image_, dsize=dsize)
    print('detected')
    # Persist one Suspicious record (plus result images) per input image.
    for img in suspicious_images:
        nameroot = img.name
        dsize = (img.w, img.h)
        suspicious = Suspicious()
        suspicious.post_id = p_id
        suspicious.w, suspicious.h = dsize
        suspicious.name = img.name
        suspicious.size = img.size
        file_name = os.path.join(
            RESULT_DIR, '{}.jpg'.format(nameroot))
        cv.imwrite(file_name, cv.resize(img.mat, dsize=dsize))
        suspicious.original = file_name.split('media/')[-1]
        file_name = os.path.join(
            RESULT_DIR, '{}_no.jpg'.format(nameroot))
        cv.imwrite(file_name, img.no_img)
        suspicious.no_img = file_name.split('media/')[-1]
        suspicious.clipping = img.clipping
        if img.clipping:
            file_name = os.path.join(
                RESULT_DIR, '{}_cl.jpg'.format(nameroot))
            cv.imwrite(file_name, img.cl_img)
            suspicious.cl_img = file_name.split('media/')[-1]
        else:
            # No clipping detected: reuse the original image for display.
            suspicious.cl_img = suspicious.original
        suspicious.area_ratio = int(img.area_ratio * 100)
        suspicious.copymove = img.copymove
        if suspicious.copymove:
            file_name = os.path.join(
                RESULT_DIR, '{}_cm.jpg'.format(nameroot))
            cv.imwrite(file_name, img.cm_img)
            suspicious.cm_img = file_name.split('media/')[-1]
        else:
            suspicious.cm_img = suspicious.original
        suspicious.mask_ratio = int(img.mask_ratio * 100)
        # Keypoint visualization with the border gap trimmed off.
        file_name = os.path.join(
            RESULT_DIR, '{}_cp.jpg'.format(nameroot))
        cv.imwrite(file_name, cv.resize(
            img.keyimg[img.gap:-img.gap, img.gap:-img.gap], dsize=dsize))
        suspicious.cp_img = file_name.split('media/')[-1]
        suspicious.save()
    print('saved')
    ### Duplication check ###
    # Compare every unordered pair of images for reused content.
    n_dp = 0
    for i in range(len_sus):
        img = suspicious_images[i]
        imgname = img.name
        for j in range(i + 1, len_sus):
            pred = detector_du.detect(
                suspicious_images[j], img)
            if pred == 1:
                file_name = os.path.join(
                    RESULT_DIR, '{}_{}_duplication.jpg'.format(
                        suspicious_images[j].name, imgname))
                detector_du.save_image(file_name)
                suspicious = SuspiciousDuplication()
                suspicious.post_id = p_id
                suspicious.name1 = suspicious_images[j].name
                suspicious.name2 = imgname
                suspicious.du_img = file_name.split('media/')[-1]
                suspicious.mask_ratio = int(detector_du.mask_ratio_ * 100)
                suspicious.save()
                n_dp = n_dp + 1
    return len_sus, n_dp
import json
def convert_jupyter_to_databricks(
    input_filename: str = "nofile", output_filename: str = "nofile"
):
    """Main function to convert jupyter files to databricks python files.

    Args:
        input_filename (str, optional): input filename .ipynb. Defaults to "nofile".
        output_filename (str, optional): output filename .py. Defaults to "nofile".

    Raises:
        ValueError: convert_list.json missing
        ValueError: Reading jupyter file error
        ValueError: Writing python file error

    Returns:
        [bool]: returns True if function run successfully
    """
    # Default: no filenames given, read the mapping from convert_list.json.
    if (input_filename == "nofile") and (output_filename == "nofile"):
        try:
            # 'with' guarantees the handle is closed (the original left it open).
            with open("convert_list.json") as f:
                files = json.load(f)
        except Exception as e:
            raise ValueError(
                f"Loading default list did not work. Make sure to create a convert_list.json: {e}"
            )
    else:
        check_for_ending(input_filename, ".ipynb")
        check_for_ending(output_filename, ".py")
        files = {input_filename: output_filename}
    for file in files:
        # Get Lines from jupyter
        try:
            lines = jupyterfile_tolist(file)
        except Exception as e:
            raise ValueError(
                f"Transforming jupyter file to python file did not work: {e}"
            )
        # write to Python file
        try:
            write_topython(lines, files[file])
        except Exception as e:
            raise ValueError(f"Saving to python file did not work: {e}")
    return True
import warnings
def insul_diamond(pixels, bins,
    window=10, ignore_diags=2, balanced=True, norm_by_median=True):
    """
    Calculates the insulation score of a Hi-C interaction matrix.

    Parameters
    ----------
    pixels : pandas.DataFrame
        A table of Hi-C interactions. Must follow the Cooler columnar format:
        bin1_id, bin2_id, count, balanced (optional)).
    bins : pandas.DataFrame
        A table of bins, is used to determine the span of the matrix
        and the locations of bad bins.
    window : int
        The width (in bins) of the diamond window to calculate the insulation score.
    ignore_diags : int
        If > 0, the interactions at separations < `ignore_diags` are ignored
        when calculating the insulation score. Typically, a few first diagonals
        of the Hi-C map should be ignored due to contamination with Hi-C
        artifacts.
    balanced : bool
        If True, use the 'balanced' pixel values and treat bins with a null
        balancing weight as bad; otherwise use raw 'count' values.
    norm_by_median : bool
        If True, normalize the insulation score by its NaN-median.

    Returns
    -------
    score : numpy.ndarray
        Per-bin insulation score (NaN where no valid pixels contribute).
    """
    # Work in coordinates relative to the first bin of the table.
    lo_bin_id = bins.index.min()
    hi_bin_id = bins.index.max() + 1
    N = hi_bin_id - lo_bin_id
    sum_pixels = np.zeros(N)
    n_pixels = np.zeros(N)
    # Bad bins are those whose balancing weight is null (balanced mode only).
    bad_bin_mask = bins.weight.isnull().values if balanced else np.zeros(N, dtype=bool)
    # Keep only pixels close enough to the diagonal to fall inside a diamond.
    diag_pixels = pixels[pixels.bin2_id - pixels.bin1_id <= (window - 1) * 2]
    if balanced:
        diag_pixels = diag_pixels[~diag_pixels.balanced.isnull()]
    i = diag_pixels.bin1_id.values - lo_bin_id
    j = diag_pixels.bin2_id.values - lo_bin_id
    val = diag_pixels.balanced.values if balanced else diag_pixels['count'].values
    # Slide the (i_shift, j_shift) offset over the diamond; each iteration
    # accumulates the pixels whose diamond apex lands on each bin.
    for i_shift in range(0, window):
        for j_shift in range(0, window):
            if i_shift+j_shift < ignore_diags:
                continue

            # Pixels at exactly this offset from some apex bin, clipped to bounds.
            mask = (i+i_shift == j-j_shift) & (i + i_shift < N ) & (j - j_shift >= 0 )

            sum_pixels += np.bincount(i[mask] + i_shift, val[mask], minlength=N)

            # Count valid (non-bad) pixel positions contributing at this offset,
            # shifting the bad-bin mask to both the row and column coordinate.
            loc_bad_bin_mask = np.zeros(N, dtype=bool)
            if i_shift == 0:
                loc_bad_bin_mask |= bad_bin_mask
            else:
                loc_bad_bin_mask[i_shift:] |= bad_bin_mask[:-i_shift]

            if j_shift == 0:
                loc_bad_bin_mask |= bad_bin_mask
            else:
                loc_bad_bin_mask[:-j_shift] |= bad_bin_mask[j_shift:]

            n_pixels[i_shift:(-j_shift if j_shift else None)] += (
                1 - loc_bad_bin_mask[i_shift:(-j_shift if j_shift else None)])

    # Suppress divide-by-zero warnings: bins with no valid pixels become NaN/inf.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        score = sum_pixels / n_pixels
    if norm_by_median:
        score /= np.nanmedian(score)
    return score
import random
import numpy
def add_frame(dataset):
    """Embed each image of a dataset into a larger blank frame.

    Places every image of ``dataset[0]`` at either a fixed offset (parsed
    from the module-level ``args.place``) or a fresh random offset per
    image, inside a ``frame_size`` x ``frame_size`` zero frame, and
    returns the flattened frames together with the unchanged labels.

    Parameters
    ----------
    dataset : tuple
        ``(X, y)`` where ``X`` is an iterable of images.

    Returns
    -------
    tuple
        ``(numpy array of flattened framed images, original labels)``.
    """
    if args.place != 'random':
        # NOTE(review): eval() on a CLI-provided string is unsafe for
        # untrusted input -- consider ast.literal_eval.
        offset = eval(args.place)
        assert type(offset) == tuple and len(offset) == 2
    Xs = dataset[0]
    newX = []
    for img in Xs:
        if args.place == 'random':
            # Fresh random offset for every image.
            offset = (random.randint(0, frame_size - orig_size),
                      random.randint(0, frame_size - orig_size))
        frame = numpy.zeros((frame_size, frame_size), dtype=numpy.float32)
        add_img_to_frame(img, frame, offset)
        newX.append(frame.flatten())
    return (numpy.asarray(newX), dataset[1])
def floor(x: ndarray) -> ndarray:
    """Element-wise floor of the input array."""
    backend = _which_np(x)
    return backend.floor(x)
def brightness_classification(ms_image):
    """
    Classify each pixel of a fisheye image as sky (white), vegetation
    (green) or other (grey) using Otsu thresholds on brightness and
    greenness, following a simplified version of the method described in:
     - http://www.sciencedirect.com/science/article/pii/S0169204617301950
    Geometrical rules are not applied.

    Args:
        ms_image (undefined): PIL Image

    Returns:
        tuple: (segmented PIL Image, sky view factor, tree view factor)
    """
    blu_tresh, green_tresh = find_otsu_tresh(ms_image)
    fisheye_image_seg = Image.new('RGB', (ms_image.size[0], ms_image.size[1]))
    SVF = 0  # count of sky pixels
    TVF = 0  # count of vegetation pixels
    # Only the in-circle pixel count is needed for the view factors.
    pixel_circle, _ = count_img_pixels(ms_image.width / 2)
    ms_image_arr = np.asarray(ms_image)
    for i, row in enumerate(ms_image_arr):
        for j, px in enumerate(row):
            if is_inside_image(i, j, ms_image.width / 2):
                brightness = brightness_calc(px)
                if brightness >= blu_tresh:
                    # Bright pixel -> sky.
                    fisheye_image_seg.putpixel((i, j), (255, 255, 255))
                    SVF += 1
                else:
                    # Greenness only matters for non-sky pixels.
                    green = green_calc(px)
                    if green <= green_tresh:
                        fisheye_image_seg.putpixel((i, j), (128, 128, 128))
                    else:
                        fisheye_image_seg.putpixel((i, j), (0, 255, 0))
                        TVF += 1
            else:
                # Outside the fisheye circle: black background.
                fisheye_image_seg.putpixel((i, j), (0, 0, 0))
    # Rotate/flip so the output matches the input orientation (the loop
    # writes rows of the array as x coordinates).
    return ImageOps.flip(fisheye_image_seg.rotate(90)), SVF / pixel_circle, TVF / pixel_circle
def build_lights(cfg):
    """Build lights by delegating to the ``LIGHTS`` registry.

    Args:
        cfg: configuration accepted by ``LIGHTS.build``.

    Returns:
        Whatever ``LIGHTS.build`` constructs for the given config.
    """
    return LIGHTS.build(cfg)
def get_obo(force: bool = False) -> Obo:
    """Get miRBase as OBO."""
    data_version = bioversions.get_version(PREFIX)
    # Assemble the keyword arguments first, then build the ontology object.
    obo_kwargs = dict(
        ontology=PREFIX,
        name="miRBase",
        iter_terms=get_terms,
        iter_terms_kwargs=dict(version=data_version, force=force),
        typedefs=[from_species, has_mature],
        data_version=data_version,
        auto_generated_by=f"bio2obo:{PREFIX}",
    )
    return Obo(**obo_kwargs)
import token
import requests
def create_invitation(secrets) -> dict:
    """
    Create an invitation for an existing share.
    """
    subscription_id = secrets["SUBSCRIPTIONID"]
    resource_group_name = "rg_crosstenant"
    account_name = "kmdatashare"
    share_name = "publishershare1"
    invitation_name = "invitesp"
    # Build the management-plane URL for the invitation resource.
    url = (
        f"https://management.azure.com/subscriptions/{subscription_id}"
        f"/resourceGroups/{resource_group_name}"
        f"/providers/Microsoft.DataShare/accounts/{account_name}"
        f"/shares/{share_name}/invitations/{invitation_name}"
    )
    headers = {"Authorization": "Bearer " + token, "Content-Type": "application/json"}
    params = {"api-version": API_VERSION}
    payload = {
        "properties": {
            "targetActiveDirectoryId": secrets["TARGET_AAD"],
            "targetObjectId": secrets["TARGET_OBJECT_ID"],
        }
    }
    response = requests.put(url=url, headers=headers, params=params, json=payload)
    return response.json()
def allocate_lock():
    """Dummy implementation of _thread.allocate_lock().

    Returns a new ``LockType`` instance that stands in for a real thread lock.
    """
    return LockType()
import copy
def cropped_thumbnail(instance, field_name, width=None, height=None, scale=None, **kwargs):
    """Return the URL of a thumbnail cropped according to an image-ratio field.

    Parameters:
        instance: model instance carrying the image-ratio field.
        field_name: name of the ratio field on the instance.
        width / height / scale: optional target size. With both width and
            height the box is fitted inside them; with only one, the other
            follows the ratio field's aspect ratio; scale multiplies the
            ratio field's native size.
        **kwargs: extra thumbnail options passed through to the thumbnailer.

    Returns:
        The thumbnail URL, or '' when there is no image or its format is
        invalid.
    """
    # Bug fix: with the module-level ``import copy`` in scope, ``copy(kwargs)``
    # called the module object (TypeError); a plain dict copy is what is meant.
    thumbnail_options = dict(kwargs)
    ratiofield = instance._meta.get_field(field_name)
    image = getattr(instance, ratiofield.image_field)
    if ratiofield.image_fk_field:
        image = getattr(image, ratiofield.image_fk_field)
    if not image:
        return ''
    size = (int(ratiofield.width), int(ratiofield.height))
    box = getattr(instance, field_name)
    if scale:
        scale = float(scale)
        width = size[0] * scale
        height = size[1] * scale
    elif width and height:
        width = float(width)
        h = size[1] * width / size[0]
        if h > height:
            # Too tall: shrink width so the height limit is respected.
            width = height * size[0] / size[1]
        else:
            height = h
    elif width:
        width = float(width)
        height = size[1] * width / size[0]
    elif height:
        height = float(height)
        width = height * size[0] / size[1]
    if width and height:
        size = (int(width), int(height))
    if ratiofield.adapt_rotation:
        if (image.height > image.width) != (size[1] > size[0]):
            # Swap orientation so portrait sources get portrait thumbnails.
            size = (size[1], size[0])
    thumbnailer = get_thumbnailer(image)
    thumbnail_options.update({
        'size': size,
        'box': box
    })
    try:
        return thumbnailer.get_thumbnail(thumbnail_options).url
    except InvalidImageFormatError:
        return ''
import math
def find_roots_quadratic(a: float, b: float, c: float) -> set:
    """Return a set containing the real solutions of ax^2 + bx + c = 0.

    Each solution is a float, obtained from the quadratic formula.

    Preconditions (you may ASSUME them):
      - a != 0
      - the discriminant b^2 - 4ac is non-negative

    >>> find_roots_quadratic(1, -15, 56) == {8.0, 7.0}
    True
    >>> find_roots_quadratic(1, -10, 21) == {3.0, 7.0}
    True
    >>> find_roots_quadratic(1, 8, 15) == {-3.0, -5.0}
    True
    """
    assert a != 0
    assert (b * b) - (4 * a * c) >= 0
    discriminant_root = math.sqrt(b * b - 4 * a * c)
    denominator = 2 * a
    return {(-b - discriminant_root) / denominator,
            (-b + discriminant_root) / denominator}
def reconcile(
    column_to_reconcile,
    type_id=None,
    top_res=1,
    property_mapping=None,
    reconciliation_endpoint="https://wikidata.reconci.link/en/api",
):
    """
    Reconcile a DataFrame column against a reconciliation service.

    Sends the values of a pandas Series for reconciliation and returns the
    matches as a DataFrame. ``type_id`` restricts matching to items of a
    given type (for Wikidata, the 'instance of' QID). ``top_res`` limits
    how many matches are kept per item — an int, or 'all' for every match.
    ``property_mapping`` optionally constrains matching with extra
    property/column pairs (e.g. ``{"P17": df['country']}``).
    ``reconciliation_endpoint`` defaults to the Wikidata endpoint; see
    <https://reconciliation-api.github.io/testbench/> for other services.

    Args:
        column_to_reconcile (Series): column to be reconciled.
        type_id (str): item type to reconcile against.
        top_res (int or str): maximum matches per item, or 'all'.
        property_mapping (dict): property-column mapping, optional.
        reconciliation_endpoint (str): reconciliation service URL.

    Returns:
        DataFrame: the reconciled results.

    Raises:
        ValueError: top_res argument must be one of either 'all' or an integer.
    """
    input_keys, response = return_reconciled_raw(
        column_to_reconcile,
        type_id,
        property_mapping,
        reconciliation_endpoint,
    )
    full_df = parse_raw_results(input_keys, response)
    if top_res == "all":
        return full_df
    if isinstance(top_res, int):
        # Keep only the first top_res matches for each input value.
        return full_df.groupby("input_value").head(top_res).reset_index(drop=True)
    raise ValueError("top_res argument must be one of either 'all' or an integer")
def dilute_mask(mask, dilute_distance=0):
    """Expand (dilate) mask regions by ``dilute_distance`` pixels.

    The mask is shifted left/right/up/down by ``dilute_distance`` along the
    first two axes and the shifts are OR-ed together with the original mask,
    so the result is always a superset of the input regions.

    Args:
        mask: integer/boolean image with dim (H, W) or (H, W, C).
        dilute_distance: pixels to expand by; 0 returns the mask unchanged.

    Returns:
        The dilated mask (same shape and dtype as the input).
    """
    if dilute_distance == 0:  # no dilution
        return mask
    background = np.zeros_like(mask)
    # left, right, up, down dilute
    l_dilute = background.copy()
    r_dilute = background.copy()
    u_dilute = background.copy()
    d_dilute = background.copy()
    l_dilute[:, 0:-dilute_distance] = mask[:, dilute_distance:]
    r_dilute[:, dilute_distance:] = mask[:, 0:-dilute_distance]
    u_dilute[0:-dilute_distance, :] = mask[dilute_distance:, :]
    d_dilute[dilute_distance:, :] = mask[0:-dilute_distance, :]
    # Bug fix: include the original mask in the union so dilation never
    # drops pixels (previously a region thinner than the dilute distance
    # could vanish from its own expansion).
    diluted = mask | l_dilute | r_dilute | u_dilute | d_dilute
    return diluted
def with_expected_arguments(args, env={}):
    """Python script that checks its argv and environment."""
    # Quote each argument, then splice the comma-joined list into the template.
    quoted_args = ('"{}"'.format(arg) for arg in args)
    arg_string = ', '.join(quoted_args)
    return _SCRIPT_WITH_EXPECTED_ARGUMENTS_TEMPLATE.format(arg_string, env)
import numpy
def createisosurfacemesh(grid, step=None, offset=None, isolevel=None):
    """
    This function creates an isosurface from voxel data using the
    marching cubes algorithm.
    Returns a mesh.

    **Parameters:**

        `grid` : 3D numpy array containing the voxel data

        `step` : voxel sizes in each direction

        `offset` : coordinate origin in each direction

        `isolevel` : isovalue at which the surface will be created

    **Raises:**

    `gr3.GR3_Error.GR3_ERROR_EXPORT`: Raises GR3_Exception

        +----------------------+-------------------------------+
        | GR3_ERROR_NONE       | on success                    |
        +----------------------+-------------------------------+
        | GR3_ERROR_OPENGL_ERR | if an OpenGL error occured    |
        +----------------------+-------------------------------+
        | GR3_ERROR_OUT_OF_MEM | if a memory allocation failed |
        +----------------------+-------------------------------+
    """
    try:
        # integral values
        input_max = numpy.iinfo(grid.dtype).max
    except ValueError:
        # floating point values are expected to be in range [0, 1]
        input_max = 1
        # NOTE(review): this clamps the caller's array in place -- confirm
        # that mutating the input grid is acceptable.
        grid[grid > 1] = 1
    # Default isolevel is halfway through the input range; out-of-range
    # values are clamped.
    if isolevel is None:
        isolevel = 0.5 * input_max
    elif isolevel < 0:
        isolevel = 0
    elif isolevel > input_max:
        isolevel = input_max
    # Rescale the data and the isolevel into the full uint16 range expected
    # by the C implementation.
    scaling_factor = 1.0 * numpy.iinfo(numpy.uint16).max / input_max
    isolevel = numpy.uint16(isolevel * scaling_factor)
    grid = (grid * scaling_factor).astype(numpy.uint16)
    nx, ny, nz = grid.shape
    # When step/offset are omitted, derive them so the mesh spans [-1, 1]
    # (or stays centered when only one of the two is given).
    if step is None and offset is None:
        step = (2.0/(nx-1), 2.0/(ny-1), 2.0/(nz-1))
        offset = (-1.0, -1.0, -1.0)
    elif offset is None:
        offset = (-step[0] * (nx-1) / 2.0,
                  -step[1] * (ny-1) / 2.0,
                  -step[2] * (nz-1) / 2.0)
    elif step is None:
        step = (-offset[0] * 2.0 / (nx-1),
                -offset[1] * 2.0 / (ny-1),
                -offset[2] * 2.0 / (nz-1))
    # Marshal everything into ctypes values for the gr3 C call; strides are
    # converted from bytes to element counts.
    _mesh = c_uint(0)
    data = grid.ctypes.data_as(POINTER(c_ushort))
    isolevel = c_ushort(isolevel)
    dim_x, dim_y, dim_z = map(c_uint, grid.shape)
    stride_x, stride_y, stride_z = [c_uint(stride // grid.itemsize) for stride in grid.strides]
    step_x, step_y, step_z = map(c_double, step)
    offset_x, offset_y, offset_z = map(c_double, offset)
    _gr3.gr3_createisosurfacemesh(byref(_mesh), data, isolevel,
                                  dim_x, dim_y, dim_z,
                                  stride_x, stride_y, stride_z,
                                  step_x, step_y, step_z,
                                  offset_x, offset_y, offset_z)
    return _mesh
import os
import re
def get_property(prop, project):
    """Read the value assigned to *prop* in the project's __init__.py.

    The assignment must look like ``prop = 'value'`` or ``prop = "value"``.
    """
    init_path = os.path.join(project, '__init__.py')
    with open(init_path) as handle:
        contents = handle.read()
    pattern = r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop)
    match = re.search(pattern, contents)
    return match.group(1)
def get_project_cls_by_name(project_name: str) -> tp.Type[bb.Project]:
    """Look up a BenchBuild project by its name.

    Raises:
        LookupError: if no matching vara-provided project is registered.
    """
    for registry_key in bb.project.ProjectRegistry.projects:
        # Currently we only support vara-provided projects.
        if _is_vara_project(registry_key) and registry_key.startswith(project_name):
            project_cls: tp.Type[
                bb.Project
            ] = bb.project.ProjectRegistry.projects[registry_key]
            return project_cls
    raise LookupError
def get_next_non_summer_quarter(request):
    """
    Return the Term object for the non-summer quarter after the quarter
    referred to in the current user session.
    """
    return get_next_non_summer_term(get_current_quarter(request))
def trick_for_mountaincar(state, done, reward, state_):
    """Reward shaping for MountainCar: use the absolute next-state velocity.

    The environment's own reward is -1 per step until the goal position of
    0.5 is reached; as with MountainCarContinuous v0, there is no penalty
    for climbing the left hill, which acts as a wall. Here
    state[0] is the position (-1.2 .. 0.6) and state[1] the velocity
    (-0.07 .. 0.07); the shaped reward encourages building up speed.
    """
    next_velocity = state_[1]
    return abs(next_velocity)
def create_optimizer(
    learning_rate, num_train_steps,
    warmup_steps=0, warmup_proportion=0, lr_decay_power=1.0,
    layerwise_lr_decay_power=-1, n_transformer_layers=None):
    """Creates an optimizer and training op.

    Builds a polynomially decayed learning rate with linear warmup and,
    optionally, layerwise learning-rate decay across transformer layers.

    Returns:
        (learning_rate, increment_global_step_op, global_step): the scheduled
        learning-rate tensor (or per-layer rates when layerwise decay is on),
        an op that advances the global step, and the global step itself.
    """
    global_step = tf.train.get_or_create_global_step()
    increment_global_step_op = tf.assign(global_step, global_step+1)
    # Polynomial decay of the learning rate over the whole training run.
    # NOTE(review): end_learning_rate=0.2 is unusually high (0.0 is the
    # common choice for polynomial decay) -- confirm this is intended.
    learning_rate = tf.train.polynomial_decay(
        learning_rate,
        global_step,
        num_train_steps,
        end_learning_rate=0.2,
        power=lr_decay_power,
        cycle=False)
    # Linear warmup: scale the LR from 0 to its scheduled value over the
    # first max(num_train_steps * warmup_proportion, warmup_steps) steps.
    warmup_steps = max(num_train_steps * warmup_proportion, warmup_steps)
    learning_rate *= tf.minimum(
        1.0, tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32))
    if layerwise_lr_decay_power > 0:
        learning_rate = _get_layer_lrs(learning_rate, layerwise_lr_decay_power, n_layers=n_transformer_layers)
    return learning_rate, increment_global_step_op, global_step
def build_cpo_transition_matrix(val):
    """ Builds a TransitionMatrix model expression from a Python value.

    If active, this method uses the value cache to return the same CpoExpr for the same value.

    Args:
        val: Value to convert. Iterator or iterators of integers, or existing TransitionMatrix expression.
    Returns:
        Model transition matrix, not editable.
    Raises:
        Exception if conversion is not possible.
    """
    # Already a TransitionMatrix expression: nothing to convert.
    if isinstance(val, CpoExpr) and val.is_type(Type_TransitionMatrix):
        return val

    def _as_tuple_of_tuples(value):
        # Canonical (hashable) form of the matrix used as the cache key.
        assert not is_string(value), "Impossible to build a transition matrix expression from a string"
        assert not isinstance(value, dict), "Impossible to build a transition matrix expression from a dictionary. Select values() or keys()."
        try:
            return tuple(tuple(row) for row in value)
        except TypeError:
            raise CpoException("Impossible to build a transition matrix from value '{}' of type '{}'".format(to_string(value), type(value)))

    if _CACHE_ACTIVE:
        # Reuse (or create and cache) the expression for this value.
        return _CPO_VALUES_FROM_PYTHON.get_or_create('matrix', val, _as_tuple_of_tuples, _create_cpo_transition_matrix)
    return _create_cpo_transition_matrix(_as_tuple_of_tuples(val))
from datetime import datetime
def quarter_to_month(str_date):
    """
    Transform a string representing year and quarter into a date.

    Parameters
    ----------
    str_date : string
        String representing year (YYYY) and quarter (qq) as YYYYQq.

    Returns
    -------
    date : datetime.date or None
        First day of the first month of the quarter, or None when
        ``str_date`` cannot be parsed (the error is logged).
    """
    format_string = '%YQ%m'
    try:
        parsed = datetime.strptime(str_date, format_string)
    except (ValueError, TypeError) as e:
        # Bug fix: the previous code returned a variable that was never
        # assigned when parsing failed, raising a NameError instead of
        # reporting the problem.
        logger.error(e)
        return None
    # Map the quarter number (1-4) to its first month (1, 4, 7, 10).
    month = parsed.month * 3 - 2
    return date(parsed.year, month, parsed.day)
def edge_failure_sampling(failure_scenarios, edge_column):
    """Collect the unique failed edge IDs from a table of failure scenarios.

    Parameters
    ----------
    failure_scenarios : pandas.DataFrame
        Failure scenarios.
    edge_column : str
        Name of the column holding failed edge IDs.

    Returns
    -------
    list
        Unique failed edge IDs (order not guaranteed).
    """
    unique_edges = set(failure_scenarios[edge_column].values.tolist())
    return list(unique_edges)
def basicClusteringProperties(network, clustering):
    """
    compute diversity and related properties for the given clustering
    adds results to node attributes

    Parameters
    ----------
    network : graph object exposing node attributes, degree() and
        neighbors() -- presumably a networkx graph; confirm.
    clustering : str
        Name of the node attribute holding the cluster assignment.
        The literal name 'Cluster' selects the legacy property names.

    Returns
    -------
    dict
        {property_name: {node: value}} with per-node intergroup fraction,
        entropy-based diversity, bridging and centrality, each z-scored
        within its cluster.
    """
    if clustering == 'Cluster':
        properties = ['InterclusterFraction', 'ClusterDiversity', 'ClusterBridging', 'ClusterCentrality']
    else:
        properties = ['fracIntergroup_' + clustering, 'diversity_' + clustering, 'bridging_' + clustering,
                      'centrality_' + clustering]
    results = {prop: {} for prop in properties}
    clusters = {}
    # iterate over each node
    for node in network:
        nodedata = network.nodes[node]
        cluster = nodedata[clustering] if clustering in nodedata else None
        if cluster is not None and cluster != '':
            # build list of nodes in each cluster
            if cluster not in clusters:
                clusters[cluster] = []
            clusters[cluster].append(node)
        clusterCounts = {}  # dict of cluster name and count
        nIntergroup = 0
        degree = float(network.degree(node))
        # walk neighbors and save cluster info
        for neighbor in network.neighbors(node):
            neighborCluster = network.nodes[neighbor][clustering]
            if neighborCluster not in clusterCounts:
                clusterCounts[neighborCluster] = 0.0
            clusterCounts[neighborCluster] += 1.0
            if neighborCluster is not None and cluster != neighborCluster:
                nIntergroup += 1
        # compute diversity and related properties
        nGroups = len(clusterCounts)
        fracIntergroup = float(nIntergroup) / degree if (degree > 0) else 0
        diversity = 0
        # Shannon entropy of the neighbor-cluster distribution.
        p = np.array(list(clusterCounts.values())) / degree
        p = p[np.nonzero(p)]
        diversity = -np.sum(p * np.log(p))
        bridging = 0 if nGroups < 2 else diversity * float(nIntergroup) / (nGroups - 1)
        centrality = (1 - fracIntergroup) * degree / (1 + diversity)
        results[properties[0]][node] = fracIntergroup
        results[properties[1]][node] = diversity
        results[properties[2]][node] = bridging
        results[properties[3]][node] = centrality
    # normalize values within each cluster
    for nodes in clusters.values():
        def normalize(results, nodes, attr):
            # z-score the attribute over the given nodes (no-op when the
            # standard deviation is zero).
            def normalizeArray(arr):
                npArr = np.array(arr)
                sd = np.std(npArr)
                if sd != 0:
                    mn = np.mean(npArr)
                    return ((npArr - mn) / sd).tolist()
                return arr
            vals = normalizeArray([results[attr][node] for node in nodes])
            for v in zip(nodes, vals):
                results[attr][v[0]] = v[1]
        normalize(results, nodes, properties[1])
        normalize(results, nodes, properties[2])
        normalize(results, nodes, properties[3])
    return results
def deserializer(chain) -> Contract:
    """Deploy a TestBytesDeserializer contract on the given chain and return it."""
    # No constructor arguments are needed for the test contract.
    args = []
    contract, hash = chain.provider.deploy_contract('TestBytesDeserializer', deploy_args=args)
    return contract
def get_payload(fetch_me):
    """Get object from s3, reads underlying http stream, returns bytes.

    Args:
        fetch_me (dict): Mandatory. Must contain key:
            - methodArgs
                - Bucket: string. s3 bucket name.
                - Key: string. s3 key name.

    Returns:
        bytes: payload of the s3 obj in bytes

    Raises:
        KeyNotInContextError: s3Fetch or s3Fetch.methodArgs missing
    """
    logger.debug("started")
    try:
        operation_args = fetch_me['methodArgs']
    except KeyError as err:
        raise KeyNotInContextError(
            "s3Fetch missing required key for pypyraws.steps.s3fetch step: "
            "methodArgs") from err
    client_args = fetch_me.get('clientArgs')
    s3_response = pypyraws.aws.service.operation_exec(
        service_name='s3',
        method_name='get_object',
        client_args=client_args,
        operation_args=operation_args)
    logger.debug("reading response stream")
    payload = s3_response['Body']
    logger.debug("returning response bytes")
    logger.debug("done")
    return payload
from typing import List
def repl_remove_attributes_from_sgr(matchobj, remove: List[str]) -> str:
    """Adapted remove_attributes_from_sgr wrapper to be used as a regex repl.

    Args:
        matchobj: regex match whose group(0) is an SGR escape sequence.
        remove: attribute codes to strip from the sequence.

    Returns:
        The sequence with the given attributes removed.
    """
    return remove_attributes_from_sgr(matchobj.group(0), remove)
def _update_data(entity_id, public_key, sso_url, expires_at):
    """
    Update/Create the SAMLProviderData for the given entity ID.

    Return value:
        False if nothing has changed and existing data's "fetched at" timestamp is just updated.
        True if a new record was created. (Either this is a new provider or something changed.)
    """
    existing = SAMLProviderData.current(entity_id)
    fetched_at = now()
    if existing and existing.public_key == public_key and existing.sso_url == sso_url:
        # Same key and SSO URL: just refresh the timestamps on the row.
        existing.expires_at = expires_at
        existing.fetched_at = fetched_at
        existing.save()
        return False
    # New provider, or its key/URL changed: record a fresh row.
    SAMLProviderData.objects.create(
        entity_id=entity_id,
        fetched_at=fetched_at,
        expires_at=expires_at,
        sso_url=sso_url,
        public_key=public_key,
    )
    return True
import time
import random
def my_solver(filename: str) -> str:
    """Dummy solver function.

    Prints a message, sleeps for a random duration (uniform in
    [0, 2) seconds), and returns its input unchanged.

    :type filename: object
    :Return: the same filename as the input
    """
    print("Running my solver")
    pause = random.random() * 2
    time.sleep(pause)
    return filename
def distance_reward_predator(dx, dy, dz):
    r"""Distance-based reward $\exp(-c \cdot d^2)$ for a displacement (dx, dy, dz).

    Returns:
        (reward, distance): the Euclidean norm of the displacement and a
        reward that decays exponentially with its square.
    """
    dist = np.linalg.norm([dx, dy, dz])
    reward = np.exp(-config.reward.coef_distance_reward_predator * dist * dist)
    return reward, dist
import os
def load_combo_catalog():
    """Load a union of the user and global catalogs for convenience"""
    cat_dirs = []
    # Glob the user and global data directories for YAML catalog files.
    for base_dir in (user_data_dir(), global_data_dir()):
        if os.path.isdir(base_dir):
            cat_dirs.append(base_dir + '/*.yaml')
            cat_dirs.append(base_dir + '/*.yml')
    # Configured catalog paths may be directories (globbed) or direct files.
    for path_dir in conf.get('catalog_path', []):
        if path_dir == '':
            continue
        if path_dir.endswith(('yaml', 'yml')):
            cat_dirs.append(path_dir)
        else:
            cat_dirs.append(path_dir + '/*.yaml')
            cat_dirs.append(path_dir + '/*.yml')
    return YAMLFilesCatalog(cat_dirs, name='builtin')
def flags_t_v_chan(data, chan, targets, freq_range=None, pol=[0, 1], **plot_kwargs):
    """Waterfall plot of flagged data in channels vs time.
    Parameters
    ----------
    data : :class:`np.ndarray`
        complex, shape(num_times, num_chans, num_pol)
    chan : :class:`np.ndarray`
        real, shape(num_chans), index number of chan axis
    targets : list of str
        target names/labels for targets in each scan
    freq_range : list
        start and stop frequencies of the array, optional
    pol : list
        list of polarisation descriptions, optional
    plot_kwargs : keyword arguments, optional
        additional keyword arguments for plotting function
    """
    # NOTE(review): mutable default `pol=[0, 1]` is shared across calls; it is
    # only read here, but `pol=None` with an in-body default would be safer.
    npols = data.shape[-1]
    nscans = data.shape[0]
    ncols = npols
    nrows = 1
    # scale the size of the plot by the number of scans but have a min and max plot size
    rowsize = min(max(1.0, data.shape[0] / 50.0), 10.0)
    fig, axes = plt.subplots(nrows, ncols, figsize=(
        ncols * FIG_X, rowsize * FIG_Y), squeeze=False, sharey='row')
    # One waterfall panel per polarisation, all sharing the scan (y) axis.
    for p in range(npols):
        im = axes[0, p].imshow(data[..., p], extent=(
            chan[0], chan[-1], 0, nscans), aspect='auto', origin='lower',
            cmap=plt.cm.jet, **plot_kwargs)
        axes[0, p].set_ylabel('Pol {0} Scans'.format(pol[p]))
        axes[0, p].set_xlabel('Channels')
    # NOTE(review): indexing axes[0, 1] assumes at least two polarisations —
    # confirm npols >= 2 always holds for callers.
    plt.setp(axes[0, 1].get_yticklabels(), visible=False)
    # major tick step
    step = nscans // 25 + 1
    # Labelled major ticks every `step` scans; unlabelled minor ticks on every scan.
    axes[0, 0].set_yticks(np.arange(0, len(targets))[::step]+0.5)
    axes[0, 0].set_yticks(np.arange(0, len(targets))+0.5, minor=True)
    axes[0, 0].set_yticklabels(targets[::step])
    # Add colorbar
    cax = fig.add_axes([0.92, 0.12, 0.02, 0.75])
    cb = fig.colorbar(im, cax=cax)
    cb.set_label('% baselines flagged')
    if freq_range is not None:
        # Only the first two panels get a frequency axis — TODO confirm this
        # is intended when npols > 2.
        for ax in axes.flatten()[0: 2]:
            add_freq_axis(ax, chan_range=[chan[0], chan[-1]], freq_range=freq_range)
    return fig
import numpy
import copy
def canonical_pruning(X, y, sample_weight, initial_mx_formula,
                      loss_function,
                      iterations=100,
                      n_candidates=100,
                      n_kept_best=0,
                      learning_rate=0.1,
                      regularization=10.,
                      verbose=False):
    """
    Represents basic pruning algorithm, which greedily adds
    :param X: data
    :param y: binary labels (0 and 1)
    :param sample_weight: weights
    :param initial_mx_formula:
    :param loss_function: loss function (following hep_ml convention for losses)
    :param iterations: int, how many estimators we shall leave
    :param n_candidates: how many candidate_trees we check on each iteration
    :param n_kept_best: how many classifiers saved from previous iteration
    :param learning_rate: shrinkage, float
    :param regularization: roughly, it is amount of event of each class added to each leaf. Represents a penalty
    :param verbose: bool, print stats at each step?
    :return: new OBDT list classifier.
    """
    assert n_candidates > n_kept_best, "can't keep more then tested on each stage"
    X = numpy.array(X, dtype='float32', order='F')
    # collecting information from formula
    # NOTE(review): each `tree` appears to be a (features, cuts, leaf_values)
    # triple — leaf values are recomputed below; confirm against
    # _matrixnetapplier's tree layout.
    old_trees = []
    mn_applier = _matrixnetapplier.MatrixnetClassifier(BytesIO(initial_mx_formula))
    for depth, n_trees, iterator_trees in mn_applier.iterate_trees():
        for tree in iterator_trees:
            old_trees.append(tree)
    features = list(mn_applier.features)
    # taking divisible by 8
    X, y, w = take_divisible(X, y, sample_weight=sample_weight)
    # fitting loss function
    loss_function = copy.deepcopy(loss_function)
    loss_function.fit(X, y, w)
    new_trees = []
    pred = numpy.zeros(len(X), dtype=float)
    prev_iteration_best_ids = []  # ids of best trees at previous stages
    for iteration in range(iterations):
        # Sample fresh candidates, topped up with the runners-up kept from
        # the previous iteration.
        new_candidate_trees = numpy.random.choice(len(old_trees), replace=False,
                                                  size=n_candidates - len(prev_iteration_best_ids))
        candidate_trees_indices = numpy.concatenate([new_candidate_trees, prev_iteration_best_ids]).astype(int)
        candidate_trees = [old_trees[i] for i in candidate_trees_indices]
        assert len(candidate_trees) == n_candidates
        grads = loss_function.negative_gradient(pred)
        hesss = loss_function.hessian(pred)
        candidate_losses = []
        candidate_new_trees = []
        for tree in candidate_trees:
            # Newton-style leaf refit: gradient sum / (hessian sum + penalty),
            # shrunk by the learning rate. 2**6 leaves == depth-6 trees.
            leaves = compute_leaves(X, tree)
            new_leaf_values = numpy.bincount(leaves, weights=grads, minlength=2 ** 6)
            new_leaf_values /= numpy.bincount(leaves, weights=hesss, minlength=2 ** 6) + regularization
            new_leaf_values *= learning_rate
            new_preds = pred + new_leaf_values[leaves]
            candidate_losses.append(loss_function(new_preds))
            new_tree = tree[0], tree[1], new_leaf_values
            candidate_new_trees.append(new_tree)
        # selecting one with minimal loss
        tree = candidate_new_trees[numpy.argmin(candidate_losses)]
        new_trees.append(tree)
        pred += predict_tree(X, tree)
        # Keep the 2nd..n_kept_best candidates for the next round (the best one
        # was just added); this slice is empty when n_kept_best == 0.
        prev_iteration_best_ids = numpy.take(candidate_trees_indices, numpy.argsort(candidate_losses)[1:n_kept_best])
        if verbose:
            print(iteration, loss_function(pred))
    return OBDTListClassifier(features, trees=new_trees)
import re
def count_arg_nums(method_signature):
    """
    Count the number of arguments in a JNI method signature.

    Walks the descriptor between '(' and ')': each primitive code
    (B, C, D, F, I, J, S, Z), object type ('L...;') or array type
    ('[' prefixes followed by an element type) counts as one argument.

    :param method_signature: method signature (JNI format),
        e.g. "(I[BLjava/lang/String;)V"
    :return: arguments number
    :raises ValueError: if the descriptor contains an unknown type code
    """
    arg_signature = method_signature[method_signature.index('(') + 1:
                                     method_signature.index(')')]
    args_num = 0
    i = 0
    while i < len(arg_signature):
        char = arg_signature[i]
        if char == '[':
            # Array dimension prefix: part of the next argument, not one itself.
            i += 1
            continue
        if char == 'L':
            # Object type descriptor runs up to (and including) the next ';'.
            i = arg_signature.index(';', i) + 1
        elif char in 'BCDFIJSZ':
            i += 1
        else:
            raise ValueError('invalid JNI type descriptor: %r' % char)
        args_num += 1
    return args_num
import netaddr
def ip_to_net_ent_title_ios(ip_addr):
    """ Converts an IP address into an OSI Network Entity Title
    suitable for use in IS-IS on IOS.

    Accepts either a netaddr.IPAddress or anything castable to one.

    >>> ip_to_net_ent_title_ios(IPAddress("192.168.19.1"))
    '49.1921.6801.9001.00'
    """
    if not hasattr(ip_addr, "words"):
        # Not an IPAddress-like object yet; cast it.
        ip_addr = netaddr.IPAddress(ip_addr)
    ip_words = ip_addr.words
    log.debug("Converting IP to OSI ENT format")
    area_id = "49"
    # Zero-pad every octet to three digits, giving one 12-character string,
    # then regroup it into 4-digit chunks.
    padded = "".join("%03d" % int(octet) for octet in ip_words)
    return ".".join([area_id, padded[0:4], padded[4:8], padded[8:12], "00"])
def voiceProgression(key, chordProgression):
    """Voices a chord progression in a specified key using DP.
    Follows eighteenth-century voice leading procedures, as guided by the cost
    function defined in the `chordCost` and `progressionCost` functions.
    Returns a list of four-pitch chords, corresponding to successive Roman
    numerals in the chord progression.
    """
    key = Key(key)
    if isinstance(chordProgression, str):
        # Allow a whitespace-separated string of Roman numerals.
        chordProgression = list(filter(None, chordProgression.split()))
    # dp[i][pitches] = (best cumulative cost of a voicing sequence ending with
    # `pitches` at chord i, predecessor pitches for backtracking).
    dp = [{} for _ in chordProgression]
    for i, numeral in enumerate(chordProgression):
        chord = RomanNumeral(numeral, key)
        voicings = voiceChord(key, chord)
        if i == 0:
            for v in voicings:
                dp[0][v.pitches] = (chordCost(key, v), None)
        else:
            for v in voicings:
                # Pick the cheapest predecessor voicing for this candidate.
                best = (float("inf"), None)
                for pv_pitches, (pcost, _) in dp[i - 1].items():
                    pv = Chord(pv_pitches)
                    ccost = pcost + progressionCost(key, pv, v)
                    if ccost < best[0]:
                        best = (ccost, pv_pitches)
                dp[i][v.pitches] = (best[0] + chordCost(key, v), best[1])
    # Backtrack from the cheapest final voicing.
    cur, (totalCost, _) = min(dp[-1].items(), key=lambda p: p[1][0])
    ret = []
    for i in reversed(range(len(chordProgression))):
        ret.append(Chord(cur, lyric=chordProgression[i]))
        cur = dp[i][cur][1]
    return list(reversed(ret)), totalCost
def metade(valor:float, formatado:bool = False):
    """
    Return half of the given value.

    :param valor: the value to halve
    :param formatado: when True, return the result formatted as Brazilian
        currency (e.g. 'R$5,00') instead of a number
    :return: half the value as a float, or a formatted string when
        ``formatado`` is True
    """
    valor /= 2
    if formatado:
        # Format as currency and swap the decimal point for a comma.
        return 'R${:.2f}'.format(valor).replace('.', ',')
    return valor
def may_data():
    """
    Reference transport coefficients for the free electron mass at 300 K,
    assuming acoustic phonon scattering.

    Parameters
    ----------
    None

    Returns
    -------
    data : ndarray
        | Dimension: (11,5)
        Data from Table 11.1 in Andrew F. May and G. Jeffrey Snyder,
        Introduction to Modelling Thermoelectric Transport at High
        Temperature, Taylor & Francis Inc, 2012, Chapter 11. Column 0 is
        the unitless chemical potential (eta = chempot/k_bT); the remaining
        columns are the Seebeck coefficient (microV/K), the Lorenz
        coefficient (1e-8 V^2/K^2), the carrier concentration
        (1e21 cm^-3) and the unitless Hall coefficient.

    Notes
    -----
    Used in this implementation to test the closed analytic Fermi-Dirac
    integrals. The data given by May is converted to a conduction band and
    thus the Seebeck should be negative; otherwise the values entered
    should be precisely the same as in the reference given above.
    """
    eta = [3, 2, 1, 0, -1, -2, -3, -4, -6, -8, -10]
    seebeck = [432.97, 350.24, 272.50, 204.50, 150.88, 112.39,
               86.09, 68.22, 46.95, 35.40, 28.35]
    lorenz = [1.49, 1.51, 1.54, 1.61, 1.72, 1.86,
              1.99, 2.09, 2.24, 2.32, 2.36]
    carrier = [0.00123, 0.00324, 0.0082, 0.0192, 0.0395, 0.0709,
               0.1126, 0.1634, 0.2872, 0.4355, 0.6044]
    hall = [1.17, 1.17, 1.16, 1.13, 1.11, 1.08, 1.05, 1.04, 1.02, 1.01, 1.01]
    # Stack the columns into the (11, 5) float array the callers expect.
    return np.column_stack((eta, seebeck, lorenz, carrier, hall))
def and_nominal_group(sentence):
    """
    add 'and' between nominal groups separated with nothing
    Input=sentence Output=sentence
    """
    # init
    i = 0
    list_nominal_group = our_list = []
    while i < len(sentence):
        # We start by finding the first nominal group
        nominal_group = determination_nominal_group(sentence, i, 'of')
        position = i
        #We recovery all nominal groups followed the first one
        while nominal_group:
            list_nominal_group = list_nominal_group + [nominal_group]
            i += len(nominal_group)
            nominal_group = determination_nominal_group(sentence, i, 'of')
            #If we have 'and' just after, we recovery the nominal group followed
            if i < len(sentence) and sentence[i] == 'and' and list_nominal_group != []:
                i += 1
                nominal_group = determination_nominal_group(sentence, i, 'of')
        #If the first one of the list is not a pronoun => OK
        if other_functions.there_is_pronoun(list_nominal_group + [nominal_group]) == 0:
            # Rebuild the sentence with an explicit 'and' after each group.
            for j in list_nominal_group:
                our_list = our_list + j + ['and']
            sentence = sentence[:position] + our_list + sentence[i:]
            i = i + len(nominal_group) + len(list_nominal_group)
            list_nominal_group = our_list = []
        #We forgot the first nominal group and we continue just after it
        else:
            i = position + len(list_nominal_group[0])
            list_nominal_group = []
    else:
        i += 1
        list_nominal_group = []
    return sentence
import base64
import zlib
def decompressData(contentData):
    """
    Handles the actual decompression of Base64 data.

    Tries several zlib/deflate/gzip decompression variants on the decoded
    bytes; whichever variant succeeds LAST overwrites ``contentData``.

    Args:
        contentData: String of Base64 content
    Returns:
        contentData: Decompressed content of ASCII printable
    """
    decoded = base64.b64decode(contentData)
    # IO.Compression.DeflateStream
    # NOTE(review): each attempt below overwrites contentData and the bare
    # excepts swallow all errors; if every attempt fails, the ORIGINAL
    # base64 string (not even the decoded bytes) is passed to stripASCII.
    try:
        # 15 is the default parameter
        contentData = zlib.decompress(decoded, 15) # zlib
    except:
        pass
    try:
        # -15 makes it ignore the gzip header
        contentData = zlib.decompress(decoded, -15) # zlib
    except:
        pass
    try:
        contentData = zlib.decompress(decoded, -zlib.MAX_WBITS) # deflate
    except:
        pass
    try:
        contentData = zlib.decompress(decoded, 16 + zlib.MAX_WBITS) # gzip
    except:
        pass
    contentData = stripASCII(contentData)
    return contentData
def compute_edge_measures(ntwk):
    """
    These return edge-based measures
    """
    iflogger.info("Computing edge measures:")
    # All edge measures (google/hub/authority matrices) are currently
    # disabled because they produce very large outputs on big networks
    # (500k+ edges), so an empty mapping is returned.
    measures = dict()
    return measures
def gen_new_axis(x_axis, expr, precision):
    """evaluate lagrange interpolation"""
    # Dense grid over [min, max) of the original axis, stepped by `precision`.
    lo, hi = min(x_axis), max(x_axis)
    new_x = np.arange(lo, hi, precision)
    evaluate = lambdify('x', expr, "numpy")
    return new_x, evaluate(new_x)
from typing import Tuple
def calc_fresnel_coefficients(n1, n2, cos_theta1, cos_theta2 = None) -> Tuple:
    """
    Calculate the Fresnel reflection and transmission amplitude coefficients.

    Args:
        n1: Refractive index in first medium.
        n2: Refractive index in second medium.
        cos_theta1: Cosine of angle of incidence (must be non-negative).
        cos_theta2: Cosine of the refraction angle. When omitted, it is
            derived from ``cos_theta1`` via Snell's law.

    Returns:
        ((r_p, r_s), (t_p, t_s)): reflection and transmission amplitude
        coefficients for p- and s-polarised light.
    """
    assert np.all(cos_theta1>=0)
    if cos_theta2 is None:
        # Snell's law: n1*sin(theta1) = n2*sin(theta2).
        sin_theta1 = (1 - cos_theta1**2)**0.5
        sin_theta2 = n1*sin_theta1/n2
        cos_theta2 = (1 - sin_theta2**2)**0.5
    r_s = (n1*cos_theta1 - n2*cos_theta2)/(n1*cos_theta1 + n2*cos_theta2)
    r_p = (n2*cos_theta1 - n1*cos_theta2)/(n2*cos_theta1 + n1*cos_theta2)
    t_s = 2*n1*cos_theta1/(n1*cos_theta1 + n2*cos_theta2)
    t_p = 2*n1*cos_theta1/(n2*cos_theta1 + n1*cos_theta2)
    return (r_p, r_s), (t_p, t_s)
def _fetch_detailed_dataset(args=None):
    """
    Fetch detailed info by crawling the detailed artwork pages, using
    the links from the basic dataset.
    Parallelized with vislab.utils.distributed.map_through_rq.
    """
    basic_df = get_basic_df(args)
    print("Fetching detailed Wikipaintings dataset by scraping artwork pages.")
    if args is None:
        args = {
            'force_dataset': False,
            'num_workers': 1, 'mem': 2000,
            'cpus_per_task': 1, 'async': True
        }
    db = vislab.util.get_mongodb_client()[DB_NAME]
    collection = db['image_info']
    print("Old collection size: {}".format(collection.count()))
    # NOTE(review): attribute access here vs. subscript access below —
    # the default above is a dict, so `args.force_dataset` would raise
    # AttributeError on that path; presumably callers pass an argparse
    # Namespace. Verify.
    force = args.force_dataset
    if not force:
        # Exclude ids that were already computed.
        image_ids = basic_df.index.tolist()
        image_ids = vislab.util.exclude_ids_in_collection(
            image_ids, collection)
        basic_df = basic_df.ix[image_ids]
    # Chunk up the rows.
    rows = [row.to_dict() for ind, row in basic_df.iterrows()]
    chunk_size = 10
    # NOTE(review): true division makes this a float on Python 3;
    # np.array_split tolerates it historically but `//` would be correct.
    num_chunks = len(rows) / chunk_size
    chunks = np.array_split(rows, num_chunks)
    args_list = [(chunk.tolist(), force) for chunk in chunks]
    # Work the jobs.
    # NOTE(review): `async=` is a reserved keyword from Python 3.7 on —
    # this line is a SyntaxError there; the code targets Python 2 / <=3.6.
    vislab.utils.distributed.map_through_rq(
        vislab.datasets.wikipaintings._fetch_artwork_infos,
        args_list, 'wikipaintings_info',
        num_workers=args['num_workers'], mem=args['mem'],
        cpus_per_task=args['cpus_per_task'], async=args['async'])
    print("Final collection size: {}".format(collection.count()))
    # Assemble into DataFrame to return.
    # Drop artworks without an image.
    orig_df = pd.DataFrame([doc for doc in collection.find()])
    df = orig_df.dropna(subset=['image']).copy()
    # Rename some columns and add an index.
    df['image_url'] = df['image']
    df['date'] = df['dateCreated']
    df.index = pd.Index(df['image_id'], name='image_id')
    # Only take useful columns.
    columns_to_take = [
        'image_id', 'artist_slug', 'artwork_slug', 'date',
        'genre', 'style', 'technique', 'keywords', 'name',
        'page_url', 'image_url'
    ]
    df = df[columns_to_take]
    # Drop artworks with messed up image urls
    # NOTE(review): `.ix` is removed in modern pandas — `.loc` equivalent here.
    good_inds = []
    for ind, row in df.iterrows():
        try:
            str(row['image_url'])
            good_inds.append(ind)
        except:
            pass
    df = df.ix[good_inds]
    df['image_url'] = df['image_url'].apply(lambda x: str(x))
    return df
def crop_image(image):
    """
    Trim away the all-zero margin of an image.

    Parameters
    ----------
    image : numpy.ndarray
        The input image.

    Returns
    -------
    (numpy.ndarray, [int, int])
        The cropped image and the [x, y] offsets of the crop relative to
        the uncropped image.
    False
        If the image has no non-zero pixels.
    """
    magnitude = np.abs(image)
    nonzero_cols = np.flatnonzero(magnitude.sum(axis=0))
    nonzero_rows = np.flatnonzero(magnitude.sum(axis=1))
    if nonzero_cols.size == 0:
        # Nothing to crop: the image is entirely zero.
        print('The image has no non-zero pixels')
        return False
    x0, x1 = nonzero_cols[0], nonzero_cols[-1]
    y0, y1 = nonzero_rows[0], nonzero_rows[-1]
    return image[y0:y1 + 1, x0:x1 + 1], [x0, y0]
def create_node_from_server(server):
    """ Translate AWS EC2 Instance representation into a Node object.
    """
    state = server_status_to_state(server.state)
    availability_zone = server.placement['AvailabilityZone']
    return Node(id=server.id,
                ip=server.private_ip_address,
                extIp=server.public_ip_address,
                az=availability_zone,
                name="",
                state=state)
def if_nametoindex(name):
    """
    Converts interface name to ifindex
    @name - inteface name
    @return - ifindex in case of success, else raises exception
    """
    # libc returns 0 when the interface name is unknown.
    index = libc.if_nametoindex(name)
    if index:
        return index
    raise RuntimeError("Invalid Name")
import os
import subprocess
def run_export(export_s3_uri, export_id):
    """Run spark export

    Args:
        export_s3_uri (str): location of job definition
        export_id (str): ID of export job to process

    Returns:
        str: S3 URI where this export's status is tracked
    """
    status_uri = "s3://{}/export-statuses/{}".format(
        os.getenv("DATA_BUCKET"), export_id
    )
    jar_path = "/opt/raster-foundry/jars/backsplash-export-assembly.jar"
    # Delegate the actual export to the backsplash jar; check_call raises
    # CalledProcessError on a non-zero exit.
    subprocess.check_call(["java", "-jar", jar_path, "-d", export_s3_uri])
    logger.info("Finished exporting %s", export_s3_uri)
    return status_uri
def gauss_seidel_split(A):
    """ split A matrix in additive lower and upper triangular matrices

    Splits A = L + U, where L keeps the diagonal — the form used by the
    Gauss-Seidel iteration L x_{k+1} = b - U x_k.

    Args:
        A (ndarray): input matrix
    Returns:
        L (ndarray): lower triangular matrix (including the diagonal)
        U (ndarray): upper triangular matrix (zero diagonal)
    """
    L = np.tril(A)
    # k=1 excludes the diagonal directly, instead of taking the full upper
    # triangle and zeroing the diagonal afterwards.
    U = np.triu(A, k=1)
    return L,U
def _define_annuli(angle_list, ann, n_annuli, fwhm, radius_int, annulus_width,
                   delta_rot, n_segments, verbose, strict=False):
    """ Function that defines the annuli geometry using the input parameters.
    Returns the parallactic angle threshold, the inner radius and the annulus
    center for each annulus.
    """
    # The outermost annulus is pulled in by one pixel.
    if ann == n_annuli - 1:
        inner_radius = radius_int + (ann * annulus_width - 1)
    else:
        inner_radius = radius_int + ann * annulus_width
    ann_center = inner_radius + (annulus_width / 2)
    pa_threshold = _compute_pa_thresh(ann_center, fwhm, delta_rot)
    # Clamp the PA threshold to 90% of half the total rotation range;
    # a larger threshold would leave too few reference frames.
    mid_range = np.abs(np.amax(angle_list) - np.amin(angle_list)) / 2
    if pa_threshold >= mid_range - mid_range * 0.1:
        new_pa_th = float(mid_range - mid_range * 0.1)
        msg = 'WARNING: PA threshold {:.2f} is too big, recommended '
        msg+=' value for annulus {:.0f}: {:.2f}'
        # NOTE(review): in strict mode this only prints (the raise is
        # commented out) and the threshold is NOT clamped — confirm intended.
        if strict:
            print(msg.format(pa_threshold,ann, new_pa_th))
            #raise ValueError(msg.format(pa_threshold,ann, new_pa_th))
        else:
            print('PA threshold {:.2f} is likely too big, will be set to '
                  '{:.2f}'.format(pa_threshold, new_pa_th))
            pa_threshold = new_pa_th
    if verbose:
        if pa_threshold > 0:
            print('Ann {}    PA thresh: {:5.2f}    Ann center: '
                  '{:3.0f}    N segments: {} '.format(ann + 1, pa_threshold,
                                                      ann_center, n_segments))
        else:
            print('Ann {}    Ann center: {:3.0f}    N segments: '
                  '{} '.format(ann + 1, ann_center, n_segments))
    return pa_threshold, inner_radius, ann_center
import torch
import tqdm
import sys
def train(cfg: dict):
    """ Train a model for multiple epochs with a given configuration. """
    # Optional determinism: fixed seed + deterministic cuDNN kernels.
    global_seed = cfg.get('global_seed')
    if global_seed is not None:
        torch.manual_seed(global_seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    data = Traffic4Cast20(
        "/publicdata/",
        train=True,
        city=cfg['city'],
        single_sample=cfg['single_sample'],
        normalised=cfg.get('normalised', False),
        time_diff=cfg.get('time_diff', 0),
        masking=cfg.get('masking'),
        sparse=cfg.get('sparse', False),
        seed=cfg.get('data_seed'),
    )
    # Train/validation split (default 85/15).
    split = int(cfg.get('valid_split', .85) * len(data))
    train_data, valid_data = random_split(data, [split, len(data) - split])
    train_loader = DataLoader(train_data, shuffle=True, batch_size=cfg['batch_size'], num_workers=cfg['num_workers'])
    valid_loader = DataLoader(valid_data, shuffle=False, batch_size=cfg['batch_size'], num_workers=cfg['num_workers'])
    model = get_model(cfg).to(cfg['device'])
    loss_func = nn.MSELoss()
    optimiser = optim.Adam(model.parameters(), lr=cfg['lr'])
    logger = Logger(cfg)
    if cfg["log_tensorboard"]:
        logger.start_tb()
    # Baseline evaluation before any update.
    evaluate(model, loss_func, train_loader, logger.train())
    print(f"Train: {logger.summarise(model):.4f}", end='')
    evaluate(model, loss_func, valid_loader, logger.valid())
    print(f" -- Valid: {logger.summarise(model):.4f}")
    for epoch in range(1, cfg['num_epochs'] + 1):
        # NOTE(review): `import tqdm` imports the MODULE, but `tqdm(...)` here
        # calls it — this needs `from tqdm import tqdm` to work. Verify.
        with tqdm(train_loader, file=sys.stdout) as pbar:
            pbar.set_description(f"Epoch {epoch: 3d}")
            update(model=model, loss_func=loss_func, loader=pbar, opt=optimiser, logger=logger.train(), progress=True)
        avg_train_err = logger.summarise(model)
        train_msg = f"Train: {avg_train_err: .4f}"
        evaluate(model=model, loss_func=loss_func, loader=valid_loader, logger=logger.valid())
        avg_valid_err = logger.summarise(model)
        valid_msg = f"Valid: {avg_valid_err: .4f}"
        print(" -- ".join([train_msg, valid_msg]))
    # NOTE(review): nothing useful is returned; metrics live in the logger.
    return None, None
try:  # Python 2
    from HTMLParser import HTMLParser
    import urlparse
except ImportError:  # Python 3
    from html.parser import HTMLParser
    import urllib.parse as urlparse
def get_links_from_html(html_body):
    """
    Extract all <a></a> links from html body

    :param html_body: HTML document as a string
    :returns: list of dicts with links data — 'href' is the parsed URL,
        'org_href' the raw attribute value, 'name' the link text
    """
    links = []
    class MyHTMLParser(HTMLParser):
        def handle_starttag(self, tag, attrs):
            # Every start tag resets the collection context.
            self.context_data = {}
            if tag == 'a':
                data = dict(attrs)
                if 'href' in data:
                    data['org_href'] = data['href']
                    data['href'] = urlparse.urlparse(data['href'])
                    self.context_data.update(data)
        def handle_data(self, data):
            # NOTE(review): text that follows a closed link (before the next
            # tag) still overwrites its 'name', because context_data is only
            # reset on start tags.
            if getattr(self, 'context_data', {}).get('href'):
                self.context_data['name'] = data
        def handle_endtag(self, tag):
            if tag == 'a' and self.context_data.get('href'):
                links.append(self.context_data)
    parser = MyHTMLParser()
    parser.feed(html_body)
    return links
def adj_to_networkx(graphs):
    """Convert a sequence of adjacency matrices to networkx graph objects."""
    return list(map(nx.from_numpy_array, graphs))
async def add_datasource(request: Request, call_next):
    """
    Attach the data source to the request.state.
    Aborts with 404 when the `source` query parameter names an unknown
    data source; otherwise stores it on request.state.source for handlers.
    """
    # Retrieve the datas ource from query param.
    source = data_source(request.query_params.get("source", default="jhu"))
    # Abort with 404 if source cannot be found.
    if not source:
        return Response("The provided data-source was not found.", status_code=404)
    # Attach source to request.
    request.state.source = source
    # Move on...
    LOGGER.info(f"source provided: {source.__class__.__name__}")
    response = await call_next(request)
    return response
from typing import Optional
from typing import List
from typing import Union
from typing import OrderedDict
def summary(
    data,
    var_names: Optional[List[str]] = None,
    fmt: str = "wide",
    kind: str = "all",
    round_to=None,
    include_circ=None,
    stat_funcs=None,
    extend=True,
    credible_interval=None,
    order="C",
    index_origin=None,
    skipna=False,
    coords: Optional[CoordSpec] = None,
    dims: Optional[DimSpec] = None,
) -> Union[pd.DataFrame, xr.Dataset]:
    """Create a data frame with summary statistics.
    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_dataset for details
    var_names : list
        Names of variables to include in summary
    fmt : {'wide', 'long', 'xarray'}
        Return format is either pandas.DataFrame {'wide', 'long'} or xarray.Dataset {'xarray'}.
    kind : {'all', 'stats', 'diagnostics'}
        Whether to include the `stats`: `mean`, `sd`, `hpd_3%`, `hpd_97%`, or the `diagnostics`:
        `mcse_mean`, `mcse_sd`, `ess_bulk`, `ess_tail`, and `r_hat`. Default to include `all` of
        them.
    round_to : int
        Number of decimals used to round results. Defaults to 2. Use "none" to return raw numbers.
    include_circ : bool
        Whether to include circular statistics
    stat_funcs : dict
        A list of functions or a dict of functions with function names as keys used to calculate
        statistics. By default, the mean, standard deviation, simulation standard error, and
        highest posterior density intervals are included.
        The functions will be given one argument, the samples for a variable as an nD array,
        The functions should be in the style of a ufunc and return a single number. For example,
        `np.mean`, or `scipy.stats.var` would both work.
    extend : boolean
        If True, use the statistics returned by ``stat_funcs`` in addition to, rather than in place
        of, the default statistics. This is only meaningful when ``stat_funcs`` is not None.
    credible_interval : float, optional
        Credible interval to plot. Defaults to 0.94. This is only meaningful when ``stat_funcs`` is
        None.
    order : {"C", "F"}
        If fmt is "wide", use either C or F unpacking order. Defaults to C.
    index_origin : int
        If fmt is "wide, select n-based indexing for multivariate parameters.
        Defaults to rcParam data.index.origin, which is 0.
    skipna : bool
        If true ignores nan values when computing the summary statistics, it does not affect the
        behaviour of the functions passed to ``stat_funcs``. Defaults to false.
    coords: Dict[str, List[Any]], optional
        Coordinates specification to be used if the ``fmt`` is ``'xarray'``.
    dims: Dict[str, List[str]], optional
        Dimensions specification for the variables to be used if the ``fmt`` is ``'xarray'``.
    Returns
    -------
    pandas.DataFrame or xarray.Dataset
        Return type dicated by `fmt` argument.
        Return value will contain summary statistics for each variable. Default statistics are:
        `mean`, `sd`, `hpd_3%`, `hpd_97%`, `mcse_mean`, `mcse_sd`, `ess_bulk`, `ess_tail`, and
        `r_hat`.
        `r_hat` is only computed for traces with 2 or more chains.
    Examples
    --------
    .. ipython::
        In [1]: import arviz as az
           ...: data = az.load_arviz_data("centered_eight")
           ...: az.summary(data, var_names=["mu", "tau"])
    Other statistics can be calculated by passing a list of functions
    or a dictionary with key, function pairs.
    .. ipython::
        In [1]: import numpy as np
           ...: def median_sd(x):
           ...:     median = np.percentile(x, 50)
           ...:     sd = np.sqrt(np.mean((x-median)**2))
           ...:     return sd
           ...:
           ...: func_dict = {
           ...:     "std": np.std,
           ...:     "median_std": median_sd,
           ...:     "5%": lambda x: np.percentile(x, 5),
           ...:     "median": lambda x: np.percentile(x, 50),
           ...:     "95%": lambda x: np.percentile(x, 95),
           ...: }
           ...: az.summary(
           ...:     data,
           ...:     var_names=["mu", "tau"],
           ...:     stat_funcs=func_dict,
           ...:     extend=False
           ...: )
    """
    # -- normalise arguments and fall back to rcParams defaults --
    extra_args = {}  # type: Dict[str, Any]
    if coords is not None:
        extra_args["coords"] = coords
    if dims is not None:
        extra_args["dims"] = dims
    if index_origin is None:
        index_origin = rcParams["data.index_origin"]
    if credible_interval is None:
        credible_interval = rcParams["stats.credible_interval"]
    else:
        if not 1 >= credible_interval > 0:
            raise ValueError("The value of credible_interval should be in the interval (0, 1]")
    posterior = convert_to_dataset(data, group="posterior", **extra_args)
    var_names = _var_names(var_names, posterior)
    posterior = posterior if var_names is None else posterior[var_names]
    # -- validate output format and unpacking order --
    fmt_group = ("wide", "long", "xarray")
    if not isinstance(fmt, str) or (fmt.lower() not in fmt_group):
        raise TypeError("Invalid format: '{}'. Formatting options are: {}".format(fmt, fmt_group))
    unpack_order_group = ("C", "F")
    if not isinstance(order, str) or (order.upper() not in unpack_order_group):
        raise TypeError(
            "Invalid order: '{}'. Unpacking options are: {}".format(order, unpack_order_group)
        )
    alpha = 1 - credible_interval
    # -- user-supplied statistics, applied over the (chain, draw) dims --
    extra_metrics = []
    extra_metric_names = []
    if stat_funcs is not None:
        if isinstance(stat_funcs, dict):
            for stat_func_name, stat_func in stat_funcs.items():
                extra_metrics.append(
                    xr.apply_ufunc(
                        _make_ufunc(stat_func), posterior, input_core_dims=(("chain", "draw"),)
                    )
                )
                extra_metric_names.append(stat_func_name)
        else:
            for stat_func in stat_funcs:
                extra_metrics.append(
                    xr.apply_ufunc(
                        _make_ufunc(stat_func), posterior, input_core_dims=(("chain", "draw"),)
                    )
                )
                extra_metric_names.append(stat_func.__name__)
    # -- default summary statistics (mean, sd, HPD, optional circular ones) --
    if extend and kind in ["all", "stats"]:
        mean = posterior.mean(dim=("chain", "draw"), skipna=skipna)
        sd = posterior.std(dim=("chain", "draw"), ddof=1, skipna=skipna)
        hpd_lower, hpd_higher = xr.apply_ufunc(
            _make_ufunc(hpd, n_output=2),
            posterior,
            kwargs=dict(credible_interval=credible_interval, multimodal=False, skipna=skipna),
            input_core_dims=(("chain", "draw"),),
            output_core_dims=tuple([] for _ in range(2)),
        )
        if include_circ:
            nan_policy = "omit" if skipna else "propagate"
            circ_mean = xr.apply_ufunc(
                _make_ufunc(st.circmean),
                posterior,
                kwargs=dict(high=np.pi, low=-np.pi, nan_policy=nan_policy),
                input_core_dims=(("chain", "draw"),),
            )
            # Numba-accelerated circular std when available; scipy otherwise.
            _numba_flag = Numba.numba_flag
            func = None
            if _numba_flag:
                func = _circular_standard_deviation
                kwargs_circ_std = dict(high=np.pi, low=-np.pi, skipna=skipna)
            else:
                func = st.circstd
                kwargs_circ_std = dict(high=np.pi, low=-np.pi, nan_policy=nan_policy)
            circ_sd = xr.apply_ufunc(
                _make_ufunc(func),
                posterior,
                kwargs=kwargs_circ_std,
                input_core_dims=(("chain", "draw"),),
            )
            circ_mcse = xr.apply_ufunc(
                _make_ufunc(_mc_error),
                posterior,
                kwargs=dict(circular=True),
                input_core_dims=(("chain", "draw"),),
            )
            circ_hpd_lower, circ_hpd_higher = xr.apply_ufunc(
                _make_ufunc(hpd, n_output=2),
                posterior,
                kwargs=dict(credible_interval=credible_interval, circular=True, skipna=skipna),
                input_core_dims=(("chain", "draw"),),
                output_core_dims=tuple([] for _ in range(2)),
            )
    # -- convergence diagnostics (computed across chains in one pass) --
    if kind in ["all", "diagnostics"]:
        mcse_mean, mcse_sd, ess_mean, ess_sd, ess_bulk, ess_tail, r_hat = xr.apply_ufunc(
            _make_ufunc(_multichain_statistics, n_output=7, ravel=False),
            posterior,
            input_core_dims=(("chain", "draw"),),
            output_core_dims=tuple([] for _ in range(7)),
        )
    # Combine metrics
    metrics = []
    metric_names = []
    if extend:
        metrics_names_ = (
            "mean",
            "sd",
            "hpd_{:g}%".format(100 * alpha / 2),
            "hpd_{:g}%".format(100 * (1 - alpha / 2)),
            "mcse_mean",
            "mcse_sd",
            "ess_mean",
            "ess_sd",
            "ess_bulk",
            "ess_tail",
            "r_hat",
        )
        if kind == "all":
            metrics_ = (
                mean,
                sd,
                hpd_lower,
                hpd_higher,
                mcse_mean,
                mcse_sd,
                ess_mean,
                ess_sd,
                ess_bulk,
                ess_tail,
                r_hat,
            )
        elif kind == "stats":
            metrics_ = (mean, sd, hpd_lower, hpd_higher)
            metrics_names_ = metrics_names_[:4]
        elif kind == "diagnostics":
            metrics_ = (mcse_mean, mcse_sd, ess_mean, ess_sd, ess_bulk, ess_tail, r_hat)
            metrics_names_ = metrics_names_[4:]
        metrics.extend(metrics_)
        metric_names.extend(metrics_names_)
    if include_circ:
        metrics.extend((circ_mean, circ_sd, circ_hpd_lower, circ_hpd_higher, circ_mcse))
        metric_names.extend(
            (
                "circular_mean",
                "circular_sd",
                "circular_hpd_{:g}%".format(100 * alpha / 2),
                "circular_hpd_{:g}%".format(100 * (1 - alpha / 2)),
                "circular_mcse",
            )
        )
    metrics.extend(extra_metrics)
    metric_names.extend(extra_metric_names)
    joined = (
        xr.concat(metrics, dim="metric").assign_coords(metric=metric_names).reset_coords(drop=True)
    )
    # -- assemble output in the requested format --
    if fmt.lower() == "wide":
        dfs = []
        for var_name, values in joined.data_vars.items():
            if len(values.shape[1:]):
                # Multidimensional variable: one row per index, labelled
                # "name[i,j,...]" with index_origin-based indices.
                metric = list(values.metric.values)
                data_dict = OrderedDict()
                for idx in np.ndindex(values.shape[1:] if order == "C" else values.shape[1:][::-1]):
                    if order == "F":
                        idx = tuple(idx[::-1])
                    ser = pd.Series(values[(Ellipsis, *idx)].values, index=metric)
                    key_index = ",".join(map(str, (i + index_origin for i in idx)))
                    key = "{}[{}]".format(var_name, key_index)
                    data_dict[key] = ser
                df = pd.DataFrame.from_dict(data_dict, orient="index")
                df = df.loc[list(data_dict.keys())]
            else:
                df = values.to_dataframe()
                df.index = list(df.index)
                df = df.T
            dfs.append(df)
        summary_df = pd.concat(dfs, sort=False)
    elif fmt.lower() == "long":
        df = joined.to_dataframe().reset_index().set_index("metric")
        df.index = list(df.index)
        summary_df = df
    else:
        # format is 'xarray'
        summary_df = joined
    # -- rounding: explicit round_to wins; otherwise per-column defaults --
    if (round_to is not None) and (round_to not in ("None", "none")):
        summary_df = summary_df.round(round_to)
    elif round_to not in ("None", "none") and (fmt.lower() in ("long", "wide")):
        # Don't round xarray object by default (even with "none")
        decimals = {
            col: 3
            if col not in {"ess_mean", "ess_sd", "ess_bulk", "ess_tail", "r_hat"}
            else 2
            if col == "r_hat"
            else 0
            for col in summary_df.columns
        }
        summary_df = summary_df.round(decimals)
    return summary_df
def matrix_multiply(MM):
    """Return the left-to-right product M[0] * M[1] * M[2] ... of a list of matrices."""
    product = MM[0]
    # fold the remaining factors into the running product, in order
    for idx in range(1, len(MM)):
        product = dot(product, MM[idx])
    return product
import requests
import html
def urban_lookup(word):
    """Return the list of definitions for *word* scraped from Urban Dictionary.

    Fetches ``UD_URL % word`` and collects the text content of every
    ``<div class="definition">`` element.  Returns an empty list on any
    unsuccessful HTTP response.
    """
    from html.parser import HTMLParser

    class _DefinitionExtractor(HTMLParser):
        """Collects the text inside <div class="definition"> elements."""

        def __init__(self):
            super().__init__()
            self.definitions = []
            self._depth = 0  # div-nesting depth inside the current definition

        def handle_starttag(self, tag, attrs):
            if tag != 'div':
                return
            if self._depth:
                self._depth += 1
            elif ('class', 'definition') in attrs:
                self._depth = 1
                self.definitions.append('')

        def handle_endtag(self, tag):
            if tag == 'div' and self._depth:
                self._depth -= 1

        def handle_data(self, data):
            if self._depth:
                self.definitions[-1] += data

    resp = requests.get(UD_URL % word)
    if resp.ok:  # any 2xx/3xx status
        # Fixes two runtime AttributeErrors: requests Responses expose
        # .text, not .read(), and the stdlib `html` module has no
        # fromstring() (that is lxml.html) — parse with HTMLParser instead.
        extractor = _DefinitionExtractor()
        extractor.feed(resp.text)
        return extractor.definitions
    return []
from typing import List
from typing import Dict
from typing import Optional
from typing import Any
def encode_sequences(
    sequences: List[str],
    vocab: Dict[str, int],
    target_size: Optional[int] = None,
    eos: int = -1,
    sos: Optional[int] = None,
    pad: Optional[int] = None,
    **kwargs: Any,
) -> np.ndarray:
    """Encode character sequences using a given vocab as mapping

    Args:
        sequences: the list of character sequences of size N
        vocab: the ordered vocab to use for encoding
        target_size: maximum length of the encoded data
        eos: encoding of End Of String
        sos: optional encoding of Start Of String
        pad: optional encoding for padding. In case of padding, all sequences are followed by 1 EOS then PAD

    Returns:
        the padded encoded data as a tensor

    Raises:
        ValueError: if ``eos``, ``sos`` or ``pad`` collides with a valid vocab index
    """
    if 0 <= eos < len(vocab):
        raise ValueError("argument 'eos' needs to be outside of vocab possible indices")

    if not isinstance(target_size, int):
        # default=0 keeps an empty `sequences` list from raising on max()
        target_size = max((len(w) for w in sequences), default=0)
        # `is not None` (not truthiness): 0 is falsy but must still be
        # validated/used below rather than silently ignored
        if sos is not None:
            target_size += 1
        if pad is not None:
            target_size += 1

    # Pad all sequences
    if pad is not None:  # pad with padding symbol
        if 0 <= pad < len(vocab):
            raise ValueError(
                "argument 'pad' needs to be outside of vocab possible indices"
            )
        # In that case, add EOS at the end of the word before padding
        encoded_data = np.full([len(sequences), target_size], pad, dtype=np.int32)
    else:  # pad with eos symbol
        encoded_data = np.full([len(sequences), target_size], eos, dtype=np.int32)

    for idx, seq in enumerate(sequences):
        encoded_seq = encode_sequence(seq, vocab)
        if pad is not None:  # add eos at the end of the sequence
            encoded_seq.append(eos)
        encoded_data[idx, : min(len(encoded_seq), target_size)] = encoded_seq[
            : min(len(encoded_seq), target_size)
        ]

    if sos is not None:  # place sos symbol at the beginning of each sequence
        if 0 <= sos < len(vocab):
            raise ValueError(
                "argument 'sos' needs to be outside of vocab possible indices"
            )
        # flattened roll is safe here: the only cross-row wrap lands in
        # column 0, which is overwritten with sos on the next line
        encoded_data = np.roll(encoded_data, 1)
        encoded_data[:, 0] = sos

    return encoded_data
def form_columns(form):
    """
    :param form: Taken from requests.form
    :return: columns: list of slugified column names
             labels: dict mapping string labels of special column types
                     (observed_date, latitude, longitude, location)
                     to names of columns
    """
    prefix = 'col_name_'
    columns = []
    labels = {}
    for field, label in form.items():
        # only fields named col_name_* describe columns
        if not field.startswith(prefix):
            continue
        column = field.replace(prefix, "")
        columns.append(column)
        # e.g labels['observed_date'] = 'date'
        labels[label] = column
    return columns, labels
def resolve_bbox_order(bbox, crs, size):
    """
    Utility that puts the OGC WMS/WCS BBox in the order specified by the CRS.
    """
    # walk pairs forward for north-first CRS axes, backwards otherwise
    step = 1 if pyproj.CRS(crs).axis_info[0].direction == "north" else -1
    lat_start, lon_start = bbox[:2][::step]
    lat_stop, lon_stop = bbox[2:][::step]
    ordered_size = size[::step]
    return {
        "lat": [lat_start, lat_stop, ordered_size[0]],
        "lon": [lon_start, lon_stop, ordered_size[1]],
    }
import collections
import collections.abc
def _r(val):
"""
Convert val to valid R code
"""
if isinstance(val, str):
# no quoting quote
if val.startswith("quote("):
return val
return '"{0}"'.format(val)
if val is True:
return "TRUE"
if val is False:
return "FALSE"
if isinstance(val, collections.Iterable):
return "c({0})".format(','.join([_r(v) for v in val]))
# this could probably be more robust
if hasattr(val, 'r_repr'):
return val.r_repr()
return val | 6a6fbcbfd0a32105aa2b2d796525421e9d156d25 | 3,628,095 |
def bin2int(buf):
    """The reverse of int2bin: convert a big-endian binary buffer to an integer.

    Accepts anything the ``bytearray`` constructor does (bytes, bytearray,
    or an iterable of ints); an empty buffer yields 0.
    """
    # int.from_bytes performs the shift-and-or loop in C
    return int.from_bytes(bytearray(buf), 'big')
from vardefunc.noise import decsiz
from vsutil import get_y
from typing import Union
from typing import Tuple
def main() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Vapoursynth filtering"""
    src = JP_BD.clip_cut
    panorama = flt.panner_x(src, JP_BD.workdir.to_str() + r"/assets/ED/FGCBD_NCED1_panorama.png")

    # light chroma-aware denoise, then regraining
    denoised = decsiz(panorama, min_in=164 << 8, max_in=204 << 8)
    grained = flt.grain(denoised, strength=0.2, luma_scaling=6)

    # binary luma mask of the near-white regions, eroded then blurred
    merge_mask = core.std.Expr(get_y(panorama), f"x {233 << 8} > {255 << 8} 0 ?")
    merge_mask = core.morpho.Close(merge_mask, size=3).std.Minimum().std.Minimum()
    blur_kernel = [1, 1, 1, 1, 1, 1, 1, 1, 1]
    for _ in range(4):
        merge_mask = merge_mask.std.Convolution(matrix=blur_kernel)

    # paste pure white over the masked areas of the grained clip
    white = core.std.BlankClip(grained).std.Invert()
    return core.std.MaskedMerge(grained, white, merge_mask)
import base64
import six
def UrlSafeB64Decode(message):
    """wrapper of base64.urlsafe_b64decode.

    Handles str/bytes input directly with the Python 3 stdlib instead of
    the `six` compatibility shim (behavior is unchanged: text input is
    UTF-8 encoded before decoding, and the result is returned as text).

    Args:
        message: string or binary to decode

    Returns:
        decoded data in string format.
    """
    if isinstance(message, str):
        # six.ensure_binary equivalent: UTF-8 encode text input
        message = message.encode('utf-8')
    data = base64.urlsafe_b64decode(message)
    # six.ensure_str equivalent: b64decode always returns bytes
    return data.decode('utf-8')
def crypto_box_open_afternm(ciphertext, nonce, k):
    """
    Decrypts and returns the encrypted message ``ciphertext``, using the shared
    key ``k`` and the nonce ``nonce``.

    :param ciphertext: bytes
    :param nonce: bytes
    :param k: bytes
    :rtype: bytes
    """
    # validate argument sizes before touching the C layer
    if len(nonce) != crypto_box_NONCEBYTES:
        raise exc.ValueError("Invalid nonce")
    if len(k) != crypto_box_BEFORENMBYTES:
        raise exc.ValueError("Invalid shared key")

    # NaCl expects the ciphertext prefixed with BOXZEROBYTES zero bytes
    padded_ct = (b"\x00" * crypto_box_BOXZEROBYTES) + ciphertext
    message_buf = ffi.new("unsigned char[]", len(padded_ct))

    rc = lib.crypto_box_open_afternm(
        message_buf, padded_ct, len(padded_ct), nonce, k)
    ensure(rc == 0, "An error occurred trying to decrypt the message",
           raising=exc.CryptoError)

    # strip the ZEROBYTES padding from the plaintext buffer
    return ffi.buffer(message_buf, len(padded_ct))[crypto_box_ZEROBYTES:]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.