| content | id |
|---|---|
def find_best_classifier(data, possible_classifiers, target_classifier):
"""Given a list of points, a list of possible Classifiers to use as tests,
and a Classifier for determining the true classification of each point,
finds and returns the classifier with the lowest disorder. Breaks ties by
preferring classifiers that appear earlier in the list. If the best
classifier has only one branch, raises NoGoodClassifiersError."""
min_disorder = INF
for test in possible_classifiers:
avg_disorder = average_test_disorder(data, test, target_classifier)
if avg_disorder < min_disorder:
best_test = test
min_disorder = avg_disorder
if len(split_on_classifier(data, best_test))==1:
raise NoGoodClassifiersError
return best_test
| 23,300
|
def get_product_type_name(stac_item):
""" Create a ProductType name from a STAC Items metadata
"""
properties = stac_item['properties']
assets = stac_item['assets']
parts = []
platform = properties.get('platform') or properties.get('eo:platform')
instruments = properties.get('instruments') or \
properties.get('eo:instruments')
constellation = properties.get('constellation') or \
properties.get('eo:constellation')
mission = properties.get('mission') or properties.get('eo:mission')
if platform:
parts.append(platform)
if instruments:
parts.extend(instruments)
if constellation:
parts.append(constellation)
if mission:
parts.append(mission)
bands = properties.get('eo:bands')
if not bands:
bands = []
for asset in assets.values():
bands.extend(asset.get('eo:bands', []))
parts.extend([band['name'] for band in bands])
if not parts:
raise RegistrationError(
'Failed to generate Product type name from metadata'
)
return '_'.join(parts)
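# A minimal usage sketch (added for illustration; the STAC item below is a
# hypothetical example, not taken from the original code base):
stac_item = {
    "properties": {
        "platform": "sentinel-2a",
        "instruments": ["msi"],
        "eo:bands": [{"name": "B04"}, {"name": "B08"}],
    },
    "assets": {},
}
get_product_type_name(stac_item)  # -> 'sentinel-2a_msi_B04_B08'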
| 23,301
|
def get_show_default():
""" gets the defaults """
return SHOW_DEFAULT
| 23,302
|
def test_can_build_lookup_table_and_use_it_for_known_values():
"""Functional (a.k.a acceptance) test for LookupTable"""
# John prepares data to be looked up
ts = array([0.1, 1.1, 2.1])
x1 = array([10.2, -1.4, 4.1])
x2 = array([0.1, 0.01, 0.4])
# John calculates "trajectory" for his data
table = LookupTable({
'name': 'lookup',
'tdata': ts,
'ics': dict(zip(['x1', 'x2'], [x1, x2])),
})
traj = table.compute('ltable')
# Now John can retrieve his values from table
for i, t in enumerate(ts):
assert traj(t) == Point({'coordnames': ['x1', 'x2'], 'coordarray': [x1[i], x2[i]]})
assert traj(t, 'x1') == Point({'x1': x1[i]})
assert traj(t, 'x2') == Point({'x2': x2[i]})
# John can get only those values, that he has previously inserted
with pytest.raises(ValueError):
traj(0.4)
with pytest.raises(ValueError):
traj(0.4, 'x1')
with pytest.raises(ValueError):
traj(0.4, 'x2')
| 23,303
|
def sample_langevin_v2(x, model, stepsize, n_steps, noise_scale=None, intermediate_samples=False,
clip_x=None, clip_grad=None, reject_boundary=False, noise_anneal=None,
spherical=False, mh=False, temperature=None, norm=False, cut=True):
"""Langevin Monte Carlo
x: torch.Tensor, initial points
model: An energy-based model. returns energy
stepsize: float
n_steps: integer
noise_scale: Optional. float. If None, set to np.sqrt(stepsize * 2)
clip_x : tuple (start, end) or None. Boundary of the square domain.
reject_boundary: If True, reject out-of-domain samples; otherwise clip them.
"""
assert not ((stepsize is None) and (noise_scale is None)), 'stepsize and noise_scale cannot be None at the same time'
if noise_scale is None:
noise_scale = np.sqrt(stepsize * 2)
if stepsize is None:
stepsize = (noise_scale ** 2) / 2
noise_scale_ = noise_scale
stepsize_ = stepsize
if temperature is None:
temperature = 1.
# initial data
x.requires_grad = True
E_x = model(x)
grad_E_x = autograd.grad(E_x.sum(), x, only_inputs=True)[0]
if clip_grad is not None:
grad_E_x = clip_vector_norm(grad_E_x, max_norm=clip_grad)
E_y = E_x; grad_E_y = grad_E_x;
l_samples = [x.detach().to('cpu')]
l_dynamics = []; l_drift = []; l_diffusion = []; l_accept = []
for i_step in range(n_steps):
noise = torch.randn_like(x) * noise_scale_
dynamics = - stepsize_ * grad_E_x / temperature + noise
y = x + dynamics
reject = torch.zeros(len(y), dtype=torch.bool)
if clip_x is not None:
if reject_boundary:
accept = ((y >= clip_x[0]) & (y <= clip_x[1])).view(len(x), -1).all(dim=1)
reject = ~ accept
y[reject] = x[reject]
else:
y = torch.clamp(y, clip_x[0], clip_x[1])
if norm:
y = y/y.sum(dim=(2,3)).view(-1,1,1,1)
if spherical:
y = y / y.norm(dim=1, p=2, keepdim=True)
# y_accept = y[~reject]
# E_y[~reject] = model(y_accept)
# grad_E_y[~reject] = autograd.grad(E_y.sum(), y_accept, only_inputs=True)[0]
E_y = model(y)
grad_E_y = autograd.grad(E_y.sum(), y, only_inputs=True)[0]
if clip_grad is not None:
grad_E_y = clip_vector_norm(grad_E_y, max_norm=clip_grad)
if mh:
y_to_x = ((grad_E_x + grad_E_y) * stepsize_ - noise).view(len(x), -1).norm(p=2, dim=1, keepdim=True) ** 2
x_to_y = (noise).view(len(x), -1).norm(dim=1, keepdim=True, p=2) ** 2
transition = - (y_to_x - x_to_y) / 4 / stepsize_ # B x 1
prob = -E_y + E_x
accept_prob = torch.exp((transition + prob) / temperature)[:,0] # B
reject = (torch.rand_like(accept_prob) > accept_prob) # | reject
y[reject] = x[reject]
E_y[reject] = E_x[reject]
grad_E_y[reject] = grad_E_x[reject]
x = y; E_x = E_y; grad_E_x = grad_E_y
l_accept.append(~reject)
x = y; E_x = E_y; grad_E_x = grad_E_y
if noise_anneal is not None:
noise_scale_ = noise_scale / (1 + i_step)
l_dynamics.append(dynamics.detach().cpu())
l_drift.append((- stepsize * grad_E_x).detach().cpu())
l_diffusion.append(noise.detach().cpu())
l_samples.append(x.detach().cpu())
if cut:
x = x[x.var(dim=(2,3))>1e-6].view(-1,1,40,40)
return {'sample': x.detach(), 'l_samples': l_samples, 'l_dynamics': l_dynamics,
'l_drift': l_drift, 'l_diffusion': l_diffusion, 'l_accept': l_accept}
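# A minimal sketch of calling the sampler (added; QuadraticEnergy is a
# hypothetical toy model, not part of the original code base):
import torch

class QuadraticEnergy(torch.nn.Module):
    # E(x) = 0.5 * ||x||^2 per sample, so Langevin samples should approach N(0, I)
    def forward(self, x):
        return 0.5 * (x ** 2).view(len(x), -1).sum(dim=1)

x0 = torch.randn(16, 2)  # 16 chains in 2-D
result = sample_langevin_v2(x0, QuadraticEnergy(), stepsize=0.1, n_steps=100,
                            cut=False)  # cut=True assumes 1x40x40 image tensors
samples = result['sample']  # tensor of shape (16, 2)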
| 23,304
|
async def dump(self, chan, source, msg):
"""
dump the contents of self to a file
for debugging purposes.
"""
if len(msg) < 1:
await out.msg(self, modname, chan, ["need filename"])
return
with open(msg, "w") as f:
pprint.pprint(vars(self), stream=f)
pprint.pprint("\n\n\n", stream=f)
pprint.pprint(dir(self), stream=f)
await out.msg(self, modname, chan, ["done"])
| 23,305
|
def fpoly(x, m):
"""Compute the first `m` simple polynomials.
Parameters
----------
x : array-like
Compute the simple polynomials at these abscissa values.
m : :class:`int`
The number of simple polynomials to compute. For example, if
:math:`m = 3`, :math:`x^0`, :math:`x^1` and
:math:`x^2` will be computed.
Returns
-------
:class:`numpy.ndarray`
"""
if isinstance(x, np.ndarray):
n = x.size
else:
n = 1
if m < 1:
raise ValueError('Order of polynomial must be at least 1.')
try:
dt = x.dtype
except AttributeError:
dt = np.float64
leg = np.ones((m, n), dtype=dt)
if m >= 2:
leg[1, :] = x
if m >= 3:
for k in range(2, m):
leg[k, :] = leg[k-1, :] * x
return leg
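# A short worked example (added for illustration):
import numpy as np

fpoly(np.array([1.0, 2.0, 3.0]), 3)
# array([[1., 1., 1.],
#        [1., 2., 3.],
#        [1., 4., 9.]])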
| 23,306
|
def get_custom_logger(context):
""" Customizable template for creating a logger.
A possible improvement would be to pass the format and date format in as parameters.
"""
# Initialize Custom Logging
# Timestamps with logging assist debugging algorithms
# With long execution times
manifest = context.gear_dict['manifest_json']
# Set suite (default to flywheel)
try:
suite = manifest['custom']['flywheel']['suite']
except KeyError:
suite = 'flywheel'
# Set gear_name
gear_name = manifest['name']
log_name = '/'.join([suite, gear_name])
log_level = logging.INFO
# Tweak the formatting
fmt = '%(asctime)s.%(msecs)03d %(levelname)-8s [%(name)s %(funcName)s()]: %(message)s'
dtfmt = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(level=log_level, format=fmt, datefmt=dtfmt)
log = logging.getLogger(log_name)
log.critical('{} log level is {}'.format(log_name, log_level))
return log
| 23,307
|
def get_torch_core_binaries(module):
"""Return required files from the torch folders.
Notes:
So far only tested for Windows. Requirements for other platforms
are unknown.
"""
binaries = []
torch_dir = module.getCompileTimeDirectory()
extras = os.path.join(torch_dir, "lib")
if os.path.isdir(extras):
for f in os.listdir(extras):
# apart from shared libs, also the C header files are required!
if f.endswith((".dll", ".so", ".h")) or ".so." in f:
item = os.path.join(extras, f)
if os.path.isfile(item):
binaries.append((item, "."))
# this folder exists in the Linux version
extras = os.path.join(torch_dir, "bin")
if os.path.isdir(extras):
for f in os.listdir(extras):
item = os.path.join(extras, f)
if os.path.isfile(item):
binaries.append((item, "."))
# this folder exists in the Linux version
extras = os.path.join(torch_dir, "include")
if os.path.isdir(extras):
for root, _, files in os.walk(extras):
for f in files:
item = os.path.join(root, f)
if os.path.isfile(item):
binaries.append((item, "."))
return binaries
| 23,308
|
def _function_fullname(f):
"""Return the full name of the callable `f`, including also its module name."""
function, _ = getfunc(f) # get the raw function also for OOP methods
if not function.__module__: # At least macros defined in the REPL have `__module__=None`.
return function.__qualname__
return f"{function.__module__}.{function.__qualname__}"
| 23,309
|
def dists2centroids_numpy(a):
"""
:param a: dist ndarray, shape = (*, h, w, 4=(t, r, b, l))
:return: Box ndarray, shape = (*, h, w, 4=(cx, cy, w, h))
"""
return corners2centroids_numpy(dists2corners_numpy(a))
| 23,310
|
def heatmap(data_df, figsize=None, cmap="Blues", heatmap_kw=None, gridspec_kw=None):
""" Plot a residue matrix as a color-encoded matrix.
Parameters
----------
data_df : :class:`pandas.DataFrame`
A residue matrix produced with :func:`~luna.analysis.residues.generate_residue_matrix`.
figsize : tuple, optional
Size (width, height) of a figure in inches.
cmap : str, iterable of str
The mapping from data values to color space. The default value is 'Blues'.
heatmap_kw : dict, optional
Keyword arguments for :func:`seaborn.heatmap`.
gridspec_kw : dict, optional
Keyword arguments for :class:`matplotlib.gridspec.GridSpec`.
Used only if the residue matrix (``data_df``) contains interactions.
Returns
-------
: :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray` of :class:`matplotlib.axes.Axes`
"""
data_df = data_df.reset_index()
heatmap_kw = heatmap_kw or {}
gridspec_kw = gridspec_kw or {}
interactions = None
if "interaction" in data_df.columns:
interactions = sorted(data_df["interaction"].unique())
max_value = data_df[data_df.columns[2:]].max().max()
else:
max_value = data_df[data_df.columns[1:]].max().max()
if not interactions:
data_df.set_index('entry', inplace=True)
fig = plt.figure(figsize=figsize)
ax = sns.heatmap(data_df, cmap=cmap, vmax=max_value, vmin=0, **heatmap_kw)
ax.set_xlabel("")
ax.set_ylabel("")
return ax
else:
ncols = 3
if "ncols" in gridspec_kw:
ncols = gridspec_kw["ncols"]
del gridspec_kw["ncols"]
nrows = math.ceil(len(interactions) / ncols)
fig, axs = plt.subplots(nrows, ncols, figsize=figsize, gridspec_kw=gridspec_kw)
row, col = 0, 0
for i, interaction in enumerate(interactions):
df = data_df[data_df["interaction"] == interaction].copy()
df.drop(columns="interaction", inplace=True)
df.set_index('entry', inplace=True)
g = sns.heatmap(df, cmap=cmap, vmax=max_value, vmin=0, ax=axs[row][col], **heatmap_kw)
g.set_title(interaction)
g.set_xlabel("")
g.set_ylabel("")
col += 1
if col == ncols:
row += 1
col = 0
if len(interactions) < nrows * ncols:
diff = (nrows * ncols) - len(interactions)
for i in range(1, diff + 1):
axs[-1][-1 * i].axis('off')
return axs
| 23,311
|
def combined_loss(x, reconstructed_x, mean, log_var, args):
"""
MSE loss for reconstruction, KLD loss as per VAE.
Also outputs the dimension-wise (element-wise) RCL and KLD.
"""
# First, binary data
loss1 = torch.nn.BCEWithLogitsLoss(size_average=False)
loss1_per_element = torch.nn.BCEWithLogitsLoss(
size_average=False,
reduce=False
)
binary_range = args.binary_real_one_hot_parameters['binary_range']
reconstructed_x1 = reconstructed_x[:, binary_range[0]: binary_range[1]]
x1 = x[:, binary_range[0]: binary_range[1]]
RCL1 = loss1(reconstructed_x1, x1)
RCL1_per_element = loss1_per_element(reconstructed_x1, x1)
# Next, real data
loss2 = torch.nn.MSELoss(size_average=False)
loss2_per_element = torch.nn.MSELoss(size_average=False, reduce=False)
real_range = args.binary_real_one_hot_parameters['real_range']
reconstructed_x2 = reconstructed_x[:, real_range[0]: real_range[1]]
x2 = x[:, real_range[0]: real_range[1]]
RCL2 = loss2(reconstructed_x2, x2)
RCL2_per_element = loss2_per_element(reconstructed_x2, x2)
# Next, one-hot data
loss3 = torch.nn.CrossEntropyLoss(size_average=True)
loss3_per_element = torch.nn.CrossEntropyLoss(
size_average=True,
reduce=False
)
one_hot_range = args.binary_real_one_hot_parameters['one_hot_range']
reconstructed_x3 = reconstructed_x[:, one_hot_range[0]: one_hot_range[1]]
x3 = x[:, one_hot_range[0]: one_hot_range[1]]
# This has 3 one-hot's. lets split it up
x3_1 = x3[:, :19]
x3_2 = x3[:, 19:19 + 19]
x3_3 = x3[:, 19+19:]
reconstructed_x3_1 = reconstructed_x3[:, :19]
reconstructed_x3_2 = reconstructed_x3[:, 19:19 + 19]
reconstructed_x3_3 = reconstructed_x3[:, 19+19:]
_, labels1 = x3_1.max(dim=1)
_, labels2 = x3_2.max(dim=1)
_, labels3 = x3_3.max(dim=1)
# print(labels.size(), reconstructed_x3.size(), x3.size())
RCL3_1 = loss3(reconstructed_x3_1, labels1.long())
RCL3_per_element_1 = loss3_per_element(reconstructed_x3_1, labels1.long())
RCL3_2 = loss3(reconstructed_x3_2, labels2.long())
RCL3_per_element_2 = loss3_per_element(reconstructed_x3_2, labels2.long())
RCL3_3 = loss3(reconstructed_x3_3, labels3.long())
RCL3_per_element_3 = loss3_per_element(reconstructed_x3_3, labels3.long())
KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
KLD_per_element = -0.5 * (1 + log_var - mean.pow(2) - log_var.exp())
RCL = RCL1 + RCL2 + RCL3_1 + RCL3_2 + RCL3_3
RCL_per_element = torch.cat(
(
RCL1_per_element,
RCL2_per_element,
RCL3_per_element_1.view([-1, 1]),
RCL3_per_element_2.view([-1, 1]),
RCL3_per_element_3.view([-1, 1])
),
1
)
return RCL + args.beta_vae*KLD, RCL, KLD, RCL_per_element, KLD_per_element
| 23,312
|
def isfloat(string: str) -> bool:
"""
This function receives a string and returns whether it can be parsed as a float.
:param str string: The string to check.
:return: A boolean representing if the string is a float.
:rtype: bool
"""
try:
float(string)
return True
except (ValueError, TypeError):
return False
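# Example behaviour (added for illustration):
isfloat("3.14")   # True
isfloat("1e-5")   # True
isfloat("abc")    # False
isfloat(None)     # False (the TypeError is caught)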
| 23,313
|
def parse_path_kvs(file_path):
"""
Find all key-value pairs in a file path;
the pattern is *_KEY=VALUE_*.
"""
parser = re.compile("(?<=[/_])[a-z0-9]+=[a-zA-Z0-9]+[.]?[0-9]*(?=[_/.])")
kvs = parser.findall(file_path)
kvs = [kv.split("=") for kv in kvs]
return {kv[0]: to_number(kv[1]) for kv in kvs}
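# Usage sketch (added; assumes the module-level `to_number` helper converts
# numeric strings to int/float):
parse_path_kvs("results/lr=0.01_epochs=20.csv")
# -> {'lr': 0.01, 'epochs': 20}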
| 23,314
|
def user_count_by_type(utype: str) -> int:
"""Returns the total number of users that match a given type"""
return get_count('users', 'type', (utype.lower(),))
| 23,315
|
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
| 23,316
|
def create_stellar_deposit(transaction_id):
"""Create and submit the Stellar transaction for the deposit."""
transaction = Transaction.objects.get(id=transaction_id)
# We can assume transaction has valid stellar_account, amount_in, and asset
# because this task is only called after those parameters are validated.
stellar_account = transaction.stellar_account
payment_amount = transaction.amount_in - transaction.amount_fee
asset = transaction.asset.name
# If the given Stellar account does not exist, create
# the account with at least enough XLM for the minimum
# reserve and a trust line (recommended 2.01 XLM), update
# the transaction in our internal database, and return.
address = Address(
stellar_account,
network=settings.STELLAR_NETWORK,
horizon_uri=settings.HORIZON_URI,
)
try:
address.get()
except HorizonError as address_exc:
# 404 code corresponds to Resource Missing.
if address_exc.status_code != 404:
return
starting_balance = settings.ACCOUNT_STARTING_BALANCE
builder = Builder(
secret=settings.STELLAR_ACCOUNT_SEED,
horizon_uri=settings.HORIZON_URI,
network=settings.STELLAR_NETWORK,
)
builder.append_create_account_op(
destination=stellar_account,
starting_balance=starting_balance,
source=settings.STELLAR_ACCOUNT_ADDRESS,
)
builder.sign()
try:
builder.submit()
except HorizonError:
return
transaction.status = Transaction.STATUS.pending_trust
transaction.save()
return
# If the account does exist, deposit the desired amount of the given
# asset via a Stellar payment. If that payment succeeds, we update the
# transaction to completed at the current time. If it fails due to a
# trustline error, we update the database accordingly. Else, we do not update.
builder = Builder(
secret=settings.STELLAR_ACCOUNT_SEED,
horizon_uri=settings.HORIZON_URI,
network=settings.STELLAR_NETWORK,
)
builder.append_payment_op(
destination=stellar_account,
asset_code=asset,
asset_issuer=settings.STELLAR_ACCOUNT_ADDRESS,
amount=str(payment_amount),
)
builder.sign()
try:
response = builder.submit()
# Functional errors at this stage are Horizon errors.
except HorizonError as exception:
if TRUSTLINE_FAILURE_XDR not in exception.message:
return
transaction.status = Transaction.STATUS.pending_trust
transaction.save()
return
# If the payment result is anything other than success, do not update
# the transaction. Otherwise, mark it as completed below.
if response["result_xdr"] != SUCCESS_XDR:
return
transaction.stellar_transaction_id = response["hash"]
transaction.status = Transaction.STATUS.completed
transaction.completed_at = now()
transaction.status_eta = 0 # No more status change.
transaction.amount_out = payment_amount
transaction.save()
| 23,317
|
def test_parameter_file_proper_toml():
"""Tells you what line of the TOML has an error"""
ll = pkg_resources.resource_string("cascade.executor", "data/parameters.toml").decode().split("\n")
for i in range(1, len(ll)):
try:
toml.loads("".join(ll[:i]))
except toml.TomlDecodeError:
assert False, f"failed on line {i}: {ll[i-1]}"
| 23,318
|
def test_getSBMLFromBiomodelsURN2():
""" Check that model can be loaded in roadrunner.
:return:
"""
urn = 'urn:miriam:biomodels.db:BIOMD0000000139'
sbml = temiriam.getSBMLFromBiomodelsURN(urn)
print("*" * 80)
print(type(sbml))
print("*" * 80)
print(sbml)
print("*" * 80)
r = roadrunner.RoadRunner(sbml)
assert r is not None
| 23,319
|
def node_label(label, number_of_ports, debug=None):
""" generate the HTML-like label
<TABLE ALIGN="CENTER"><TR><TD COLSPAN="2">name</TD></TR>
<TR>
<TD PORT="odd">odd</TD>
<TD PORT="even">even</TD>
</TR>
singleport:
<TR>
<TD PORT="port">port</TD>
</TR>
return a string
"""
long_string = []
# name = re.sub(r"[;: ]+", "\\\\n", label) # LF do not work in HTML-like
name = re.sub(r'[;: ]+', ' ', label)
port_range = range(1, number_of_ports + 1)
long_string.append('<<TABLE ALIGN="CENTER">')
if number_of_ports % 2 == 1:
long_string.extend(['<TR>', '<TD>', name, '</TD>', '</TR>'])
long_string.append('<TR>')
str_single = '<TD PORT="' + str(number_of_ports) + '">' + str(number_of_ports) + '</TD>'
long_string.append(str_single)
long_string.append('</TR>')
else:
long_string.extend(['<TR>', '<TD COLSPAN="2">', name, '</TD>', '</TR>'])
for i in range(number_of_ports // 2):
long_string.append('<TR>')
odd = i * 2 + 1
str_odd = '<TD PORT="' + str(odd) + '">' + str(odd).zfill(2) + '</TD>'
long_string.append(str_odd)
even = i * 2 + 2
str_even = '<TD PORT="' + str(even) + '">' + str(even).zfill(2) + '</TD>'
long_string.append(str_even)
long_string.append('</TR>')
long_string.append('</TABLE>>')
return ''.join([str(elem) for elem in long_string])
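# For illustration (added), a two-port label renders as a single string:
node_label("switch 1", 2)
# -> '<<TABLE ALIGN="CENTER"><TR><TD COLSPAN="2">switch 1</TD></TR>'
#    '<TR><TD PORT="1">01</TD><TD PORT="2">02</TD></TR></TABLE>>'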
| 23,320
|
def grouperElements(liste, function=len):
"""
Function that groups elements according to the key function given to it.
Thus, for the kalaba as well as for the graphemes, we will need
the length.
"""
lexique=[]
data=sorted(liste, key=function)
for k,g in groupby(data, function):
lexique.append(list(g))
return lexique
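# Example of grouping by length, the default key function (added):
grouperElements(["a", "bb", "cc", "d"])
# -> [['a', 'd'], ['bb', 'cc']]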
| 23,321
|
def test_eofs_matches_sklearn():
"""Test result from eofs routine matches reference implementation."""
x = np.random.uniform(size=(100, 20))
k = 10
eofs_result = rdu.eofs(x, n_modes=k)
model = sd.PCA(n_components=k)
pcs = model.fit_transform(x)
assert np.allclose(model.components_, eofs_result['EOFs'].data)
assert np.allclose(pcs, eofs_result['PCs'].data)
assert np.allclose(
model.explained_variance_ratio_, eofs_result['explained_var'].data)
| 23,322
|
def SensorLocation_Cast(*args):
"""
Cast(BaseObject o) -> SensorLocation
SensorLocation_Cast(Seiscomp::Core::BaseObjectPtr o) -> SensorLocation
"""
return _DataModel.SensorLocation_Cast(*args)
| 23,323
|
def fatorial(num=1, show=False):
"""
-> Computes the factorial of a number.
:param num: number whose factorial is computed
:param show: (optional) show the calculation
:return: factorial of num.
"""
print('-=' * 20)
fat = 1
for i in range(num, 0, -1):
fat *= i
if show:
resp = f'{str(num)}! = '
while num > 1:
resp += f'{str(num)} x '
num -= 1
resp += f'{str(num)} = {str(fat)}'
return resp
else:
return fat
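# Usage example (added for illustration):
fatorial(5)             # prints the '-=' separator line and returns 120
fatorial(5, show=True)  # returns '5! = 5 x 4 x 3 x 2 x 1 = 120'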
| 23,324
|
def test_plotting_with_each_graph_data_property(my_outdir):
# Note: All outputs were inspected manually, all bugs were resolved and all shortcomings
# documented in the docstrings and below here. This becomes necessary again in case of
# major code changes and can hardly be automated (with reasonable effort).
"""
1. **graph metadata**
- arrow_color: d3, vis FAILS (not supported), three
- arrow_size: d3, vis, three
- background_color: d3, vis, three
- node_color: d3, vis, three
- node_opacity: d3, vis FAILS (not supported), three
- node_size: d3, vis, three
- node_shape: d3, vis (if no image), three
- node_border_color: d3, vis, three FAILS (no border used)
- node_border_size: d3, vis, three FAILS (no border used)
- node_label_color: d3, vis, three
- node_label_size: d3, vis, three
- node_hover: d3, vis, three
- node_click: d3, vis, three
- node_image: d3, vis, three
- node_x: d3, vis, three
- node_y: d3, vis, three
- node_z: three
- edge_color: d3, vis, three
- edge_opacity: d3, vis FAILS (not supported), three
- edge_size: d3, vis, three
- edge_label_color: d3, vis, three FAILS (no labels used)
- edge_label_size: d3, vis, three FAILS (no labels used)
- edge_hover: d3, vis, three
- edge_click: d3, vis, three
2. **node metadata**
- color: d3, vis, three
- opacity: d3, vis FAILS (not supported), three
- size: d3, vis, three
- shape: d3, vis (if no image), three
- border_color: d3, vis, three FAILS (no border used)
- border_size: d3, vis, three FAILS (no border used)
- label_color: d3, vis, three
- label_size: d3, vis, three
- hover: d3, vis, three
- click: d3, vis, three
- image: d3, vis, three
- x: d3, vis, three
- y: d3, vis, three
- z: three
3. **edge metadata**
- color: d3, vis, three
- opacity: d3, vis FAILS (not supported), three
- size: d3, vis, three
- label_color: d3, vis, three FAILS (no labels)
- label_size: d3, vis, three FAILS (no labels)
- hover: d3, vis, three
- click: d3, vis, three
"""
plotting_functions = [
('d3', gv.d3),
('vis', gv.vis),
('three', gv.three),
]
base_name = 'graph_arg_{}_{}'
# all in one
base_distance = 50.0
data = {
"graph": {
"directed": True,
"metadata": {
"arrow_color": "yellow",
"arrow_size": 30,
"background_color": "lightgray",
"node_color": "red",
"node_opacity": 0.1,
"node_size": 15,
"node_shape": "hexagon",
"node_border_color": "#fff",
"node_border_size": 7,
"node_label_color": "orange",
"node_label_size": 5,
"node_hover": "General node hover",
"node_click": "General node click",
"node_image": gv.convert.image_to_data_url(
os.path.join(shared.IN_DIR, 'rectangle_10x10.png')),
"node_x": 0.0,
"node_y": 0.0,
"node_z": 0.0,
"edge_color": "blue",
"edge_opacity": 0.2,
"edge_size": 4,
"edge_label_color": "blue",
"edge_label_size": 5,
"edge_hover": "General edge hover",
"edge_click": "General edge click",
},
"nodes": [
{"id": 1, "label": "Node 1 special label", "metadata": {
"color": "#ff00ff",
"opacity": 0.75,
"size": 30,
"shape": "rectangle",
"border_color": "#aa00aa",
"border_size": 2,
"label_color": "#ff00ff",
"label_size": 30,
"hover": "Node $id special <span style='color:red'>hover</span> with HTML",
"click": "Node $id special <span style='color:orange'>click</span> with HTML",
"image": gv.convert.image_to_data_url(
os.path.join(shared.IN_DIR, 'rectangle_30x10.png')),
"x": base_distance,
"y": base_distance * 2,
"z": base_distance,
}},
{"id": 2, "label": "node b", "metadata": {
"shape": "circle",
"size": 40,
"image": gv.convert.image_to_data_url(
os.path.join(shared.IN_DIR, 'rectangle_30x10.png')),
"x": base_distance * 2,
"y": base_distance / 2,
}},
{"id": 3},
{"id": 4, "label": "node d", "metadata": {
"size": 70,
"x": base_distance * 4,
"y": base_distance,
}},
{"id": 5, "label": "node e", "metadata": {
"shape": "hexagon",
"image": gv.convert.image_to_data_url(
os.path.join(shared.IN_DIR, 'rectangle_10x30.png')),
"x": base_distance * 5,
"y": base_distance * 3,
}},
],
"edges": [
{"source": 1, "target": 2, "label": "Edge 1 special label", "metadata": {
"color": "#ff00ff",
"opacity": 0.75,
"size": 1,
"label_color": "#ff00ff",
"label_size": 30,
"hover": "Edge $id special <span style='color:blue'>hover</span> with HTML",
"click": "Edge $id special <span style='color:green'>click</span> with HTML",
}},
{"source": 2, "target": 3},
{"source": 3, "target": 4},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data, show_edge_label=True)
filepath = os.path.join(my_outdir, base_name.format('all', func_name))
shared.export_all_available_formats(fig, filepath)
# background
data = {
"graph": {
"directed": True,
"metadata": {"background_color": "lightgray"},
"nodes": [
{"id": 1, "label": "node a", "metadata": {"size": 30}},
{"id": 2, "label": "node b", "metadata": {"size": 40}},
{"id": 3},
{"id": 4, "label": "node d", "metadata": {"size": 4}},
{"id": 5, "label": "node e", "metadata": {"shape": "hexagon"}},
],
"edges": [
{"source": 1, "target": 2},
{"source": 2, "target": 3},
{"source": 3, "target": 4},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(my_outdir, base_name.format('background', func_name))
shared.export_all_available_formats(fig, filepath)
# node label
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1, "label": "node a", "metadata": {"size": 30}},
{"id": 2, "label": "node b", "metadata": {"size": 40}},
{"id": 3},
{"id": 4, "label": "node d", "metadata": {"size": 4}},
{"id": 5, "label": "node e", "metadata": {"shape": "hexagon"}},
],
"edges": [
{"source": 1, "target": 2},
{"source": 2, "target": 3},
{"source": 3, "target": 4},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(my_outdir, base_name.format('node_label', func_name))
shared.export_all_available_formats(fig, filepath)
# node and edge label
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1, "label": "node a"},
{"id": 2, "label": "node b"},
{"id": 3},
{"id": 4, "label": "node d"},
{"id": 5, "label": "node e"},
],
"edges": [
{"source": 1, "target": 2, "label": "e12"},
{"source": 2, "target": 3},
{"source": 3, "target": 4, "label": "e34"},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(
my_outdir, base_name.format('node_label_and_edge_label', func_name))
shared.export_all_available_formats(fig, filepath)
# node color
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1, "metadata": {"color": "#f00"}},
{"id": 2, "metadata": {"color": "green"}},
{"id": 3},
{"id": 4, "metadata": {"color": "#0000ff"}},
{"id": 5, "metadata": {"color": "WRONG"}},
],
"edges": [
{"source": 1, "target": 2},
{"source": 2, "target": 3},
{"source": 3, "target": 4},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(my_outdir, base_name.format('node_color', func_name))
shared.export_all_available_formats(fig, filepath)
# node opacity
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1, "metadata": {"opacity": 0.1}},
{"id": 2, "metadata": {"opacity": 0.5}},
{"id": 3},
{"id": 4, "metadata": {"opacity": 1.0}},
{"id": 5, "metadata": {"opacity": "WRONG"}},
],
"edges": [
{"source": 1, "target": 2},
{"source": 2, "target": 3},
{"source": 3, "target": 4},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(my_outdir, base_name.format('node_opacity', func_name))
shared.export_all_available_formats(fig, filepath)
# node size
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1, "metadata": {"size": 20}},
{"id": 2, "metadata": {"size": 30}},
{"id": 3},
{"id": 4, "metadata": {"size": 4}},
{"id": 5, "metadata": {"size": "WRONG"}},
],
"edges": [
{"source": 1, "target": 2},
{"source": 2, "target": 3},
{"source": 3, "target": 4},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(my_outdir, base_name.format('node_size', func_name))
shared.export_all_available_formats(fig, filepath)
# node shape
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1, "metadata": {"shape": "circle"}},
{"id": 2, "metadata": {"shape": "rectangle"}},
{"id": 3},
{"id": 4, "metadata": {"shape": "hexagon"}},
{"id": 5, "metadata": {"shape": "WRONG"}},
],
"edges": [
{"source": 1, "target": 2},
{"source": 2, "target": 3},
{"source": 3, "target": 4},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(my_outdir, base_name.format('node_shape', func_name))
shared.export_all_available_formats(fig, filepath)
# node border color and border size
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1,
"metadata": {"shape": "hexagon", "border_color": "red", "border_size": 3}},
{"id": 2,
"metadata": {"shape": "circle", "border_color": "green", "border_size": 3}},
{"id": 3,
"metadata": {"border_color": "green", "border_size": 3}},
{"id": 4,
"metadata": {"shape": "rectangle", "border_color": "blue", "border_size": 3}},
{"id": 5,
"metadata": {"border_color": "WRONG", "border_size": "WRONG"}},
{"id": 6},
],
"edges": [
{"source": 1, "target": 2},
{"source": 2, "target": 3},
{"source": 3, "target": 4},
{"source": 4, "target": 1},
{"source": 1, "target": 5},
{"source": 1, "target": 6},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(
my_outdir, base_name.format('node_border_color_and_size', func_name))
shared.export_all_available_formats(fig, filepath)
# node image
png10x10 = gv.convert.image_to_data_url(os.path.join(shared.IN_DIR, 'rectangle_10x10.png'))
png30x10 = gv.convert.image_to_data_url(os.path.join(shared.IN_DIR, 'rectangle_30x10.png'))
png10x30 = gv.convert.image_to_data_url(os.path.join(shared.IN_DIR, 'rectangle_10x30.png'))
png100x50 = gv.convert.image_to_data_url(os.path.join(shared.IN_DIR, 'rectangle_100x50.png'))
svg10x10 = gv.convert.image_to_data_url(os.path.join(shared.IN_DIR, 'rectangle_10x10.svg'))
svg30x10 = gv.convert.image_to_data_url(os.path.join(shared.IN_DIR, 'rectangle_30x10.svg'))
svg10x30 = gv.convert.image_to_data_url(os.path.join(shared.IN_DIR, 'rectangle_10x30.svg'))
svg100x50 = gv.convert.image_to_data_url(os.path.join(shared.IN_DIR, 'rectangle_100x50.svg'))
data = {
"graph": {
"directed": True,
"nodes": {
1: {"metadata": {"shape": "circle", "image": png10x10}},
2: {"metadata": {"shape": "rectangle", "image": png10x10}},
3: {"metadata": {"shape": "hexagon", "image": png10x10}},
4: {"metadata": {"shape": "circle", "image": png30x10}},
5: {"metadata": {"shape": "rectangle", "image": png30x10}},
6: {"metadata": {"shape": "hexagon", "image": png30x10}},
7: {"metadata": {"shape": "circle", "image": png10x30}},
8: {"metadata": {"shape": "rectangle", "image": png10x30}},
9: {"metadata": {"shape": "hexagon", "image": png10x30}},
10: {"metadata": {"shape": "circle", "image": png100x50}},
11: {"metadata": {"shape": "rectangle", "image": png100x50}},
12: {"metadata": {"shape": "hexagon", "image": png100x50}},
13: {"metadata": {"shape": "circle", "image": svg10x10}},
14: {"metadata": {"shape": "rectangle", "image": svg10x10}},
15: {"metadata": {"shape": "hexagon", "image": svg10x10}},
16: {"metadata": {"shape": "circle", "image": svg30x10}},
17: {"metadata": {"shape": "rectangle", "image": svg30x10}},
18: {"metadata": {"shape": "hexagon", "image": svg30x10}},
19: {"metadata": {"shape": "circle", "image": svg10x30}},
20: {"metadata": {"shape": "rectangle", "image": svg10x30}},
21: {"metadata": {"shape": "hexagon", "image": svg10x30}},
22: {"metadata": {"shape": "circle", "image": svg100x50}},
23: {"metadata": {"shape": "rectangle", "image": svg100x50}},
24: {"metadata": {"shape": "hexagon", "image": svg100x50}},
25: {"metadata": {"shape": "circle", "image": 'WRONG'}},
26: {"metadata": {"shape": "rectangle", "image": 'WRONG'}},
27: {"metadata": {"shape": "hexagon", "image": 'WRONG'}},
28: {"metadata": {"image": 'WRONG'}},
29: {"metadata": {"shape": "WRONG", "image": svg30x10}},
},
"edges": [
{"source": 1, "target": 2},
{"source": 2, "target": 3},
{"source": 3, "target": 1},
{"source": 1, "target": 13},
{"source": 4, "target": 5},
{"source": 5, "target": 6},
{"source": 6, "target": 4},
{"source": 4, "target": 16},
{"source": 7, "target": 8},
{"source": 8, "target": 9},
{"source": 9, "target": 7},
{"source": 7, "target": 19},
{"source": 10, "target": 11},
{"source": 11, "target": 12},
{"source": 12, "target": 10},
{"source": 10, "target": 22},
{"source": 13, "target": 14},
{"source": 14, "target": 15},
{"source": 15, "target": 13},
{"source": 16, "target": 17},
{"source": 17, "target": 18},
{"source": 18, "target": 16},
{"source": 19, "target": 20},
{"source": 20, "target": 21},
{"source": 21, "target": 19},
{"source": 22, "target": 23},
{"source": 23, "target": 24},
{"source": 24, "target": 22},
{"source": 25, "target": 26},
{"source": 26, "target": 27},
{"source": 27, "target": 28},
{"source": 28, "target": 29},
{"source": 29, "target": 25},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(
my_outdir, base_name.format('node_image_and_shape', func_name))
shared.export_all_available_formats(fig, filepath)
# node and edge hover
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1, "metadata": {
"hover": ("Test node 1 hover which is an example of a long text that goes on"
+ " and on"*50
+ "andon"*200)}},
{"id": 2, "metadata": {"hover": "Test node 2 hover"}},
{"id": 3},
{"id": 4, "metadata": {"hover": "Test node 4 hover"}},
{"id": 5},
],
"edges": [
{"source": 1, "target": 2, "metadata": {"hover": "Test edge (1,2) hover"}},
{"source": 2, "target": 3, "metadata": {"hover": "Test edge (2,3) hover"}},
{"source": 3, "target": 4},
{"source": 4, "target": 1, "metadata": {"hover": "Test edge (4,1) hover"}},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(
my_outdir, base_name.format('node_and_edge_hover', func_name))
shared.export_all_available_formats(fig, filepath)
# node and edge click
data = {
"graph": {
"directed": True,
"nodes": [
{"id": 1, "metadata": {"click": "Test node 1 click"}},
{"id": 2, "metadata": {"click": "Test node 2 click"}},
{"id": 3},
{"id": 4, "metadata": {
"click": "Test node 4 click <ul><li>a: 1</li><li>e: 5</li></ul>"}},
{"id": 5},
],
"edges": [
{"source": 1, "target": 2,
"metadata": {"click": "Test edge (1,2) click"}},
{"source": 2, "target": 3,
"metadata": {"click": "Test edge (2,3) click"}},
{"source": 3, "target": 4},
{"source": 4, "target": 1,
"metadata":
{"click": "Test edge (4,1) click <ul><li>a: 1</li><li>b: 2</li></ul>"}},
{"source": 1, "target": 5},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
filepath = os.path.join(
my_outdir, base_name.format('node_and_edge_click', func_name))
shared.export_all_available_formats(fig, filepath)
# ----------------------------------------------------------------------------------------
# edge label
for directed in [True, False]:
data = {
"graph": {
"directed": directed,
"nodes": [
{"id": 1},
{"id": 2},
{"id": 3},
{"id": 4},
{"id": 5},
],
"edges": [
{"source": 1, "target": 2, "label": "e12"},
{"source": 2, "target": 3, "label": "e23"},
{"source": 3, "target": 4},
{"source": 4, "target": 1, "label": "e41"},
{"source": 1, "target": 5, "label": 42},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
suffix = 'directed' if directed else 'undirected'
filepath = os.path.join(
my_outdir, base_name.format('edge_label_'+suffix, func_name))
shared.export_all_available_formats(fig, filepath)
# edge color
for directed in [True, False]:
data = {
"graph": {
"directed": directed,
"nodes": [
{"id": 1},
{"id": 2},
{"id": 3},
{"id": 4},
{"id": 5},
],
"edges": [
{"source": 1, "target": 2, "metadata": {"color": "#f00"}},
{"source": 2, "target": 3, "metadata": {"color": "blue"}},
{"source": 3, "target": 4},
{"source": 4, "target": 1, "metadata": {"color": "#00ff00"}},
{"source": 1, "target": 5, "metadata": {"color": "WRONG"}},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
suffix = 'directed' if directed else 'undirected'
filepath = os.path.join(
my_outdir, base_name.format('edge_color_'+suffix, func_name))
shared.export_all_available_formats(fig, filepath)
# edge opacity
for directed in [True, False]:
data = {
"graph": {
"directed": directed,
"nodes": [
{"id": 1},
{"id": 2},
{"id": 3},
{"id": 4},
{"id": 5},
],
"edges": [
{"source": 1, "target": 2, "metadata": {"opacity": 0.1}},
{"source": 2, "target": 3, "metadata": {"opacity": 0.5}},
{"source": 3, "target": 4},
{"source": 4, "target": 1, "metadata": {"opacity": 1.0}},
{"source": 1, "target": 5, "metadata": {"opacity": "WRONG"}},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
suffix = 'directed' if directed else 'undirected'
filepath = os.path.join(
my_outdir, base_name.format('edge_opacity_'+suffix, func_name))
shared.export_all_available_formats(fig, filepath)
# edge size
for directed in [True, False]:
data = {
"graph": {
"directed": directed,
"nodes": [
{"id": 1},
{"id": 2},
{"id": 3},
{"id": 4},
{"id": 5},
],
"edges": [
{"source": 1, "target": 2, "metadata": {"size": 1}},
{"source": 2, "target": 3, "metadata": {"size": 2}},
{"source": 3, "target": 4},
{"source": 4, "target": 1, "metadata": {"size": 3}},
{"source": 1, "target": 5, "metadata": {"size": "WRONG"}},
]
}
}
for func_name, func in plotting_functions:
fig = func(data)
suffix = 'directed' if directed else 'undirected'
filepath = os.path.join(
my_outdir, base_name.format('edge_size_'+suffix, func_name))
shared.export_all_available_formats(fig, filepath)
| 23,325
|
def read_rudder_config(path=None):
"""Reads the servo configuration from config.yml and returns a matching servo."""
if path is None:
path = os.path.dirname(os.path.abspath(__file__))
with open(path + "/config.yml", "r") as yml:
conf = yaml.full_load(yml)
rudder_config = conf["rudder"]
return rudder_config
| 23,326
|
def test_simple_line_parse():
"""Simple line parse"""
print(test_simple_line_parse.__doc__)
line = " int i = 0;"
parsed_line = Sea5kgCppLintLineParser(line, 'file', 0)
assert parsed_line.get_line() == line
assert parsed_line.get_filename() == 'file'
assert parsed_line.get_number_of_line() == 0
literals = parsed_line.get_literals()
expected_literals = [' ', 'int', ' ', 'i', ' ', '=', ' ', '0', ';']
print(literals)
assert len(literals) == len(expected_literals)
assert literals == expected_literals
| 23,327
|
def linear_trend(series=None, coeffs=None, index=None, x=None, median=False):
"""Get a series of points representing a linear trend through `series`
First computes the linear regression, then evaluates it at each
date of `series.index`.
Args:
series (pandas.Series): data with DatetimeIndex as the index.
coeffs (array or List): [slope, intercept], result from np.polyfit
index (DatetimeIndex, list[date]): Optional. If not passing series, can pass
the DatetimeIndex or list of dates to evaluate coeffs at.
Converts to numbers using `matplotlib.dates.date2num`
x (ndarray-like): directly pass the points to evaluate the poly1d
Returns:
Series: a line, equal length to arr, with same index as `series`
"""
if coeffs is None:
coeffs = fit_line(series, median=median)
if index is None and x is None:
index = series.dropna().index
if x is None:
x = mdates.date2num(index)
poly = np.poly1d(coeffs)
linear_points = poly(x)
return pd.Series(linear_points, index=index)
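# A small sketch of the coefficient-only path (added; passing `coeffs` and `x`
# directly avoids the `fit_line` helper and the DatetimeIndex conversion):
import numpy as np

linear_trend(coeffs=[2.0, 1.0], x=np.array([0.0, 1.0, 2.0]))
# -> pandas Series with values [1.0, 3.0, 5.0], i.e. poly1d evaluates 2*x + 1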
| 23,328
|
def cli(ctx: callable) -> None:
"""DIRBS script to output reports (operator and country) for a given MONTH and YEAR.
Arguments:
ctx: click context (required)
Returns:
None
"""
pass
| 23,329
|
def test_hook_initialization(base_app):
"""Test hook initialization."""
app = base_app
magic_hook = MagicMock()
app.config['INDEXER_BEFORE_INDEX_HOOKS'] = [
magic_hook, 'test_invenio_bulkindexer:_global_magic_hook'
]
ext = InvenioIndexer(app)
with app.app_context():
recid = uuid.uuid4()
record = Record.create({'title': 'Test'}, id_=recid)
db.session.commit()
client_mock = MagicMock()
RecordIndexer(search_client=client_mock, version_type='force').index(
record)
args = (app, )
kwargs = dict(
index=app.config['INDEXER_DEFAULT_INDEX'],
doc_type=app.config['INDEXER_DEFAULT_DOC_TYPE'],
record=record,
json={
'title': 'Test',
'_created': pytz.utc.localize(record.created).isoformat(),
'_updated': pytz.utc.localize(record.updated).isoformat(),
},
)
magic_hook.assert_called_with(*args, **kwargs)
_global_magic_hook.assert_called_with(*args, **kwargs)
client_mock.index.assert_called_with(
id=str(recid),
version=0,
version_type='force',
index=app.config['INDEXER_DEFAULT_INDEX'],
doc_type=app.config['INDEXER_DEFAULT_DOC_TYPE'],
body={
'title': 'Test',
'_created': pytz.utc.localize(record.created).isoformat(),
'_updated': pytz.utc.localize(record.updated).isoformat(),
},
)
| 23,330
|
def snapshot_metadata_get(context, snapshot_id):
"""Get all metadata for a snapshot."""
return IMPL.snapshot_metadata_get(context, snapshot_id)
| 23,331
|
def graph_from_tensors(g, is_real=True):
"""
"""
loop_edges = list(nx.selfloop_edges(g))
if len(loop_edges) > 0:
g.remove_edges_from(loop_edges)
if is_real:
subgraph = (g.subgraph(c) for c in nx.connected_components(g))
g = max(subgraph, key=len)
g = nx.convert_node_labels_to_integers(g)
else:
g = pick_connected_component_new(g)
return g
| 23,332
|
def hook(t):
"""Calculate the progress from download callbacks (For progress bar)"""
def inner(bytes_amount):
t.update(bytes_amount) # Update progress bar
return inner
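# Typical pairing with tqdm and boto3 (a sketch; `s3_client`, `bucket`, `key`,
# `local_path` and `total_bytes` are assumed to be defined elsewhere):
from tqdm import tqdm

with tqdm(total=total_bytes, unit='B', unit_scale=True) as t:
    s3_client.download_file(bucket, key, local_path, Callback=hook(t))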
| 23,333
|
def delete(event, context):
"""
Delete a cfn stack using an assumed role
"""
stack_id = event["PhysicalResourceId"]
if '[$LATEST]' in stack_id:
# No stack was created, so exiting
return stack_id, {}
cfn_client = get_client("cloudformation", event, context)
cfn_client.delete_stack(StackName=stack_id)
return stack_id
| 23,334
|
def _rec_compare(lhs,
rhs,
ignore,
only,
key,
report_mode,
value_cmp_func,
_regex_adapter=RegexAdapter):
"""
Recursive deep comparison implementation
"""
# pylint: disable=unidiomatic-typecheck
lhs_cat = _categorise(lhs)
rhs_cat = _categorise(rhs)
## NO VALS
if ((lhs_cat == Category.ABSENT) or (rhs_cat == Category.ABSENT)) and \
(lhs_cat != Category.CALLABLE) and (rhs_cat != Category.CALLABLE):
return _build_res(
key=key,
match=Match.PASS if lhs_cat == rhs_cat else Match.FAIL,
lhs=fmt(lhs),
rhs=fmt(rhs))
## CALLABLES
if lhs_cat == rhs_cat == Category.CALLABLE:
match = Match.from_bool(lhs == rhs)
return _build_res(
key=key,
match=match,
lhs=(0, 'func', callable_name(lhs)),
rhs=(0, 'func', callable_name(rhs)))
if lhs_cat == Category.CALLABLE:
result, error = compare_with_callable(callable_obj=lhs, value=rhs)
return _build_res(
key=key,
match=Match.from_bool(result),
lhs=(0, 'func', callable_name(lhs)),
rhs='Value: {}, Error: {}'.format(
rhs, error) if error else fmt(rhs))
if rhs_cat == Category.CALLABLE:
result, error = compare_with_callable(callable_obj=rhs, value=lhs)
return _build_res(
key=key,
match=Match.from_bool(result),
lhs='Value: {}, Error: {}'.format(
lhs, error) if error else fmt(lhs),
rhs=(0, 'func', callable_name(rhs)))
## REGEXES
if lhs_cat == rhs_cat == Category.REGEX:
match = _regex_adapter.compare(lhs, rhs)
return _build_res(
key=key,
match=match,
lhs=_regex_adapter.serialize(lhs),
rhs=_regex_adapter.serialize(rhs))
if lhs_cat == Category.REGEX:
match = _regex_adapter.match(regex=lhs, value=rhs)
return _build_res(
key=key,
match=match,
lhs=_regex_adapter.serialize(lhs),
rhs=fmt(rhs))
if rhs_cat == Category.REGEX:
match = _regex_adapter.match(regex=rhs, value=lhs)
return _build_res(
key=key,
match=match,
lhs=fmt(lhs),
rhs=_regex_adapter.serialize(rhs))
## VALUES
if lhs_cat == rhs_cat == Category.VALUE:
response = value_cmp_func(lhs, rhs)
match = Match.from_bool(response)
return _build_res(
key=key,
match=match,
lhs=fmt(lhs),
rhs=fmt(rhs))
## ITERABLE
if lhs_cat == rhs_cat == Category.ITERABLE:
results = []
match = Match.IGNORED
for lhs_item, rhs_item in six.moves.zip_longest(lhs, rhs):
# iterate all elems in both iterable non-mapping objects
result = _rec_compare(
lhs_item,
rhs_item,
ignore,
only,
key=None,
report_mode=report_mode,
value_cmp_func=value_cmp_func)
match = Match.combine(match, result[1])
results.append(result)
# two lists of formatted objects from a
# list of objects with lhs/rhs attributes
lhs_vals, rhs_vals = _partition(results)
return _build_res(
key=key,
match=match,
lhs=(1, lhs_vals),
rhs=(1, rhs_vals))
## DICTS
if lhs_cat == rhs_cat == Category.DICT:
match, results = _cmp_dicts(
lhs, rhs, ignore, only, report_mode, value_cmp_func)
lhs_vals, rhs_vals = _partition(results)
return _build_res(
key=key,
match=match,
lhs=(2, lhs_vals),
rhs=(2, rhs_vals))
## DIFF TYPES -- catch-all for unhandled
# combinations, e.g. VALUE vs ITERABLE
return _build_res(
key=key,
match=Match.FAIL,
lhs=fmt(lhs),
rhs=fmt(rhs))
| 23,335
|
def exists_job_onqueue(queuename, when, hour):
"""
Check if a job is present on queue
"""
scheduler = Scheduler(connection=Redis())
jobs = scheduler.get_jobs()
for job in jobs:
if 'reset_stats_queue' in job.func_name:
args = job.args
if queuename == args[0] and when == args[1] and hour == args[2]:
return True
return False
| 23,336
|
def q_inv_batch_of_sequences(seq):
"""
:param seq: (n_batch x n_frames x 32 x 4)
:return:
"""
n_batch = seq.size(0)
n_frames = seq.size(1)
n_joints = seq.size(2)
seq = seq.reshape((n_batch * n_frames * n_joints, 4))
seq = qinv(seq)
seq = seq.reshape((n_batch, n_frames, n_joints, 4))
return seq
| 23,337
|
def dealer_wins(chips):
"""Player loses chips"""
print("The dealer won.")
chips.lose_bet()
| 23,338
|
def check_policy_enforce(logical_line, filename):
"""Look for uses of nova.policy._ENFORCER.enforce()
Now that policy defaults are registered in code the _ENFORCER.authorize
method should be used. That ensures that only registered policies are used.
Uses of _ENFORCER.enforce could allow unregistered policies to be used, so
this check looks for uses of that method.
N351
"""
msg = ('N351: nova.policy._ENFORCER.enforce() should not be used. '
'Use the authorize() method instead.')
if policy_enforce_re.match(logical_line):
yield (0, msg)
| 23,339
|
def msa_job_space_demand(job_space_demand):
"""
Job space demand aggregated to the MSA.
"""
df = job_space_demand.local
return df.fillna(0).sum(axis=1).to_frame('msa')
| 23,340
|
def get_pdf_cdf_3(corr, bins_pdf, bins_cdf, add_point=True, cdf_bool=True,
checknan=False):
"""
corr is a 3d array, the first dimension are the iterations, the second
dimension is usually the cells
the function gives back the pdf and the cdf
add_point option duplicates the last point
checknan checks if there are any NaNs in the set and returns NaN as the
result for the pdf and cdf instead of the 0 that would otherwise be calculated
"""
N1, N2, N3 = corr.shape
pdfs = np.zeros((N1, N2, len(bins_pdf) - 1))
cdfs = np.zeros((N1, N2, len(bins_cdf) - 1))
for i in range(N1):
pdfs[i], cdfs[i] = get_pdf_cdf_2(corr[i], bins_pdf, bins_cdf,
add_point=False, cdf_bool=False,
checknan=checknan)
if cdf_bool:
cdfs = np.cumsum(cdfs, axis=2)/corr.shape[2]
if add_point:
pdfs = add_point3(pdfs)
cdfs = add_point3(cdfs)
return pdfs, cdfs
| 23,341
|
def unconfigure_route_map(device, route_map_name, permit):
""" unconfigure route map
Args:
device (`obj`): device to execute on
route_map_name (`int`): route map name
permit (`int`): Sequence to insert to existing route-map entry
Return:
None
Raises:
SubCommandFailure
"""
try:
device.configure([
"no route-map {route_map_name} permit {permit}".format(
route_map_name=route_map_name, permit=permit)]
)
except SubCommandFailure as e:
raise SubCommandFailure(
"Failed to unconfigure route map {route_map_name}, Error: {error}"\
.format(route_map_name=route_map_name, error=e
)
)
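# Usage sketch (added; `device` is assumed to be a connected pyATS device object):
unconfigure_route_map(device, "RM_BGP", 10)
# configures: "no route-map RM_BGP permit 10"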
| 23,342
|
def test_wrong_params(param, match):
"""Test Exceptions at check_params function."""
rng = np.random.RandomState(0)
X = rng.rand(5, 2)
with pytest.raises(ValueError, match=match):
bisect_means = BisectingKMeans(n_clusters=3, **param)
bisect_means.fit(X)
| 23,343
|
def create_app():
""" Application factory to create the app and be passed to workers """
app = Flask(__name__)
import logging
logging.basicConfig(
filename='./logs/flask.log',
level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
app.config['SECRET_KEY'] = 'thisisthesecretkeyfortheflaskserver'
#app.config['SESSION_TYPE'] = 'redis'
server_ip = requests.get("http://ipinfo.io/ip").content.decode('utf-8')
graphs = {}
expiration = 300 # 5 minutes
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
return "", 418 # render_template('/index.html', graphs=graphs)
@app.route('/graph/<ID>', methods=['GET', 'POST'])
def graph(ID):
"""
Main graph display page.
If in debug mode, serves raw source files.
"""
return render_template('/graph.html', development=app.config['DEBUG'])
@app.route('/help')
def tutorial():
""" Serve the tutorial page """
return render_template("/help.html", development=app.config['DEBUG'])
@app.route('/src/<path:path>', methods=['GET'])
def source(path):
""" Serve source files in development mode """
if app.config['DEBUG']:
return send_from_directory("src", path)
else:
return "", 418
@app.route('/create_graph', methods=['GET'])
def create_graph():
""" receive graph JSON from external source """
logging.info("Received create_graph request")
logging.info("Number of stored graphs: {}".format(len(list(graphs.keys()))))
# remove expired graphs
for ID in list(graphs.keys()):
try:
if time() - graphs[ID][1] > expiration: # temporary until we implement sessions
logging.info("Removing graph ID: {}".format(ID))
del graphs[ID]
except Exception as e:
logging.error("Problem removing graph from dict: {} {}".format(ID,e))
continue
ID = uuid4().hex # generate random uuid
logging.info("Created id: {}".format(ID))
# store graph in index of all graphs with time created
graphs[ID] = (request.json, time())
logging.info("Stored graph")
# return url to the graph display
url = "http://{}:5000/graph/{}".format(server_ip, ID)
logging.info("Generated URL and returning it: {}".format(url))
return url
@app.route('/get_graph/<ID>')
def get_data(ID):
""" Request graph JSON by ID """
stuff = graphs.get(ID)
if stuff is None:
data = {
"error": "Graph does not exist.",
"message": "The graph (ID: {}) does not exist. If this graph was used previously, it may have expired since.".format(ID)}
return data, 410
return graphs[ID][0]
return app
| 23,344
|
def build_vocab(args, dataset, schema_graphs):
"""
Construct vocabularies.
This function saves to disk:
- text vocab: consists of tokens appeared in the natural language query and schema
- program vocab: consists of tokens appeared in the program
- schema vocab: consists of table and field names from the schema
- world vocab: consists of tokens in the program that does not come from any of the above category
(which likely needed to be inferred from world knowledge)
"""
print('Constructing vocabulary...')
text_tokenize, program_tokenize, _, tu = tok.get_tokenizers(args)
if args.pretrained_transformer:
sql_reserved_vocab = sql_reserved_tokens
else:
sql_reserved_vocab = sql_reserved_tokens_revtok
parsed_programs = load_parsed_sqls(args, augment_with_wikisql=args.augment_with_wikisql)
schema_graphs.lexicalize_graphs(tokenize=text_tokenize, normalized=(args.model_id in [BRIDGE]))
# compute text and program vocab
text_hist, program_hist = collections.defaultdict(int), collections.defaultdict(int)
world_vocab = Vocabulary('world')
for split in ['train', 'dev', 'test']:
if not split in dataset:
continue
data_split = dataset[split]
for i, example in enumerate(data_split):
if isinstance(example, AugmentedText2SQLExample):
continue
schema_graph = schema_graphs.get_schema(example.db_id)
text = example.text
if args.pretrained_transformer:
text_tokens = text_tokenize(text)
else:
text_tokens = text_tokenize(text.lower(), functional_tokens)
for word in text_tokens:
text_hist[word] += 1
for program in example.program_list:
ast, _ = get_ast(program, parsed_programs, args.denormalize_sql, schema_graph)
if ast:
program = ast
program_tokens = program_tokenize(program, omit_from_clause=args.omit_from_clause,
no_join_condition=args.no_join_condition)
for token in program_tokens:
program_hist[token] += 1
if split == 'train':
if not token in text_tokens and not sql_reserved_vocab.contains(token):
world_vocab.index_token(token, in_vocab=True)
if i > 0 and i % 5000 == 0:
print('{} examples processed'.format(i))
if args.pretrained_transformer.startswith('bert') or args.pretrained_transformer == 'table-bert':
text_hist = dict()
for v in tu.tokenizer.vocab:
text_hist[v] = tu.tokenizer.vocab[v]
for v in tu.tokenizer.added_tokens_encoder:
text_hist[v] = tu.tokenizer.convert_tokens_to_ids(v)
schema_lexical_vocab = None
elif args.pretrained_transformer.startswith('roberta'):
text_hist = tu.tokenizer.encoder
schema_lexical_vocab = None
else:
schema_lexical_vocab = schema_graphs.get_lexical_vocab()
export_vocab(text_hist, program_hist, schema_lexical_vocab, world_vocab, args)
| 23,345
|
def create_win_jupyter_console(folders):
"""
create a batch file to start jupyter
@param folders see @see fn create_win_batches
@return operations (list of what was done)
"""
text = ['@echo off',
'set CURRENT2=%~dp0',
'call "%CURRENT2%env.bat"',
'set JUPYTERC=%PYTHON_WINSCRIPTS%\\jupyter-console.exe',
'"%JUPYTERC%" console']
# the command "jupyter console" does not work yet, even though the
# documentation says it should
text = "\n".join(text)
name = os.path.join(folders["config"], "jupyter_console.bat")
with open(name, "w") as f:
f.write(text)
return [('batch', name)]
| 23,346
|
def incidence_matrix(
H, order=None, sparse=True, index=False, weight=lambda node, edge, H: 1
):
"""
A function to generate a weighted incidence matrix from a Hypergraph object,
where the rows correspond to nodes and the columns correspond to edges.
Parameters
----------
H: Hypergraph object
The hypergraph of interest
order: int, optional
Order of interactions to use. If None (default), all orders are used. If int,
must be >= 1.
sparse: bool, default: True
Specifies whether the output matrix is a scipy sparse matrix or a numpy matrix
index: bool, default: False
Specifies whether to output dictionaries mapping the node and edge IDs to indices
weight: lambda function, default=lambda function outputting 1
A function specifying the weight, given a node and edge
Returns
-------
I: numpy.ndarray or scipy csr_matrix
The incidence matrix, has dimension (n_nodes, n_edges)
rowdict: dict
The dictionary mapping indices to node IDs, if index is True
coldict: dict
The dictionary mapping indices to edge IDs, if index is True
"""
edge_ids = H.edges
if order is not None:
edge_ids = [id_ for id_, edge in H._edge.items() if len(edge) == order + 1]
if not edge_ids:
return (np.array([]), {}, {}) if index else np.array([])
node_ids = H.nodes
num_edges = len(edge_ids)
num_nodes = len(node_ids)
node_dict = dict(zip(node_ids, range(num_nodes)))
edge_dict = dict(zip(edge_ids, range(num_edges)))
if node_dict and edge_dict:
if index:
rowdict = {v: k for k, v in node_dict.items()}
coldict = {v: k for k, v in edge_dict.items()}
if sparse:
# Create csr sparse matrix
rows = []
cols = []
data = []
for node in node_ids:
memberships = H.nodes.memberships(node)
# keep only those with right order
memberships = [i for i in memberships if i in edge_ids]
if len(memberships) > 0:
for edge in memberships:
data.append(weight(node, edge, H))
rows.append(node_dict[node])
cols.append(edge_dict[edge])
else: # include disconnected nodes
for edge in edge_ids:
data.append(0)
rows.append(node_dict[node])
cols.append(edge_dict[edge])
I = csr_matrix((data, (rows, cols)))
else:
# Create an np.matrix
I = np.zeros((num_nodes, num_edges), dtype=int)
for edge in edge_ids:
members = H.edges.members(edge)
for node in members:
I[node_dict[node], edge_dict[edge]] = weight(node, edge, H)
if index:
return I, rowdict, coldict
else:
return I
else:
if index:
return np.array([]), {}, {}
else:
return np.array([])
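
# Minimal, Hypergraph-free sketch of the same construction (illustrative, not part of
# the original module; assumes numpy is imported as np, as the function above requires):
# rows index nodes, columns index hyperedges, and a 1 marks membership.
def _toy_incidence(node_list, edge_members):
    node_index = {n: i for i, n in enumerate(node_list)}
    M = np.zeros((len(node_list), len(edge_members)), dtype=int)
    for j, members in enumerate(edge_members):
        for n in members:
            M[node_index[n], j] = 1
    return M

# _toy_incidence([0, 1, 2, 3], [[0, 1, 2], [2, 3]]) ->
# [[1 0]
#  [1 0]
#  [1 1]
#  [0 1]]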
| 23,347
|
def write_linkdirs_geotiff(links, gd_obj, writepath):
"""
Creates a geotiff where links are colored according to their directionality.
Pixels in each link are interpolated between 0 and 1 such that the upstream
pixel is 0 and the downstream-most pixel is 1. In a GIS, color can then
be set to see directionality.
"""
    # Initialize plotting raster with the same shape as the source raster
    I = np.zeros((gd_obj.RasterYSize, gd_obj.RasterXSize), dtype=np.float32)
# Loop through links and store each pixel's interpolated value
for lidcs in links['idx']:
n = len(lidcs)
vals = np.linspace(0,1, n)
rcidcs = np.unravel_index(lidcs, I.shape)
I[rcidcs] = vals
# Save the geotiff
write_geotiff(I, gd_obj.GetGeoTransform(), gd_obj.GetProjection(), writepath, dtype=gdal.GDT_Float32, nodata=0)
return
| 23,348
|
def test_encode_decode_get_profile():
"""Test encode decode get profile."""
msg = YotiMessage(
performative=YotiMessage.Performative.GET_PROFILE,
token=str(uuid4()),
dotted_path="a",
args=tuple(),
)
assert YotiMessage.decode(msg.encode()) == msg
| 23,349
|
def compare_models(model1, model2, tmpdir):
"""Checks if weights between two models can be shared."""
clf = model1()
pipe = make_pipeline(
make_union(
HashingVectorizer(), HashingVectorizer(ngram_range=(2, 3), analyzer="char")
),
clf,
)
X = [
"i really like this post",
"thanks for that comment",
"i enjoy this friendly forum",
"this is a bad post",
"i dislike this article",
"this is not well written",
]
y = np.array([1, 1, 1, 0, 0, 0])
pipe.fit(X, y)
if is_classifier(clf):
assert np.all(pipe.predict(X) == y)
    # Here we create the new pipeline.
clf_new = model2()
pipe_new = make_pipeline(
make_union(
HashingVectorizer(), HashingVectorizer(ngram_range=(2, 3), analyzer="char")
),
clf_new,
)
path = pathlib.Path(tmpdir, "coefs.h5")
save_coefficients(clf, path)
load_coefficients(clf_new, path)
assert np.all(clf.intercept_ == clf_new.intercept_)
assert np.all(clf.coef_ == clf_new.coef_)
if is_classifier(clf_new):
assert np.all(clf.classes_ == clf_new.classes_)
assert np.all(pipe_new.predict(X) == y)
| 23,350
|
def _add_parents_and_tree(commit, git_objects, git_root):
"""Add parent reference (i.e. to the parent commit) and tree reference (to
the top-level tree) to a commit object.
"""
content = cat_file(commit.sha, git_root, CatFileOption.PRETTY)
ptr_str = content.split("\n")[0].strip()
ptr_obj = git_objects[ptr_str.strip().split()[1]]
commit.add_child("", ptr_obj)
# parents may not exist
for line in content.split("\n")[1:]:
if line.startswith("author"):
break
elif line.startswith("parent"):
_, parent_sha = line.strip().split()
commit.add_parent(git_objects[parent_sha])
| 23,351
|
def check_for_cd_frame(fem1):
"""
A cylindrical/spherical CD frame will cause problems with the
grid point force transformation
"""
if any([card_name in fem1.card_count for card_name in ['GRID', 'SPOINT', 'EPOINT', 'RINGAX']]):
icd_transform, icp_transform, xyz_cp, nid_cp_cd = fem1.get_displacement_index_xyz_cp_cd(
fdtype='float64', idtype='int32', sort_ids=True)
cds = np.unique(nid_cp_cd[:, 2])
cd_coords = []
for cd in cds:
if cd == -1:
continue
coord = fem1.coords[cd]
# coordRs work in op2 extraction
if coord.type not in ['CORD2R', 'CORD1R']:
cd_coords.append(cd)
if cd_coords:
msg = (
'GRID-CD coords=%s can cause a problem in the OP2 results processing; '
'be careful' % cd_coords
)
fem1.log.warning(msg)
| 23,352
|
def initial_landing_distance(interest_area, fixation_sequence):
"""
Given an interest area and fixation sequence, return the initial landing
distance on that interest area. The initial landing distance is the pixel
distance between the first fixation to land in an interest area and the
left edge of that interest area (or, in the case of right-to-left text,
the right edge). Technically, the distance is measured from the text onset
without including any padding. Returns `None` if no fixation landed on the
interest area.
"""
for fixation in fixation_sequence.iter_without_discards():
if fixation in interest_area:
for char in interest_area:
if fixation in char: # be sure not to find a fixation in the padding
return abs(interest_area.onset - fixation.x)
return None
| 23,353
|
def load_object(filename):
"""
Load saved object from file
:param filename: The file to load
:return: the loaded object
"""
with gzip.GzipFile(filename, 'rb') as f:
return pickle.load(f)
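
# Companion sketch (illustrative, not part of the original module; assumes the gzip and
# pickle imports that load_object relies on): the matching writer, so objects round-trip
# through gzip + pickle with load_object above.
def save_object(obj, filename):
    """Save an object to a gzip-compressed pickle file."""
    with gzip.GzipFile(filename, 'wb') as f:
        pickle.dump(obj, f)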
| 23,354
|
def runAsStandalone():
"""Contains script execution flow when run as standalone application.
"""
    #### Pre-execution internal configuration check.
#initLogger = logging.Logger(name=_toolName)
#initLogger.addHandler(logging.StreamHandler(sys.stderr))
#### Configuration fine, execute as planned.
useParser = getStandaloneParser()
args = useParser.parse_args()
# Setup main logger for messages.
logStream = logging.StreamHandler(
open(args.logfile, "w") if args.logfile else sys.stderr)
# Either streams of a logger or the logger itself can have its level set,
# but ONLY the logger itself can have the logger level retrieved.
#logStream.setLevel(logging.DEBUG if args.verbose else logging.INFO)
logStream.setFormatter(logging.Formatter(
"%(asctime)-15s %(levelname)s:%(message)s",
datefmt="%Y-%m-%S %H:%M"))
logger = logging.Logger(name=_toolName)
logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
logger.addHandler(logStream)
logger.debug("Invocation arguments: %s" % args)
logging.getLogger("GS-PRACTICE").addHandler(logStream)
logging.getLogger("GS-PRACTICE").setLevel(logging.INFO)
# Start to analyze mutational signatures.
logger.info("Start to analyze mutational signatures.")
df_decomp=analyzeSignature(input_data=args.input_data,
genome_version=args.genome_version,
sample_name=args.sample_name,
sample_name_list=args.sample_name_list,
input_format=args.input_format)
# Write results
logger.debug("Decomposition result to %s " % args.out_prefix + _defDecompositionSuffix)
df_decomp.to_csv(args.out_prefix + _defDecompositionSuffix,sep="\t")
# Proceed to prediction by multi-estimator.
logger.info("Start to predict subtypes.")
df_preds=predictSubtype(df_decomp=df_decomp,
no_table_sort=args.no_table_sort,
use_default_clfs=args.use_default_clfs)
# Write out results
logger.debug("Prediction result to %s" % args.out_prefix + _defPredictionSuffix)
df_preds.to_csv(args.out_prefix + _defPredictionSuffix,sep="\t")
# Proceed to plot umap
    if not args.no_plot:
out_f=args.out_prefix + _defPlotSuffix + "." + args.figure_format
logger.debug("Plot and save mapping figure to %s" % out_f)
visualizePrediction(df_decomp=df_decomp,
df_preds=df_preds,
out_f=out_f,
figure_size=args.figure_size,
marker_size=args.marker_size,
use_default_umap=args.use_default_umap)
else:
logger.warning("Skip plotting figure.")
pass
# Clean up and terminate.
logger.info("Execution completed.")
| 23,355
|
def list_events_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Lists all events and return outputs in Demisto's format
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs
"""
max_results = args.get('max_results')
event_created_date_before = args.get('event_created_date_before')
event_created_date_after = args.get('event_created_date_after')
raw_response = client.list_events(
event_created_date_before=event_created_date_before,
event_created_date_after=event_created_date_after,
max_results=max_results)
events = raw_response.get('event')
if events:
title = f'{INTEGRATION_NAME} - List events:'
context_entry = raw_response_to_context(events)
context = {
f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID && val.ID === obj.ID)': context_entry
}
# Creating human readable for War room
human_readable = tableToMarkdown(title, context_entry)
# Return data to Demisto
return human_readable, context, raw_response
else:
return f'{INTEGRATION_NAME} - Could not find any events.', {}, {}
| 23,356
|
def update_weights(weights, expectation_g_squared, g_dict, decay_rate, learning_rate):
""" Refer: http://sebastianruder.com/optimizing-gradient-descent/index.html#rmsprop"""
epsilon = 1e-5
for layer_name in weights.keys():
g = g_dict[layer_name]
expectation_g_squared[layer_name] = decay_rate * expectation_g_squared[layer_name] + (1 - decay_rate) * g**2
weights[layer_name] += (learning_rate * g)/(np.sqrt(expectation_g_squared[layer_name] + epsilon))
g_dict[layer_name] = np.zeros_like(weights[layer_name])
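
# Minimal RMSprop sketch (illustrative, assumes numpy is imported as np, as the function
# above requires): one update step on a toy single-layer weight dict.
_w = {'layer1': np.zeros(3)}
_eg2 = {'layer1': np.zeros(3)}
_g = {'layer1': np.array([1.0, -2.0, 0.5])}
update_weights(_w, _eg2, _g, decay_rate=0.99, learning_rate=1e-3)
# the accumulated gradient buffer is reset to zero after the step
assert np.allclose(_g['layer1'], 0.0)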
| 23,357
|
def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generator, max_subtoken_length=None,
reserved_tokens=None):
"""Inner implementation for vocab generators.
Args:
data_dir: The base directory where data and vocab files are stored. If None,
then do not save the vocab even if it doesn't exist.
vocab_filename: relative filename where vocab file is stored
vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
generator: a generator that produces tokens from the vocabulary
max_subtoken_length: an optional integer. Set this to a finite value to
avoid quadratic costs during vocab building.
reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
should be a prefix of `reserved_tokens`. If `None`, defaults to
`RESERVED_TOKENS`.
Returns:
A SubwordTextEncoder vocabulary object.
"""
if data_dir and vocab_filename:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info("Found vocab file: %s", vocab_filepath)
return text_encoder.SubwordTextEncoder(vocab_filepath)
else:
vocab_filepath = None
tf.logging.info("Generating vocab file: %s", vocab_filepath)
vocab = text_encoder.SubwordTextEncoder.build_from_generator(
generator, vocab_size, max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
if vocab_filepath:
tf.gfile.MakeDirs(data_dir)
vocab.store_to_file(vocab_filepath)
return vocab
| 23,358
|
def response_map(fetch_map):
"""Create an expected FETCH response map from the given request map.
Most of the keys returned in a FETCH response are unmodified from the
request. The exceptions are BODY.PEEK and BODY partial range. A BODY.PEEK
request is answered without the .PEEK suffix. A partial range (e.g.
BODY[]<0.1000>) has the octet count (1000) removed, since that information
is provided in the literal size (and may be different if the data was
truncated).
"""
if not isinstance(fetch_map, dict):
fetch_map = dict((v, v) for v in fetch_map)
rmap = {}
for k, v in fetch_map.items():
for name in ('BODY', 'BINARY'):
if k.startswith(name):
k = k.replace(name + '.PEEK', name, 1)
if k.endswith('>'):
k = k.rsplit('.', 1)[0] + '>'
rmap[k] = v
return rmap
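
# Illustrative check (not part of the original module): response keys drop the ".PEEK"
# suffix and the octet count of a partial range, mapping back to the requested items.
_demo = response_map(['UID', 'BODY.PEEK[HEADER]', 'BODY[]<0.1000>'])
assert _demo == {'UID': 'UID',
                 'BODY[HEADER]': 'BODY.PEEK[HEADER]',
                 'BODY[]<0>': 'BODY[]<0.1000>'}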
| 23,359
|
def test_attr(pymel, node):
"""Validate the `attr` method behavior"""
port = node.attr("foo")
assert isinstance(port, pymel.Attribute)
| 23,360
|
def make_cat_wfpc2(fitsfile, outcat='default', regfile='default',
configfile='sext_astfile.config',
whtfile=None, weight_type='MAP_WEIGHT',
gain=2.5, texp='header', ncoadd=1, satur=65535.,
catformat='ldac', det_area=30, det_thresh=2.5,
logfile=None, verbose=True):
"""
Calls make_fits_cat, but with gain preset for WFPC2
"""
""" Set output file if user wants default values """
if outcat=='default':
outcat = fitsfile.replace('.fits', '.cat')
if regfile=='default':
regfile = fitsfile.replace('.fits', '.reg')
""" Set exposure time """
f = pf.open(fitsfile)
hdr = f[0].header
if texp == 'header':
readok = True
try:
texp = hdr['exptime']
except:
texp = 1.
readok = False
        if readok:
            print("Exposure time from fits header: %8.1f sec" % texp)
        else:
            print("Failed to read EXPTIME header. Setting texp = 1 sec")
    else:
        print("Exposure time set by function call to %8.1f sec" % texp)
f.close()
make_fits_cat(fitsfile, outcat, configfile, gain, texp, ncoadd, satur,
catformat=catformat,
whtfile=whtfile, weight_type=weight_type,
det_thresh=det_thresh, det_area=det_area,
logfile=logfile, regfile=regfile, verbose=verbose)
| 23,361
|
def RigidTendonMuscle_getClassName():
"""RigidTendonMuscle_getClassName() -> std::string const &"""
return _actuators.RigidTendonMuscle_getClassName()
| 23,362
|
def mp_solve_choices(p, q, n, delta_x, delta_y, delta, psi): # <<<
"""Implements mp_solve_choices form metapost (mp.c)"""
uu = [None]*(len(delta)+1) # relations between adjacent angles ("matrix" entries)
ww = [None]*len(uu) # additional matrix entries for the cyclic case
vv = [None]*len(uu) # angles ("rhs" entries)
theta = [None]*len(uu) # solution of the linear system of equations
# 348:
# the "matrix" is in tridiagonal form, the solution is obtained by Gaussian elimination.
# uu and ww are of type "fraction", vv and theta are of type "angle"
# k is the current knot number
# r, s, t registers for list traversal
k = 0
s = p
r = 0
while True:
t = s.next
if k == 0: # <<<
# 354:
# Get the linear equations started
# or return with the control points in place, if linear equations needn't be solved
if s.rtype == mp_given: # <<<
if t.ltype == mp_given:
# 372: Reduce to simple case of two givens and return
aa = mp_n_arg(delta_x[0], delta_y[0])
ct, st = mp_n_sin_cos(p.right_given() - aa)
cf, sf = mp_n_sin_cos(q.left_given() - aa)
mp_set_controls(p, q, delta_x[0], delta_y[0], st, ct, -sf, cf)
return
else:
# 362:
vv[0] = s.right_given() - mp_n_arg(delta_x[0], delta_y[0])
vv[0] = reduce_angle(vv[0])
uu[0] = 0
ww[0] = 0
# >>>
elif s.rtype == mp_curl: # <<<
if t.ltype == mp_curl:
# 373: (mp.pdf) Reduce to simple case of straight line and return
p.rtype = mp_explicit
q.ltype = mp_explicit
lt = abs(q.left_tension())
rt = abs(p.right_tension())
ff = mp_make_fraction(unity, 3.0*rt)
p.rx_pt = p.x_pt + mp_take_fraction(delta_x[0], ff)
p.ry_pt = p.y_pt + mp_take_fraction(delta_y[0], ff)
ff = mp_make_fraction(unity, 3.0*lt)
q.lx_pt = q.x_pt - mp_take_fraction(delta_x[0], ff)
q.ly_pt = q.y_pt - mp_take_fraction(delta_y[0], ff)
return
else: # t.ltype != mp_curl
# 363:
cc = s.right_curl()
lt = abs(t.left_tension())
rt = abs(s.right_tension())
uu[0] = mp_curl_ratio(cc, rt, lt)
vv[0] = -mp_take_fraction(psi[1], uu[0])
ww[0] = 0
# >>>
elif s.rtype == mp_open: # <<<
uu[0] = 0
vv[0] = 0
ww[0] = fraction_one
# >>>
# end of 354 >>>
else: # k > 0 <<<
if s.ltype == mp_end_cycle or s.ltype == mp_open: # <<<
# 356: Set up equation to match mock curvatures at z_k;
# then finish loop with theta_n adjusted to equal theta_0, if a
# cycle has ended
# 357: Calculate the values
# aa = Ak/Bk, bb = Dk/Ck, dd = (3-alpha_{k-1})d(k,k+1),
# ee = (3-beta_{k+1})d(k-1,k), cc=(Bk-uk-Ak)/Bk
aa = mp_make_fraction(unity, 3.0*abs(r.right_tension()) - unity)
dd = mp_take_fraction(delta[k],
fraction_three - mp_make_fraction(unity, abs(r.right_tension())))
bb = mp_make_fraction(unity, 3*abs(t.left_tension()) - unity)
ee = mp_take_fraction(delta[k-1],
fraction_three - mp_make_fraction(unity, abs(t.left_tension())))
cc = fraction_one - mp_take_fraction(uu[k-1], aa)
# 358: Calculate the ratio ff = Ck/(Ck + Bk - uk-1Ak)
dd = mp_take_fraction(dd, cc)
lt = abs(s.left_tension())
rt = abs(s.right_tension())
if lt < rt:
dd *= (lt/rt)**2
elif lt > rt:
ee *= (rt/lt)**2
ff = mp_make_fraction(ee, ee + dd)
uu[k] = mp_take_fraction(ff, bb)
# 359: Calculate the values of vk and wk
acc = -mp_take_fraction(psi[k+1], uu[k])
if r.rtype == mp_curl:
ww[k] = 0
vv[k] = acc - mp_take_fraction(psi[1], fraction_one - ff)
else:
ff = mp_make_fraction(fraction_one - ff, cc)
acc = acc - mp_take_fraction(psi[k], ff)
ff = mp_take_fraction(ff, aa)
vv[k] = acc - mp_take_fraction(vv[k-1], ff)
ww[k] = -mp_take_fraction(ww[k-1], ff)
if s.ltype == mp_end_cycle:
# 360: Adjust theta_n to equal theta_0 and finish loop
aa = 0
bb = fraction_one
while True:
k -= 1
if k == 0:
k = n
aa = vv[k] - mp_take_fraction(aa, uu[k])
bb = ww[k] - mp_take_fraction(bb, uu[k])
if k == n:
break
aa = mp_make_fraction(aa, fraction_one - bb)
theta[n] = aa
vv[0] = aa
for k in range(1, n):
vv[k] = vv[k] + mp_take_fraction(aa, ww[k])
break
# >>>
elif s.ltype == mp_curl: # <<<
# 364:
cc = s.left_curl()
lt = abs(s.left_tension())
rt = abs(r.right_tension())
ff = mp_curl_ratio(cc, lt, rt)
theta[n] = -mp_make_fraction(mp_take_fraction(vv[n-1], ff),
fraction_one - mp_take_fraction(ff, uu[n-1]))
break
# >>>
elif s.ltype == mp_given: # <<<
# 361:
theta[n] = s.left_given() - mp_n_arg(delta_x[n-1], delta_y[n-1])
theta[n] = reduce_angle(theta[n])
break
# >>>
# end of k == 0, k != 0 >>>
r = s
s = t
k += 1
# 367:
# Finish choosing angles and assigning control points
for k in range(n-1, -1, -1):
theta[k] = vv[k] - mp_take_fraction(theta[k+1], uu[k])
s = p
k = 0
while True:
t = s.next
ct, st = mp_n_sin_cos(theta[k])
cf, sf = mp_n_sin_cos(-psi[k+1]-theta[k+1])
mp_set_controls(s, t, delta_x[k], delta_y[k], st, ct, sf, cf)
k += 1
s = t
if k == n:
break
| 23,363
|
def Generate3DBlueNoiseTexture(Width,Height,Depth,nChannel,StandardDeviation=1.5):
"""This function generates a single 3D blue noise texture with the specified
dimensions and number of channels. It then outputs it to a sequence of Depth
output files in LDR and HDR in a well-organized tree of directories. It also
outputs raw binary files.
\sa StoreNDTextureHDR() """
OutputDirectory="../Data/%d_%d_%d"%(Width,Height,Depth);
if(not path.exists(OutputDirectory)):
makedirs(OutputDirectory);
# Generate the blue noise for the various channels using multi-threading
ChannelTextureList=[None]*nChannel;
ChannelThreadList=[None]*nChannel;
def GenerateAndStoreTexture(Index):
ChannelTextureList[Index]=GetVoidAndClusterBlueNoise((Height,Width,Depth),StandardDeviation);
for i in range(nChannel):
ChannelThreadList[i]=threading.Thread(target=GenerateAndStoreTexture,args=(i,));
ChannelThreadList[i].start();
for Thread in ChannelThreadList:
Thread.join();
Texture=np.concatenate([ChannelTextureList[i][:,:,:,np.newaxis] for i in range(nChannel)],3);
LDRFormat=["LLL1","RG01","RGB1","RGBA"][nChannel-1];
HDRFormat=["L","LA","RGB","RGBA"][nChannel-1];
StoreNDTextureHDR(Texture,path.join(OutputDirectory,"HDR_"+HDRFormat+".raw"));
for i in range(Depth):
StoreNoiseTextureLDR(Texture[:,:,i,:],path.join(OutputDirectory,"LDR_%s_%d.png"%(LDRFormat,i)),Height*Width*Depth);
StoreNoiseTextureHDR(Texture[:,:,i,:],path.join(OutputDirectory,"HDR_%s_%d.png"%(HDRFormat,i)),Height*Width*Depth);
| 23,364
|
def submit():
"""Receives the new paste and stores it in the database."""
if request.method == 'POST':
form = request.get_json(force=True)
pasteText = json.dumps(form['pasteText'])
nonce = json.dumps(form['nonce'])
burnAfterRead = json.dumps(form['burnAfterRead'])
pasteKeyHash = json.dumps(form['hash'])
if burnAfterRead == "true":
burnAfterRead = True
else:
burnAfterRead = False
# Creates Expire time
expireTime = json.dumps(form['expire_time'])
expireTime = int(time.time()) + int(expireTime)*60
# set paste type
pasteType = json.dumps(form['pasteType'])[1:-1] # cuts "'" out
# print(type(form['nonce']))
db = get_db()
# Creates random 64 bit int
idAsInt = uuid.uuid4().int >> 65
db.execute('''insert into pastes (id, paste_text, nonce,
expire_time, burn_after_read, paste_hash, paste_format) values (?, ?, ?, ?, ?, ?, ?)''',
[idAsInt, pasteText, nonce, expireTime, burnAfterRead, pasteKeyHash, pasteType])
db.commit() # add text to sqlite3 db
return jsonify(id=hex(idAsInt)[2:])
| 23,365
|
def select_from_dictset(
dictset: Iterator[dict],
columns: List[str] = ['*'],
condition: Callable = select_all) -> Iterator[dict]:
"""
Scan a dictset, filtering rows and selecting columns.
Basic implementation of SQL SELECT statement for a single table
Approximate SQL:
SELECT columns FROM dictset WHERE condition
"""
for record in dictset:
if condition(record):
if columns != ['*']:
record = select_record_fields(record, columns)
yield record
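
# Illustrative usage (assumes the select_all / select_record_fields helpers and typing
# imports from the surrounding module): with the default '*' column list only the row
# filter applies, so select_record_fields is never invoked here.
_rows = [{'id': 1, 'ok': True}, {'id': 2, 'ok': False}]
_kept = list(select_from_dictset(_rows, condition=lambda r: r['ok']))
assert _kept == [{'id': 1, 'ok': True}]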
| 23,366
|
def money_recall_at_k(recommended_list, bought_list, prices_recommended, prices_bought, k=5):
""" Доля дохода по релевантным рекомендованным объектам
:param recommended_list - список id рекомендаций
:param bought_list - список id покупок
:param prices_recommended - список цен для рекомендаций
:param prices_bought - список цен покупок
"""
flags = np.isin(recommended_list[:k], bought_list) # get recommend to bought matches
prices = np.array(prices_recommended[:k]) # get prices of recommended items
return flags @ prices / np.sum(prices_bought)
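
# Illustrative check (assumes numpy is imported as np): with k=2 the recommended items
# are 10 and 30, only item 10 was bought, so only its price (100.0) counts against the
# total spend of 500.0, giving 0.2.
assert abs(money_recall_at_k([10, 30, 40], [10, 20],
                             [100.0, 50.0, 25.0], [100.0, 400.0], k=2) - 0.2) < 1e-9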
| 23,367
|
def get_clustering_fips( collection_of_fips, adj = None ):
"""
Finds the *separate* clusters of counties or territorial units that are clustered together. This is used to identify possibly *different* clusters of counties that may be separate from each other. If one does not supply an adjacency :py:class:`dict`, it uses the adjacency dictionary that :py:meth:`fips_adj_2018 <covid19_stats.COVID19Database.fips_adj_2018>` returns. Look at :download:`fips_2019_adj.pkl.gz </_static/gis/fips_2019_adj.pkl.gz>` to see what this dictionary looks like.
:param list collection_of_fips: the :py:class:`list` of counties or territorial units, each identified by its `FIPS code`_.
:param dict adj: optionally specified adjacency dictionary. Otherwise it uses the :py:meth:`fips_adj_2018 <covid19_stats.COVID19Database.fips_adj_2018>` returned dictionary. Look at :download:`fips_2019_adj.pkl.gz </_static/gis/fips_2019_adj.pkl.gz>` to see what this dictionary looks like.
:returns: a :py:class:`list` of counties clustered together. Each cluster is a :py:class:`set` of `FIPS code`_\ s of counties grouped together.
:rtype: list
"""
if adj is None: adj = COVID19Database.fips_adj_2018( )
fips_rem = set( collection_of_fips )
#
## our adjacency matrix from this
subset = set(filter(lambda tup: all(map(lambda tok: tok in fips_rem, tup)), adj )) | \
set(map(lambda fips: ( fips, fips ), fips_rem ))
G = networkx.Graph( sorted( subset ) )
#
## now greedy clustering algo
fips_clusters = [ ]
while len( fips_rem ) > 0:
first_fips = min( fips_rem )
fips_excl = fips_rem - set([ first_fips, ])
fips_clust = [ first_fips ]
for fips in fips_excl:
try:
dist = networkx.shortest_path_length( G, first_fips, fips )
fips_clust.append( fips )
            except networkx.NetworkXNoPath: pass
fips_clusters.append( set( fips_clust ) )
fips_rem = fips_rem - set( fips_clust )
return fips_clusters
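
# Toy example (illustrative, with a hypothetical adjacency set instead of the bundled
# fips_adj_2018 data): '01001' and '01003' are adjacent while '02013' is isolated, so
# two separate clusters come back.
# _toy_adj = {('01001', '01003'), ('01003', '01005')}
# get_clustering_fips(['01001', '01003', '02013'], adj=_toy_adj)
# -> [{'01001', '01003'}, {'02013'}]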
| 23,368
|
def stop_after(space_number):
""" Decorator that determines when to stop tab-completion
Decorator that tells command specific complete function
(ex. "complete_use") when to stop tab-completion.
Decorator counts number of spaces (' ') in line in order
to determine when to stop.
ex. "use exploits/dlink/specific_module " -> stop complete after 2 spaces
"set rhost " -> stop completing after 2 spaces
"run " -> stop after 1 space
:param space_number: number of spaces (' ') after which tab-completion should stop
:return:
"""
def _outer_wrapper(wrapped_function):
@wraps(wrapped_function)
def _wrapper(self, *args, **kwargs):
try:
if args[1].count(" ") == space_number:
return []
except Exception as err:
logger.error(err)
return wrapped_function(self, *args, **kwargs)
return _wrapper
return _outer_wrapper
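
# Illustrative usage (hypothetical _DemoCompleter class; assumes functools.wraps and the
# module-level logger that stop_after itself relies on): completion for "use " stops
# once the line already contains 2 spaces.
class _DemoCompleter:
    @stop_after(2)
    def complete_use(self, text, line):
        return ["exploits/", "scanners/"]

_dc = _DemoCompleter()
assert _dc.complete_use("", "use exploits/dlink/module ") == []
assert _dc.complete_use("", "use ") == ["exploits/", "scanners/"]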
| 23,369
|
def find_cut_line(img_closed_original):  # handles front/back sides stuck together: take the row with the fewest white pixels as the dividing line
    """
    Forcibly split the stuck-together regions according to a fixed rule.
    :param img_closed_original: binarized image
    :return: processed binarized image
    """
img_closed = img_closed_original.copy()
img_closed = img_closed // 250
#print(img_closed.shape)
    width_sum = img_closed.sum(axis=1)  # sum along the width direction to count white pixels in each row
start_region_flag = 0
    start_region_index = 0  # row index where the ID-card region starts
    end_region_index = 0  # row index where the ID-card region ends
    for i in range(img_closed_original.shape[0]):  # iterate over the image rows (originally hard-coded to the 1000 px image height)
if start_region_flag == 0 and width_sum[i] > 330:
start_region_flag = 1
            start_region_index = i  # the first row with more than 330 white pixels marks the start of the ID-card region
if width_sum[i] > 330:
            end_region_index = i  # any row with more than 330 white pixels still counts as ID-card region, so keep updating the end
    # The row with the fewest white pixels inside the ID-card region is taken as the junction between front and back.
    # In argsort, only the slice of width_sum between the detected start and end rows is used, so the start offset must be added back.
min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0]
img_closed_original[min_line_position][:] = 0
    for i in range(1, 11):  # tunable: consider 10 additional candidate rows
temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i]
        if abs(temp_line_position - min_line_position) < 30:  # only rows within [-30, 30] of the minimum row
            img_closed_original[temp_line_position][:] = 0  # force the row to 0
return img_closed_original
| 23,370
|
def _search(progtext, qs=None):
""" Perform memoized url fetch, display progtext. """
loadmsg = "Searching for '%s'" % (progtext)
wdata = pafy.call_gdata('search', qs)
def iter_songs():
wdata2 = wdata
while True:
for song in get_tracks_from_json(wdata2):
yield song
if not wdata2.get('nextPageToken'):
break
qs['pageToken'] = wdata2['nextPageToken']
wdata2 = pafy.call_gdata('search', qs)
# # The youtube search api returns a maximum of 500 results
length = min(wdata['pageInfo']['totalResults'], 500)
slicer = IterSlicer(iter_songs(), length)
# paginatesongs(slicer, length=length, msg=msg, failmsg=failmsg, loadmsg=loadmsg)
func = slicer
s = 0
e = 3
if callable(func):
songs = (s, e)
else:
songs = func[s:e]
return songs
| 23,371
|
def img_to_vector(img_fn, label=0):
"""Read the first 32 characters of the first 32 rows of an image file.
    @return <ndarray>: a flat numpy array of length 1024+1 holding the pixel data
    with the label appended; the label defaults to 0.
"""
img = ""
for line in open(img_fn).readlines()[:32]:
img += line[:32]
# labels are always attached at the last position
itera = [_ for _ in img + str(label)]
return numpy.fromiter(itera, "f4")
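
# Illustrative round trip (writes a temporary all-zero 32x32 "digit" file; assumes the
# numpy import used above plus the standard library): the result is a flat vector of
# 1024 pixel values with the label appended at the end.
import os
import tempfile
_tmp = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False)
_tmp.write(("0" * 32 + "\n") * 32)
_tmp.close()
_vec = img_to_vector(_tmp.name, label=1)
assert _vec.shape == (1025,) and _vec[-1] == 1.0
os.remove(_tmp.name)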
| 23,372
|
def get_config():
"""Return a user configuration object."""
config_filename = appdirs.user_config_dir(_SCRIPT_NAME, _COMPANY) + ".ini"
config = _MyConfigParser()
config.optionxform = str
config.read(config_filename)
config.set_filename(config_filename)
return config
| 23,373
|
def launch_matchcomms_server() -> MatchcommsServerThread:
"""
Launches a background process that handles match communications.
"""
host = 'localhost'
port = find_free_port() # deliberately not using a fixed port to prevent hardcoding fragility.
event_loop = asyncio.new_event_loop()
matchcomms_server = MatchcommsServer()
start_server = websockets.serve(matchcomms_server.handle_connection, host, port, loop=event_loop)
server = event_loop.run_until_complete(start_server)
thread = Thread(target=event_loop.run_forever, daemon=True)
thread.start()
return MatchcommsServerThread(
root_url=URL(scheme='ws', netloc=f'{host}:{port}', path='', params='', query='', fragment=''),
_server=server,
_event_loop=event_loop,
_thread=thread,
)
| 23,374
|
def acq_randmaxvar():
"""Initialise a RandMaxVar fixture.
Returns
-------
RandMaxVar
Acquisition method.
"""
gp, prior = _get_dependencies_acq_fn()
# Initialising the acquisition method.
method_acq = RandMaxVar(model=gp, prior=prior)
return method_acq
| 23,375
|
def test_imdb_shard_id():
"""
Feature: Test IMDB Dataset.
    Description: read data from all files with num_shards=2 and shard_id=0.
Expectation: the data is processed successfully.
"""
logger.info("Test Case withShardID")
# define parameters
repeat_count = 1
# apply dataset operations
data1 = ds.IMDBDataset(DATA_DIR, num_shards=2, shard_id=0)
data1 = data1.repeat(repeat_count)
# Verify dataset size
data1_size = data1.get_dataset_size()
logger.info("dataset size is: {}".format(data1_size))
assert data1_size == 4
num_iter = 0
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary
# in this example, each dictionary has keys "text" and "label"
logger.info("text is {}".format(item["text"].item().decode("utf8")))
logger.info("label is {}".format(item["label"]))
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 4
| 23,376
|
def enable_logging(logging_lvl=logging.DEBUG, save_log=False, logfile_prefix=""):
"""
Enables Color logging on multi-platforms as well as in environments like jupyter notebooks
:param logging_lvl: Given debug level for setting what messages to show. (logging.DEBUG is lowest)
:param save_log: If true a log file will be created under ./logs
    :param logfile_prefix: Prefix for defining the name of the log file
:return:
"""
root = logging.getLogger()
root.setLevel(logging_lvl)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging_lvl)
# FORMAT from https://github.com/xolox/python-coloredlogs
FORMAT = '%(asctime)s %(name)s[%(process)d] \033[1m%(levelname)s\033[0m %(message)s'
# FORMAT="%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
formatter = logging.Formatter(FORMAT, "%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
root.addHandler(ch)
if save_log:
# include current timestamp in dataset export file
timestmp = datetime.datetime.fromtimestamp(time()).strftime("%Y-%m-%d-%H-%M-%S")
formatter = logging.Formatter("%(asctime)s %(message)s")
if not os.path.isdir("./experiments/logs/"):
print(os.getcwd())
os.mkdir("./experiments/logs/")
file_handler = logging.FileHandler("./experiments/logs/" + logfile_prefix + timestmp + ".log", mode='a')
file_handler.setFormatter(formatter)
# avoid spamming the log file, only log INFO , WARNING, ERROR events
file_handler.setLevel(logging.INFO)
root.addHandler(file_handler)
| 23,377
|
def from_config(func):
"""Run a function from a JSON configuration file."""
def decorator(filename):
with open(filename, 'r') as file_in:
config = json.load(file_in)
return func(**config)
return decorator
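
# Illustrative usage (hypothetical settings.json path; assumes the json import used
# above): a file containing {"greeting": "hi", "times": 2} drives the decorated
# function's keyword arguments.
@from_config
def _echo(greeting, times):
    return " ".join([greeting] * times)

# _echo("settings.json")  ->  "hi hi"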
| 23,378
|
def update_user(user, domain, password=None):
""" create/update user record. if password is None, the user is
removed. Password should already be SHA512-CRYPT'd """
passwdf = PASSWDFILE % {"domain": domain}
passwdb = KeyValueFile.open_file(passwdf, separator=":", lineformat=USERLINE+"\n")
passwdb[user] = password
return True
| 23,379
|
def puzzle_pieces(n):
"""Return a dictionary holding all 1, 3, and 7 k primes."""
    kprimes = {key: [] for key in [7, 3, 1]}
upper = 0
for k in sorted(kprimes.keys(), reverse=True):
if k == 7:
kprimes[k].extend(count_Kprimes(k, 2, n))
if not kprimes[k]:
return []
upper = n - kprimes[k][0]
if k == 3:
kprimes[k].extend(count_Kprimes(k, 2, upper))
upper -= kprimes[k][0]
if k == 1:
primes = get_primes(upper)
for p in takewhile(lambda x: x <= upper, primes):
kprimes[k].append(p)
return kprimes
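
# Background sketch (illustrative, not part of the original module): a k-prime is an
# integer with exactly k prime factors counted with multiplicity, which is what the
# count_Kprimes helper above is assumed to enumerate.
def _is_k_prime(n, k):
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    if n > 1:
        count += 1
    return count == k

# 2*2*2*3*5*7*11 = 9240 has exactly 7 prime factors, so it is a 7-prime.
assert _is_k_prime(9240, 7)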
| 23,380
|
def periodogram_snr(periodogram,periods,index_to_evaluate,duration,per_type,
freq_window_epsilon=3.,rms_window_bin_size=100):
"""
Calculate the periodogram SNR of the best period
Assumes fixed frequency spacing for periods
periodogram - the periodogram values
periods - periods associated with the above values
index_to_evaluate - index of period to examine
duration - total duration of the observations
per_type - which period search algorithm was used
(optional)
freq_window_epsilon - sets the size of the exclusion area
in the periodogram for the calculation
rms_window_bin_size - number of points to include in
calculating the RMS for the SNR
"""
# Some value checking
if len(periodogram) != len(periods):
raise ValueError("The lengths of the periodogram and the periods are not the same")
if hasattr(index_to_evaluate,'__len__'):
raise AttributeError("The index_to_evaluate has len attribute")
if np.isnan(periodogram[index_to_evaluate]):
raise ValueError("Selected periodogram value is nan")
if np.isinf(periodogram[index_to_evaluate]):
raise ValueError("Selected periodogram value is not finite")
if per_type.upper() not in ['LS','PDM','BLS']:
raise ValueError("Periodogram type " + per_type + " not recognized")
# Setting up some parameters
freq_window_size = freq_window_epsilon/duration
delta_frequency = abs(1./periods[1] - 1./periods[0])
freq_window_index_size = int(round(freq_window_size/delta_frequency))
# More value checking
if freq_window_index_size > len(periodogram):
raise ValueError("freq_window_index_size is greater than total periodogram length")
elif freq_window_index_size > .9*len(periodogram):
raise ValueError("freq_window_index_size is greater than 90% total length of periodogram")
    elif freq_window_index_size > .8*len(periodogram):
        warnings.warn("freq_window_index_size is greater than 80% total length of periodogram")
perdgm_window = [] # For storing values for RMS calculation
# Which values to include in perdgm_window
if index_to_evaluate > freq_window_index_size:
perdgm_window.extend(periodogram[max(0,index_to_evaluate-freq_window_index_size-rms_window_bin_size+1):index_to_evaluate-freq_window_index_size+1].tolist())
if index_to_evaluate + freq_window_index_size < len(periodogram):
perdgm_window.extend(periodogram[index_to_evaluate+freq_window_index_size:index_to_evaluate+freq_window_index_size+rms_window_bin_size].tolist())
perdgm_window = np.array(perdgm_window)
# Include only finite values
wherefinite = np.isfinite(perdgm_window)
# Sigma clip
vals, low, upp = sigmaclip(perdgm_window[wherefinite],low=3,high=3)
# Calculate standard deviation
stddev = np.std(vals)
# Return
if per_type.upper() == 'PDM': # If PDM, use correct amplitude
return (1.-periodogram[index_to_evaluate])/stddev
else:
return periodogram[index_to_evaluate]/stddev
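
# Illustrative call (assumes the numpy and scipy.stats.sigmaclip imports the function
# above relies on): a synthetic, nearly flat periodogram with one strong peak gives a
# peak SNR well above 1.
_freqs = np.linspace(0.01, 10.0, 1000)
_per = np.full(1000, 0.01)
_per[::7] = 0.02            # mild structure so the window RMS is non-zero
_per[500] = 1.0
_snr = periodogram_snr(_per, 1.0 / _freqs, 500, duration=30.0, per_type='LS')
assert _snr > 10.0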
| 23,381
|
def derivative_overview(storage_service_id, storage_location_id=None):
"""Return a summary of derivatives across AIPs with a mapping
created between the original format and the preservation copy.
"""
report = {}
aips = AIP.query.filter_by(storage_service_id=storage_service_id)
if storage_location_id:
aips = aips.filter_by(storage_location_id=storage_location_id)
aips = aips.all()
all_aips = []
for aip in aips:
if not aip.preservation_file_count > 0:
continue
aip_report = {}
aip_report[fields.FIELD_TRANSFER_NAME] = aip.transfer_name
aip_report[fields.FIELD_UUID] = aip.uuid
aip_report[fields.FIELD_FILE_COUNT] = aip.original_file_count
aip_report[fields.FIELD_DERIVATIVE_COUNT] = aip.preservation_file_count
aip_report[fields.FIELD_RELATED_PAIRING] = []
original_files = File.query.filter_by(
aip_id=aip.id, file_type=FileType.original
)
for original_file in original_files:
preservation_derivative = File.query.filter_by(
file_type=FileType.preservation, original_file_id=original_file.id
).first()
if preservation_derivative is None:
continue
file_derivative_pair = {}
file_derivative_pair[
fields.FIELD_DERIVATIVE_UUID
] = preservation_derivative.uuid
file_derivative_pair[fields.FIELD_ORIGINAL_UUID] = original_file.uuid
original_format_version = original_file.format_version
if original_format_version is None:
original_format_version = ""
file_derivative_pair[fields.FIELD_ORIGINAL_FORMAT] = "{} {} ({})".format(
original_file.file_format, original_format_version, original_file.puid
)
file_derivative_pair[fields.FIELD_DERIVATIVE_FORMAT] = "{}".format(
preservation_derivative.file_format
)
aip_report[fields.FIELD_RELATED_PAIRING].append(file_derivative_pair)
all_aips.append(aip_report)
report[fields.FIELD_ALL_AIPS] = all_aips
report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id)
report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description(
storage_location_id
)
return report
| 23,382
|
def fetch_packages(vendor_dir, packages):
"""
Fetches all packages from github.
"""
for package in packages:
tar_filename = format_tar_path(vendor_dir, package)
vendor_owner_dir = ensure_vendor_owner_dir(vendor_dir, package['owner'])
url = format_tarball_url(package)
print("Downloading {owner}/{project} {version}".format(**package))
urlretrieve(url, tar_filename)
with tarfile.open(tar_filename) as tar:
tar.extractall(vendor_owner_dir, members=tar.getmembers())
return packages
| 23,383
|
def execute_search(search_term, sort_by, **kwargs):
"""
Simple search API to query Elasticsearch
"""
# Get the Elasticsearch client
client = get_client()
# Perform the search
ons_index = get_index()
# Init SearchEngine
s = SearchEngine(using=client, index=ons_index)
# Define type counts (aggregations) query
s = s.type_counts_query(search_term)
# Execute
type_counts_response = s.execute()
# Format the output
aggregations, total_hits = aggs_to_json(
type_counts_response.aggregations, "docCounts")
# Setup initial paginator
page_number = int(get_form_param("page", False, 1))
page_size = int(get_form_param("size", False, 10))
paginator = None
if total_hits > 0:
paginator = Paginator(
total_hits,
MAX_VISIBLE_PAGINATOR_LINK,
page_number,
page_size)
# Perform the content query to populate the SERP
# Init SearchEngine
s = SearchEngine(using=client, index=ons_index)
# Define the query with sort and paginator
s = s.content_query(
search_term, sort_by=sort_by, paginator=paginator, **kwargs)
# Execute the query
content_response = s.execute()
# Update the paginator
paginator = Paginator(
content_response.hits.total,
MAX_VISIBLE_PAGINATOR_LINK,
page_number,
page_size)
# Check for featured results
featured_result_response = None
# Only do this if we have results and are on the first page
if total_hits > 0 and paginator.current_page <= 1:
# Init the SearchEngine
s = SearchEngine(using=client, index=ons_index)
# Define the query
s = s.featured_result_query(search_term)
# Execute the query
featured_result_response = s.execute()
# Return the hits as JSON
return hits_to_json(
content_response,
aggregations,
paginator,
sort_by.name,
featured_result_response=featured_result_response)
| 23,384
|
def cli(dbname, direct_store, hashdb_path, logfile, sleeptime):
"""
    Trivial wrapper around main to create a command-line interface entry point.
    (This preserves main as a regular function for use elsewhere,
    e.g. testing, and also provides a sensible location to initialise logging.)
"""
dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, dbname)
session_registry.configure(
bind=sqlalchemy.engine.create_engine(dburl, echo=False)
)
if direct_store:
main(hashdb_path, logfile, voevent_pause_secs=0.0,
process_function=direct_store_voevent)
else:
main(hashdb_path, logfile, sleeptime)
| 23,385
|
def last_char_to_aou(word):
"""Intended for abbreviations, returns "a" or "ä" based on vowel harmony
for the last char."""
assert isinstance(word, str)
ch = last_char_to_vowel(word)
if ch in "aou":
return "a"
return "ä"
| 23,386
|
def times_vector(mat, vec):
"""Returns the symmetric block-concatenated matrix multiplied by a vector.
Specifically, each value in the vector is multiplied by a row of the full
matrix. That is, the vector is broadcast and multiplied element-wise. Note
this would be the transpose of full_mat * vec if full_mat represented the full
symmetric matrix.
Args:
mat: The symmetric matrix represented as the concatenated blocks.
vec: The vector, having the same dimension as the materialized matrix.
"""
rows, cols = mat.shape
num_blocks = num_blocks_from_total_blocks(cols // rows)
multiplied = []
for i in range(num_blocks):
mat_block = mat[Ellipsis,
rows * ((i + 1) * i) // 2:rows * ((i + 1) * (i + 2)) // 2]
vec_block = vec[Ellipsis, rows * i:rows * (i + 1)]
multiplied.append(jnp.einsum("...ij,...i->ij", mat_block, vec_block))
return jnp.concatenate(multiplied, axis=-1)
| 23,387
|
def ensure_path(path:[str, pathlib.Path]):
"""
Check if the input path is a string or Path object, and return a path object.
:param path: String or Path object with a path to a resource.
:return: Path object instance
"""
return path if isinstance(path, pathlib.Path) else pathlib.Path(path)
| 23,388
|
def create_arguments(parser):
"""Sets up the CLI and config-file options"""
client_args = parser.add_argument_group('client test arguments')
client_args.add_argument('-t', '--tenantname',
default='admin',
help='Tenant Name')
client_args.add_argument('--interface',
default='publicURL',
help='public/internal/admin URL')
client_args.add_argument('-u', '--username',
default='admin',
help='Username')
client_args.add_argument('-p', '--password',
default='admin',
help='Password')
client_args.add_argument('--cacert',
default='/etc/ssl/certs/ca-certificates.crt',
help='CA cert file for TLS/SSL')
client_args.add_argument(
'--auth_url',
default='http://standard-ccp-vip-KEY-API-mgmt:35357/v3',
help='auth_url')
client_args.add_argument('-V', '--api-version', dest="api_version",
default="1", type=str,
help="Run check using version specified")
client_args.add_argument('-N', '--nova-api-version',
dest="nova_api_version",
default="2", type=str,
help="Run check using version specified")
client_args.add_argument('-v', '--verbose', dest="verbose",
default=False, action="store_true",
help="Run check in verbose mode")
client_args.add_argument('-a', '--check-api', dest="check_api",
default=False, action="store_true",
help="Run Cinder API monitoring check")
client_args.add_argument('-f', '--full', dest="full",
default=False, action="store_true",
help="Run a more detailed check")
client_args.add_argument('-i', '--image',
default=None,
help="Specify the image to boot an instance.")
client_args.add_argument('-l', '--flavor',
default=None,
help="Specify the flavor to boot an instance.")
| 23,389
|
def examine_mode(mode):
"""
Returns a numerical index corresponding to a mode
:param str mode: the subset user wishes to examine
:return: the numerical index
"""
if mode == 'test':
idx_set = 2
elif mode == 'valid':
idx_set = 1
elif mode == 'train':
idx_set = 0
else:
raise NotImplementedError
return idx_set
| 23,390
|
def main():
"""Validates individual trigger files within the raidboss Cactbot module.
Current validation only checks that the trigger file successfully compiles.
Returns:
An exit status code of 0 or 1 if the tests passed successfully or failed, respectively.
"""
exit_status = 0
for filepath in Path(CactbotModule.RAIDBOSS.directory(), DATA_DIRECTORY).glob('**/*.js'):
exit_status |= subprocess.call(['node', str(filepath)])
return exit_status
| 23,391
|
def create_pinata(profile_name: str) -> Pinata:
"""
Get or create a Pinata SDK instance with the given profile name.
If the profile does not exist, you will be prompted to create one,
which means you will be prompted for your API key and secret. After
that, they will be stored securely using ``keyring`` and accessed
as needed without prompt.
Args:
profile_name (str): The name of the profile to get or create.
Returns:
:class:`~pinata.sdk.Pinata`
"""
try:
pinata = Pinata.from_profile_name(profile_name)
except PinataMissingAPIKeyError:
set_keys_from_prompt(profile_name)
pinata = Pinata.from_profile_name(profile_name)
    if not pinata:
        set_keys_from_prompt(profile_name)
        return Pinata.from_profile_name(profile_name)
    return pinata
| 23,392
|
def create_tfid_weighted_vec(tokens, w2v, n_dim, tfidf):
"""
Create train, test vecs using the tf-idf weighting method
Parameters
----------
tokens : np.array
data (tokenized) where each line corresponds to a document
w2v : gensim.Word2Vec
word2vec model
n_dim : int
        dimensionality of our word vectors
    tfidf :
        tf-idf weighting passed through to build_doc_vector
Returns
-------
vecs_w2v : np.array
data ready for the model, shape (n_samples, n_dim)
"""
vecs_w2v = np.concatenate(
[build_doc_vector(doc, n_dim, w2v, tfidf)
for doc in tokens])
return vecs_w2v
| 23,393
|
def miniimagenet(folder, shots, ways, shuffle=True, test_shots=None,
seed=None, **kwargs):
"""Helper function to create a meta-dataset for the Mini-Imagenet dataset.
Parameters
----------
folder : string
Root directory where the dataset folder `miniimagenet` exists.
shots : int
Number of (training) examples per class in each task. This corresponds
to `k` in `k-shot` classification.
ways : int
Number of classes per task. This corresponds to `N` in `N-way`
classification.
shuffle : bool (default: `True`)
Shuffle the examples when creating the tasks.
test_shots : int, optional
Number of test examples per class in each task. If `None`, then the
number of test examples is equal to the number of training examples per
class.
seed : int, optional
Random seed to be used in the meta-dataset.
kwargs
Additional arguments passed to the `MiniImagenet` class.
See also
--------
`datasets.MiniImagenet` : Meta-dataset for the Mini-Imagenet dataset.
"""
defaults = {
'transform': Compose([Resize(84), ToTensor()])
}
return helper_with_default(MiniImagenet, folder, shots, ways,
shuffle=shuffle, test_shots=test_shots,
seed=seed, defaults=defaults, **kwargs)
| 23,394
|
def process_account_request(request, order_id, receipt_code):
"""
Process payment via online account like PayPal, Amazon ...etc
"""
order = get_object_or_404(Order, id=order_id, receipt_code=receipt_code)
if request.method == "POST":
gateway_name = request.POST["gateway_name"]
gateway = get_object_or_404(Gateway, name=gateway_name)
try:
if gateway.name == Gateway.PAYPAL:
processor = PayPal(gateway)
return HttpResponseRedirect(processor.create_account_payment(order, request.user))
else:
raise ImproperlyConfigured('Doorstep doesn\'t yet support payment with %s account.'
% gateway.get_name_display())
except DoorstepError as e:
request.session['processing_error'] = e.message
return HttpResponseRedirect(reverse('payments_processing_message'))
raise Http404
| 23,395
|
def add_classification_categories(json_object, classes_file):
"""
Reads the name of classes from the file *classes_file* and adds them to
the JSON object *json_object*. The function assumes that the first line
corresponds to output no. 0, i.e. we use 0-based indexing.
Modifies json_object in-place.
Args:
json_object: an object created from a json in the format of the detection API output
classes_file: the list of classes that correspond to the output elements of the classifier
Return:
The modified json_object with classification_categories added. If the field 'classification_categories'
already exists, then this function is a no-op.
"""
if ('classification_categories' not in json_object.keys()) or (len(json_object['classification_categories']) == 0):
# Read the name of all classes
with open(classes_file, 'rt') as fi:
class_names = fi.read().splitlines()
# remove empty lines
class_names = [cn for cn in class_names if cn.strip()]
# Create field with name *classification_categories*
json_object['classification_categories'] = dict()
# Add classes using 0-based indexing
for idx, name in enumerate(class_names):
json_object['classification_categories']['%i'%idx] = name
else:
print('WARNING: The input json already contains the list of classification categories.')
return json_object
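
# Illustrative usage (hypothetical classes.txt containing the lines "cat" and "dog"):
# indices are assigned in file order, starting at 0.
# add_classification_categories({'images': []}, 'classes.txt')
# -> {'images': [], 'classification_categories': {'0': 'cat', '1': 'dog'}}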
| 23,396
|
def create_compound_states(reference_thermodynamic_state,
top,
protocol,
region=None,
restraint=False):
"""
Return alchemically modified thermodynamic states.
Parameters
----------
reference_thermodynamic_state : ThermodynamicState object
top : Topography or Topology object
protocol : dict
The dictionary ``{parameter_name: list_of_parameter_values}`` defining
the protocol. All the parameter values list must have the same
number of elements.
region : str or list
Atomic indices defining the alchemical region.
restraint : bool
If ligand exists, restraint ligand and receptor movements.
"""
create_compound_states.metadata = {}
compound_state = _reference_compound_state(reference_thermodynamic_state,
top,
region=region,
restraint=restraint)
create_compound_states.metadata.update(_reference_compound_state.metadata)
# init the array of compound states
compound_states = []
protocol_keys, protocol_values = zip(*protocol.items())
for state_id, state_values in enumerate(zip(*protocol_values)):
compound_states.append(copy.deepcopy(compound_state))
for lambda_key, lambda_value in zip(protocol_keys, state_values):
if hasattr(compound_state, lambda_key):
setattr(compound_states[state_id], lambda_key, lambda_value)
else:
raise AttributeError(
'CompoundThermodynamicState object does not '
'have protocol attribute {}'.format(lambda_key))
return compound_states
| 23,397
|
def initialize_binary_MERA_random(phys_dim, chi, dtype=tf.float64):
"""
initialize a binary MERA network of bond dimension `chi`
isometries and disentanglers are initialized with random unitaries (not haar random)
Args:
phys_dim (int): Hilbert space dimension of the bottom layer
chi (int): maximum bond dimension
dtype (tf.dtype): dtype of the MERA tensors
Returns:
wC (list of tf.Tensor): the MERA isometries
uC (list of tf.Tensor): the MERA disentanglers
rho (tf.Tensor): initial reduced density matrix
"""
    #Fixme: currently, passing tf.complex128 merely initializes the imaginary part to 0.0
# make it random
wC, uC, rho = initialize_binary_MERA_identities(phys_dim, chi, dtype=dtype)
wC = [tf.cast(tf.random_uniform(shape=w.shape, dtype=dtype.real_dtype), dtype) for w in wC]
wC = [misc_mera.w_update_svd_numpy(w) for w in wC]
uC = [tf.cast(tf.random_uniform(shape=u.shape, dtype=dtype.real_dtype), dtype) for u in uC]
uC = [misc_mera.u_update_svd_numpy(u) for u in uC]
return wC, uC, rho
| 23,398
|
def tts_init():
"""
    Initialize the chosen TTS.
Returns: tts (TextToSpeech)
"""
if (TTS_NAME == "IBM"):
return IBM_initialization()
elif (TTS_NAME == "pytts"):
return pytts_initialization()
else:
print("ERROR - WRONG TTS")
| 23,399
|