| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def main_page(request):
    """Renders the main page and gets n (the matrix dimension number)"""
    if request.method != 'POST':
        form = InputForm()
    else:
        form = InputForm(data=request.POST)
        if form.is_valid():
            return redirect('calculator:set_demensions')
    context = {'form': form}
    return render(request, 'calculator/main_page.html', context)
| 20,000
|
def test_import():
"""Check if the app modules can be imported."""
from libpdf import core # pylint: disable=import-outside-toplevel
del core
| 20,001
|
def int2fin_reference(n):
"""Calculates a checksum for a Finnish national reference number"""
checksum = 10 - (sum([int(c) * i for c, i in zip(str(n)[::-1], it.cycle((7, 3, 1)))]) % 10)
return "%s%s" % (n, checksum)
| 20,002
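A quick worked check of the 7-3-1 weighting used by int2fin_reference above, assuming itertools is imported as `it` as in that snippet. For the base number 1234 the digits are weighted from the right as 4*7 + 3*3 + 2*1 + 1*7 = 46, so the appended check digit is 10 - (46 % 10) = 4. Note that when the weighted sum is already a multiple of 10 this expression yields 10 rather than 0, which the snippet does not special-case.

import itertools as it

base = 1234
weighted = sum(int(c) * w for c, w in zip(str(base)[::-1], it.cycle((7, 3, 1))))
assert weighted == 46                 # 4*7 + 3*3 + 2*1 + 1*7
assert 10 - (weighted % 10) == 4      # check digit, so int2fin_reference(1234) -> "12344"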
|
def COLSTR(str, tag):
"""
Utility function to create a colored line
@param str: The string
@param tag: Color tag constant. One of SCOLOR_XXXX
"""
return SCOLOR_ON + tag + str + SCOLOR_OFF + tag
| 20,003
|
def get_instance_pricing(instance_types):
"""
Get the spot and on demand price of an instance type
in all the regions at current instant
:param instance_types: EC2 instance type
:return: a pandas DataFrame with columns as
region, spot price and on demand price
"""
all_regions = get_all_regions()
price_df = pd.DataFrame({DF_COL_INSTANCE_TYPE: [],
DF_COL_REGION: [],
DF_COL_SPOT_PRICE: [],
DF_COL_ON_DEMAND_PRICE: []})
for region_name in all_regions:
spot_prices = get_spot_price(instance_types, region_name)
on_demand_prices = get_on_demand_price(instance_types, region_name)
both_prices = pd.merge(spot_prices, on_demand_prices,
on=DF_COL_INSTANCE_TYPE)
n_rows = both_prices.shape[0]
region_list = n_rows * [region_name]
both_prices[DF_COL_REGION] = region_list
both_prices = both_prices[[DF_COL_INSTANCE_TYPE, DF_COL_REGION,
DF_COL_SPOT_PRICE,
DF_COL_ON_DEMAND_PRICE]]
price_df = price_df.append(both_prices)
return price_df
| 20,004
|
def run_origami_bootsteps():
"""
Run bootsteps to configure origamid.
This includes the following
* Configure web server logging
* Configure Database
    * Validate origami configs.
"""
logging.info('Running origami bootsteps')
origami_config_dir = os.path.join(os.environ['HOME'], ORIGAMI_CONFIG_DIR)
if not os.path.isdir(origami_config_dir):
logging.info('Config directory does not exist, creating...')
os.makedirs(origami_config_dir, mode=0o755, exist_ok=True)
if not validate_directory_access(origami_config_dir, 'w+'):
logging.error(
'Permissions are not valid for {}'.format(origami_config_dir))
sys.exit(1)
configure_flask_logging()
configure_origami_db(origami_config_dir)
logging.info('Bootsteps completed...')
| 20,005
|
async def get_user_groups(request):
"""Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
        None, the AuthenticatedUser group and the user_id are added to the
groups returned by the function
Raises:
RuntimeError: If the ACL middleware is not installed
"""
acl_callback = request.get(GROUPS_KEY)
if acl_callback is None:
raise RuntimeError('acl_middleware not installed')
user_id = await get_auth(request)
groups = await acl_callback(user_id)
if groups is None:
return None
user_groups = (Group.AuthenticatedUser, user_id) if user_id is not None else ()
return set(itertools.chain(groups, (Group.Everyone,), user_groups))
| 20,006
|
def plot_bivariate_correlations(df, path=None, dpi=150):
"""
    Plots heatmaps of 2-variable correlations with the Target column.
    The bivariate correlations are assembled using both the arithmetic and geometric means for
two subplots in the figure.
Parameters
----------
df: dataframe
path: optional string path for saving
dpi: integer dots per inch
Returns
-------
    fig: figure with 2 subplots of bivariate correlations (using arithmetic and geometric mean)
"""
# Plot function for subplots
def makeit(ax):
bound = np.max(np.abs(correlations))
img = ax.matshow(correlations, cmap=cm.coolwarm, vmin=-bound, vmax=bound)
ax.set(xticks=np.arange(df.shape[1]),
yticks=np.arange(df.shape[1]),
xticklabels=df.columns,
yticklabels=df.columns
)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(75)
label.set_fontsize(16)
for label in ax.yaxis.get_ticklabels():
label.set_fontsize(16)
if matplotlib.__version__ == '3.1.1':
ax.set_ylim(len(df.columns) - 0.5, -0.5)
        # create an axes on the right side of ax. The width of cax will be 8%
        # of ax and the padding between cax and ax will be fixed at 0.1 inch.
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="8%", pad=0.1)
cb = plt.colorbar(img, cax=cax)
cb.set_ticks([])
    try:
        target = df.Target
    except AttributeError:
        # Without a Target column there is nothing to correlate against, so fail fast
        raise AttributeError('Not set up for working without Target series in DataFrame')
    df = df.drop(columns=["Target"])
features = list(df.columns)
arr = np.array(df)
correlations = np.zeros((len(features), len(features)))
    # First the arithmetic mean
for i in range(len(features)):
dic = {}
for j in range(len(features)):
dic["{}+{}".format(features[i], features[j])] = (arr[:, i] + arr[:, j]) / 2
_df = pd.DataFrame(dic)
correlations[i, :] = _df.corrwith(target)
fig, axes = plt.subplots(2, 1, figsize=(10, 20))
ax = axes[0]
makeit(ax)
    ax.set_title('Arithmetic Mean Bivariate Correlation', y=1.3, fontweight="bold", fontsize=18)
correlations = np.zeros((len(features), len(features)))
    # Second the geometric mean
for i in range(len(features)):
dic = {}
for j in range(len(features)):
dic["{}*{}".format(features[i], features[j])] = np.sqrt((arr[:, i] * arr[:, j]))
_df = pd.DataFrame(dic)
correlations[i, :] = _df.corrwith(target)
ax = axes[1]
makeit(ax)
ax.set_title('Geometric Mean Bivariate Correlation', y=1.3, fontweight="bold", fontsize=18)
plt.tight_layout()
if path: plt.savefig(path, dpi=dpi)
return fig
| 20,007
|
def create_datastream(dataset_path, **kwargs):
""" create data_loader to stream images 1 by 1 """
from torch.utils.data import DataLoader
if osp.isfile(osp.join(dataset_path, 'calibration.txt')):
db = ETH3DStream(dataset_path, **kwargs)
elif osp.isdir(osp.join(dataset_path, 'image_left')):
db = TartanAirStream(dataset_path, **kwargs)
elif osp.isfile(osp.join(dataset_path, 'rgb.txt')):
db = TUMStream(dataset_path, **kwargs)
elif osp.isdir(osp.join(dataset_path, 'mav0')):
db = EurocStream(dataset_path, **kwargs)
elif osp.isfile(osp.join(dataset_path, 'calib.txt')):
db = KITTIStream(dataset_path, **kwargs)
else:
# db = TartanAirStream(dataset_path, **kwargs)
db = TartanAirTestStream(dataset_path, **kwargs)
stream = DataLoader(db, shuffle=False, batch_size=1, num_workers=4)
return stream
| 20,008
|
def is_contained(target, keys):
"""Check is the target json object contained specified keys
:param target: target json object
:param keys: keys
:return: True if all of keys contained or False if anyone is not contained
Invalid parameters is always return False.
"""
if not target or not keys:
return False
# if keys is just a string convert it to a list
    if isinstance(keys, str):
keys = [keys]
# traverse the list to check json object
# if key does not exist or value is None then return False
try:
for key in keys:
if target[key] is None:
return False
except KeyError:
return False
# All seems to be going well
return True
| 20,009
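A short behaviour sketch for is_contained above, assuming the function is in scope; the dictionary and key names are made up for illustration.

obj = {"name": "sensor-1", "value": 42, "unit": None}
assert is_contained(obj, ["name", "value"])
assert not is_contained(obj, "unit")         # key present but value is None
assert not is_contained(obj, ["missing"])    # KeyError path
assert not is_contained({}, ["name"])        # empty target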
|
def map(x, in_min, in_max, out_min, out_max):
"""
Map a value from one range to another
    :param x: the value to scale
    :param in_min: minimum of input range
:param in_max: maximum of input range
:param out_min: minimum of output range
:param out_max: maximum of output range
:return: The value scaled to the new range
:rtype: int
"""
return int((x-in_min) * (out_max-out_min) / (in_max-in_min) + out_min)
| 20,010
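A worked example of the linear rescaling above. The helper is renamed scale here only to avoid shadowing Python's builtin map; it mirrors the formula in the snippet.

def scale(x, in_min, in_max, out_min, out_max):
    return int((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)

assert scale(512, 0, 1023, 0, 255) == 127    # mid-scale ADC reading maps to roughly half of 0..255
assert scale(1023, 0, 1023, 0, 255) == 255   # endpoints map exactly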
|
def show(Z, type_restrict, restrict, result, row, col):
"""
[construct and show the functions]
Arguments:
Z {[list]} -- [list Z values]
type_restrict {[int]} -- [<= or >=]
restrict {[list]} -- [list of all restrictions]
        result {[list]} -- [list of results from each restriction]
        row {[int]} -- [number of rows]
        col {[int]} -- [number of columns]
"""
print("Z = ", end='')
aux = 1
for x in Z:
if aux == len(Z):
st = str(x)+"X"+str(aux)
else:
st = str(x)+"X"+str(aux)+" + "
print(st, end='')
aux += 1
print("\n\nRestrições: \n")
for i in range(0,row):
for j in range(0,col):
if j == col-1:
print(str(restrict[i][j])+"X"+str(j+1), end='')
if type_restrict[i] == 1:
print(" <= "+str(result[i])+"\n")
else:
print(" >= "+str(result[i])+"\n")
else:
print(str(restrict[i][j])+"X"+str(j+1)+" + ", end='')
| 20,011
|
def get_points(sess: requests.Session, console: Console, status: Status, projectID: int):
"""
    Get all existing points in a project
"""
base_url = f"https://mapitfast.agterra.com/api/Points"
resp = sess.get(base_url, params={"projectId": projectID})
points_obj_list = list()
for raw_resp in resp.json():
points_obj_list.append(Points(raw_data=raw_resp))
return points_obj_list
| 20,012
|
def verify_source(
models: List[AOTCompiledTestModel],
accel="ethos-u55-256",
):
"""
This method verifies the generated source from an NPU module by building it and running on an FVP.
"""
interface_api = "c"
test_runner = _create_test_runner(accel)
run_and_check(
models,
test_runner,
interface_api,
workspace_byte_alignment=16,
data_linkage=AOTDataLinkage(section="ethosu_scratch", alignment=16),
)
| 20,013
|
def calculate_discounted_returns(rewards):
"""
Calculate discounted reward and then normalize it
(see Sutton book for definition)
Params:
        rewards: list of rewards for every timestep of the episode
"""
returns = np.zeros(len(rewards))
next_return = 0 # 0 because we start at the last timestep
for t in reversed(range(0, len(rewards))):
next_return = rewards[t] + args.gamma * next_return
returns[t] = next_return
# normalize for better statistical properties
returns = (returns - returns.mean()) / (returns.std() + np.finfo(np.float32).eps)
return returns
| 20,014
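A minimal sketch of the backward pass above, before the normalization step, with gamma written out explicitly since the snippet reads it from a module-level args object.

import numpy as np

rewards, gamma = [1.0, 0.0, 1.0], 0.9
returns = np.zeros(len(rewards))
next_return = 0.0
for t in reversed(range(len(rewards))):
    next_return = rewards[t] + gamma * next_return
    returns[t] = next_return
assert np.allclose(returns, [1.81, 0.9, 1.0])   # 1 + 0.9*(0 + 0.9*1), 0 + 0.9*1, 1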
|
def mock_tensorboard(logdir, host, port, print_nonsense, print_nothing,
address_in_use, sleep_time):
"""Run fake TensorBoard."""
if logdir is None:
print('A logdir must be specified. Run `tensorboard --help` for '
'details and examples.')
return -1
elif print_nothing:
time.sleep(sleep_time)
elif print_nonsense:
for i in range(0, 150):
print('Lorem ipsum %d' % i, file=sys.stderr)
time.sleep(0.1)
elif address_in_use:
print('TensorBoard attempted to bind to port %d, but it was already in use' % 1234, file=sys.stderr)
else:
time.sleep(1)
print('TensorBoard 1.8.0 at http://ntbthinkpad:%d' % 6006, file=sys.stderr)
| 20,015
|
def parse_binskim_old(bin_an_dic, output):
"""Parse old version of binskim."""
current_run = output['runs'][0]
if 'results' in current_run:
rules = output['runs'][0]['rules']
for res in current_run['results']:
if res['level'] != 'pass':
if len(res['formattedRuleMessage']['arguments']) > 2:
info = ('{}, {}').format(
res['formattedRuleMessage']['arguments'][1],
res['formattedRuleMessage']['arguments'][2])
else:
info = ''
result = {
'rule_id': res['ruleId'],
'status': 'Insecure',
'info': info,
'desc': rules[res['ruleId']]['shortDescription'],
}
else:
result = {
'rule_id': res['ruleId'],
'status': 'Secure',
'info': '',
'desc': rules[res['ruleId']]['shortDescription'],
}
bin_an_dic['results'].append(result)
else:
logger.warning('binskim has no results.')
        # Create a warning for the GUI
warning = {
'rule_id': 'No Binskim-Results',
'status': 'Info',
'info': '',
'desc': 'No results from Binskim.',
}
bin_an_dic['warnings'].append(warning)
if 'configurationNotifications' in current_run:
for warn in current_run['configurationNotifications']:
warning = {
'rule_id': warn['ruleId'],
'status': 'Info',
'info': '',
'desc': warn['message'],
}
bin_an_dic['warnings'].append(warning)
# Return updated dict
return bin_an_dic
| 20,016
|
def binary_elementwise_compute(
ifm: te.Tensor,
ifm2: te.Tensor,
lut: te.Tensor,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ifm2_scale: float,
ifm2_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ifm_channels: int,
ifm2_channels: int,
reversed_operands: bool,
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
ifm_layout: str,
ifm2_layout: str,
ofm_layout: str,
ofm_dtype: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of binary_elementwise for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
ifm2 : te.Tensor
The Input Feature Map tensor 2 (IFM2).
lut : te.Tensor
The look-up table values to use if activation = "LUT".
operator_type: str
The type of the binary elementwise operator.
"ADD"
"SUB"
"MUL"
"MIN"
"MAX"
"SHR"
"SHL"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ifm2_scale : float
The quantization scale for the Input Feature Map tensor 2.
ifm2_zero_point : int
        The quantization zero point for the Input Feature Map tensor 2.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ifm_channels : int
The number of the Input Feature Map channels.
ifm2_channels : int
The number of the Input Feature Map 2 channels.
reversed_operands : bool
True if IFM2 is the first operand and IFM is the second operand.
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
Available activations for activation type:
{int8, uint8}: "NONE", "CLIP", "TANH", "SIGMOID", "LUT"
{int32}: "NONE"
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ifm2_layout : str, optional
The layout of the Input Feature Map tensor 2. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_dtype: str
The Output Feature Map tensor type.
MUL, ADD, SUB {IFM}->{OFM}:
            {uint8, int8, int32} -> {uint8, int8, int32}, any pairing
MAX, MIN:
IFM and OFM must be of the same type, one of:
{int8, uint8}
SHR {IFM}->{OFM}:
            {int32}->{int8, uint8, int32}, any pairing
SHL:
{int32}->{int32} only
Returns
-------
te.Tensor
The Output Feature Map tensor.
"""
assert ifm.shape[0] == 1
assert ifm2.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ifm2_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm, ifm_layout, ifm_zero_point, ifm_scale, ifm_channels, (0, 0, 0, 0)
)
dmaed_ifm2 = dma_ifm_compute(
ifm2, ifm2_layout, ifm2_zero_point, ifm2_scale, ifm2_channels, (0, 0, 0, 0)
)
# Binary elementwise compute operation
ofm_height = dmaed_ifm.shape[1]
ofm_width = dmaed_ifm.shape[2]
binary_elementwise_attrs = {
"op": "ethosu_binary_elementwise",
"operator_type": operator_type,
"reversed_operands": reversed_operands,
"activation": activation,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
}
operators = {
"ADD": operator.add,
"SUB": operator.sub,
"MUL": operator.mul,
"MIN": te.min,
"MAX": te.max,
"SHR": operator.add,
"SHL": operator.add,
}
broadcast = [value == 1 for value in dmaed_ifm2.shape]
if reversed_operands:
binary_elementwise = te.compute(
(1, ofm_height, ofm_width, ifm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm2(
0 if broadcast[0] else nn,
0 if broadcast[1] else hh,
0 if broadcast[2] else ww,
0 if broadcast[3] else cc,
).astype(ifm.dtype),
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
).astype(ofm_dtype),
name="ethosu_binary_elementwise",
attrs=binary_elementwise_attrs,
)
else:
binary_elementwise = te.compute(
(1, ofm_height, ofm_width, ifm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
dmaed_ifm2(
0 if broadcast[0] else nn,
0 if broadcast[1] else hh,
0 if broadcast[2] else ww,
0 if broadcast[3] else cc,
).astype(ifm.dtype),
).astype(ofm_dtype),
name="ethosu_binary_elementwise",
attrs=binary_elementwise_attrs,
)
nhwc_to_nhcwb16 = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1 / 16, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 16],
[0, 0, 0, 0, 1],
]
nhcwb16_to_nhwc = [
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 16, 0, 1, -16],
[0, 0, 0, 0, 0, 1],
]
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
ifm2_matrix = [
[1, 0, 0, 0, 0],
[0, (1 - int(broadcast[1])), 0, 0, int(broadcast[1])],
[0, 0, (1 - int(broadcast[2])), 0, int(broadcast[2])],
[0, 0, 0, (1 - int(broadcast[3])), int(broadcast[3])],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
ifm2_matrix = np.matmul(ifm2_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
if ifm2_layout == "NHCWB16":
ifm2_matrix = np.matmul(nhwc_to_nhcwb16, ifm2_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, 0, 0, 0] if ifm_layout == "NHWC" else [0, 0, 0, 0, 0],
)
ifm2_propagator = Propagator(
ifm2_matrix,
[0, 0, 0, 0] if ifm2_layout == "NHWC" else [0, 0, 0, 0, 0],
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
"ifm2_propagator": ifm2_propagator,
}
# Compute operation for the OFM DMA pipeline
return dma_ofm_compute(
binary_elementwise,
ofm_layout,
ofm_zero_point,
ofm_scale,
ifm_channels,
attrs=propagator_attrs,
)
| 20,017
|
def start_metronome(aux):
"""Function that starts a metronome, with
the beat durations being calculated from the
BPM. If a time signature is given, two different notes
are used to set it."""
global metronome_on
metronome_on = True
seq = sequencers[1][1]
synthID = sequencers[1][2]
start = 0
bpm = aux.get("bpm")
if "time" in aux:
time = aux.get("time")
else:
time = 1
duration = int(60000 / bpm)
for i in range(0, 1000):
mod = i % time
if mod == 0:
key = 67
else:
key = 68
seq.note_on(time=start, absolute=False, channel=0, key=key, velocity=100, dest=synthID)
seq.note_off(time=start + duration, absolute=False, channel=0, key=key, dest=synthID)
start += duration
change_metronome_label_bpm(bpm)
| 20,018
|
def main(ts, fastARG_executable, fa_in, fa_out, nodes_fh, edges_fh, sites_fh, muts_fh):
"""
This is just to test if fastarg produces the same haplotypes
"""
import subprocess
seq_len = ts.get_sequence_length()
ts_to_fastARG_in(ts, fa_in)
subprocess.call([fastARG_executable, 'build', fa_in.name], stdout=fa_out)
fastARG_out_to_ts_txts(fa_out, variant_positions_from_fastARGin(fa_in),
nodes_fh, edges_fh, sites_fh, muts_fh, seq_len=seq_len)
new_ts = msprime.load_text(nodes=nodes_fh, edges=edges_fh, sites=sites_fh, mutations=muts_fh)
simple_ts = new_ts.simplify()
logging.debug("Simplified num_records should always be < unsimplified num_records.\n"
"For low mutationRate:recombinationRate ratio,"
" the initial num records will probably be higher than the"
" fastarg num_records, as the original simulation will have records"
" which leave no mutational trace. As the mutation rate increases,"
" we expect the fastarg num_records to equal, then exceed the original"
" as fastarg starts inferring the wrong (less parsimonious) set of trees")
logging.debug(
"Initial num records = {}, fastARG (simplified) = {}, fastARG (unsimplified) = {}".format(
ts.get_num_records(), simple_ts.get_num_records(), new_ts.get_num_records()))
| 20,019
|
def x_power_dependence(n, dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows x^n depdendence on the constants
associated with each of the dep_keys
y(x) = (a0 * b0 + a1 * b1 + ...) * x^n
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
return _dependence(
f=lambda p, x: p[0] * x ** n, n_params=1,
dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='x^{} dependence'.format(n), code='x{}'.format(n)+':{}', **kwargs
)
| 20,020
|
def get_subseqs(s, ops):
"""Returns a list of sequences given when applying the list of (ops)
on them, until a constant one is found, thus:
new[0] = next seq of s with ops[0]
new[i] = next seq of new[i-1] with op[i]
If 'ops' is not a list, then the same operation will be repeated.
The length of 'ops' should be equal to the length of 's' minus 1"""
if len(s) < 2:
# We can't get the next sequence based on two terms if there's only one
return []
if not isinstance(ops, Iterable):
ops = [ops for _ in range(len(s)-1)]
# Start with the initial subsequence
subseqs = [get_subseq(s, ops[0])]
# And base the next subsequences on the previous one until they're constant
i = 1
while not is_constant(subseqs[-1]) and len(subseqs[-1]) > 1:
subseqs.append(get_subseq(subseqs[-1], ops[i]))
i += 1
return subseqs
| 20,021
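A self-contained illustration of the idea behind get_subseqs, using plain subtraction as the single repeated operation (get_subseq and is_constant live elsewhere in that module): successive difference rows of a quadratic sequence become constant.

def diffs(seq):
    return [b - a for a, b in zip(seq, seq[1:])]

s = [1, 4, 9, 16, 25]
rows = [diffs(s)]
while len(set(rows[-1])) > 1 and len(rows[-1]) > 1:
    rows.append(diffs(rows[-1]))
assert rows == [[3, 5, 7, 9], [2, 2, 2]]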
|
def read_hst_siaf(file=None):#, AperNames=None):
"""Read apertures from HST SIAF file and return a collection.
This was partially ported from Lallo's plotap.f.
Parameters
----------
file : str
AperNames : str list
Returns
-------
apertures: dict
Dictionary of apertures
"""
from pysiaf import aperture # runtime import to avoid circular import on startup
if file is None:
file = os.path.join(HST_PRD_DATA_ROOT, 'siaf.dat')
# read all lines
siaf_stream = open(file)
data = siaf_stream.readlines()
siaf_stream.close()
# initialize dict of apertures
apertures = OrderedDict()
# inspect SIAF and populate Apertures
CAJ_index = 0
CAK_index = 0
for l, text in enumerate(data):
skip_aperture = False
if (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 0):
a = aperture.HstAperture()
# Process the first 'CAJ' record.
a.ap_name = text[0:10].strip() # Aperture Identifier.
a.v2_cent = np.float(text[10:25]) # SICS V2 Center. (same as a_v2_ref)
a.v3_cent = np.float(text[25:40]) # SICS V3 Center. (same as a_v3_ref)
a.a_shape = text[40:44] # Aperture Shape.
try:
a.maj = np.float(text[44:59]) # Major Axis Dimension.
except ValueError: # when field is empty
a.maj = None
a.Mac_Flag = text[59] # !SI Macro Aperture Flag.
a.BR_OBJ_Flag = text[60] # !Bright Object Alert Flag.
a.brt_obj_thres = text[61:66] # !Bright Object Alert Threshold.
a.Macro_ID = text[66:70] # !SI Macro Aperture Identifier.
rec_type = text[70:73] # !Record type.
CAJ_index = 1
aperture_name = a.ap_name
elif (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 1):
# Process the second 'CAJ' record.
try:
a.min = np.float(text[0:15]) # !Minor Axis Dimension.
except ValueError: # when field is empty
a.min = None
a.plate_scale = np.float(text[15:30]) # !Arcsecond per Pixel plate scale.
a.a_area = np.float(text[30:45]) # !Area of SI Aperture.
a.theta = np.float(text[45:60]) # !Aperture Rotation Angle.
a.SIAS_Flag = text[60] # !SIAS coordinate system flag. (If set then AK rec.)
rec_type = text[70:73] # !Record type.
CAJ_index = 2
elif (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 2):
# Process the third 'CAJ' record.
a.im_par = np.int(text[0:2]) # Image Parity.
a.ideg = np.int(text[2]) # !Polynomial Degree.
a.xa0 = np.float(text[3:18]) # !SIAS X Center. -> like JWST SCIENCE frame
a.ya0 = np.float(text[18:33]) # !SIAS Y Center.
a.xs0 = np.float(text[33:48]) # !SICS X Center. -> like JWST IDEAL frame
a.ys0 = np.float(text[48:63]) # !SICS Y Center.
rec_type = text[70:73] # !Record type.
CAJ_index = 0
elif text.rstrip()[-2::] == 'AJ':
a.SI_mne = text[0:4].strip() # !Science Instrument Mnemonic
a.Tlm_mne = text[4] # !SI Telemetry Mnemonic.
a.Det_mne = text[5] # !SI Detector Mnemonic.
a.A_mne = text[6:10] # !SI Aperture Mnemonic.
a.APOS_mne = text[10] # !SI Aperture Position Mnemonic.
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-3::] == 'CAQ':
a.v1x = np.float(text[0:15]) # !SICS Vertex 1_X -> like JWST IDEAL frame
a.v1y = np.float(text[15:30]) # !SICS Vertex 1_Y
a.v2x = np.float(text[30:45]) # !SICS Vertex 2_X
a.v2y = np.float(text[45:60]) # !SICS Vertex 2_Y
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AQ':
a.v3x = np.float(text[0:15]) # !SICS Vertex 3_X
a.v3y = np.float(text[15:30]) # !SICS Vertex 3_Y
a.v4x = np.float(text[30:45]) # !SICS Vertex 4_X
a.v4y = np.float(text[45:60]) # !SICS Vertex 4_Y
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AP':
# FGS pickles
a.pi_angle = np.float(text[0:15]) # !Inner Radius Orientation Angle.
a.pi_ext = np.float(text[15:30]) # !Angular Extent of the Inner Radius.
a.po_angle = np.float(text[30:45]) # !Outer Radius Orientation Angle.
a.po_ext = np.float(text[45:60]) # !Angular Extent of the Outer Radius.
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AM':
a.a_v2_ref = np.float(text[0:15]) # !V2 Coordinate of Aperture Reference Point. (same as v2_cent)
a.a_v3_ref = np.float(text[15:30]) # !V3 Coordinate of Aperture Reference Point. (same as v3_cent)
a.a_x_incr = np.float(text[30:45]) # !First Coordinate Axis increment.
a.a_y_incr = np.float(text[45:60]) # !Second Coordinate Axis increment.
elif text.rstrip()[-2::] == 'AN':
if (a.a_shape == 'PICK') and ('FGS' in a.ap_name):
# HST FGS are special in the sense that the idl_to_tel transformation is implemented via the TVS matrix
# and not the standard way
# a.set_fgs_tel_reference_point(a.a_v2_ref, a.a_v2_ref)
a.set_idl_reference_point(a.a_v2_ref, a.a_v3_ref, verbose=False)
# pass
if (a.a_shape == 'PICK') | (a.a_shape == 'CIRC'):
# TO BE IMPLEMENTED
# FGS pickle record ends here
# apertures.append(a)
# read(10,1250)Beta1, !Angle of increasing first coordinate axis.
# * Beta2, !Angle of increasing second coordinate axis.
# * a_x_ref, !X reference.
# * a_y_ref, !Y reference.
# * X_TOT_PIX, !Total X-axis pixels.
# * Y_TOT_PIX, !Total Y-axis pixels.
# * rec_type !Record type.
# 1250 format(4(G15.8),2(I5),a3)
# apertures.append(a)
apertures[a.AperName] = a
elif (text.rstrip()[-3::] == 'CAK') & (CAK_index == 0):
# Process the first 'CAK' record.
n_polynomial_coefficients = np.int(((a.ideg + 1) * (a.ideg + 2)) / 2)
# the order is
# SIAS to SICS X Transformation.
# SIAS to SICS Y Transformation.
# SICS to SIAS X Transformation.
# SICS to SIAS X Transformation.
polynomial_coefficients = np.ones((n_polynomial_coefficients, 4)) * -99
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
CAK_index += 1
elif (text.rstrip()[-3::] == 'CAK') & (CAK_index != 0):
# Process the remaining 'CAK' records
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
CAK_index += 1
elif text.rstrip()[-2::] == 'AK':
# Process the last polynomial coefficient record.
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
a.polynomial_coefficients = polynomial_coefficients
CAK_index = 0
apertures[a.AperName] = a
# apertures.append(a)
return apertures
| 20,022
|
def upload_model(model_file, name, tags=None):
"""Upload a tflite model file to the project and publish it."""
# Load a tflite file and upload it to Cloud Storage
print('Uploading to Cloud Storage...')
model_source = ml.TFLiteGCSModelSource.from_tflite_model_file(model_file)
# Create the model object
tflite_format = ml.TFLiteFormat(model_source=model_source)
model = ml.Model(
display_name=name,
model_format=tflite_format)
if tags is not None:
model.tags = tags
# Add the model to your Firebase project and publish it
new_model = ml.create_model(model)
ml.publish_model(new_model.model_id)
print('Model uploaded and published:')
tags = ', '.join(new_model.tags) if new_model.tags is not None else ''
print('{:<20}{:<10} {}'.format(new_model.display_name, new_model.model_id,
tags))
| 20,023
|
def glyphstr_center(gls, width=100):
    """Given the width of an area (such as a column heading), adjust the start
    point of each glyph in a glyph string so that the string is centered.
    """
    length = glyphstr_length(gls)
    glen = len(gls)
    # addlen = (width - length) / glen
    print(length)
    print(width - length)
    hl = (width - length) / 2
    for i in range(0, glen):
        gl = gls[i]
        flash = gl.flash
        gl._flash(flash + hl)
| 20,024
|
def update_s(C,k):
"""
Args: C: 2d array
k: 1d array
Return: 1d array
"""
if np.shape(C)[0]==0:
s = np.array([1])
else:
temp = np.dot(C,k)
s = np.append(temp,1)
return s
| 20,025
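A quick check of both branches of update_s above, assuming the function and numpy are in scope; the matrices are made-up toy values.

import numpy as np

assert np.array_equal(update_s(np.empty((0, 0)), np.array([])), np.array([1]))   # empty C -> s = [1]
C = np.array([[1.0, 2.0], [3.0, 4.0]])
k = np.array([1.0, 1.0])
assert np.array_equal(update_s(C, k), np.array([3.0, 7.0, 1.0]))                 # s = [C @ k, 1]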
|
def test_handler_callback_failure():
"""Test failure mode for inappropriate handlers."""
class BadHandler(object):
def handler(self, one):
return 'too many'
ob = EventTest()
handler = BadHandler()
with pytest.raises(TypeError):
ob.PublicEvent += handler.handler
ob.OnPublicEvent(EventArgsTest(10))
ob.PublicEvent -= handler.handler
class BadHandler(object):
def handler(self, one, two, three, four, five):
return 'not enough'
ob = EventTest()
handler = BadHandler()
with pytest.raises(TypeError):
ob.PublicEvent += handler.handler
ob.OnPublicEvent(EventArgsTest(10))
ob.PublicEvent -= handler.handler
| 20,026
|
def main_stage(game_ongoing: bool = True):
"""
Main function to let the two computer players actively play. This includes drawing cards, playing cards,
and interacting for the specialty cards (7, 8, and J)
Args:
game_ongoing: boolean which changes once the winning condition has been reached to end the game.
Returns:
Updates the respective player attributes and prints the result of the game to the console.
"""
while game_ongoing:
for player in [player1, player2]:
if len(board.remaining_cards) <= 2:
                # check whether the draw pile is empty; if so, shuffle in the middle cards (other than the
                # current active middle card)
board.refill_remaining_cards()
if check_specialty_card(current_board=board, current_player=player):
continue
played_card = player.play_card(remaining_cards=board.remaining_cards,
middle_card=board.middle_cards[0])
            # condition if a player has just played their last card
if played_card == 'Empty':
game_ongoing = False
break
if played_card is not None:
board.middle_cards.insert(0, played_card)
print(f"Current middle card: {board.middle_cards[0]}")
# if isinstance(player, HumanPlayer):
print(f"{player1.name}'s hand: {player1.hand}")
print(f"Remaining cards of the opponent: {len(player2.hand)}")
board.print_board(player1_hand=player1.hand, player2_hand=player2.hand, player1_name=player1.name,
player2_name=player2.name)
| 20,027
|
def test_CenteredParameter_column():
"""Tests probflow.parameters.CenteredParameter w/ center_by=column + 2D"""
# Create the parameter
param = CenteredParameter([5, 6], center_by="column")
# posterior_mean should return mean
sample1 = param.posterior_mean()
sample2 = param.posterior_mean()
assert sample1.ndim == 2
assert sample2.ndim == 2
assert sample1.shape[0] == 5
assert sample2.shape[0] == 5
assert sample1.shape[1] == 6
assert sample2.shape[1] == 6
assert np.all(sample1 == sample2)
# mean of each column should be 0
assert np.all(np.abs(np.mean(sample1, axis=0)) < 1e-5)
# posterior_sample should return samples
sample1 = param.posterior_sample()
sample2 = param.posterior_sample()
assert sample1.ndim == 2
assert sample2.ndim == 2
assert sample1.shape[0] == 5
assert sample1.shape[1] == 6
assert sample2.shape[0] == 5
assert sample2.shape[1] == 6
assert np.all(sample1 != sample2)
# mean of each column should be 0
assert np.all(np.abs(np.mean(sample1, axis=0)) < 1e-5)
# posterior_sample should be able to return multiple samples
sample1 = param.posterior_sample(10)
sample2 = param.posterior_sample(10)
assert sample1.ndim == 3
assert sample2.ndim == 3
assert sample1.shape[0] == 10
assert sample1.shape[1] == 5
assert sample1.shape[2] == 6
assert sample2.shape[0] == 10
assert sample2.shape[1] == 5
assert sample2.shape[2] == 6
assert np.all(sample1 != sample2)
# mean of each column for each sample should be 0
assert np.all(np.abs(np.mean(sample1, axis=1)) < 1e-5)
| 20,028
|
def get_chord_type(chord):
"""'Parses' input for a chord and returns the type of chord from it"""
cleaned_chord = chord[1:]
cleaned_chord = cleaned_chord.replace('b', '')
cleaned_chord = cleaned_chord.replace('#', '')
mapping = {
'7': 'seven',
'9': 'nine',
'm7': 'minor7',
'm9': 'minor9',
'm': 'minor',
'M7': 'major7',
'M9': 'major9',
'': 'major',
}
return mapping[cleaned_chord]
| 20,029
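A few example lookups for the mapping above, assuming get_chord_type is in scope; unknown suffixes would raise a KeyError since the dictionary access is unguarded.

assert get_chord_type('Am7') == 'minor7'
assert get_chord_type('F#') == 'major'
assert get_chord_type('BbM7') == 'major7'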
|
def compile_stats(path):
""" combines all items from the given folder of stats arrays """
df = pd.DataFrame()
for item in os.listdir(path):
print(item)
with open(path + '/' + item, 'rb') as file:
df1 = pickle.load(file)
# df1 = df1.loc[df1.pred_var < 1.0]
# df1 = df1.loc[df1.pred_var > 0.0]
df1 = df1.loc[df1.next_hold != np.inf]
df1 = df1.loc[df1.next_hold != -np.inf]
df = df.append(df1)
return df
| 20,030
|
def transpile(model: Union[SympyOpt, Model]) -> SympyOpt:
"""Transpile optimization problem into SympyOpt model
    Accepts SympyOpt, Docplex, PuLP, Qiskit, and dimod models.
:param model: model to be transpiled
:raises ValueError: if the argument is of inappropriate type
:return: transpiled model
"""
if isinstance(model, SympyOpt):
return deepcopy(model)
elif isinstance(model, Model):
return DocplexToSympyopt().transpile(model)
elif isinstance(model, LpProblem):
return PulpToSympyopt().transpile(model)
elif isinstance(model, (QuadraticProgram, PauliSumOp)):
return QiskitToSympyopt().transpile(model)
elif isinstance(model, (BinaryQuadraticModel, ConstrainedQuadraticModel)):
return DimodToSympyopt().transpile(model)
else:
raise ValueError(f"Unknown model type: {type(model)}")
| 20,031
|
def definition():
"""View of the finances with subtotals generated."""
return sql.format(source=source)
| 20,032
|
def random_mini_batches(X, Y, mini_batch_size = 32, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples) (m, Hi, Wi, Ci)
Y -- true "label" vector (containing 0 if control, 1 if case), of shape (1, number of examples) (m, n_y)
mini_batch_size - size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your "random" minibatches are the same as ours.
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
m = X.shape[0] # number of training examples
mini_batches = []
np.random.seed(seed)
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation,:,:,:]
shuffled_Y = Y[permutation,:]
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
num_complete_minibatches = int(num_complete_minibatches)
for k in range(0, int(num_complete_minibatches)):
mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]
mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
| 20,033
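A toy illustration of the partitioning above, assuming random_mini_batches and its numpy/math imports are in scope: 10 examples with batch size 4 give two full batches of 4 and a final batch of 2.

import numpy as np

X = np.random.rand(10, 8, 8, 3)
Y = np.random.rand(10, 1)
batches = random_mini_batches(X, Y, mini_batch_size=4, seed=1)
assert [mb[0].shape[0] for mb in batches] == [4, 4, 2]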
|
def file_base_features(path, record_type):
"""Return values for BASE_SCHEMA features."""
base_feature_dict = {
"record_id": path,
"record_type": record_type,
# "utc_last_access": os.stat(path).st_atime,
"utc_last_access": 1600000000.0,
}
return base_feature_dict
| 20,034
|
def split_ref(ref):
"""
セル参照をセル文字と1ベース行番号文字に分割する。
Params:
ref(str):
Returns:
Tuple[str, str]: 列、行
"""
m = re_cellref.match(ref)
if m:
return m.group(1), m.group(2)
return None, None
| 20,035
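A self-contained sketch of the behaviour of split_ref above, using a plausible pattern for the module-level re_cellref (an assumption; the real regex is defined elsewhere in that module).

import re

re_cellref = re.compile(r"([A-Za-z]+)(\d+)")

def split_ref_demo(ref):
    m = re_cellref.match(ref)
    if m:
        return m.group(1), m.group(2)
    return None, None

assert split_ref_demo("B12") == ("B", "12")
assert split_ref_demo("no-ref") == (None, None)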
|
def table_definition(dataset):
"""print an azure synapse table definition for a kartothek dataset"""
index_col = list(dataset.dataset_metadata.index_columns)[
0
] ##works only with one index column
cols = synapse_columns(
dataset.dataset_metadata.table_meta[dataset.table], index_col
)
template = """
with {dataset.dataset_uuid} as (
SELECT
result.filepath(1) as [{index_col}],
*
FROM
OPENROWSET(
BULK '{dataset.url}/{index_col}=*/*.parquet',
FORMAT='PARQUET'
) with(
{cols}
) as [result]
)
select top 100 * from {dataset.dataset_uuid};
"""
return template.format(dataset=dataset, cols=cols, index_col=index_col)
| 20,036
|
def test_home(client, db):
"""
GIVEN a user who wants to visit the home page
WHEN he accesses the page
THEN assert the right page is sent
"""
page = "/"
response = client.get(page)
assert response.status_code == 200
assertTemplateUsed(response, "core/home.html")
| 20,037
|
def identify_image_set(imagedir, image_names_pattern):
"""
Find all the images within the *imagedir*.
:param imagedir:
:param image_names_pattern:
:return: a list of image names that are part of the image set
"""
image_names_from_os = sorted(os.listdir(imagedir))
image_names = [re_identify_image_set(fn, image_names_pattern) for fn in image_names_from_os]
image_names = [name for name in image_names if name is not None]
return image_names
| 20,038
|
def node_extractor(dataframe, *columns):
"""
Extracts the set of nodes from a given dataframe.
:param dataframe: dataframe from which to extract the node list
:param columns: list of column names that contain nodes
:return: list of all unique nodes that appear in the provided dataset
"""
data_list = [dataframe[column].unique().tolist() for column in columns]
return list(set(itertools.chain.from_iterable(data_list)))
| 20,039
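A usage sketch for node_extractor above with a made-up edge list, assuming the function and its pandas/itertools imports are in scope.

import pandas as pd

df = pd.DataFrame({"src": ["a", "b", "a"], "dst": ["b", "c", "c"]})
assert sorted(node_extractor(df, "src", "dst")) == ["a", "b", "c"]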
|
def _get_bag(environ, bag_name):
"""
Get the named bag out of the store.
"""
store = environ['tiddlyweb.store']
bag = Bag(bag_name)
try:
bag = store.get(bag)
except NoBagError as exc:
raise HTTP404('%s not found, %s' % (bag.name, exc))
return bag
| 20,040
|
def missing_values_operation(files):
    """Take iterable file paths and eliminate features or samples with missing values, or impute missing values if necessary"""
    for i in files:
        with open(i, 'r') as f:  # 'rw' is not a valid open() mode; read access is sufficient here
            if missing_values(f):
                file_data = load_data(i)
                # Dropping rows with missing values
                file_data = file_data.dropna(axis=0)
                # Dropping columns with missing values
                file_data = file_data.dropna(axis=1)
                return "dropped rows and columns"
            else:
                return "no values to be dropped"
| 20,041
|
def remove_from_repo_history(repo_source: str, drop_files: Sequence[str],
github_token: str, keep_backup: bool = True,
auto_push_remove: bool = False, backup_dir: Optional[str] = None,
follow_renames: bool = True):
"""
Remove the passed files from the repo history entirely
:param repo_source: clone url (remote) or file path (local) of repo that should be split
:param drop_files: files to be dropped in the repo history
:param github_token: personal access token for Github
:param keep_backup: whether to retain a backup of the original repo in case something went wrong in removing history
:param auto_push_remove: pass True to avoid prompt for whether to push the original repo with history removed
:param backup_dir: pass file path to put backup of old repo there, otherwise uses repo_dest
:param follow_renames: Whether to track previous names of files from the history and also include those
:return:
"""
if keep_backup:
backup_dir = _set_backup_dir(backup_dir, os.getcwd())
backup_repo = clone_repo(repo_source, backup_dir, all_branches=True)
print(f'Cleaning up what was split off in the old repo')
with tempfile.TemporaryDirectory(dir=os.path.expanduser('~')) as repo_temp_dest:
repo = _clone_and_connect(repo_source, repo_temp_dest, github_token)
print(f'Removing history in the original repo for files which were split off. '
f'Note: this will take a long time for larger repos')
remove_history_for_files_matching(repo, drop_files, follow_renames=follow_renames)
if not auto_push_remove:
print('Success. Please inspect the old repo to make sure nothing that was needed was removed.')
print('Once the history looks correct, enter Y to replace the remote repo contents')
print('If there is an issue with the history, enter N to exit')
push_repo_raw = input(f'Push modified history to {repo_source}? Y/N: ')
push_repo_str = push_repo_raw.strip().lower()[0]
push_repo = push_repo_str == 'y'
else:
print('auto_push_remove passed. Will automatically push to remote for original repo.')
push_repo = True
if push_repo:
print('Pushing to remote for the original repo')
push_all_force(repo)
print('If there is an issue, '
'then you can go to your original local repo and run git push --all --force to reverse it')
else:
print('Not pushing modified history to original remote.')
print('Removing temporary directory')
| 20,042
|
def _object_id(value):
"""Return the object_id of the device value.
The object_id contains node_id and value instance id
to not collide with other entity_ids.
"""
object_id = "{}_{}".format(slugify(_value_name(value)),
value.node.node_id)
# Add the instance id if there is more than one instance for the value
if value.instance > 1:
return "{}_{}".format(object_id, value.instance)
return object_id
| 20,043
|
def sentence_length_distribution(trainFile="atec/training.csv"):
"""
    Analyze the distribution of sentence lengths in the training data
"""
raw_data = read_cut_file(file_path=trainFile, with_label=True)
df=pd.DataFrame(raw_data)
level=["w","c"]
for l in level:
s1="sent1"+l+"_len"
print(df.loc[df[s1].argmax()])
print(df[s1].describe())
s2="sent2"+l+"_len"
print(df.loc[df[s2].argmax()])
print(df[s2].describe())
df_=pd.DataFrame({s1:df[s1],s2:df[s2]})
fig=plt.figure(figsize=(32,18))
df_.boxplot()
plt.legend()
plt.show()
fig.savefig(trainFile.replace(".csv","_sl_"+l+".png"))
| 20,044
|
def _apply_attention_constraint(
e, last_attended_idx, backward_window=1, forward_window=3
):
"""Apply monotonic attention constraint.
**Note** This function is copied from espnet.nets.pytorch_backend.rnn.attention.py
"""
if e.size(0) != 1:
raise NotImplementedError(
"Batch attention constraining is not yet supported.")
backward_idx = last_attended_idx - backward_window
forward_idx = last_attended_idx + forward_window
if backward_idx > 0:
e[:, :backward_idx] = -float("inf")
if forward_idx < e.size(1):
e[:, forward_idx:] = -float("inf")
return e
| 20,045
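A sketch of the windowing above, assuming torch and _apply_attention_constraint are importable: with last_attended_idx=5, backward_window=1 and forward_window=3, energies outside indices 4..7 are masked to -inf.

import torch

e = torch.zeros(1, 10)
masked = _apply_attention_constraint(e.clone(), last_attended_idx=5)
assert torch.isinf(masked[0, :4]).all() and torch.isinf(masked[0, 8:]).all()
assert (masked[0, 4:8] == 0).all()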
|
def RetryInvocation(return_handler, exc_handler, max_retry, functor, *args,
**kwds):
"""Generic retry loop w/ optional break out depending on exceptions.
Generally speaking you likely want RetryException or RetryReturned
rather than this; they're wrappers around this and are friendlier for
end usage.
Arguments:
return_handler: A functor invoked with the returned results from
functor(*args, **kwds). If it returns True, then a retry
is attempted. If False, the result is returned.
If this value is None, then no retries are attempted for
non-excepting invocations of functor(*args, **kwds) .
exc_handler: A functor invoked w/ the exception instance that
functor(*args, **kwds) threw. If it returns True, then a
retry is attempted. If False, the exception is re-raised.
If this value is None, then no exception based retries will
occur.
max_retry: A positive integer representing how many times to retry
the command before giving up. Worst case, the command is invoked
(max_retry + 1) times before failing.
functor: A callable to pass args and kargs to.
args: Positional args passed to functor.
        kwds: Keyword args passed to functor.
sleep: Optional keyword. Multiplier for how long to sleep between
retries; will delay (1*sleep) the first time, then (2*sleep),
continuing via attempt * sleep.
Returns:
Whatever functor(*args, **kwds) returns.
Raises:
Exception: Whatever exceptions functor(*args, **kwds) throws and
isn't suppressed is raised. Note that the first exception encountered
        is what's thrown; in the absence of an exception (meaning ran out
of retries based on testing the result), a generic RetriesExhausted
exception is thrown.
"""
if max_retry < 0:
raise ValueError("max_retry needs to be zero or more: %s" % max_retry)
sleep = kwds.pop('sleep', 0)
stopper = lambda x: False
return_handler = stopper if return_handler is None else return_handler
exc_handler = stopper if exc_handler is None else exc_handler
exc_info = None
for attempt in xrange(max_retry + 1):
if attempt and sleep:
time.sleep(sleep * attempt)
try:
ret = functor(*args, **kwds)
if not return_handler(ret):
return ret
except Exception as e:
# Note we're not snagging BaseException, so MemoryError/KeyboardInterrupt
# and friends don't enter this except block.
if not exc_handler(e):
raise
# We intentionally ignore any failures in later attempts since we'll
# throw the original failure if all retries fail.
if exc_info is None:
exc_info = sys.exc_info()
#pylint: disable=E0702
if exc_info is None:
raise RetriesExhausted(max_retry, functor, args, kwds)
raise Exception(exc_info[0], exc_info[1], exc_info[2])
| 20,046
|
def parse_reolink(email):
"""Parse Reolink tracking numbers."""
tracking_numbers = []
soup = BeautifulSoup(email[EMAIL_ATTR_BODY], 'html.parser')
links = [link.get('href') for link in soup.find_all('a')]
for link in links:
if not link:
continue
match = re.search('qtc_tLabels1=(.*?)$', link)
if match and match.group(1) not in tracking_numbers:
tracking_numbers.append(match.group(1))
return tracking_numbers
| 20,047
|
def main():
    """
    Main function that invokes the services to create, insert, update, fetch, and delete data in the database.
    Uncomment to perform the actions (in case they are commented out).
    """
    # Create an instance of the Crud class
    crud = Crud(DB_FILE)
    # ================== CREATE =====================
    # Call the service to create tables
    serv_crear_tablas(crud)
    # ================== INSERT =====================
    # Call the service to insert data
    serv_insertar_datos(crud)
    # ================== UPDATE =====================
    # Call the service to update data
    serv_actualizar_datos(crud)
    # ================== SELECT =====================
    # Call the service to fetch all records
    print("\nFetch all records")
    serv_obtener_datos(crud, by_id=False)
    print("\nFetch record by ID")
    # Call the service to fetch records by id
    serv_obtener_datos(crud, by_id=True)
    print("")
    # ================== DELETE =====================
    # Call the service to delete records by id
    serv_eliminar_datos(crud)
| 20,048
|
def BuildObjcDoc(api):
"""Builds documentation for the Objective-C variant of engine."""
checkout = GetCheckoutPath(api)
with api.os_utils.make_temp_directory('BuildObjcDoc') as temp_dir:
objcdoc_cmd = [checkout.join('flutter/tools/gen_objcdoc.sh'), temp_dir]
with api.context(cwd=checkout.join('flutter')):
api.step('build obj-c doc', objcdoc_cmd)
api.zip.directory(
'archive obj-c doc', temp_dir, checkout.join('out/ios-objcdoc.zip')
)
if api.bucket_util.should_upload_packages():
api.bucket_util.safe_upload(
checkout.join('out/ios-objcdoc.zip'),
GetCloudPath(api, 'ios-objcdoc.zip')
)
| 20,049
|
def setting_modules(app, modules):
    """ Register Blueprint modules """
    for module, url_prefix in modules:
        app.register_blueprint(module, url_prefix=url_prefix)
| 20,050
|
def HMF(state, Delta, N):
"""Computes the result of the MF hamiltonian acting on a given state."""
#kinetic term: sum_i(eps(i)*(n_i,up + n_i,down))
kinetic_state = dict_list_sum(
[dict_prod(eps(i, N), dict_sum(number_op(state, i, 0, N), number_op(state, i, 1, N))) for i in range(N)])
#interaction term: sum_i( Delta c_iUP^dag c_iDOWN^dag + conj.(Delta) c_iDOWN c_iUP )
interaction_state = dict_list_sum(
[dict_sum(dict_prod(Delta, cr(cr(state, i, 1, N), i, 0, N)), dict_prod(np.conj(Delta), an(an(state, i, 0, N), i, 1, N))) for i in range(N)])
return dict_sum(kinetic_state, interaction_state)
| 20,051
|
def make_baseline_curve(df,num_iterations):
""" Makes a ROC curve for logistic regression trained on average product rating only,
for comparison with user-specific predictions which use both product avg rating as
well as computed similarity scores. """
factory = lr_wrapper(df,feature_columns=['rating_mean'],y_column='class')
roc_aucs = np.zeros(num_iterations)
tprs = []
base_fpr = np.linspace(0, 1, 101)
for z in range(num_iterations):
y_test, y_probas = factory.fit_and_return_probas()
roc_aucs[z] = roc_auc_score(y_test, y_probas)
fpr, tpr, _ = roc_curve(y_test, y_probas)
tpr = interp(base_fpr, fpr, tpr)
tpr[0] = 0.0
tprs.append(tpr)
tprs = np.array(tprs)
mean_tprs = tprs.mean(axis=0)
roc_auc = roc_aucs.mean()
plt.plot(base_fpr,mean_tprs,'g',label='User Agnostic AUROC: %.2f' % roc_auc)
| 20,052
|
def calculate_probability_of_multicoincidence(ambient_size: int = 0,
set_sizes: tuple = (),
intersection_size: int = 0):
"""
Calculates the probability that subsets of a set of a given size, themselves of
prescribed sizes, have mutual intersection of a given cardinality.
Parameters
----------
ambient_size : int
The size of the ambient set.
set_sizes : tuple
The integer sizes of some subsets.
intersection_size : int
The size of the intersection of the subsets.
Returns
-------
probability : float
The probability. Calculated as the number of configurations with the given
intersection size, divided by the number of all configurations.
"""
reduced_sizes = [size - intersection_size for size in set_sizes]
if any(size < 0 for size in reduced_sizes):
return 0
initial_choices = binom(
ambient_size=ambient_size,
subset_size=intersection_size,
)
reduced_ambient_size = ambient_size - intersection_size
covers_of_remaining = compute_number_of_covers(
set_sizes=tuple(reduced_ambient_size - size for size in reduced_sizes),
ambient_size=reduced_ambient_size,
)
all_configurations = count_all_configurations(
set_sizes=set_sizes,
ambient_size=ambient_size,
)
return initial_choices * covers_of_remaining / all_configurations
| 20,053
|
def flush_queue(queue):
"""
Flush the queue.
:param queue: queue to flush
:return: Nothing
"""
while True:
try:
queue.get(block=False)
except Empty:
break
| 20,054
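A usage sketch for flush_queue above, assuming the function (and the Empty exception it catches, from the standard queue module) is in scope.

from queue import Queue

q = Queue()
for i in range(3):
    q.put(i)
flush_queue(q)
assert q.empty()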
|
def check_date(option, opt, value):
    """check a date value
    return the parsed date
    """
    try:
        return DateTime.strptime(value, "%Y/%m/%d")
    except DateTime.Error:
        raise OptionValueError(
            "expected format of %s is yyyy/mm/dd" % opt)
| 20,055
|
def no_cloud_fixture():
"""Multi-realization cloud data cube with no cloud present."""
cloud_area_fraction = np.zeros((3, 10, 10), dtype=np.float32)
thresholds = [0.265, 0.415, 0.8125]
return cloud_probability_cube(cloud_area_fraction, thresholds)
| 20,056
|
def main(ctx, endpoint, debug):
""" Command-line Interface for the fortune cookie service
"""
if ctx.obj is None:
ctx.obj = {}
ctx.obj['endpoint'] = endpoint
if ctx.invoked_subcommand is None:
cmd_fortune(ctx)
| 20,057
|
def stats_imputation_stats():
"""Get statistics related to missing values"""
seoul_stn_code = 108
station_name = "종로구"
input_dir = Path("/input")
aes_dir = input_dir / "aerosol"
wea_dir = input_dir / "weather" / "seoul"
data_dir = Path("/mnt/data/")
stat_dir = Path("/mnt/data/impute_stats")
Path.mkdir(stat_dir, parents=True, exist_ok=True)
fdate = dt.datetime(2008, 1, 1, 1).astimezone(SEOULTZ)
tdate = dt.datetime(2020, 10, 31, 23).astimezone(SEOULTZ)
dates = pd.date_range(fdate, tdate, freq="1H", tz=SEOULTZ)
aes_fea = ["SO2", "CO", "NO2", "PM10", "PM25"]
wea_fea = ["temp", "pres", "wind_spd", "wind_dir", "humid", "prep"]
print(
f'Parsing Range from {fdate.strftime("%Y/%m/%d %H")}'
f' to {tdate.strftime("%Y/%m/%d %H")} ...',
flush=True,
)
print("Parsing Weather dataset...", flush=True)
df_wea = parse_raw_weathers(
wea_dir, wea_fea, fdate, tdate, seoul_stn_code=seoul_stn_code
)
df_wea_isna = df_wea.isna().sum()
df_wea_isnotna = df_wea.notna().sum()
df_wea_isna.to_csv(stat_dir / "stats_wea_isna.csv")
df_wea_isnotna.to_csv(stat_dir / "stats_wea_isnotna.csv")
df_wea.to_csv(data_dir / ("df_wea.csv"))
print(df_wea.head(5))
print(df_wea.tail(5))
print(f"Parsing Aerosol dataset - {station_name}...", flush=True)
df_aes = parse_raw_aerosols(
aes_dir, aes_fea, fdate, tdate, station_name=station_name
)
df_aes_isna = df_aes.isna().sum()
df_aes_isnotna = df_aes.notna().sum()
df_aes_isna.to_csv(stat_dir / f"stats_aes_{station_name}_isna.csv")
df_aes_isnotna.to_csv(stat_dir / f"stats_aes_{station_name}_isnotna.csv")
df_aes.to_csv(data_dir / ("df_aes.csv"))
print(df_aes.head(5))
print(df_aes.tail(5))
df = df_aes.join(df_wea, on="date", how="left")
df.to_csv(data_dir / ("df_raw_no_impute.csv"))
print(f"Length of weather dataframe : {len(df_wea.index)}")
print(f"Length of aerosol dataframe : {len(df_aes.index)}")
print(f"Length of dates : {len(dates)}")
print(list(set(list(dates)) - set(list(df_wea.index))))
print(list(set(list(dates)) - set(list(df_aes.index))))
print(list(set(list(df_wea.index)) - set(list(df_aes.index))))
| 20,058
|
async def download_file(self, Bucket, Key, Filename, ExtraArgs=None,
Callback=None, Config=None):
"""Download an S3 object to a file.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')
Similar behavior as S3Transfer's download_file() method,
except that parameters are capitalized.
"""
with open(Filename, 'wb') as open_file:
await download_fileobj(self, Bucket, Key, open_file, ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
| 20,059
|
def status():
"""
Print the statuses of all datasets in the schema.
"""
max_id_len = SCHEMA["id"].apply(len).max()
for _, dataset in SCHEMA.iterrows():
dataset_id = dataset["id"]
dataset_type = dataset["type"]
downloaded_name = dataset["downloaded_name"]
id_bold = f"{bcolors.BOLD}{dataset_id}{bcolors.ENDC}"
print(f"{id_bold}:", end="")
print((max_id_len - len(dataset_id)) * " ", end="")
if is_downloadable(dataset_id):
download_path = DOWNLOAD_DIR / downloaded_name
if dataset_type == "reference":
download_path = REFERENCE_DIR / downloaded_name
if file_exists(download_path):
print(f"{bcolors.OKGREEN}downloaded{bcolors.ENDC}\t", end="")
else:
print(f"{bcolors.FAIL}not downloaded{bcolors.ENDC}\t", end="")
else:
print("\t\t", end="")
if is_processable(dataset_id):
if file_exists(PROCESSED_DIR / f"{dataset_id}.h5"):
print(f"{bcolors.OKGREEN}processed{bcolors.ENDC}", end="")
else:
print(f"{bcolors.FAIL}not processed{bcolors.ENDC}", end="")
print()
| 20,060
|
def make_slicer_query(
database: Database,
base_table: Table,
joins: Iterable[Join] = (),
dimensions: Iterable[Field] = (),
metrics: Iterable[Field] = (),
filters: Iterable[Filter] = (),
orders: Iterable = (),
):
"""
Creates a pypika/SQL query from a list of slicer elements.
This is the base implementation shared by two implementations: the query to fetch data for a slicer request and
the query to fetch choices for dimensions.
This function only handles dimensions (select+group by) and filtering (where/having), which is everything needed
for the query to fetch choices for dimensions.
The slicer query extends this with metrics, references, and totals.
:param database:
:param base_table:
pypika.Table - The base table of the query, the one in the FROM clause
:param joins:
A collection of joins available in the slicer. This should include all slicer joins. Only joins required for
the query will be used.
:param dimensions:
A collection of dimensions to use in the query.
:param metrics:
A collection of metrics to use in the query.
:param filters:
A collection of filters to apply to the query.
:param orders:
A collection of orders as tuples of the metric/dimension to order by and the direction to order in.
:return:
"""
query = database.query_cls.from_(base_table, immutable=False)
elements = flatten([metrics, dimensions, filters])
# Add joins
join_tables_needed_for_query = find_required_tables_to_join(elements, base_table)
for join in find_joins_for_tables(joins, base_table, join_tables_needed_for_query):
query = query.join(join.table, how=join.join_type).on(join.criterion)
# Add dimensions
for dimension in dimensions:
dimension_term = make_term_for_field(dimension, database.trunc_date)
query = query.select(dimension_term)
query = query.groupby(dimension_term)
# Add filters
for fltr in filters:
query = (
query.having(fltr.definition)
if fltr.is_aggregate
else query.where(fltr.definition)
)
# Add metrics
metric_terms = [make_term_for_field(metric) for metric in metrics]
if metric_terms:
query = query.select(*metric_terms)
# In the case that the orders are determined by a field that is not selected as a metric or dimension, then it needs
# to be added to the query.
select_aliases = {el.alias for el in query._selects}
for (orderby_field, orientation) in orders:
orderby_term = make_term_for_field(orderby_field)
query = query.orderby(orderby_term, order=orientation)
if orderby_term.alias not in select_aliases:
query = query.select(orderby_term)
return query
| 20,061
|
def build_grid_generator(cfg, input_shape):
"""
    Build a grid generator from `cfg.MODEL.GRID_GENERATOR.NAME`.
"""
grid_generator = cfg.MODEL.GRID_GENERATOR.NAME
return GRID_GENERATOR_REGISTRY.get(grid_generator)(cfg, input_shape)
| 20,062
|
def main_page(request):
"""
This function is used to display the main page of programme_curriculum
@param:
request - contains metadata about the requested page
"""
return render(request, 'programme_curriculum/mainpage.html')
| 20,063
|
def nt_recv_capture_rx_test(dut):
"""Test bench main function."""
# start the clock
cocotb.fork(clk_gen(dut.clk, CLK_FREQ_MHZ))
# do not issue software reset
dut.rst_sw <= 0
# reset the dut
yield rstn(dut.clk, dut.rstn)
# instantiate an AXI4-Stream writer, connect and reset it
axis_writer = AXIS_Writer()
axis_writer.connect(dut, dut.clk, DATAPATH_BIT_WIDTH)
yield axis_writer.rst()
# generate a couple of random Ethernet packets. For each packet, generate
# a 24 bit latency value and a 28 bit inter-packet time value
pkts = []
latencies = []
inter_packet_times = []
for _ in range(N_PACKETS):
pkts.append(gen_packet())
latencies.append(randint(0, 2**24-1))
inter_packet_times.append(randint(0, 2**28-1))
# meta and data FIFOs never become full
dut.fifo_meta_full_i <= 0
dut.fifo_data_full_i <= 0
# test 0: capture disabled
print("Performing test 1/%d" % (N_REPEATS+3))
yield perform_test(dut, axis_writer, pkts, latencies, inter_packet_times,
False, 0)
# test 1: max capture size: 1514 byte
print("Performing test 2/%d" % (N_REPEATS+3))
yield perform_test(dut, axis_writer, pkts, latencies, inter_packet_times,
True, 1514)
# test 2: max capture size: 0 byte
print("Performing test 3/%d" % (N_REPEATS+3))
yield perform_test(dut, axis_writer, pkts, latencies, inter_packet_times,
True, 0)
# perform some more tests for random capture sizes
for i in range(N_REPEATS):
print("Performing test %d/%d" % (3+i, N_REPEATS+3))
yield perform_test(dut, axis_writer, pkts, latencies,
inter_packet_times, True, randint(64, 1514))
| 20,064
|
def _write_detailed_dot(graph, dotfilename):
"""Create a dot file with connection info
digraph structs {
node [shape=record];
struct1 [label="<f0> left|<f1> mid\ dle|<f2> right"];
struct2 [label="<f0> one|<f1> two"];
struct3 [label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"];
struct1:f1 -> struct2:f0;
struct1:f0 -> struct2:f1;
struct1:f2 -> struct3:here;
}
"""
text = ['digraph structs {', 'node [shape=record];']
# write nodes
edges = []
replacefunk = lambda x: x.replace('_', '').replace('.', ''). \
replace('@', '').replace('-', '')
for n in nx.topological_sort(graph):
nodename = str(n)
inports = []
for u, v, d in graph.in_edges_iter(nbunch=n, data=True):
for cd in d['connect']:
if isinstance(cd[0], str):
outport = cd[0]
else:
outport = cd[0][0]
inport = cd[1]
ipstrip = 'in' + replacefunk(inport)
opstrip = 'out' + replacefunk(outport)
edges.append('%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''),
opstrip,
str(v).replace('.', ''),
ipstrip))
if inport not in inports:
inports.append(inport)
inputstr = '{IN'
for ip in sorted(inports):
inputstr += '|<in%s> %s' % (replacefunk(ip), ip)
inputstr += '}'
outports = []
for u, v, d in graph.out_edges_iter(nbunch=n, data=True):
for cd in d['connect']:
if isinstance(cd[0], str):
outport = cd[0]
else:
outport = cd[0][0]
if outport not in outports:
outports.append(outport)
outputstr = '{OUT'
for op in sorted(outports):
outputstr += '|<out%s> %s' % (replacefunk(op), op)
outputstr += '}'
srcpackage = ''
if hasattr(n, '_interface'):
pkglist = n._interface.__class__.__module__.split('.')
interface = n._interface.__class__.__name__
if len(pkglist) > 2:
srcpackage = pkglist[2]
srchierarchy = '.'.join(nodename.split('.')[1:-1])
nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1],
srcpackage,
srchierarchy)
text += ['%s [label="%s|%s|%s"];' % (nodename.replace('.', ''),
inputstr,
nodenamestr,
outputstr)]
# write edges
for edge in sorted(edges):
text.append(edge)
text.append('}')
filep = open(dotfilename, 'wt')
filep.write('\n'.join(text))
filep.close()
return text
| 20,065
|
def ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True, bn=False):
"""
resample: None, 'down', or 'up'
"""
if resample=='down':
conv_shortcut = MeanPoolConv
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(ConvMeanPool, input_dim=input_dim, output_dim=output_dim)
elif resample=='up':
conv_shortcut = UpsampleConv
conv_1 = functools.partial(UpsampleConv, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
elif resample is None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim == input_dim and resample is None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1,
he_init=False, biases=True, inputs=inputs)
output = inputs
if bn:
output = Normalize(name+'.BN1', [0,2,3], output)
output = tf.nn.relu(output)
output = conv_1(name+'.Conv1', filter_size=filter_size, inputs=output, he_init=he_init, biases=False)
if bn:
output = Normalize(name+'.BN2', [0,2,3], output)
output = tf.nn.relu(output)
output = conv_2(name+'.Conv2', filter_size=filter_size, inputs=output, he_init=he_init)
return shortcut + output
| 20,066
|
def callback_save_poly():
"""Perform polyfit once regions selected
Globals: cal_fname, data (read-only, so no declaration)
"""
import pylleo
import yamlord
def _check_param_regions(param, regions, cal_dict):
msg = """
<b>{}</b> was not found in the calibration dictionary.
Process that parameter and then try saving the polyfit again.
""".format(
param
)
params_present = True
if param not in cal_dict["parameters"]:
params_present = False
msg = msg_template.format(param)
output_window.text = output_template.format(msg)
else:
for region in regions:
if region not in cal_dict["parameters"][param]:
params_present = False
msg.format("{}/{}".format(param, region))
output_window.text = output_template.format(msg)
return params_present
def _check_index_order(param, regions, cal_dict):
"""Check that index positions exist for each calibration region"""
indices_present = True
for region in regions:
start = cal_dict["parameters"][param][region]["start"]
end = cal_dict["parameters"][param][region]["end"]
# Check if start comes after end
if int(start) > int(end):
indices_present = False
msg = """
The start index ({}) comes after the end index ({}).
Please set new start/end indexes for <b>{}/{}</b>
""".format(
start, end, param, region
)
output_window.text = output_template.format(msg)
return indices_present
if datadirs_select.value != "None":
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, cal_fname)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Get currently selected parameter
param = param_select.value
regions = region_select.options
# Check that the parameter and its regions have been recorded in `cal.yml`
if not _check_param_regions(param, regions, cal_dict):
return None
# Check that index positions are in sequence
if not _check_index_order(param, regions, cal_dict):
return None
param = (param_select.value).lower().replace("-", "_")
try:
msg = """
Saved polyfit for <b>{}</b> to <b>{}</b>.
""".format(
param, cal_fname
)
output_window.text = output_template.format(msg)
lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
poly = list(pylleo.lleocal.fit1d(lower, upper))
poly = [float(str(i)) for i in poly]
cal_dict["parameters"][param]["poly"] = poly
yamlord.write_yaml(cal_dict, cal_yaml_path)
except Exception as e:
msg = "Problem saving polyfit: {}".format(e)
output_window.text = output_template.format(msg)
else:
msg = """
You must first load data and select indices for calibration
regions before you can save to polyfit to `cal.yml`
"""
output_window.text = output_template.format(msg)
return None
| 20,067
|
def timer(func):
"""Logging elapsed time of funciton (decorator)."""
@wraps(func)
def wrapper(*args, **kwargs):
with timing(func.__name__):
return func(*args, **kwargs)
return wrapper
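# Illustrative usage of the decorator above (a sketch). `timing` is assumed to be a
# context manager defined elsewhere in this module; a minimal stand-in is included
# here only so the example is self-contained.
import time
from contextlib import contextmanager

@contextmanager
def timing(label):  # hypothetical stand-in, not the module's own implementation
    start = time.perf_counter()
    try:
        yield
    finally:
        print("%s took %.3f s" % (label, time.perf_counter() - start))

@timer
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)  # logs/prints something like "slow_add took 0.100 s"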
| 20,068
|
def peak_ana(x, y, nb=3, plotpoints_axis=None):
""" nb = number of point (on each side) to use as background"""
## get background
xb = np.hstack((x[0:nb], x[-(nb):]))
yb = np.hstack((y[0:nb], y[-(nb):]))
a = np.polyfit(xb, yb, 1)
b = np.polyval(a, x)
yf = y - b
yd = np.diff(yf)
## determine whether peak or step
ispeak = np.abs(skew(yf)) > np.abs(skew(yd))
if ispeak:
yw = yf
xw = x
else:
yw = yd
xw = (x[1:] + x[0:-1]) / 2
## get background
xwb = np.hstack((xw[0:nb], xw[-(nb):]))
ywb = np.hstack((yw[0:nb], yw[-(nb):]))
aw = np.polyfit(xwb, ywb, 1)
bw = np.polyval(aw, xw)
yw = yw - bw
Iw = (xw[1:] - xw[0:-1]) * (yw[1:] + yw[0:-1]) / 2
if sum(Iw) < 0:
yw = -yw
## get parameters
mm = yw.argmax(0)
PEAK = xw[mm]
ywmax = yw[mm]
gg = (yw[:mm][::-1] < (ywmax / 2)).argmax()
ip = interp1d(
yw.take([mm - gg - 1, mm - gg]), xw.take([mm - gg - 1, mm - gg]), kind="linear"
)
xhm1 = ip(ywmax / 2)
gg = (yw[mm:] < (ywmax / 2)).argmax()
ip = interp1d(
yw.take([mm + gg, mm + gg - 1]), xw.take([mm + gg, mm + gg - 1]), kind="linear"
)
xhm2 = ip(ywmax / 2)
FWHM = np.abs(xhm2 - xhm1)
CEN = (xhm2 + xhm1) / 2
if plotpoints_axis and ispeak:
# plot the found points for center and FWHM edges
plotpoints_axis.plot(x, b, "g--")
plotpoints_axis.plot(x, b + ywmax, "g--")
plotpoints_axis.plot([xhm1, xhm1], np.polyval(a, xhm1) + [0, ywmax], "g--")
plotpoints_axis.plot([xhm2, xhm2], np.polyval(a, xhm2) + [0, ywmax], "g--")
plotpoints_axis.plot([CEN, CEN], np.polyval(a, CEN) + [0, ywmax], "g--")
plotpoints_axis.plot([xhm1, xhm2], [np.polyval(a, xhm1), np.polyval(a, xhm2)] + ywmax / 2, "gx")
if not ispeak:
try:
# findings start of step coming from left.
std0 = sp.std(y[0:nb])
nt = nb
while (sp.std(y[0:nt]) < (2 * std0)) and (nt < len(y)):
nt = nt + 1
lev0 = sp.mean(y[0:nt])
# findings start of step coming from right.
std0 = sp.std(y[-nb:])
nt = nb
while (sp.std(y[-nt:]) < (2 * std0)) and (nt < len(y)):
nt = nt + 1
lev1 = sp.mean(y[-nt:])
gg = np.abs(y - ((lev0 + lev1) / 2)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
ip = interp1d(ftx, fty, kind="linear")
CEN = ip((lev0 + lev1) / 2)
gg = np.abs(y - (lev1 + (lev0 - lev1) * 0.1195)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
# print " %f %f %f %f %f" % (ftx[0],ftx[1],fty[0],fty[1],lev1+(lev0-lev1)*0.1195)
ip = interp1d(ftx, fty, kind="linear")
H1 = ip((lev1 + (lev0 - lev1) * 0.1195))
# print "H1=%f" % H1
gg = np.abs(y - (lev0 + (lev1 - lev0) * 0.1195)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
# print " %f %f %f %f %f" % (ftx[0],ftx[1],fty[0],fty[1],lev0+(lev1-lev0)*0.1195)
ip = interp1d(ftx, fty, kind="linear")
H2 = ip((lev0 + (lev1 - lev0) * 0.1195))
# print "H2=%f" % abs(H2-H1)
FWHM = abs(H2 - H1)
if plotpoints_axis:
# plot the found points for center and FWHM edges
plotpoints_axis.plot([x.min(), x.max()], [lev0, lev0], "g--")
plotpoints_axis.plot([x.min(), x.max()], [lev1, lev1], "g--")
plotpoints_axis.plot([H2, H2], [lev0, lev1], "g--")
plotpoints_axis.plot([H1, H1], [lev0, lev1], "g--")
plotpoints_axis.plot([CEN, CEN], [lev0, lev1], "g--")
plotpoints_axis.plot(
[H2, CEN, H1],
[
lev0 + (lev1 - lev0) * 0.1195,
(lev1 + lev0) / 2,
lev1 + (lev0 - lev1) * 0.1195,
],
"gx",
)
except:
CEN = np.nan
FWHM = np.nan
PEAK = np.nan
return (CEN, FWHM, PEAK)
| 20,069
|
def raisealert(severity, msg, process_name=None):
""" Writes the alert message"""
#timeStr=str(time.ctime())
if process_name is not None:
log = '['+severity +']'+" " + '['+process_name+']' + " " + msg +"\n"
else:
log = '['+severity+']' + " " + msg +"\n"
logging.basicConfig(level=logging.INFO,filename='/var/log/routerServiceMonitor.log',format='%(asctime)s %(message)s')
logging.info(log)
msg = 'logger -t monit '+ log
pout = Popen(msg, shell=True, stdout=PIPE)
| 20,070
|
def run_benchmarks(benchmark_params, test_root, force=False):
"""Run the benchmarks
For every row in benchmark params, run a trace on the input video
using the params specified.
benchmark_params: DataFrame with columns corresponding to keywords
to pass to pipeline_trace. Should have columns 'name',
'input_video', 'chunk_sz_frames', 'epoch_sz_frames',
'frame_start', 'frame_stop', 'n_trace_processes', etc
Returns:
test_results, durations
test_results : Dict from test['name'] to results read from hdf5 file
durations : list of durations taken
"""
WhiskiWrap.utils.probe_needed_commands()
test_results = {}
durations = []
for idx, test in benchmark_params.iterrows():
print(test['name'])
test_dir = os.path.expanduser(os.path.join(test_root, test['name']))
fn = setup_session_directory(test_dir, test['input_video'], force=force)
# Run
start_time = time.time()
WhiskiWrap.pipeline_trace(
fn.video('mp4'),
fn.hdf5,
chunk_sz_frames=test['chunk_sz_frames'],
epoch_sz_frames=test['epoch_sz_frames'],
frame_start=test['frame_start'],
frame_stop=test['frame_stop'],
n_trace_processes=test['n_trace_processes'])
stop_time = time.time()
durations.append(stop_time - start_time)
# Get the summary
with tables.open_file(fn.hdf5) as fi:
test_results[test['name']] = pandas.DataFrame.from_records(
fi.root.summary.read())
return test_results, durations
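# Hedged sketch of a `benchmark_params` table for the runner above; the column
# values and the input video path are made up for illustration.
import pandas

benchmark_params = pandas.DataFrame([{
    'name': 'small_chunks',
    'input_video': '~/videos/session1.mp4',  # hypothetical path
    'chunk_sz_frames': 200,
    'epoch_sz_frames': 1000,
    'frame_start': 0,
    'frame_stop': 2000,
    'n_trace_processes': 4,
}])
# test_results, durations = run_benchmarks(benchmark_params, '~/whiski_benchmarks')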
| 20,071
|
def floodPacket(con, inport, packet, buf, bufid=None):
"""Flood a packet on a switch
"""
sendCommand(con, FloodPacketCommand(inport, packet, buf, bufid))
print con, "flooded packet"
| 20,072
|
def sample_sep01(nn, xi=1., beta=0.):
"""
Samples from the skew exponential power distribution with location zero and scale one.
Definition
----------
def sample_sep01(nn, xi=1., beta=0.):
Input
-----
nn number of samples
Optional Input
--------------
xi parameter which controls the skewness
beta parameter which controls the kurtosis
Output
------
Samples from the standardized skew exponential power distribution
Examples
--------
None
Literature
--------
Schoups G & Vrugt JA (2010) A formal likelihood function for parameter and predictive
inference of hydrologic models with correlated, heteroscedastic, and non-Gaussian errors.
Water Resources Research 46, W10531.
--> Steps (6) described on page 5
History
-------
Written, JM, May 2016
"""
from jams.distributions import sep_fs_mean, sep_fs_std
SEP_fs = sample_sep01_fs(nn, xi=xi, beta=beta)
# (6) Standardize SEP_fs
mean_sep_fs = sep_fs_mean(xi=xi, beta=beta)
std_sep_fs = sep_fs_std(xi=xi, beta=beta)
sSEP = (SEP_fs - mean_sep_fs) / std_sep_fs # standardized SEP (=Schoups and Vrugt's a_t)
return sSEP
| 20,073
|
def pandas_from_feather(file: str = None) -> pd.DataFrame:
""" Load a feather file to a pandas DataFrame.
Uses pyarrow to load a feather file into a [pyarrow.Table](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html) and convert to pandas format.
Args:
file (str): the feather file path.
"""
return feather.read_table(file).to_pandas()
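# Hedged usage sketch: assumes `feather` above refers to pyarrow.feather and that
# the path below (hypothetical) is writable.
import pandas as pd
from pyarrow import feather

df_in = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
feather.write_feather(df_in, "example.feather")  # hypothetical file
df_out = pandas_from_feather("example.feather")
assert df_out.equals(df_in)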
| 20,074
|
def _get_user_name():
"""
Get the current user.
"""
return pwd.getpwuid(os.getuid())[0]
| 20,075
|
async def save_monobank_info(pools, telegram_id, token):
"""Retrieve user's data by his token from monobank API."""
endpoint = f"{MONOBANK_API}/personal/client-info"
headers = {"X-Token": token}
http, postgres = pools["http"], pools["postgres"]
response, status = await http.get(url=endpoint, headers=headers)
if status != 200:
LOG.error("Couldn't retrieve user`s=%s data from monobank. Error: %s", telegram_id, response)
return
last_name, first_name = response.get("name", "").split(" ")
try:
await postgres.execute(
UPDATE_USER_NAME_AND_TOKEN,
telegram_id,
first_name,
last_name,
token
)
except exceptions.PostgresError as err:
LOG.error("Could not update user=%s name. Error: %s", telegram_id, err)
| 20,076
|
def email_coas():
"""
Email certificates of analysis to their recipients.
"""
# Get the certificate data.
# Email links (optional attachments) to the contacts.
raise NotImplementedError
| 20,077
|
def after_cv_imshow():
"""name
close all the show window if press 'esc'
set after cv2.imshow()
Args:
Returns:
"""
k = cv2.waitKey(0)
if k == 27:
cv2.destroyAllWindows()
| 20,078
|
def dropout(x, key, keep_rate):
"""Implement a dropout layer.
Arguments:
x: np array to be dropped out
key: random.PRNGKey for random bits
keep_rate: dropout rate
Returns:
np array of dropped out x
"""
# The shenanigans with np.where are to avoid having to re-jit if
# keep rate changes.
do_keep = random.bernoulli(key, keep_rate, x.shape)
kept_rates = np.where(do_keep, x / keep_rate, 0.0)
return np.where(keep_rate < 1.0, kept_rates, x)
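# Hedged usage sketch: assumes `np` above is jax.numpy and `random` is jax.random.
import jax.numpy as np
from jax import random

key = random.PRNGKey(0)
x = np.ones((2, 4))
y = dropout(x, key, 0.8)  # roughly 20% of entries zeroed, survivors scaled by 1/0.8
z = dropout(x, key, 1.0)  # keep_rate of 1.0 returns x unchanged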
| 20,079
|
def test_make_all_master_seals(m_single, m_multi, prom_edit):
""" Test the make_all_master_seals method """
prom_edit.make_all_master_seals()
m_single.assert_called_once_with(prom_edit._game_config["items"]["offsets"], 2)
m_multi.assert_called_once_with(prom_edit._game_config["items"]["offsets"], 2)
| 20,080
|
def band_spd_spin_polarized(
folder,
output='band_spd_sp.png',
scale_factor=2,
order=['s', 'p', 'd'],
color_dict=None,
legend=True,
linewidth=0.75,
band_color='black',
unprojected_band_color='gray',
unprojected_linewidth=0.6,
fontsize=7,
annotations=['$\\uparrow$ ', '$\\downarrow$ '],
annotation_xy=(0.02, 0.98),
figsize=(4, 3),
erange=[-6, 6],
stack='vertical',
hse=False,
kpath=None,
n=None,
save=True,
):
"""
This function generates a spin-polarized s, p, d projected band structure. It produces two plots,
stacked on top of each other or placed side by side. The top or left plot projects onto the
spin-up bands and the bottom or right plot projects onto the spin-down bands.
Parameters:
folder (str): This is the folder that contains the VASP files
output (str): File name of the resulting plot.
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
order (list): This determines the order in which the points are plotted on the
graph. This is an option because sometimes certain orbitals can be hidden
under others because they have a larger weight. For example, if the
weights of the d orbitals are greater than that of the s orbitals, it
might be smart to choose ['d', 'p', 's'] as the order so the s orbitals are
plotted over the d orbitals.
color_dict (dict[str][str]): This option allow the colors of the s, p, and d
orbitals to be specified. Should be in the form of:
{'s': <s color>, 'p': <p color>, 'd': <d color>}
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (str): Color of the plain band structure
unprojected_band_color (str): Color of the unprojected band
unprojected_linewidth (float): Line width of the unprojected bands
annotations (list): Annotations to put on the top and bottom (left and right) figures.
By default it will show the spin up and spin down arrows.
annotation_xy (list / tuple): Fractional (x, y) coordinated of the annotation location
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list / tuple): Range of energy to show in the plot [low, high]
stack (str): Determines how the plots are stacked (vertical or horizontal)
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
files for non-HSE calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are return for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing. (fig, ax1, ax2)
"""
band_up = Band(
folder=folder,
spin='up',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
band_down = Band(
folder=folder,
spin='down',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
if stack == 'vertical':
fig = plt.figure(figsize=(figsize[0], 2 * figsize[1]), dpi=400)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
elif stack == 'horizontal':
fig = plt.figure(figsize=(2 * figsize[0], figsize[1]), dpi=400)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
bbox = dict(boxstyle='round', fc='white',
edgecolor='gray', alpha=0.95, pad=0.3)
ax1.annotate(
annotations[0],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize,
)
ax2.annotate(
annotations[1],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize,
)
band_up.plot_spd(
ax=ax1,
scale_factor=scale_factor,
order=order,
color_dict=color_dict,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
band_down.plot_plain(
ax=ax1,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
band_down.plot_spd(
ax=ax2,
scale_factor=scale_factor,
order=order,
color_dict=color_dict,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
band_up.plot_plain(
ax=ax2,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
plt.tight_layout(pad=0.2)
if save:
plt.savefig(output)
else:
return fig, ax1, ax2
| 20,081
|
def initial_queries(bo):
"""
script which explores the initial query points of a BayesianOptimization
instance, reports errors to Slack
Input: instance of a BayesianOptimization
"""
# loop to try a second time in case of error
errcount = 0
for i in range(2):
try:
bo.maximize(init_points=3, n_iter=1, kappa=5) # would be just this line without errorhandling
except KeyboardInterrupt:
raise
except:
if errcount == 1:
text = "Exception occured twice in initialization, aborting!"
print(text)
sc.api_call("chat.postMessage",channel="CA26521FW",
text=text,username="Botty",
unfurl_links="true")
raise
errcount += 1
return bo
| 20,082
|
def _parse_start_test_log(start_test_log):
"""Parse start_test logfile and return results in python data structure.
:type start_test_log: str
:arg start_test_log: start_test log filename
:rtype: list of dicts
:returns: list of dicts; each dict contains info about a single test case
"""
logging.debug('Parsing start_test log: {0}'.format(start_test_log))
with codecs.open(start_test_log, 'r', encoding='utf-8', errors='ignore') as fp:
start_test_lines = fp.readlines()
logging.debug('Read {0} lines from "{1}".'.format(
len(start_test_lines), start_test_log))
test_cases = []
while len(start_test_lines) > 0:
subtest_start, subtest_end = _get_block(
start_test_lines, '[Starting subtest - ', '[Finished subtest ')
# No more sub_tests; delete the remaining lines and finish up.
if subtest_start == -1:
del start_test_lines[:]
continue
# Copy subtest lines into new list for further processing and delete
# them from start_test_lines.
sub_test_lines = start_test_lines[subtest_start:subtest_end+1:1]
del start_test_lines[:subtest_end+1]
while len(sub_test_lines) > 0:
test_start, test_end = _get_block(
sub_test_lines,
'[test: ',
'[Elapsed time to compile and execute all versions of "')
test_start_skip, test_end_skip = _get_block(
sub_test_lines,
'[test: ',
'[Skipping')
test_skipped = False
if test_end_skip != -1 and (test_end == -1 or test_end_skip < test_end):
test_start, test_end = test_start_skip, test_end_skip
test_skipped = True
noperf_start, noperf_end = _get_block(
sub_test_lines,
'[test: ',
'[Skipping noperf test:')
# If the test was skipped because it did not have performance
# configuration files, drop the lines and continue. We don't
# care about these for performance tests (as opposed to real
# perf tests that are skipped due to environment/etc).
if noperf_end != -1:
del sub_test_lines[noperf_start:noperf_end+1]
continue
# If test_end is still -1 (i.e. not found), look for end of subtest
# call (usually means the subtest failed and did not run any tests).
if test_start != -1 and test_end == -1:
test_start, test_end = _get_block(
sub_test_lines,
'[test: ',
'[Finished subtest "')
if test_end == -1:
raise ValueError('Failed to parse test case from: {0}'.format(
sub_test_lines))
# No more test cases; delete remaining lines and finish up.
if test_start == -1:
del sub_test_lines[:]
continue
# Copy test lines into new list for further processing and delete
# from sub_test_lines.
test_case_lines = sub_test_lines[test_start:test_end+1:1]
del sub_test_lines[:test_end+1]
# Extract test name from "[test: <path to .chpl file>]" line.
classname, test_name = _get_test_name(test_case_lines)
if test_skipped:
test_time = 0.0
error = None
else:
test_time = _get_test_time(test_case_lines)
error = _get_test_error(test_case_lines)
test_content = ''.join(test_case_lines)
test_case = {
'name': test_name,
'classname': classname,
'time': test_time,
'error': error,
'skipped': test_skipped,
'system-out': test_content,
}
test_cases.append(test_case)
logging.info('Parsed {0} test cases from "{1}".'.format(
len(test_cases), start_test_log))
return test_cases
| 20,083
|
def cell2AB(cell):
"""Computes orthogonalization matrix from unit cell constants
:param tuple cell: a,b,c, alpha, beta, gamma (degrees)
:returns: tuple of two 3x3 numpy arrays (A,B)
A for crystal(x) to Cartesian(X) transformations A*x = np.inner(A,x) =X
B (= inverse of A) for Cartesian to crystal transformation
B*X = np.inner(B,X) = x
in reciprocal space
X* = B.T @ x* or x @ B
A = |ax bx cx| B = |a*x a*y a*z|
|ay by cy| |b*x b*y b*z|
|az bz cz| |c*x c*y c*z|
"""
G, g = cell2Gmat(cell)
cellstar = Gmat2cell(G)
A = np.zeros(shape=(3, 3))
# from Giacovazzo (Fundamentals 2nd Ed.) p.75
A[0, 0] = cell[0] # a
A[0, 1] = cell[1] * cosd(cell[5]) # b cos(gamma)
A[0, 2] = cell[2] * cosd(cell[4]) # c cos(beta)
A[1, 1] = cell[1] * sind(cell[5]) # b sin(gamma)
# - c cos(alpha*) sin(beta)
A[1, 2] = -cell[2] * cosd(cellstar[3]) * sind(cell[4])
A[2, 2] = 1. / cellstar[2] # 1/c*
B = nl.inv(A)
return A, B
| 20,084
|
def test_fun_run() -> None:
"""Test running python function."""
cmd = p.python_funsie(
capitalize, inputs={"in": Encoding.blob}, outputs={"in": Encoding.blob}
)
inp = {"in": BytesIO(b"bla bla bla")}
out = p.run_python_funsie(cmd, inp)
assert out["in"] == b"BLA BLA BLA"
| 20,085
|
def deploy():
"""Push to GitHub pages"""
env.msg = git.Repo().active_branch.commit.message
clean()
preview()
local("ghp-import {deploy_path} -m \"{msg}\" -b {github_pages_branch}".format(**env))
local("git push origin {github_pages_branch}".format(**env))
| 20,086
|
def _parse_header(line: bytes) -> Tuple[HeaderLine, bytes]:
"""
Parse the header line of the received input.
:param line:
:return: a tuple of the parsed header and the remaining input that is not
part of the header.
"""
end_index = line.find(b"\r\n")
header, remaining = line[:end_index], line[end_index + 2 :]
del line
if len(header) < 2:
raise exceptions.HeaderParseError("header is too short")
# Determine the status category.
try:
category_value = int(chr(header[0]))
except ValueError:
raise exceptions.HeaderParseError(
f"status category '{chr(header[0])}' is not an integer"
)
try:
category = constants.Category(category_value)
except ValueError:
category = constants.Category.UNKNOWN
# Determine the status detail.
try:
detail_value = int(chr(header[1]))
except ValueError:
raise exceptions.HeaderParseError(
f"status detail '{chr(header[1])}' is not an integer"
)
detail = constants.CATEGORY_TO_DETAILS_MAP[category].get(
detail_value, constants.Detail.UNKNOWN
)
# Determine the meta line, which is the rest of the line.
meta = header[3:].decode()
# TODO: further parsing of the meta line.
return HeaderLine(category, category_value, detail, detail_value, meta), remaining
| 20,087
|
def sendMessage(qry):
"""
Handle message sending: refresh the cached friend list if the query requests an update, otherwise send the message.
:param qry: current query
:return: Status of Message sending.
"""
try: getUserName()
except: return _skypeError()
if(qry == "skype update"):
_writeFriends()
_getAvatars()
return len(_readFriends()).__str__()+" friends found and cached!"
else:
m = qry.partition(": ")
ret = skype("MESSAGE " + m[0]+" "+m[2])
if("SENDING" in ret):
return "Message sent to "+m[0]
else:
return "ERROR sending message to: "+m[0]
| 20,088
|
def scrape_detail_page(response):
"""
get detail page info as dict type
"""
root = lxml.html.fromstring(response.content)
ebook = {
'url': response.url,
'title': root.cssselect('#bookTitle')[0].text_content(),
'price': root.cssselect('.buy')[0].text,
'content': [h3.text_content() for h3 in root.cssselect('#content > h3')],
}
return ebook
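# Hedged usage sketch: assumes `requests` is available and that the page at the
# hypothetical URL exposes the selectors used above (#bookTitle, .buy, #content > h3).
import requests

response = requests.get("https://example.com/ebooks/12345")  # hypothetical URL
ebook = scrape_detail_page(response)
print(ebook["title"], ebook["price"])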
| 20,089
|
def reload_from_numpy(device, metadata, reload_dir):
"""Reload the output of voice conversion model."""
conv_mels = []
for pair in tqdm(metadata["pairs"]):
file_path = Path(reload_dir) / pair["mel_path"]
conv_mel = torch.load(file_path)
conv_mels.append(conv_mel.to(device))
return metadata, conv_mels
| 20,090
|
def jaccard_similarity_coefficient(A, B, no_positives=1.0):
"""Returns the jaccard index/similarity coefficient between A and B.
This should work for arrays of any dimensions.
J = len(intersection(A,B)) / len(union(A,B))
To extend to probabilistic input, to compute the intersection, use the min(A,B).
To compute the union, use max(A,B).
Assumes that a value of 1 indicates the positive values.
A value of 0 indicates the negative values.
If no positive values (1) in either A or B, then returns no_positives.
"""
# Make sure the shapes are the same.
if not A.shape == B.shape:
raise ValueError("A and B must be the same shape")
# Make sure values are between 0 and 1.
if np.any( (A>1.) | (A<0) | (B>1.) | (B<0)):
raise ValueError("A and B must be between 0 and 1")
# Flatten to handle nd arrays.
A = A.flatten()
B = B.flatten()
intersect = np.minimum(A,B)
union = np.maximum(A, B)
# Special case if neither A or B have a 1 value.
if union.sum() == 0:
return no_positives
# Compute the Jaccard.
J = float(intersect.sum()) / union.sum()
return J
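# Quick numerical check of the Jaccard coefficient above on small binary masks.
import numpy as np

A = np.array([[1, 1, 0], [0, 1, 0]])
B = np.array([[1, 0, 0], [0, 1, 1]])
print(jaccard_similarity_coefficient(A, B))          # intersection 2 / union 4 = 0.5
print(jaccard_similarity_coefficient(A * 0, B * 0))  # no positives -> no_positives (1.0)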
| 20,091
|
def get_argument_from_call(call_node: astroid.Call,
position: int = None,
keyword: str = None) -> astroid.Name:
"""Returns the specified argument from a function call.
:param astroid.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError('Must specify at least one of: position or keyword.')
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
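# Hedged usage sketch: `astroid.extract_node` parses a snippet and returns the Call node.
import astroid

call = astroid.extract_node("do_something(1, 2, flag=True)")
print(get_argument_from_call(call, position=0).value)      # 1
print(get_argument_from_call(call, keyword="flag").value)  # True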
| 20,092
|
def snake_string(ls):
"""
Question 7.11: Write a string sinusoidally
"""
result = []
strlen = len(ls)
for idx in xrange(1, strlen, 4):
result.append(ls[idx])
for idx in xrange(0, strlen, 2):
result.append(ls[idx])
for idx in xrange(3, strlen, 4):
result.append(ls[idx])
return ''.join(result)
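# Worked example for the sinusoidal string above (Python 2 code, hence xrange):
# row 1 holds indices 1, 5, 9, ..., row 0 holds 0, 2, 4, ..., row -1 holds 3, 7, 11, ...
print(snake_string("Hello_World!"))  # e_lHloWrdlo!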
| 20,093
|
def _prepare_memoization_key(args, kwargs):
"""
Make a tuple of arguments which can be used as a key
for a memoized function's lookup_table. If some object can't be hashed
then used its __repr__ instead.
"""
key_list = []
for arg in args:
try:
hash(arg)
key_list.append(arg)
except:
key_list.append(repr(arg))
for (k, v) in kwargs.items():
try:
hash(k)
hash(v)
key_list.append((k, v))
except:
key_list.append((repr(k), repr(v)))
return tuple(key_list)
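# Minimal sketch of a memoization decorator built on the key helper above
# (an illustration, not necessarily how the original module wires it up).
from functools import wraps

def memoized(func):
    lookup_table = {}
    @wraps(func)
    def wrapper(*args, **kwargs):
        key = _prepare_memoization_key(args, kwargs)
        if key not in lookup_table:
            lookup_table[key] = func(*args, **kwargs)
        return lookup_table[key]
    return wrapper

@memoized
def slow_square(x):
    return x * x

slow_square(4)  # computed once
slow_square(4)  # served from lookup_table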
| 20,094
|
def processing_other_notification(
notification:Notification,
api:Mastodon
) -> None:
"""
Обработка уведомления в остальных случаях
:param notification: Объект с информацией уведомления
:type notification: Notification
:param api: API обращения к Mastodon
:type api: Mastodon
"""
# if any of the bot admins are among the mentioned accounts,
# skip further processing
if set(BOT_ADMINS) & notification.mentioned_accounts:
# the admins are already mentioned - no point in forwarding the message
log.info(
f"User {notification.sender}"
" is already in admins list"
)
return
response_sent = api.status_post(
f'@{notification.sender} {BOT_MESSAGE}',
in_reply_to_id=notification.status_id,
visibility='direct',
language=BOT_LANG,
)
# if the post visibility is not public and at least one admin is
# configured, forward the message to all admins and the sender
if (
(notification.visibility != 'public')
and
(len(BOT_ADMINS) > 0)
):
send_to_admins(notification, api, response_sent)
| 20,095
|
def kl_divergence_with_logits(p_logits = None,
q_logits = None,
temperature = 1.):
"""Compute the KL between two categorical distributions from their logits.
Args:
p_logits: [..., dim] array with logits for the first distribution.
q_logits: [..., dim] array with logits for the second distribution.
temperature: the temperature for the softmax distribution, defaults at 1.
Returns:
an array of KL divergence terms taken over the last axis.
"""
chex.assert_type([p_logits, q_logits], float)
chex.assert_equal_shape([p_logits, q_logits])
p_logits /= temperature
q_logits /= temperature
p = jax.nn.softmax(p_logits)
log_p = jax.nn.log_softmax(p_logits)
log_q = jax.nn.log_softmax(q_logits)
kl = jnp.sum(p * (log_p - log_q), axis=-1)
## KL divergence should be positive, this helps with numerical stability
loss = jax.nn.relu(kl)
return loss
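# Hedged sanity check for the KL helper above: the KL of a distribution with itself is ~0.
import jax.numpy as jnp

p_logits = jnp.array([[2.0, 0.5, -1.0]])
q_logits = jnp.array([[1.0, 1.0, 1.0]])
print(kl_divergence_with_logits(p_logits, p_logits))  # ~[0.]
print(kl_divergence_with_logits(p_logits, q_logits))  # one positive KL value per row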
| 20,096
|
def show_slices(
data3d,
contour=None,
seeds=None,
axis=0,
slice_step=None,
shape=None,
show=True,
flipH=False,
flipV=False,
first_slice_offset=0,
first_slice_offset_to_see_seed_with_label=None,
slice_number=None,
kwargs_contour=None,
):
"""
Show slices as tiled image
:param data3d: Input data
:param contour: Data for contouring
:param seeds: Seed data
:param axis: Axis for slicing
:param slice_step: Show each "slice_step"-th slice, can be float
:param shape: tuple(vertical_tiles_number, horizontal_tiles_number), set shape of output tiled image. slice_step is
estimated if it is not set explicitly
:param first_slice_offset: set offset of first slice
:param first_slice_offset_to_see_seed_with_label: find offset to see slice with seed with defined label
:param slice_number: int, Number of slices shown. Overrides shape and slice_step.
:param kwargs_contour: (default: cmap='hsv', levels: calculated from contour)
"""
kwargs_contour = kwargs_contour if kwargs_contour else dict(
cmap='hsv',
linewidths=2,
levels=None, # good for plt.contours
# levels=np.unique(contour) # good for ax.contours
)
# print(kwargs_contour)
if slice_number is not None:
slice_step = data3d.shape[axis] / slice_number
# estimate slice_step if it is not given explicitly
# TODO make precise estimation (use np.linspace to indexing?)
if slice_step is None:
if shape is None:
slice_step = 1
else:
slice_step = (data3d.shape[axis] - first_slice_offset) / float(
np.prod(shape)
)
if first_slice_offset_to_see_seed_with_label is not None:
if seeds is not None:
inds = np.nonzero(seeds == first_slice_offset_to_see_seed_with_label)
# print(inds)
# take first one with defined seed
# ind = inds[axis][0]
# take most used index
ind = np.median(inds[axis])
first_slice_offset = ind % slice_step
data3d = _import_data(
data3d, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset
)
contour = _import_data(
contour, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset
)
seeds = _import_data(
seeds, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset
)
number_of_slices = data3d.shape[axis]
# square image
# nn = int(math.ceil(number_of_slices ** 0.5))
# sh = [nn, nn]
# 4:3 image
meta_shape = shape
if meta_shape is None:
na = int(math.ceil(number_of_slices * 16.0 / 9.0) ** 0.5)
nb = int(math.ceil(float(number_of_slices) / na))
meta_shape = [nb, na]
dsh = __get_slice(data3d, 0, axis).shape
slimsh = [int(dsh[0] * meta_shape[0]), int(dsh[1] * meta_shape[1])]
slim = np.zeros(slimsh, dtype=data3d.dtype)
slco = None
slse = None
if seeds is not None:
slse = np.zeros(slimsh, dtype=seeds.dtype)
if contour is not None:
slco = np.zeros(slimsh, dtype=contour.dtype)
# slse =
# f, axarr = plt.subplots(sh[0], sh[1])
for i in range(0, number_of_slices):
cont = None
seeds2d = None
im2d = __get_slice(data3d, i, axis, flipH=flipH, flipV=flipV)
if contour is not None:
cont = __get_slice(contour, i, axis, flipH=flipH, flipV=flipV)
slco = __put_slice_in_slim(slco, cont, meta_shape, i)
if seeds is not None:
seeds2d = __get_slice(seeds, i, axis, flipH=flipH, flipV=flipV)
slse = __put_slice_in_slim(slse, seeds2d, meta_shape, i)
# plt.axis('off')
# plt.subplot(sh[0], sh[1], i+1)
# plt.subplots_adjust(wspace=0, hspace=0)
slim = __put_slice_in_slim(slim, im2d, meta_shape, i)
# show_slice(im2d, cont, seeds2d)
show_slice(slim, slco, slse, kwargs_contour=kwargs_contour)
if show:
plt.show()
| 20,097
|
def convert_bosch_datetime(dt: Any = None) -> datetime:
"""Create a datetime object from the string (or give back the datetime object) from Bosch. Checks if a valid number of milliseconds is sent."""
if dt:
if isinstance(dt, str):
if dt.find(".") > 0:
return datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f%z")
return datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S%z")
if isinstance(dt, datetime):
return dt
return None
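# Illustrative calls for the helper above (timestamps are made up); relies on the
# `datetime` import already used by the function.
print(convert_bosch_datetime("2021-03-01T12:30:00+01:00"))      # parsed without milliseconds
print(convert_bosch_datetime("2021-03-01T12:30:00.123+01:00"))  # parsed with milliseconds
print(convert_bosch_datetime(None))                              # -> None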
| 20,098
|
def day_log_add_id(day_log):
"""
その日のログにID(day_id)を割り振る
:param day_log:
:return:
"""
for v in range(len(day_log)):
day_log[v]['day_id'] = v + 1
return day_log
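# Tiny illustration of the helper above.
log = [{"event": "wake"}, {"event": "work"}, {"event": "sleep"}]
print(day_log_add_id(log))
# [{'event': 'wake', 'day_id': 1}, {'event': 'work', 'day_id': 2}, {'event': 'sleep', 'day_id': 3}]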
| 20,099
|