content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_unique_filepath(stem):
    """NOT thread-safe!

    Return `stem` itself, or `stem<N>` where N is the smallest positive
    integer for which the path does not exist. Useful for temp dirs where
    the client code wants an obvious ordering.
    """
    if not os.path.exists(stem):
        return stem
    # stem is taken: probe stem1, stem2, ... until a free name is found.
    n = 1
    candidate = stem + str(n)
    while os.path.exists(candidate):
        n += 1
        candidate = stem + str(n)
    return candidate
def write_h5_header(hf, run_params, model):
    """Write header information about the run into HDF5 attributes.

    Each entry is JSON-serialized when possible; values JSON cannot handle
    fall back to pickle, and anything else is stored as a placeholder string
    so the attribute still exists.

    :param hf: an open, writable h5py file (or group) object.
    :param run_params: dict of run parameters; must be valid keyword
        arguments for ``paramfile_string``.
    :param model: object exposing ``config_list`` (a list of dicts).
    """
    import pickle  # local import: only needed for the JSON fallback path

    serialize = {'run_params': run_params,
                 'model_params': [functions_to_names(p.copy()) for p in model.config_list],
                 'paramfile_text': paramfile_string(**run_params)}
    for k, v in serialize.items():
        try:
            hf.attrs[k] = json.dumps(v)  # , cls=NumpyEncoder)
        except TypeError:
            # JSON can't handle this value; fall back to pickle.
            # (Original code called the undefined name `pick` here.)
            hf.attrs[k] = pickle.dumps(v)
            print("Could not JSON serialize {}, pickled instead".format(k))
        except Exception:
            # Last resort: store a marker so the key is still recorded.
            # (Original code referenced the undefined name `unserial` here.)
            hf.attrs[k] = "Unserializable"
            print("Could not serialize {}".format(k))
    hf.flush()
def optimize_model(input,
                   model_type='bert',
                   num_heads=0,
                   hidden_size=0,
                   optimization_options=None,
                   opt_level=None,
                   use_gpu=False,
                   only_onnxruntime=False):
    """ Optimize Model by OnnxRuntime and/or python fusion logic.

    ONNX Runtime has graph optimizations (https://onnxruntime.ai/docs/resources/graph-optimizations.html).
    However, the coverage is limited. We also have graph fusions that implemented in Python to improve the coverage.
    They can combined: ONNX Runtime will run first when opt_level > 0, then graph fusions in Python will be applied.

    To use ONNX Runtime only and no Python fusion logic, use only_onnxruntime flag and a positive opt_level like
        optimize_model(input, opt_level=1, use_gpu=False, only_onnxruntime=True)

    When opt_level is None, we will choose default optimization level according to model type.
    When opt_level is 0 and only_onnxruntime is False, only python fusion logic is used and onnxruntime is disabled.
    When opt_level > 1, use_gpu shall set properly since the optimized graph might contain operators for GPU or CPU only.

    If your model is intended for GPU inference only (especially float16 or mixed precision model), it is recommended to
    set use_gpu to be True, otherwise the model is not optimized for GPU inference.

    For BERT model, num_heads and hidden_size are optional. For other model types, you need specify these parameters.

    Args:
        input (str): input model path.
        model_type (str, optional): model type - like bert, bert_tf, bert_keras or gpt2. Defaults to 'bert'.
        num_heads (int, optional): number of attention heads. Defaults to 0.
            0 allows detect the parameter from graph automatically (for model_type "bert" only).
        hidden_size (int, optional): hidden size. Defaults to 0.
            0 allows detect the parameter from graph automatically (for model_type "bert" only).
        optimization_options (FusionOptions, optional): optimization options that turn on/off some fusions. Defaults to None.
        opt_level (int, optional): onnxruntime graph optimization level (0, 1, 2 or 99) or None. Defaults to None.
            When the value is None, default value (1 for bert and gpt2, 0 for other model types) will be used.
            When the level > 0, onnxruntime will be used to optimize model first.
        use_gpu (bool, optional): use gpu or not for onnxruntime. Defaults to False.
        only_onnxruntime (bool, optional): only use onnxruntime to optimize model, and no python fusion. Defaults to False.

    Returns:
        object of an optimizer class.
    """
    assert opt_level is None or opt_level in [0, 1, 2, 99]

    if model_type != "bert" and (num_heads == 0 or hidden_size == 0):
        logger.warning("Please specify parameters of num_heads and hidden_size when model_type is not 'bert'")

    # MODEL_TYPES maps model_type -> (optimizer class, expected producer name, default ORT opt level).
    (optimizer_class, producer, default_opt_level) = MODEL_TYPES[model_type]

    if opt_level is None:
        opt_level = default_opt_level

    temp_model_path = None
    if opt_level > 1:
        # Levels above 1 may introduce provider-specific nodes, so honor use_gpu.
        temp_model_path = optimize_by_onnxruntime(input, use_gpu=use_gpu, opt_level=opt_level)
    elif opt_level == 1:
        # basic optimizations (like constant folding and cast elimination) are not specific to execution provider.
        # CPU provider is used here so that there is no extra node for GPU memory copy.
        temp_model_path = optimize_by_onnxruntime(input, use_gpu=False, opt_level=1)

    if only_onnxruntime and not temp_model_path:
        logger.warning("Please specify a positive value for opt_level when only_onnxruntime is True")

    # Load either the ORT-optimized temp model or the original input model.
    model = load_model(temp_model_path or input, format=None, load_external_data=True)

    if model.producer_name and producer != model.producer_name:
        logger.warning(
            f"Model producer not matched: Expect {producer}, Got {model.producer_name} {model.producer_version}. Please specify correct --model_type parameter."
        )

    if optimization_options is None:
        optimization_options = FusionOptions(model_type)

    optimizer = optimizer_class(model, num_heads, hidden_size)

    if not only_onnxruntime:
        optimizer.optimize(optimization_options)

    # Remove the temporary model.
    if temp_model_path:
        os.remove(temp_model_path)
        logger.debug("Remove tempoary model: {}".format(temp_model_path))

    # Stamp the output so downstream tools can recognize the optimizing producer.
    optimizer.model.producer_name = "onnxruntime.transformers"
    from onnxruntime import __version__ as onnxruntime_version
    optimizer.model.producer_version = onnxruntime_version

    return optimizer
def transect_rotate(adcp_transect, rotation, xy_line=None):
    """
    Rotates ADCPTransectData U and V velocities.

    (The original carried a second, unrelated leading docstring copied from
    a distance-calculation helper; it has been removed.)

    Inputs:
        adcp_transect = ADCPTransectData object
        rotation = one of:
            None - no rotation of averaged velocity profiles
            'normal' - rotation based upon the normal to the plotline (default rotation type)
            'principal flow' - uses the 1st principal component of variability in uv flow direction
            'Rozovski' - individual rotation of each vertical velocity to maximize U
            'no transverse flow' - rotation by the net flow vector is used to minimize V
            any number - used directly as the rotation angle theta
        xy_line = numpy array of line defined by 2 points: [[x1,y1],[x2,y2]], or None
    Returns:
        adcp_transect = ADCPTransectData object with rotated uv velocities
    Raises:
        Exception if a 'normal' rotation is requested without xy data, or if
        the rotation string is not recognized.
    """
    if rotation == "normal":
        # find angle of line:
        if xy_line is None:
            if adcp_transect.xy is None:
                # Fixed: original used Python-2 `raise Exception, "..."` syntax.
                raise Exception("transect_rotate() error: ADCPData must be xy projected, or input xy_line must be supplied for normal rotation")
            xy_line = adcpy.util.map_xy_to_line(adcp_transect.xy)
        theta = adcpy.util.calc_normal_rotation(xy_line)
    elif rotation == "no transverse flow":
        flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True)
        theta = adcpy.util.calc_net_flow_rotation(flows[:, 0], flows[:, 1])
    elif rotation == "Rozovski":
        flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True)
        theta = adcpy.util.calc_Rozovski_rotation(flows[:, 0], flows[:, 1])
    elif rotation == "principal flow":
        flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True)
        theta = adcpy.util.principal_axis(flows[:, 0], flows[:, 1], calc_type='EOF')
    elif isinstance(rotation, str):
        # Any other string is an error; fixed Python-2 raise syntax here too.
        raise Exception("In transect_rotate(): input 'rotation' string not understood: %s" % rotation)
    else:
        # Numeric rotation: use the value directly as the angle.
        theta = rotation
    adcp_transect.set_rotation(theta, 'uv')
    return adcp_transect
def F(x, t, *args, **kwds):
    """
    Inverse-square central-force acceleration: F(x) = -mu * x / |x|**3.

    `t`, `*args` are accepted for ODE-solver compatibility but unused;
    keyword 'μ' (default 1) sets the force constant.
    """
    mu = kwds.get('μ', 1)
    r_squared = np.sum(x ** 2)
    return -mu * x / r_squared ** 1.5
def test_granule_id(granule: Granule, expected_result: str):
    """Check that the granule id is correct.

    Parametrized test: presumably `granule`/`expected_result` pairs are
    supplied by a pytest fixture or parametrize decorator elsewhere —
    TODO confirm at the call site.
    """
    assert granule.granule_id == expected_result
def get_capacity_potential_per_country(countries: List[str], is_onshore: bool, filters: Dict,
                                       power_density: float, processes: int = None):
    """
    Return capacity potentials (GW) in a series of countries.

    Parameters
    ----------
    countries: List[str]
        List of ISO codes.
    is_onshore: bool
        Whether the technology is onshore located.
    filters: Dict
        Dictionary containing a set of values describing the filters to apply to obtain land availability.
    power_density: float
        Power density in MW/km2
    processes: int (default: None)
        Number of parallel processes

    Returns
    -------
    pd.Series
        Series containing the capacity potentials (GW) for each code.
    """
    # Pick the matching shape set for the technology's siting.
    which = 'onshore' if is_onshore else 'offshore'
    shapes = get_shapes(countries, which=which, save=True)["geometry"]
    land_availability = get_land_availability_for_shapes(shapes, filters, processes)
    # availability (km2) * density (MW/km2) / 1e3 -> GW
    return pd.Series(land_availability*power_density/1e3, index=shapes.index)
def all_statements(tree: ast.AST) -> Set[ast.stmt]:
    """
    Collect every ast.stmt node reachable from *tree* into a set.
    """
    statements = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.stmt):
            statements.add(node)
    return statements
def calc_predicted_points_for_pos(
    pos, gw_range, team_model, player_model, season, tag, session
):
    """
    Calculate points predictions for all players in a given position and
    put into the DB.

    :param pos: position code (e.g. "GK"); goalkeepers get no fitted
        attacking model.
    :param gw_range: gameweeks to predict over.
    :param team_model, player_model: fitted models passed through to the
        per-player prediction.
    :param season, tag, session: season identifier, prediction tag, and DB
        session used for lookups/writes.
    :return: dict mapping player_id -> prediction result.
    """
    predictions = {}
    df_player = None
    if pos != "GK":  # don't calculate attacking points for keepers.
        df_player = get_fitted_player_model(player_model, pos, season, session)
    for player in list_players(position=pos, dbsession=session):
        predictions[player.player_id] = calc_predicted_points(
            player, team_model, df_player, season, tag, session, gw_range
        )
    return predictions
def test_cache_catches_nothing(client):
    """
    GIVEN a cache of last found secrets same as config ignored-matches
    WHEN I run a scan (therefore finding no secret)
    THEN config matches is unchanged and cache is empty
    """
    # Commit whose patch contains secrets that are all already ignored.
    c = Commit()
    c._patch = _MULTIPLE_SECRETS
    config = Config()
    config.matches_ignore = FOUND_SECRETS
    cache = Cache()
    cache.last_found_secrets = FOUND_SECRETS
    # Replay the recorded API interaction so the test is hermetic.
    with my_vcr.use_cassette("multiple_secrets"):
        results = c.scan(
            client=client,
            cache=cache,
            matches_ignore=config.matches_ignore,
            all_policies=True,
            verbose=False,
        )
        # All secrets were ignored, so nothing is reported and the cache resets.
        assert results == []
        assert config.matches_ignore == FOUND_SECRETS
        assert cache.last_found_secrets == set()
def resmgr_cli():
    """
    Managing processor resources.

    CLI group entry point: presumably decorated as a click group elsewhere —
    only side effect here is initializing logging before subcommands run.
    """
    initLogging()
def image_preprocess2(img):
    """
    image preprocess version 2
    using: yellow threshold, white threshold, sobelX, sobelY, ROI

    Parameters
    ----------
    img: image (np.array())

    Return
    ----------
    binary image (same height/width as `img`) combining color and gradient
    masks, restricted to the region of interest
    """
    # set white and yellow threshold
    white = rgb_threshold(img, r_threshold=(200, 255), g_threshold=(200, 255), b_threshold=(200, 255))
    yellow = hsv_threshold(img, h_threshold=(20, 34), s_threshold=(43, 255), v_threshold=(46, 255))
    # set sobelX and sobelY threshold
    gradx = abs_sobel_thresh(img, orient='x', thresh=(35, 120))
    grady = abs_sobel_thresh(img, orient='y', thresh=(30, 120))
    # filtrate the image using color information (white lane -- RGB space) (yellow lane -- HSV space)
    # and gradient information (SobelX, SobelY)
    combined = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
    combined[(white == 1) | (yellow == 1) | ((gradx == 1) & (grady == 1))] = 1
    # region of interest: trapezoid from the bottom corners up to mid-height
    height = img.shape[0]
    width = img.shape[1]
    vertices = np.array([[(0, height), (width, height), (width * 0.6, height * 0.5), (width * 0.4, height * 0.5)]],
                        dtype=np.int32)
    # BUG FIX: apply the ROI to the combined binary mask — the original
    # passed the raw `img`, discarding all the thresholding work above.
    roi_image = region_of_interest(combined, vertices)
    return roi_image
def get_all_feeds(cb: CbThreatHunterAPI, include_public=True) -> list:
    """Retrieve all feeds owned by the caller.

    Provide include_public=true parameter to also include public community feeds.

    Returns the list stored under the response's "results" key (empty list
    when absent) — note the original annotated this as Dict, but a list is
    what is actually returned.
    """
    url = f"/threathunter/feedmgr/v2/orgs/{cb.credentials.org_key}/feeds"
    params = {"include_public": include_public}
    result = cb.get_object(url, query_parameters=params)
    return result.get("results", [])
def spkltc(
    targ: int, et: float, ref: str, abcorr: str, stobs: ndarray
) -> Tuple[ndarray, float, float]:
    """
    Return the state (position and velocity) of a target body
    relative to an observer, optionally corrected for light time,
    expressed relative to an inertial reference frame.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkltc_c.html

    :param targ: Target body.
    :param et: Observer epoch.
    :param ref: Inertial reference frame of output state.
    :param abcorr: Aberration correction flag.
    :param stobs: State of the observer relative to the SSB (length-6 vector).
    :return:
        State of the target (6-vector: position and velocity),
        One way light time between observer and target,
        Derivative of light time with respect to time
    """
    assert len(stobs) == 6
    # Convert Python values to the ctypes forms the CSPICE C API expects.
    targ = stypes.c_int(targ)
    et = ctypes.c_double(et)
    ref = stypes.string_to_char_p(ref)
    abcorr = stypes.string_to_char_p(abcorr)
    stobs = stypes.to_double_vector(stobs)
    # Output buffers filled by the C call.
    starg = stypes.empty_double_vector(6)
    lt = ctypes.c_double()
    dlt = ctypes.c_double()
    libspice.spkltc_c(
        targ, et, ref, abcorr, stobs, starg, ctypes.byref(lt), ctypes.byref(dlt)
    )
    return stypes.c_vector_to_python(starg), lt.value, dlt.value
def test_calculate_angle_90():
    """Test that calculate_angle returns pi/2 for a right angle."""
    # Three points forming a 90-degree angle at the origin.
    r1 = np.array([1, 0, 0])
    r2 = np.array([0, 0, 0])
    r3 = np.array([0, 1, 0])
    expected_theta = (math.pi) / 2
    calculated_theta = geometry_analysis.calculate_angle(r1, r2, r3)
    # NOTE(review): exact float equality — fragile if calculate_angle's
    # arithmetic changes; consider math.isclose / pytest.approx.
    assert expected_theta == calculated_theta
def mode(bot, trigger):
    """Set a user mode on Sopel. Can only be done in privmsg by an admin.

    :param bot: Sopel bot instance (provides `nick`, `reply`, `write`).
    :param trigger: the triggering message; group(3) holds the mode string.
    """
    mode = trigger.group(3)
    if not mode:
        bot.reply('What mode should I set?')
        # BUG FIX: bail out — the original fell through and sent
        # "MODE <nick> None" to the server.
        return
    bot.write(('MODE', bot.nick, mode))
def remove_outlier_from_time_average(df, time=4, multiplier=3):
    """
    Remove outliers when averaging transients before performing the fitting routines, used to improve the signal to noise ratio in low biomass systems.
    The function sets a time window to average over, using upper and lower limits for outlier detection.
    The upper and lower limits are determined by mean ± std * [1].
    The multiplier [1] can be adjusted by the user.

    Parameters
    ----------
    df : pandas.DataFrame
        A dataframe of the raw data, can either be imported from pandas.read_csv or the output from phyto_photo_utils.load.
        Must contain 'datetime' and 'flashlet_number' columns and a 'flevel' column (fluorescence yield).
    time : int, default=4
        The time window to average over, e.g. 4 = 4 minute averages
    multiplier : int, default=3
        The multiplier to apply to the standard deviation for determining the upper and lower limits.

    Returns
    -------
    df : pandas.DataFrame
        A dataframe of the time averaged data with outliers excluded.

    Example
    -------
    >>> ppu.remove_outlier_from_time_average(df, time=2, multiplier=3)
    """
    # Convert time window to a pandas offset string, e.g. 4 -> '4T' (minutes)
    dt = str(time)+'T'
    # Convert dtype of the datetime column
    # NOTE(review): bare astype('datetime64') is deprecated in newer pandas;
    # confirm against the pandas version this is pinned to.
    df['datetime'] = df.datetime.astype('datetime64')
    # Group data by time window and flashlet number
    grp = df.groupby([Grouper(key='datetime', freq=dt), 'flashlet_number'])
    # Calculate means, standard deviations and counts of the groups
    mean = grp.mean()
    std = grp.std()
    c = grp.count()
    # Calculate upper and lower limits of each group, and repeat each value by its count
    # so limits line up one-to-one with the original rows of each group.
    ulim = repeat((mean.flevel.values + std.flevel.values * multiplier), c.flevel.values)
    llim = repeat((mean.flevel.values - std.flevel.values * multiplier), c.flevel.values)
    # Get indexes of data used to create each group
    idx = []
    for i, items in enumerate(grp.indices.items()):
        idx.append(items[-1])
    idx = concatenate(idx, axis=0)
    # Create pandas DataFrame of upper and lower using original indexes of data
    mask = DataFrame([ulim, llim, idx]).T
    mask.columns = ['ulim','llim','index']
    mask = mask.set_index('index').sort_index()
    # Create boolean array flagging rows outside the per-group limits
    m = (df.flevel.values > mask.ulim) | (df.flevel.values < mask.llim)
    # Where condition is True, set values of fluorescence yield to NaN
    # (NaNs are then skipped by the group-wise mean below)
    df.loc[m.values,'flevel'] = nan
    # Group data that is now corrected
    df = df.groupby([Grouper(key='datetime', freq=dt), 'flashlet_number']).mean().reset_index()
    # Return number of measurements that is used to create each average
    df['nseq'] = c.flevel.values
    return df
def main():
    """Start up NagCat, profiling things as requested.

    Command-line flags select whether initialization and/or the reactor run
    are executed under cProfile; results are either dumped to a file or
    printed as a top-40 time/cumulative report.
    """
    options = parse_options()
    if options.profile_init or options.profile_run:
        # Lazy import: only pay for the profiler when profiling is requested.
        import cProfile
        profiler = cProfile.Profile()
    if options.profile_init:
        profiler.runcall(init, options)
    else:
        init(options)
    if options.profile_run:
        profiler.runcall(reactor.run)
    else:
        # Twisted reactor blocks here until shutdown.
        reactor.run()
    if options.profile_init or options.profile_run:
        if options.profile_dump:
            log.info("Dumping profiler data to %s" % options.profile_dump)
            profiler.dump_stats(options.profile_dump)
        else:
            log.info("Generating profiler stats...")
            import pstats
            stats = pstats.Stats(profiler)
            stats.strip_dirs()
            stats.sort_stats('time', 'cumulative')
            stats.print_stats(40)
def on_backtest_finished(context, indicator):
    """
    Summarize a finished backtest: round the indicators, export an Excel
    report, push a text summary via WeChat, and print per-symbol buy/sell
    details from the cache directory.

    :param context: backtest context; must provide `logger`, `account()`,
        `symbols_map`, `backtest_start_time`/`backtest_end_time`,
        `data_path`, `cache_path`, `symbols` and `wx_key`.
    :param indicator: dict of performance indicators, see
        https://www.myquant.cn/docs/python/python_object_trade#bd7f5adf22081af5
    :return: None
    """
    logger = context.logger
    logger.info(str(indicator))
    logger.info("回测结束 ... ")
    cash = context.account().cash
    # Round float indicators for a readable report.
    for k, v in indicator.items():
        if isinstance(v, float):
            indicator[k] = round(v, 4)
    row = OrderedDict({
        "研究标的": ", ".join(list(context.symbols_map.keys())),
        "回测开始时间": context.backtest_start_time,
        "回测结束时间": context.backtest_end_time,
        "累计收益率": indicator['pnl_ratio'],
        "最大回撤": indicator['max_drawdown'],
        "年化收益率": indicator['pnl_ratio_annual'],
        "夏普比率": indicator['sharp_ratio'],
        "盈利次数": indicator['win_count'],
        "亏损次数": indicator['lose_count'],
        "交易胜率": indicator['win_ratio'],
        "累计出入金": int(cash['cum_inout']),
        "累计交易额": int(cash['cum_trade']),
        "累计手续费": int(cash['cum_commission']),
        "累计平仓收益": int(cash['cum_pnl']),
        "净收益": int(cash['pnl']),
    })
    df = pd.DataFrame([row])
    df.to_excel(os.path.join(context.data_path, "回测结果.xlsx"), index=False)
    logger.info("回测结果:{}".format(row))
    content = ""
    for k, v in row.items():
        content += "{}: {}\n".format(k, v)
    push_text(content=content, key=context.wx_key)
    for symbol in context.symbols:
        # Inspect buy/sell details for each symbol.
        file_bs = os.path.join(context.cache_path, "{}_bs.txt".format(symbol))
        if os.path.exists(file_bs):
            # FIX: close the file deterministically (the original leaked the handle).
            with open(file_bs, 'r', encoding="utf-8") as f:
                # NOTE(review): eval() on file contents — assumes the cache
                # file is trusted, locally-written data; never point this at
                # untrusted input.
                lines = [eval(x) for x in f.read().strip().split("\n")]
            df = pd.DataFrame(lines)
            print(symbol, "\n", df.desc.value_counts())
            print(df)
def parse_tb_file(path, module):
    """
    Parse a translation block coverage file generated by S2E's
    ``TranslationBlockCoverage`` plugin.

    Returns the coverage data for ``module``, or ``None`` when the file is
    unparsable, empty, or does not mention the module.
    """
    with open(path, 'r') as handle:
        try:
            coverage = json.load(handle)
        except Exception:
            logger.warning('Failed to parse translation block JSON file %s',
                           path)
            return None
    if not coverage:
        logger.warning('Translation block JSON file %s is empty', path)
        return None
    if module in coverage:
        return coverage[module]
    logger.warning('Target %s not found in translation block JSON file %s',
                   module, path)
    return None
def load(filename):
    """ Load nifti2 single or pair from `filename`

    Parameters
    ----------
    filename : str
        filename of image to be loaded

    Returns
    -------
    img : Nifti2Image or Nifti2Pair
        nifti2 single or pair image instance

    Raises
    ------
    ImageFileError: if `filename` doesn't look like nifti2
    IOError : if `filename` does not exist
    """
    # EAFP: try the single-file format first, fall back to the pair format.
    # A second ImageFileError from Nifti2Pair.load propagates to the caller.
    try:
        img = Nifti2Image.load(filename)
    except ImageFileError:
        return Nifti2Pair.load(filename)
    return img
def run_sklearn(args, out_dir, out_flp, ldrs):
    """
    Trains an sklearn model according to the supplied parameters. Returns the
    test error (lower is better).

    :param args: dict of run configuration (model name, model params, and
        feature-analysis flags).
    :param out_dir: directory for logs and graphs.
    :param out_flp: pickle path for the trained model; if it already exists,
        training is skipped and the model is loaded instead.
    :param ldrs: (train, validation, test) dataloaders; validation is unused.
    :return: tuple (test accuracy/error, training time in seconds).
    """
    # Unpack the dataloaders.
    ldr_trn, _, ldr_tst = ldrs
    # Construct the model.
    print("Building model...")
    net = models.MODELS[args["model"]](out_dir)
    net.log(f"\n\nArguments: {args}")
    if path.exists(out_flp):
        # The output file already exists with these parameters, so do not
        # retrain the model.
        print(
            "Skipping training because a trained model already exists with "
            f"these parameters: {out_flp}")
        print(f"Loading model: {out_flp}")
        with open(out_flp, "rb") as fil:
            net.net = pickle.load(fil)
        tim_trn_s = 0
    else:
        net.new(**{param: args[param] for param in net.params})
        # Extract the training data from the training dataloader.
        # NOTE(review): assumes the whole training set fits in one batch
        # (only the first batch is used) — confirm the loader's batch size.
        print("Extracting training data...")
        dat_in, dat_out = list(ldr_trn)[0]
        print("Training data:")
        utils.visualize_classes(net, dat_out)
        # Training.
        print("Training...")
        tim_srt_s = time.time()
        net.train(ldr_trn.dataset.fets, dat_in, dat_out)
        tim_trn_s = time.time() - tim_srt_s
        print(f"Finished training - time: {tim_trn_s:.2f} seconds")
        # Save the model.
        print(f"Saving final model: {out_flp}")
        with open(out_flp, "wb") as fil:
            pickle.dump(net.net, fil)
    # Testing.
    #
    # Use .raw() instead of loading the dataloader because we need dat_extra.
    fets, dat_in, dat_out, dat_extra = ldr_tst.dataset.raw()
    print("Test data:")
    utils.visualize_classes(net, dat_out)
    print("Testing...")
    tim_srt_s = time.time()
    acc_tst = net.test(
        fets, dat_in, dat_out, dat_extra,
        graph_prms={
            "out_dir": out_dir, "sort_by_unfairness": True, "dur_s": None})
    print(f"Finished testing - time: {time.time() - tim_srt_s:.2f} seconds")
    # Optionally perform feature elimination.
    if args["analyze_features"]:
        utils.select_fets(
            utils.analyze_feature_correlation(
                net, out_dir, dat_in, args["clusters"]),
            utils.analyze_feature_importance(
                net, out_dir, dat_in, dat_out, args["fets_to_pick"],
                args["perm_imp_repeats"]))
    return acc_tst, tim_trn_s
def create_softmax_loss(scores, target_values):
    """
    :param scores: [batch_size, num_candidates] logit scores
    :param target_values: [batch_size, num_candidates] vector of 0/1 target values.
    :return: [batch_size] vector of losses (or single number of total loss).
    """
    # NOTE(review): in TF2 this op backpropagates into `labels` unless they
    # are wrapped in tf.stop_gradient — presumably fine here since targets
    # are constants, but confirm if labels ever become trainable.
    return tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=target_values)
def predict_8_bit_rgb(image_path, model_weights_path, output_json_path):
    """Run prediction on an 8-bit RGB image and write annotations as JSON.

    Sample call:
    python3 classifier.py predict-8-bit-rgb sample_images/15OCT22183656-S2AS_R5C4-056155973040_01_P001_8_bit.jpg /data/working/ALL_IN_ONE/model_weights/model_2/weights_for_epoch_29.h5 annotation.json
    """
    # Use GPU 0 when CUDA is available, otherwise fall back to CPU (None).
    gpu_number = 0 if torch.cuda.is_available() else None
    predict_body(image_path, model_weights_path, None, None, output_json_path, gpu_number)
def page_not_found(e):
    """
    Catches 404 errors and render a 404 page stylized with the design of the web app.

    :param e: the exception/error object Flask passes to error handlers (unused).
    Returns 404 static page (template body plus explicit 404 status code).
    """
    return render_template('404.html'), 404
def effort_remaining_after_servicing_tier_2_leads():
    """
    Real Name: Effort Remaining after Servicing Tier 2 Leads
    Original Eqn: MAX(Effort Remaining after Servicing Existing Clients - Effort Devoted to Tier 2 Leads, 0)
    Units: Hours/Month
    Limits: (None, None)
    Type: component
    Subs: None

    How much effort remains after higher priority sales and maintenance
    activities are complete?

    Note: auto-generated PySD component — clamps remaining effort at zero.
    """
    return np.maximum(
        effort_remaining_after_servicing_existing_clients()
        - effort_devoted_to_tier_2_leads(),
        0,
    )
def sliceThreshold(volume, block_size=5):
    """
    Convert each z-slice of a 3D volume into binary using an adaptive local
    thresholding method.

    :param volume: 3D numpy array; slices are taken along the last axis.
    :param block_size: int, neighborhood size for the adaptive threshold.
    :return: array of the same shape/dtype as `volume` with thresholded slices.
    :raises TypeError: if `volume` is not a numpy array.
    """
    # Use isinstance instead of an exact-type comparison so ndarray
    # subclasses are accepted too.
    if not isinstance(volume, np.ndarray):
        raise TypeError('the input must be numpy array!')
    segImg = np.empty_like(volume)
    # Threshold slice by slice along the z axis.
    for i in range(volume.shape[2]):
        segImg[:, :, i] = threshold_adaptive(volume[:, :, i], block_size, offset=0)
    return segImg
def rastrigin_d_dim(x: "chex.Array") -> "chex.Array":
    """
    D-dimensional Rastrigin function, x_i ∈ [-5.12, 5.12].
    Global minimum f(x*) = 0 at x* = [0, ..., 0].
    """
    amplitude = 10
    dim = x.shape[0]
    return amplitude * dim + jnp.sum(x ** 2 - amplitude * jnp.cos(2 * jnp.pi * x))
def v(tag, message):  # pylint: disable=invalid-name
    """Print a verbose message (non-debug).

    Emitted to stdout and/or the log file only when the corresponding
    module-level log level is at least 4 (verbose).

    :param tag: short source tag; "/V" is appended to mark verbosity.
    :param message: the message text.
    """
    date = __get_date()
    if LOG_LEVEL_STDOUT >= 4:
        __log_to_stdout(colors.COLOR_VERB, date, tag+"/V", message)
    if LOG_LEVEL_FILE >= 4:
        __log_to_file(date, tag+"/V", message)
def fMaxConfEV(arr3_EvtM_bol, arr3_Evt, arr3_Conf):
    """ Return highest confidence and its corresponding timing, given
    arr3_EvtM_bol already masked to year of interest.

    Something in this function or calling it is broken (per original author
    note — left byte-identical; do not restyle until the defect is isolated).

    :param arr3_EvtM_bol: 3D boolean mask (band, row, col).
    :param arr3_Evt: 3D event/timing values, same shape as the mask.
    :param arr3_Conf: 3D confidence values, same shape as the mask.
    :return: (arrOutEvent, arrOutConfMax) 2D arrays per pixel.
    """
    print('\t\tStats (max conf)...', end='')
    # Mask confidences: False entries become 0 and so never win the max.
    arr3_ConfM_bolY = arr3_EvtM_bol * arr3_Conf
    arrOutConfMax = arr3_ConfM_bolY.max(axis=0)
    print('\tGet matching event...', end='')
    arrOutEvent = np.zeros(arrOutConfMax.shape)
    # Pick the event from whichever band achieved the max confidence.
    # NOTE(review): when several bands tie, the last matching band wins —
    # confirm that is the intended tie-break.
    for b in range(arr3_ConfM_bolY.shape[0]):
        arrOutEvent = np.where(arrOutConfMax == arr3_ConfM_bolY[b, :, :],
                               arr3_Evt[b, :, :], arrOutEvent)
    return arrOutEvent, arrOutConfMax
def set_infosec (db, cl, nodeid, new_values) :
    """ When going from open->approving, set the infosec attributes on
    the PR.

    Roundup auditor: runs only on the open -> approving status transition.
    Sets purchase_risk_type from the maximum risk over product groups,
    rejects PRs whose risk order exceeds 40, and records the highest
    infosec level among the offer items.
    """
    apr = db.pr_status.lookup ('approving')
    opn = db.pr_status.lookup ('open')
    ost = cl.get (nodeid, 'status')
    nst = new_values.get ('status', ost)
    # Only act on the open -> approving transition.
    if ost != opn or nst != apr :
        return
    mrt = prlib.max_risk_type (db, nodeid)
    # Can happen if none of the product groups has a security level
    if not mrt :
        return
    new_values ['purchase_risk_type'] = mrt.id
    if mrt.order > 40 :
        raise Reject (_ ('Risk is too high: "%s"') % mrt.name)
    ois = new_values.get ('offer_items', cl.get (nodeid, 'offer_items'))
    # Find the offer item with the highest infosec level (by `order`).
    ilm = None
    for oid in ois :
        oi = db.pr_offer_item.getnode (oid)
        if oi.infosec_level :
            il = db.infosec_level.getnode (oi.infosec_level)
            if ilm is None or il.order > ilm.order :
                ilm = il
    if ilm is not None :
        new_values ['infosec_level'] = ilm.id
def regex_validation_recursion(node: dict) -> (bool, str):
    """
    Validates the regex inside a singular node of a Spcht Descriptor.

    :param dict node: one descriptor node (may contain 'mapping_settings',
        'mapping', 'cut', 'match' and a recursive 'fallback').
    :return: True, msg or False, msg if any one key is wrong
    :rtype: (bool, str)
    """
    # * mapping settings
    # BUG FIX: the original tested for the key 'map_setting' but then read
    # node['mapping_settings'], raising KeyError whenever 'map_setting'
    # was present; both checks now use 'mapping_settings'.
    if 'mapping_settings' in node:
        if node['mapping_settings'].get('$regex') is True and 'mapping' in node:
            for key in node['mapping']:
                if not validate_regex(key):
                    return False, "mapping"
    if 'cut' in node:
        if not validate_regex(node['cut']):
            return False, "cut"
    if 'match' in node:
        if not validate_regex(node['match']):
            return False, "match"
    # Fallback nodes are validated recursively; their verdict is returned.
    if 'fallback' in node:
        return regex_validation_recursion(node['fallback'])
    return True, "none"
def get_beat_times(audio_file, beats_folder, include_beat_numbers=False):
    """
    Read beat times from annotation file, extracting them first with
    DBNDownBeatTracker if the annotation file does not exist yet.

    :param audio_file: path to audio files
    :param beats_folder: folder with preanalysed beat times (in .beats.txt format per track)
    :param include_beat_numbers: if True, also return the second column
        (beat numbers within the bar) alongside the times.
    :return: beat times in seconds (and beat numbers when requested)
    """
    file_name = os.path.splitext(os.path.basename(audio_file))[0]
    beats_file = os.path.join(beats_folder, file_name + '.beats.txt')
    if not os.path.isfile(beats_file):
        print(f"Extracting beat times for {audio_file}")
        # NOTE(review): shell command built by string interpolation — paths
        # containing single quotes would break it; consider subprocess.run
        # with an argument list.
        os.system(f"DBNDownBeatTracker single '{audio_file}' -o '{beats_file}'")
    # Whitespace-separated file: column 0 = time (s), column 1 = beat number.
    t = pd.read_table(beats_file, header=None)
    if include_beat_numbers:
        return t[0].values, t[1].values
    else:
        return t[0].values
def build_probability_matrix(graph):
    """Get square matrix of shape (n, n), where n is number of nodes of the
    given `graph`.

    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.

    Returns
    -------
    numpy.ndarray, shape = [n, n]
        Matrix with every entry equal to 1/n, i.e. the uniform transition
        probability matrix over the graph's n nodes.
        (The original docstring incorrectly described an eigenvector.)
    """
    dimension = len(graph.nodes())
    matrix = empty_matrix((dimension, dimension))
    # Uniform probability: each of the n nodes is equally likely.
    probability = 1.0 / float(dimension)
    matrix.fill(probability)
    return matrix
def clear_config_except_panel_css():
    """Reset pn.config except for panel css.

    Clears user-supplied raw CSS and JS files, and drops every CSS file
    that is not one of Panel's own style files.
    """
    # pylint: disable=protected-access
    pn.config.raw_css = []
    pn.config.js_files = {}
    # Keep only Panel's built-in style sheets.
    pn.config.css_files = [
        file for file in pn.config.css_files if TemporaryResources._is_panel_style_file(file)
    ]
def test_im_templates(self):
    """Test appropriate selection of templates for the IM for 1.11.0.0.

    Rewrites the config to target information model 1.11.0.0 (schema/schematron
    1B00), runs the pipeline, and asserts the log records that the 1.11.0.0
    templates were selected.
    """
    post_setup(self)
    # Copy the config line by line, swapping in the 1.11.0.0 IM references.
    with open(self.config, "r") as c:
        with open(self.updated_config, "w") as n:
            for line in c:
                if "<information_model>1.5.0.0</information_model>" in line:
                    n.write("<information_model>1.11.0.0</information_model>\n")
                elif (
                    "<xml_model>http://pds.nasa.gov/pds4/pds/v1/"
                    "PDS4_PDS_1500.sch</xml_model>\n" in line
                ):
                    n.write(
                        "<xml_model>http://pds.nasa.gov/pds4/pds/v1/"
                        "PDS4_PDS_1B00.sch</xml_model>\n"
                    )
                elif (
                    "<schema_location>http://pds.nasa.gov/pds4/pds/v1 "
                    "http://pds.nasa.gov/pds4/pds/v1/PDS4_PDS_1500.xsd</schema_location>" in line
                ):
                    n.write(
                        "<schema_location>http://pds.nasa.gov/pds4/pds/"
                        "v1 http://pds.nasa.gov/pds4/pds/v1/"
                        "PDS4_PDS_1B00.xsd</schema_location>\n"
                    )
                else:
                    n.write(line)
    main(self.updated_config, faucet=self.faucet, silent=True, log=True)
    # The pipeline must have logged that the 1.11.0.0 templates were chosen.
    line_in_log = False
    with open('working/insight_release_01.log', 'r') as log:
        for line in log:
            if 'Label templates will use the ones from information model 1.11.0.0.' in line:
                line_in_log = True
    self.assertTrue(line_in_log)
def create_image_parser(subparsers):
    """Create the argument parser for ``openproblems-cli image``.

    :param subparsers: the argparse subparsers object to register on.
    Registers a required --task option, the shared function-type options,
    and a positional method name.
    """
    parser = subparsers.add_parser(
        "image", help="Fetch a Docker image associated with a function"
    )
    parser.add_argument(
        "-t",
        "--task",
        type=str,
        help="Select functions from a specific task",
        required=True,
    )
    # Shared --method/--metric/--dataset style options.
    parse_function_type(parser)
    parser.add_argument("name", type=str, help="Name of the selected method")
def yuanshanweir_transfer_loss_amount():
    """
    Real Name: YuanShanWeir Transfer Loss Amount
    Original Eqn: (Tranfer From YuanShanWeir To DaNanWPP+Transfer From YuanShanWeir To BanXinWPP)/(1-WPP Transfer Loss Rate)*WPP Transfer Loss Rate
    Units: m3
    Limits: (None, None)
    Type: component
    Subs: None

    Note: auto-generated PySD component — water lost in transfer, derived
    from the delivered volumes and the loss rate.
    """
    return (
        (
            tranfer_from_yuanshanweir_to_dananwpp()
            + transfer_from_yuanshanweir_to_banxinwpp()
        )
        / (1 - wpp_transfer_loss_rate())
        * wpp_transfer_loss_rate()
    )
def create_request(request: Request) -> Request:
    """Create a database entry (mongo Document based Request object) from a brewtils
    Request model object. Some transformations happen on a copy of the supplied Request
    prior to saving it to the database. The returned Request object is derived from this
    transformed copy, while the input Request object remains unmodified.

    Args:
        request: The brewtils Request object from which a database entry will be created

    Returns:
        Request: A brewtils Request model based on the newly created database entry.
            The parameters of the returned object may have been modified from the
            during processing of files in "bytes" type parameters.
    """
    # TODO: This deepcopy could be very memory intensive if the request contains large
    # file parameters. This should be revisited to see if there is a way to persist
    # remote requests locally without the base64 encoded data while avoiding this copy.
    request = deepcopy(request)
    # Only replace base64 data with raw files for requests local to this garden.
    replace_with_raw_file = request.namespace == config.get("garden.name")
    remove_bytes_parameter_base64(request.parameters, replace_with_raw_file)
    return db.create(request)
def _gifti_to_array(gifti):
    """ Converts tuple of `gifti` images to a single numpy array.

    Each element is loaded with `load_gifti` and its aggregated data arrays
    are horizontally stacked into one array.
    """
    return np.hstack([load_gifti(img).agg_data() for img in gifti])
def learn_naive_factorization(
    data: np.ndarray,
    distributions: List[Type[Leaf]],
    domains: List[Union[list, tuple]],
    scope: List[int],
    learn_leaf_func: LearnLeafFunc,
    **learn_leaf_kwargs
) -> Node:
    """
    Learn a leaf as a naive factorized model.

    :param data: The data.
    :param distributions: The distribution of the random variables.
    :param domains: The domain of the random variables.
    :param scope: The scope of the leaf.
    :param learn_leaf_func: The function to use to learn the sub-distributions parameters.
    :param learn_leaf_kwargs: Additional parameters for learn_leaf_func.
    :return: A naive factorized model.
    :raises ValueError: If there are inconsistencies between the data, distributions and domains.
    """
    _, n_features = data.shape
    if len(scope) != len(distributions) or len(domains) != len(distributions):
        raise ValueError("Each data column should correspond to a random variable having a distribution and a domain")
    # Naive factorization: a product node with one univariate leaf per variable.
    node = Product(scope)
    for i, s in enumerate(scope):
        leaf = learn_leaf_func(data[:, [i]], [distributions[i]], [domains[i]], [s], **learn_leaf_kwargs)
        leaf.id = i + 1  # Set the leaves ids sequentially
        node.children.append(leaf)
    return node
def opt_IA_search_assist(fun, lbounds, ubounounds=None, budget=None):
    """Run the OptIA immune algorithm (with search assist enabled) on `fun`
    within `lbounds`/`ubounds` using the given evaluation `budget`.

    (The original docstring described uniform random search, which this
    function does not perform; unused locals `dim`, `f_min` and
    `max_chunk_size` have been removed.)

    :param fun: objective function to minimize.
    :param lbounds: lower bounds, array-like.
    :param ubounds: upper bounds, array-like.
    :param budget: evaluation budget passed to OptIA.
    :return: the best point found by OptIA.
    """
    lbounds, ubounds = np.array(lbounds), np.array(ubounds)
    opt_ia = optIA.OptIA(fun, lbounds, ubounds, ssa=True)
    return opt_ia.opt_ia(budget)
def load_investigation(fp):
    """Load an ISA investigation file and validate its section labels.

    Used for rules 0005.

    :param fp: A file-like buffer object pointing to an investigation file
    :return: Dictionary of DataFrames for each section
    """
    def check_labels(section, labels_expected, df):
        """Checks each section is syntactically structured correctly
        :param section: The section of interest
        :param labels_expected: The list of expected labels in the section
        :param df: The DataFrame slice of the investigation file we are
        checking
        :return: None
        """
        # Only string columns count as labels; non-string columns are ignored.
        labels_found = set([x for x in df.columns if isinstance(x, str)])
        if not labels_expected.issubset(labels_found):
            missing_labels = labels_expected - labels_found
            log.fatal("(F) In {} section, expected labels {} not found in {}"
                      .format(section, missing_labels, labels_found))
        if len(labels_found - labels_expected) > 0:
            # check extra labels, i.e. make sure they're all comments
            extra_labels = labels_found - labels_expected
            for label in extra_labels:
                if _RX_COMMENT.match(label) is None:
                    # Not a Comment[...] label at all -> hard error.
                    # NOTE(review): appends to module-level validator_errors.
                    log.fatal("(F) In {} section, label {} is not allowed"
                              .format(section, label))
                    validator_errors.append({
                        "message": "Invalid label found in investigation file",
                        "supplemental": "In {} section, label {} is not "
                                        "allowed".format(section, label),
                        "code": 5
                    })
                elif len(_RX_COMMENT.findall(label)) == 0:
                    # Comment[] with an empty name -> warning only.
                    log.warning("(W) In {} section, label {} is missing a "
                                "name".format(section, label))
                    validator_warnings.append({
                        "message": "Missing name in Comment[] label",
                        "supplemental": "In {} section, label {} is missing a "
                                        "name".format(section, label),
                        "code": 4014
                    })
    # Read in investigation file into DataFrames first
    df_dict = read_investigation_file(fp)
    log.debug("Loading ONTOLOGY SOURCE REFERENCE section")
    labels_expected = {'Term Source Name', 'Term Source File',
                       'Term Source Version', 'Term Source Description'}
    check_labels('ONTOLOGY SOURCE REFERENCE',
                 labels_expected, df_dict['ontology_sources'])
    # Validate each study block in turn; each section has a fixed label set.
    for i in range(0, len(df_dict['studies'])):
        log.debug("Loading STUDY section")
        labels_expected = {'Study Identifier', 'Study Title',
                           'Study Description',
                           'Study Submission Date',
                           'Study Public Release Date',
                           'Study File Name'}
        check_labels('STUDY', labels_expected, df_dict['studies'][i])
        log.debug("Loading STUDY DESIGN DESCRIPTORS section")
        labels_expected = {'Study Design Type',
                           'Study Design Type Term Accession Number',
                           'Study Design Type Term Source REF'}
        check_labels('STUDY DESIGN DESCRIPTORS', labels_expected,
                     df_dict['s_design_descriptors'][i])
        log.debug("Loading STUDY PUBLICATIONS section")
        labels_expected = {'Study PubMed ID', 'Study Publication DOI'}
        check_labels('STUDY PUBLICATIONS', labels_expected,
                     df_dict['s_publications'][i])
        log.debug("Loading STUDY ASSAYS section")
        labels_expected = {
            'Study Assay Measurement Type',
            'Study Assay Measurement Type Term Accession Number',
            'Study Assay Measurement Type Term Source REF',
            'Study Assay Technology Type',
            'Study Assay Technology Type Term Accession Number',
            'Study Assay Technology Type Term Source REF',
            'Study Assay Technology Platform',
            'Study Assay File Name'}
        check_labels('STUDY ASSAYS', labels_expected, df_dict['s_assays'][i])
        log.debug("Loading STUDY PROTOCOLS section")
        labels_expected = {
            'Study Protocol Name', 'Study Protocol Type',
            'Study Protocol Type Term Accession Number',
            'Study Protocol Type Term Source REF',
            'Study Protocol Description',
            'Study Protocol Parameters Name',
            'Study Protocol Parameters Name Term Accession Number',
            'Study Protocol Parameters Name Term Source REF',
            'Study Protocol Components Name',
            'Study Protocol Components Type',
            'Study Protocol Components Type Term Accession Number',
            'Study Protocol Components Type Term Source REF'}
        check_labels('STUDY PROTOCOLS', labels_expected,
                     df_dict['s_protocols'][i])
        log.debug("Loading STUDY CONTACTS section")
        labels_expected = {
            'Study Person Last Name', 'Study Person First Name',
            'Study Person Mid Initials', 'Study Person Email',
            'Study Person Phone', 'Study Person Fax',
            'Study Person Address', 'Study Person Affiliation',
            'Study Person Roles', 'Study Person Roles',
            'Study Person Roles Term Accession Number',
            'Study Person Roles Term Source REF'}
        check_labels('STUDY CONTACTS', labels_expected,
                     df_dict['s_contacts'][i])
    return df_dict | 5,325,042 |
def _fi18n(text):
"""Used to fake translations to ensure pygettext retrieves all the strings we want to translate.
Outside of the aforementioned use case, this is exceptionally useless,
since this just returns the given input string without
any modifications made.
"""
return text | 5,325,043 |
def animate(dlist, tlist, mlist):
    """Move/animate the distractor and target circles on screen.

    Each object first resolves collisions against ``mlist``, then redraws
    itself on the global window; a single display update follows.
    (A dead commented-out variant was removed: it was a triple-quoted string
    evaluated as a no-op expression on every call.)
    """
    for distractor in dlist:
        distractor.detect_collision(mlist)
        distractor.draw_circle(win)
    for target in tlist:
        target.detect_collision(mlist)
        target.draw_circle(win)
    pg.display.update()
def is_required_version(version, specified_version):
    """Return True unless the Pipfile pins an exact version that differs.

    ``specified_version`` may be a plain requirement string or a dict with a
    ``"version"`` key. Only ``==``-pinned requirements are enforced; any
    other specifier is accepted.
    """
    if isinstance(specified_version, dict):
        # Packages defined with multiple values carry the pin under "version".
        specified_version = specified_version.get("version", "")
    if not specified_version.startswith("=="):
        return True
    pinned = specified_version.split("==")[1]
    return version.strip() == pinned.strip()
def draw_tree(artist, colors, start_size = 20):
    """Draw the christmas tree, one row at a time from bottom to top.

    Row widths shrink from ``start_size`` down to 1; each row uses the
    color at index ``20 - size`` (assumes the default 20-row tree).
    """
    for size in range(start_size, 0, -1):
        draw_row(artist, size, colors[20 - size])
        move_up_row(artist, size)
async def cmd_project_uninstall(ls: TextXLanguageServer, params) -> bool:
    """Command that uninstalls a textX language project.

    Args:
        params: project name

    Returns:
        True if textX project is uninstalled successfully, otherwise False

    Raises:
        None
    """
    project_name = params[0]
    ls.show_message("Uninstalling project {}".format(project_name))
    try:
        await uninstall_project_async(project_name, ls.python_path,
                                      ls.show_message_log)
    except UninstallTextXProjectError as err:
        ls.show_errors(str(err), err.detailed_err_msg)
        return False
    ls.show_message("Project {} is successfully uninstalled.".format(project_name))
    return True
def test_timespan_args_week_str_and_thru_str():
    """Tests timespan arguments: week_str, thru_str."""
    start, end = timespan(week_str=REF_BEGIN_ISO, thru_str=REF_THRU_STR)
    # ISO week 10 of 2020, Monday, in the local timezone.
    expected_begin = datetime.fromisocalendar(2020, 10, 1).astimezone()
    assert isinstance(start, datetime)
    assert isinstance(end, datetime)
    assert start.isoformat() == expected_begin.isoformat()
    assert end.isoformat() == REF_THRU_STR
def _check_load_mat(fname, uint16_codec):
    """Check if the mat struct contains 'EEG'."""
    read_mat = _import_pymatreader_funcs('EEGLAB I/O')
    mat_data = read_mat(fname, uint16_codec=uint16_codec)
    if 'ALLEEG' in mat_data:
        raise NotImplementedError(
            'Loading an ALLEEG array is not supported. Please contact'
            'mne-python developers for more information.')
    # Unwrap (possibly nested) 'EEG' structures until the fields are exposed.
    if 'EEG' in mat_data:
        mat_data = mat_data['EEG']
    mat_data = mat_data.get('EEG', mat_data)
    eeg = Bunch(**mat_data)
    eeg.trials = int(eeg.trials)
    eeg.nbchan = int(eeg.nbchan)
    eeg.pnts = int(eeg.pnts)
    return eeg
def local_desired_velocity(env, veh_ids, fail=False):
    """Encourage proximity to a desired velocity.

    Only the velocities of the specified vehicles are observed. Returns 0
    on a collision/failure, when no vehicles are given, or when any
    reported speed is invalid (below -100).
    """
    speeds = np.array(env.k.vehicle.get_speed(veh_ids))
    n_veh = len(veh_ids)
    if fail or n_veh == 0 or any(speeds < -100):
        return 0.
    target = env.env_params.additional_params['target_velocity']
    max_cost = np.linalg.norm(np.array([target] * n_veh))
    cost = np.linalg.norm(speeds - target)
    # epsilon term (to deal with ZeroDivisionError exceptions)
    eps = np.finfo(np.float32).eps
    return max(max_cost - cost, 0) / (max_cost + eps)
def All(q, value):
    """
    The All operator selects documents where the value of the field is an
    array that contains all the specified elements.
    """
    refs = to_refs(value)
    return Condition(q._path, refs, '$all')
def download_dataset(local: bool = True):
    """Download the medium CelebA dataset into ./data.

    :param local: on a local machine only use subset of the data. on a server
        uses full dataset
    """
    base_data_dir = "./data"
    dataset_dir = base_data_dir + "/celeba_dataset_medium"
    # Create the directories if necessary (idempotent).
    os.makedirs(dataset_dir, exist_ok=True)
    os.makedirs(dataset_dir + "/images", exist_ok=True)
    # Pick URLs and preprocessing parameters; the download logic is identical
    # for the subset and the full dataset.
    if local:
        # Subset of the dataset
        labels_url = ('https://drive.google.com/uc?id'
                      '=1Y3LkdANNDsdq_6_Vwkauz_CzUCuXrSmX')
        images_url = ('https://drive.google.com/uc?id=1'
                      '-gkTnvMb8ojsW1cFFkL4JA1CAy1xa6UH')
        n_positives = 4
    else:
        # Full dataset
        labels_url = ('https://drive.google.com/uc?id'
                      '=1BEk3Lyw89zMWdCs9RT5G6bPQ5QMiEVuY')
        images_url = ('https://drive.google.com/uc?id'
                      '=1Uqqt7EDq1gQp6hfOixVG8vZUtBVBMwVg')
        n_positives = 10
    DatasetDownloader(dataset_dir=dataset_dir, url=labels_url,
                      filename="labels.txt", unzip=False)
    DatasetDownloader(dataset_dir=dataset_dir + "/images", url=images_url,
                      filename="images.zip", unzip=True, preprocess=True,
                      number_of_positives=n_positives)
def key(i):
    """Return a deterministic key string for index ``i`` (e.g. ``key3``)."""
    return f'key{i}'
def pool_metatile_batch(pool, fn, metatiles, size):
    """Run ``fn`` over ``metatiles`` grouped into batches of ``size`` using
    the multiprocessing pool ``pool``.
    """
    batches = metatile_batches(metatiles, size)
    pool.map(fn, batches)
def patch():
    """monkey-patch class methods in web/form.py

    Replaces web.py's form rendering with the Bootstrap-aware versions
    defined in this module, and bolts on the extra group_* helpers.
    Must be called before any forms are rendered.
    """
    # update the render methods
    # NOTE: assignment via __dict__ keeps the functions unbound so they
    # become methods of the web.form classes.
    web.form.Form.render = Form.__dict__['render']
    web.form.Form.rendernote = Form.__dict__['rendernote']
    web.form.Input.render = Input.__dict__['render']
    web.form.File.render = File.__dict__['render']
    web.form.Textarea.render = Textarea.__dict__['render']
    web.form.Dropdown.render = Dropdown.__dict__['render']
    web.form.GroupedDropdown.render = GroupedDropdown.__dict__['render']
    web.form.Radio.render = Radio.__dict__['render']
    web.form.Checkbox.render = Checkbox.__dict__['render']
    web.form.Button.render = Button.__dict__['render']
    # add the new bootstrap-specific methods
    web.form.Input.group_start = Input.__dict__['group_start']
    web.form.Input.group_end = Input.__dict__['group_end']
    web.form.Input.group_title = Input.__dict__['group_title']
    web.form.Checkbox.group_start = Checkbox.__dict__['group_start']
    web.form.Checkbox.group_end = Checkbox.__dict__['group_end']
    web.form.Checkbox.group_title = Checkbox.__dict__['group_title']
    web.form.Radio.group_start = Radio.__dict__['group_start']
    web.form.Radio.group_end = Radio.__dict__['group_end']
    web.form.Radio.group_title = Radio.__dict__['group_title']
    web.form.Input.has_error = Input.__dict__['has_error'] | 5,325,055 |
def svn_repos_fs_change_rev_prop3(*args):
    """
    svn_repos_fs_change_rev_prop3(svn_repos_t repos, svn_revnum_t rev, char author, char name,
        svn_string_t new_value, svn_boolean_t use_pre_revprop_change_hook,
        svn_boolean_t use_post_revprop_change_hook,
        svn_repos_authz_func_t authz_read_func,
        apr_pool_t pool) -> svn_error_t
    """
    # NOTE(review): SWIG-generated thin wrapper around the C binding;
    # left byte-identical on purpose — do not hand-edit generated code.
    return _repos.svn_repos_fs_change_rev_prop3(*args) | 5,325,056 |
def read_dir(p: Path):
    """
    Recursively read through a directory and subdirectories.
    For each pdf file found, extract PFT results by calling parse_pdf().
    """
    logging.info('Reading {0}'.format(p.name))
    for pdf in p.glob('*.pdf'):
        # Trend reports are skipped: only full PFT reports are parsed.
        if 'TREND' in str(pdf):
            continue
        parse_pdf(pdf, Parsetype.PT_FULL_PFT)
    subdirs = [child for child in p.iterdir() if child.is_dir()]
    for subdir in subdirs:
        read_dir(subdir)
def test_extension():
    """Basic test for flask extension."""
    app = Flask(__name__)
    app.config['SWAG_TITLE'] = "Test application."
    app.config['SWAG_API_VERSION'] = '1.0.1'
    swag = Swag(app)
    # Generate the expected document inside a request context, then compare
    # it with what the HTTP endpoint actually serves.
    with app.test_request_context('/swagger/swagger.json'):
        expected = app.generate_swagger()
    response = app.test_client().get('/swagger/swagger.json')
    assert response.status_code == 200
    assert json.loads(response.data.decode('utf-8')) == expected
def session_travis(session):
    """On travis, just run with python3.4 and don't run slow or flaky tests."""
    marker = '-m not slow and not flaky'
    session_tests(session, 'python3.4', extra_pytest_args=[marker])
    session_gae(session, extra_pytest_args=[marker])
def pretty_ct(ct):
    """
    Pretty-print a contingency table
    Parameters
    ----------
    ct :
        the contingency table
    Returns
    -------
    pretty_table :
        a fancier string representation of the table
    """
    # NOTE(review): this function uses Python 2 syntax (`print >>` and
    # three-argument `raise`); it cannot run under Python 3 as written.
    output = StringIO()
    rich_ct(ct).to_csv(output)
    output.seek(0)
    try:
        pretty_table = prettytable.from_csv(output)
        pretty_table.padding_width = 0
        pretty_table.align = 'r'
        # Left-align the first (label) column, right-align the rest.
        pretty_table.align[pretty_table.field_names[0]] = 'l'
        return pretty_table
    except _csv.Error:
        exc_info = sys.exc_info()
        print >> sys.stderr, "[Warning] pretty_table raised an exception :", \
            exc_info[1]
        if exc_info[1].message == "Could not determine delimiter":
            # Fall back to a manual CSV parse with an explicit delimiter.
            # NOTE(review): the fallback builds `pt` but never returns it,
            # so this branch returns None — looks unintentional; confirm.
            pt = None
            output.seek(0)
            rd = csv.reader(output, delimiter=',')
            pt = prettytable.PrettyTable(next(rd))
            for row in rd:
                pt.add_row(row)
        else:
            # Re-raise with the original traceback (Python 2 form).
            raise exc_info[0], exc_info[1], exc_info[2] | 5,325,060 |
def mark(symbol):
    """Wrap the symbol's result in a tuple where the first element is `symbol`.

    Used where the information about "which branch of the grammar was used"
    must be propagated upwards for further checks.
    """
    def tag(result):
        # Pair the parsed value with the symbol that produced it.
        return (symbol, result)
    return tag << symbol
def restore_dimensions(array, from_dims, result_like, result_attrs=None):
    """
    Restores a numpy array to a DataArray with similar dimensions to a reference
    Data Array. This is meant to be the reverse of get_numpy_array.
    Parameters
    ----------
    array : ndarray
        The numpy array from which to create a DataArray
    from_dims : list of str
        The directions describing the numpy array. If being used to reverse
        a call to get_numpy_array, this should be the same as the out_dims
        argument used in the call to get_numpy_array.
        'x', 'y', and 'z' indicate any axes
        registered to those directions with
        :py:function:`~sympl.set_direction_names`. '*' indicates an axis
        which is the flattened collection of all dimensions not explicitly
        listed in out_dims, including any dimensions with unknown direction.
    result_like : DataArray
        A reference array with the desired output dimensions of the DataArray.
        If being used to reverse a call to get_numpy_array, this should be
        the same as the data_array argument used in the call to get_numpy_array.
    result_attrs : dict, optional
        A dictionary with the desired attributes of the output DataArray. If
        not given, no attributes will be set.
    Returns
    -------
    data_array : DataArray
        The output DataArray with the same dimensions as the reference
        DataArray.
    Raises
    ------
    ShapeMismatchError
        If the number of elements in `array` does not match the reference.
    See Also
    --------
    :py:function:~sympl.get_numpy_array: : Retrieves a numpy array with desired
        dimensions from a given DataArray.
    """
    # Map each explicit direction to itself; '*' axes are resolved below by
    # the helper from the reference array's own dimensions.
    current_dim_names = {}
    for dim in from_dims:
        if dim != '*':
            current_dim_names[dim] = [dim]
    direction_to_names = get_input_array_dim_names(
        result_like, from_dims, current_dim_names)
    # Rebuild the original (pre-flattening) shape, dims, and coords in the
    # order given by from_dims.
    original_shape = []
    original_dims = []
    original_coords = []
    for direction in from_dims:
        if direction in direction_to_names.keys():
            for name in direction_to_names[direction]:
                original_shape.append(len(result_like.coords[name]))
                original_dims.append(name)
                original_coords.append(result_like.coords[name])
    # Element counts must agree before reshaping is attempted.
    if np.product(array.shape) != np.product(original_shape):
        raise ShapeMismatchError
    # Reshape, then transpose into the reference array's dimension order.
    data_array = DataArray(
        np.reshape(array, original_shape),
        dims=original_dims,
        coords=original_coords).transpose(
            *list(result_like.dims))
    if result_attrs is not None:
        data_array.attrs = result_attrs
    return data_array | 5,325,062 |
def test_woe_mixed_vars(titanic):
    """Numeric and string data, with missings"""
    # Fit/transform should simply not raise on mixed-type columns.
    X = titanic[['Age', 'Sex']]
    y = titanic['Survived']
    transformer = WoETransformer()
    transformer.fit(X, y)
    transformer.transform(X)
    assert True
def _combine_qc_samples(samples):
    """Combine split QC analyses into single samples based on BAM files.
    """
    # Group samples that share both a BAM file and a batch.
    grouped = collections.defaultdict(list)
    for sample in samples:
        data = utils.to_single_data(sample)
        batch = dd.get_batch(data) or dd.get_sample_name(data)
        if not isinstance(batch, (list, tuple)):
            batch = [batch]
        bam = dd.get_align_bam(data) or dd.get_work_bam(data)
        grouped[(bam, tuple(batch))].append(data)
    combined = []
    for group in grouped.values():
        # Merge every member's QC, metrics and algorithm settings into the
        # first member of the group.
        data = group[0]
        qc, metrics, alg_qc = {}, {}, []
        for d in group:
            qc.update(dd.get_summary_qc(d))
            metrics.update(dd.get_summary_metrics(d))
            alg_qc.extend(dd.get_algorithm_qc(d))
        data["config"]["algorithm"]["qc"] = alg_qc
        data["summary"]["qc"] = qc
        data["summary"]["metrics"] = metrics
        combined.append([data])
    return combined
def prepare_outdir():
    """Prepare the directory structure on disk.

    Returns the (freshly emptied) output directory together with the s3
    destination folder.
    """
    out_dir, s3_dest_folder = file_destination()
    # Start from a clean slate: drop any previous output tree first.
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)
    return out_dir, s3_dest_folder
def doc(
        package_name: str,
        plugin_name: str,
        long_doc: bool = True,
        include_details: bool = False,
    ) -> str:
    """Document one plug-in

    Documentation is taken from the module doc-string. If the plug-in is not part of the
    package an UnknownPluginError is raised.

    Args:
        package_name: Name of package containing plug-ins.
        plugin_name: Name of the plug-in (module).
        long_doc: Use long doc-string or short one-line string.
        include_details: Include development details like parameters and return values?

    Returns:
        Documentation of the plug-in.
    """
    # Get Plugin-object and pick out doc-string
    doc = load(package_name, plugin_name).doc
    if not long_doc:
        # Short description: first paragraph collapsed to one line
        return doc.split("\n\n")[0].replace("\n", " ").strip()
    # Strip short description and indentation
    lines = [line.strip() for line in "\n\n".join(doc.split("\n\n")[1:]).split("\n")]
    # Stop before Args:, Returns: etc if details should not be included
    idx_args = len(lines)
    if not include_details:
        re_args = re.compile("(Args:|Returns:|Details:|Attributes:)$")
        # First matching line, or the full length when no section header exists
        # (previously: build a whole bool list and .index(True) in a try/except)
        idx_args = next(
            (i for i, line in enumerate(lines) if re_args.match(line)),
            len(lines))
    return "\n".join(lines[:idx_args]).strip()
def microsecond(dt):
    """:yaql:property microsecond

    Returns microseconds of given datetime.

    :signature: datetime.microsecond
    :returnType: integer

    .. code::

        yaql> datetime(2006, 11, 21, 16, 30, 2, 123).microsecond
        123
    """
    # Simple delegation to datetime.microsecond.
    return dt.microsecond | 5,325,067 |
def decompose_label_vector(label_vector, n_xgrids, n_ygrids, mean_lwh,
                           xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-10.0,10.0),
                           conf_thres=0.5, nms=True, iou_thres=0.1):
    """ Decode a model output vector back into per-object detections.
    Inverse of building the ground-truth label vector from a set of poses
    and classes over an n_xgrids x n_ygrids grid.
    Input:
        label_vector: label vector outputted from the model
        n_xgrids: number of grids in the x direction
        n_ygrids: number of grids in the y direction
        mean_lwh: per-class mean length/width/height used to denormalize sizes
        xlim, ylim, zlim: metric extents of the grid (meters)
        conf_thres: minimum confidence for a cell to yield a detection
        nms: whether to run non-max suppression on the decoded boxes
        iou_thres: IoU threshold used by non-max suppression
    Output:
        label_dict_list: list of dicts with keys
            conf, x, y, z, l, w, h, yaw, class
    """
    # NOTE(review): conf/poses/classes below are never used — dead locals.
    conf = []
    poses = []
    classes = []
    label_dict_list = []
    # obtain x index
    xstop = (xlim[1] - xlim[0]) / float(n_xgrids)
    # obtain y index
    ystop = (ylim[1] - ylim[0]) / float(n_ygrids)
    # length of each object label
    obj_label_len = pose_vec_len + len(label_map) # 8 for poses, rest for object classes
    # reshape the vector
    label_vector_reshaped = np.reshape(label_vector, (-1, obj_label_len))
    # get each element
    obj_confidences = label_vector_reshaped[:, 0]
    obj_poses = label_vector_reshaped[:, 1:pose_vec_len]
    obj_class_one_hot = label_vector_reshaped[:, pose_vec_len:]
    # iterate through each element
    for i, obj_conf in enumerate(obj_confidences):
        if obj_conf > conf_thres:
            # pose vector
            x_norm, y_norm, z_norm, l_norm, w_norm, h_norm, cos_yaw_norm, sin_yaw_norm = obj_poses[i]
            cls_ = idx_to_label(np.argmax(obj_class_one_hot[i]))
            mean_lwh_cls = mean_lwh[cls_]
            # get indices — recover (row, col) of the grid cell from the flat index
            x_idx = math.floor(i / n_xgrids)
            y_idx = i - (x_idx * n_xgrids)
            # denormalize pose: cell-relative offsets back to metric coords
            x = (x_norm * xstop) + (x_idx * xstop) + xlim[0]
            y = (y_norm * ystop) + (y_idx * ystop) + ylim[0]
            z = (z_norm * (zlim[1] - zlim[0])) + zlim[0]
            # sizes are log-ratios relative to the per-class mean dimensions
            l = mean_lwh_cls[0]*math.exp(l_norm)
            w = mean_lwh_cls[1]*math.exp(w_norm)
            h = mean_lwh_cls[2]*math.exp(h_norm)
            # yaw components were normalized from [-1, 1] into [0, 1]
            cos_yaw = (cos_yaw_norm * 2.0) - 1.0
            sin_yaw = (sin_yaw_norm * 2.0) - 1.0
            yaw = np.arctan2(sin_yaw, cos_yaw)
            # add poses, classes, and conf
            label_dict = {}
            label_dict['conf'] = obj_conf
            label_dict['x'] = x
            label_dict['y'] = y
            label_dict['z'] = z
            label_dict['l'] = l
            label_dict['w'] = w
            label_dict['h'] = h
            label_dict['yaw'] = yaw
            label_dict['class'] = idx_to_label(np.argmax(obj_class_one_hot[i]))
            # label_dict['conf'] = np.max(obj_class_one_hot[i])
            label_dict_list.append(label_dict)
    # non-max suppression
    if nms == True:
        label_dict_list = non_max_suppression(label_dict_list, iou_threshold=iou_thres)
    # return label dictionary
    return label_dict_list | 5,325,068 |
def gen_delay_phs(fqs, ants, dly_rng=(-20, 20)):
    """
    Produce a set of mock complex phasors corresponding to cables delays.

    Args:
        fqs (array-like): shape=(NFREQS,), GHz
            the spectral frequencies of the bandpasses
        ants (iterable):
            the indices/names of the antennas
        dly_rng (2-tuple): ns
            the range of the delay
    Returns:
        g (dictionary):
            a dictionary of ant:exp(2pi*i*tau*fqs) pairs where keys are elements
            of ants and values are complex arrays with shape (NFREQS,)
    See Also:
        :meth:`~gen_gains`: uses this function to generate full gains.
    """
    phasors = {}
    for ant in ants:
        # One random cable delay per antenna, uniform over dly_rng (ns).
        delay = np.random.uniform(dly_rng[0], dly_rng[1])
        phasors[ant] = np.exp(2j * np.pi * delay * fqs)
    return phasors
def test_genitive_complements():
    """Test whether "berauben" is in the list of verbs that can take genitive complements"""
    verbs = germanet_data.frames.extract_gentive_complement()
    berauben = germanet_data.get_lexunit_by_id('l74138')
    np.testing.assert_equal(berauben in verbs, True)
def convOutp(images, hidSums, targets, numModulesX, paddingStart, filterSizeX, moduleStride, numImgColors):
    """Accumulate convolution weight gradients via the _ConvNet C extension.

    images - (n_images, img_w**2 * n_chans)
    hidSums - (n_images, n_locs**2 * n_filters)
    targets - (n_filters, filter_w**2 * n_chans)
    """
    # NOTE(review): these two are unused after computation (numGroups/partialSum
    # are hardcoded as 1/0 in the C call below).
    numGroups = 1
    partialSum = 0
    numImages = images.shape[0]
    # NOTE(review): '/' here is Python-2 integer division; under Python 3 this
    # yields a float — confirm intended interpreter, or it should be '//'.
    numFilters = hidSums.shape[1] / (numModulesX**2)
    assert targets.shape == (numFilters, numImgColors * filterSizeX * filterSizeX), '%s %d %d-%d-%d' % (targets.shape.__str__(), numFilters, numImgColors, filterSizeX, filterSizeX)
    # Delegates to the C extension; note paddingStart is negated here.
    _ConvNet.convOutp(images.p_mat, hidSums.p_mat, targets.p_mat, numModulesX, filterSizeX, -paddingStart, moduleStride, numImgColors, 1, 0) | 5,325,071 |
def sharpen(img, bg=None, t='laplace', blur_radius=30, blur_guided_eps=1e-8,
            use_guidedfilter='if_large_img'):
    """Use distortion model to deblur image. Equivalent to usharp mask:
        1/t * img - (1-1/t) * blurry(img)
    Then, apply guided filter to smooth result but preserve edges.

    img - image to sharpen, assume normalized in [0,1]
    bg - image background mask (True where background); those pixels are zeroed
    t - the transmission map (inverse amount of sharpening)
        can be scalar, matrix of same (h, w) as img, or 3 channel image.
        By default, use a multi-channel sharpened laplace filter on a smoothed
        image with 10x10 kernel. For enhancing fine details in large images.
    use_guidedfilter - a bool or the string 'if_large_img' determining whether
        to clean up the resulting sharpened image. If the min image dimension is
        less that 1500, this cleanup operation may blur the
        image, ruining its quality.
    """
    if bg is None:
        bg = np.zeros(img.shape[:2], dtype='bool')
    else:
        img = img.copy()
        img[bg] = 0
    # assert np.isnan(img).sum() == 0
    # assert np.isnan(t).sum() == 0
    # blurring (faster than ndi.gaussian_filter(I)
    A = cv2.ximgproc.guidedFilter(
        # radiance.astype('float32'),
        img.astype('float32'),
        img.astype('float32'),
        blur_radius, blur_guided_eps)
    if t == 'laplace':
        # Recursive call with scalar t sharpens the laplacian response itself.
        t = 1-util.norm01(sharpen(ndi.morphological_laplace(
            img, (2,2,1), mode='wrap'), bg, 0.15), bg)
        # t = 1-util.norm01(ndi.morphological_laplace(
        #     img, (2,2,1), mode='wrap'), bg)
        # todo note: laplace t is 01 normalized.  should we keep the max
        # and just normalize the lower range (or vice versa or something)?
        # note2: if laplace is all zeros (due to bad input img), t will be all nan.
    if len(np.shape(t)) + 1 == len(img.shape):
        # Broadcast a (h, w) map across the image's channel axis.
        t_refined = np.expand_dims(t, -1).astype('float')
    else:
        t_refined = t
    if np.shape(t):
        t_refined[bg] = 1  # ignore background, but fix division by zero
    # Distortion-model inversion; denominator is clamped away from zero.
    J = (
        img.astype('float')-A) / np.maximum(1e-8, np.maximum(t_refined, np.min(t_refined)/2)) + A
    # assert np.isnan(J).sum() == 0
    if bg is not None:
        J[bg] = 0
    # applying a guided filter for smoothing image at this point can be
    # problematic to the image quality, significantly blurring it.
    if use_guidedfilter == 'if_large_img':
        # note: at some point, find a better threshold?  This works.
        use_guidedfilter = min(J.shape[0], J.shape[1]) >= 1500
    if not use_guidedfilter:
        J = check_and_fix_nan(J, img)
        return J
    # Edge-preserving cleanup pass for large images only.
    r2 = cv2.ximgproc.guidedFilter(
        img.astype('float32'),
        J.astype('float32'),
        2, 1e-8)
    r2 = check_and_fix_nan(r2, img)
    if bg is not None:
        r2[bg] = 0
    return r2 | 5,325,072 |
def mypy(_):
    """Run mypy optional static type checker.

    Placeholder task: intentionally does nothing yet.
    """
def what_to_add(qtype, origword, newword, terminate):
    """Return a qtype that is needed to finish a partial word.

    For example, given an origword of '\"frog' and a newword of '\"frogston',
    returns either:
      terminate=False:  'ston'
      terminate=True:   'ston\"'
    Useful when calculating tab completion strings for readline.

    Args:
      qtype: the type of quoting to use (ie. the first character of origword)
      origword: the original word that needs completion.
      newword: the word we want it to be after completion.  Must start with
        origword.
      terminate: true if we should add the actual quote character at the end.

    Returns:
      The string to append to origword to produce (quoted) newword.
    """
    if not newword.startswith(origword):
        return ''
    quoted_orig = quotify(qtype, origword, terminate=False)
    quoted_new = quotify(qtype, newword, terminate=terminate)
    return quoted_new[len(quoted_orig):]
def test_kwargs() -> None:
    """Test functionality when signature uses ``**kwargs``. """
    @argcomb(a="b")
    def f(**kwargs: Any) -> None:
        ...
    # Valid: 'a' together with its required companion 'b', or no 'a' at all.
    f(a=1, b=1)
    f(b=1, c=1)
    # Invalid: 'a' without 'b' must be rejected.
    with pytest.raises(InvalidArgumentCombination):
        f(a=1) | 5,325,075 |
def intersect(start1, end1, start2, end2):
    """Return the intersection point of two lines, else return None.

    Ideas:
      For parallel lines to intercept (equal slope and y-intercept),
      they must be overlapping segments of the same infinite line.
      Otherwise the intersection solves line equation 1 = line equation 2:
          m1 * x + c1 = m2 * x + c2   =>   x = (c2 - c1) / (m1 - m2)
      The intersection must also lie within the x-y boundaries of both
      segments.
    """
    # Normalize each segment so its start has the smaller x.
    if start1.x > end1.x:
        start1, end1 = end1, start1
    if start2.x > end2.x:
        start2, end2 = end2, start2
    # Order the two segments themselves by starting x.
    if start1.x > start2.x:
        start1, start2 = start2, start1
        end1, end2 = end2, end1
    line1 = Line(start1, end1)
    line2 = Line(start2, end2)
    if line1.slope == line2.slope:
        # Parallel: they meet only when collinear and overlapping.
        if line1.intercept == line2.intercept and start2.is_between(start1, end1):
            return start2
        return None
    x = (line2.intercept - line1.intercept) / (line1.slope - line2.slope)
    y = x * line1.slope + line1.intercept
    candidate = Point(x, y)
    if candidate.is_between(start1, end1) and candidate.is_between(start2, end2):
        return candidate
    return None
def start(update, context):
    """Displays welcome message, then routes to language selection.

    On a fresh conversation, picks the user's Telegram language code (falling
    back to English) and sends the welcome text; on restart only the command
    list is re-sent. Always clears user state and delegates to select_lang.
    """
    # If we're starting over we don't need do send a new message
    if not context.user_data.get(START_OVER):
        user = update.message.from_user
        try:
            context.user_data[LANG] = user.language_code
            logger.info(
                f'User language: {texts.LANGUAGE[context.user_data[LANG]]["name"]}')
        except Exception:
            # Unsupported/missing language code (was a bare `except:`, which
            # also swallowed SystemExit/KeyboardInterrupt) — default to English.
            context.user_data[LANG] = 'en'
        update.message.reply_text(
            texts.WELCOME[context.user_data[LANG]] + ' \U0001F5FA', parse_mode=ParseMode.HTML)
    text = texts.COMMANDS[context.user_data[LANG]]
    update.message.reply_text(text=text, parse_mode=ParseMode.HTML)
    # Clear user context
    context.user_data.clear()
    context.user_data[START_OVER] = True
    return select_lang(update, context)
def model_dir_str(model_dir, hidden_units, logits, processor=lambda: pc.IdentityProcessor(),
                  activation=tf.nn.relu, uuid=None):
    """Returns a string for the model directory describing the network.

    Note that it only stores the information that describes the layout of the
    network - in particular it does not describe any training hyperparameters
    (in particular dropout rate).
    """
    # Run-length encode consecutive equal layer sizes, e.g. [64, 64, 32]
    # becomes "64x2_32_".
    for layer_size, group in it.groupby(hidden_units):
        repeat = sum(1 for _ in group)
        if repeat == 1:
            model_dir += '{}_'.format(layer_size)
        else:
            model_dir += '{}x{}_'.format(layer_size, repeat)
    model_dir += '{}__'.format(logits)
    model_dir += processor().__class__.__name__
    # A functools.partial wraps leaky_relu with an explicit alpha.
    if isinstance(activation, ft.partial):
        activation_fn = activation.func
        alpha = str(activation.keywords['alpha']).replace('.', '')
    else:
        activation_fn = activation
        alpha = '02'
    model_dir += '_' + activation_fn.__name__.replace('_', '')
    if activation_fn is tf.nn.leaky_relu:
        model_dir += alpha
    if uuid not in (None, ''):
        model_dir += '_' + str(uuid)
    return model_dir
def validate_search_inputs(row_id, search_column, search_value):
    """Validate that either ``row_id`` or the ``search_column``/``search_value``
    pair is provided, but not both.

    :param row_id: identifier of the row to operate on (mutually exclusive
        with the search pair)
    :param search_column: column name to search on
    :param search_value: value to search for
    :return: dict with ``valid`` (bool) and ``msg`` (error text, or None when
        valid)
    """
    # `True if x else False` anti-idiom replaced; guard clauses replace the
    # single mutated return dict.
    search_pair_defined = bool(search_column or search_value)
    if row_id and search_pair_defined:
        return {
            "valid": False,
            "msg": "Only 'row_id' or the 'search_column and search_value' pair can be defined"
        }
    if not row_id and not search_pair_defined:
        return {
            "valid": False,
            "msg": "You must define either 'row_id' or the 'search_column and search_value' pair"
        }
    return {"valid": True, "msg": None}
def B1(i, n, t):
    """Evaluate the Bernstein polynomial B_{i,n} at t via the binomial
    definition. Returns 0 when i lies outside [0, n]."""
    if 0 <= i <= n:
        return binom(n, i) * t**i * (1 - t)**(n - i)
    return 0
def tint(bot, update, args):
    """Set background tint percentage."""
    level = 100 - int(args[0])  # invert percentage
    # Scale 0-100 into a 0-255 gray level, same value for all three channels.
    channel = int(level * 2.56)
    command = 'back@{},{},{}'.format(channel, channel, channel)
    update.message.reply_text(colorSplit(command))
def get_text_between(text, before_text, after_text):
    """Return the substring of text between before_text and after_text.

    The result is stripped of surrounding whitespace. Raises
    VersionParsingError when either marker cannot be located.
    """
    start = text.find(before_text)
    if start == -1:
        raise VersionParsingError(
            f"Can't find '{before_text}' within a longer text.")
    start += len(before_text)
    end = text.find(after_text, start)
    if end == -1:
        raise VersionParsingError(
            f"Can't find '{after_text}' within a longer text.")
    return text[start:end].strip()
def test_open_group_from_paths(zarr_version):
    """Verify zarr_version is applied to both the store and chunk_store."""
    store = tempfile.mkdtemp()
    chunk_store = tempfile.mkdtemp()
    # Clean up the temp dirs when the interpreter exits.
    atexit.register(atexit_rmtree, store)
    atexit.register(atexit_rmtree, chunk_store)
    group = open_group(store, path='g1', chunk_store=chunk_store,
                       zarr_version=zarr_version)
    assert group._store._store_version == zarr_version
    assert group._chunk_store._store_version == zarr_version
def configure_mq_backend(admin_user: str, admin_pass: str,
                         config_path: str = None,
                         run_rabbit_mq: bool = True,
                         allow_bind_existing: bool = False,
                         url: str = "http://0.0.0.0:15672",
                         services: dict = None,
                         users_config: dict = None,
                         neon_mq_user_auth: dict = None):
    """
    Configure a new Diana RabbitMQ backend
    :param url: URL of admin portal (default=http://0.0.0.0:15672)
    :param admin_user: username to configure for RabbitMQ configuration
    :param admin_pass: password associated with admin_user
    :param run_rabbit_mq: start RabbitMQ Docker container to configure
    :param allow_bind_existing: allow configuring an existing rabbitMQ server
    :param config_path: local path to write configuration files
        (default=NEON_CONFIG_PATH)
    :param services: dict of services to configure on this backend
    :param users_config: dict of user permissions to configure
    :param neon_mq_user_auth: dict of MQ service names to credentials
    :raises RuntimeError: if vhost permissions cannot be applied for a user
    """
    # Start container for local configuration; block until the broker
    # logs that it is ready to accept connections
    if run_rabbit_mq:
        container = run_clean_rabbit_mq_docker(allow_bind_existing)
        container_logs = container.logs(stream=True)
        for log in container_logs:
            if b"Server startup complete" in log:
                break
    else:
        container = None
    api = RabbitMQAPI(url)
    # Configure Administrator (logs in with the default guest account,
    # then replaces it with the requested admin credentials)
    api.login("guest", "guest")
    api.configure_admin_account(admin_user, admin_pass)
    # Configure vhosts
    vhosts_to_configure = _parse_vhosts(services)
    LOG.debug(f"vhosts={vhosts_to_configure}")
    for vhost in vhosts_to_configure:
        api.add_vhost(vhost)
    # Configure users; `credentials` maps usernames to generated passwords
    LOG.debug(f"users={users_config}")
    credentials = api.create_default_users(list(users_config.keys()))
    api.add_user("neon_api_utils", "Klatchat2021")
    # Configure user permissions
    for user, vhost_config in users_config.items():
        for vhost, permissions in vhost_config.items():
            if not api.configure_vhost_user_permissions(vhost, user,
                                                        **permissions):
                LOG.error(f"Error setting Permission! {user} {vhost}")
                # The original bare `raise` here had no active exception and
                # would itself fail with "No active exception to re-raise";
                # raise an explicit, descriptive error instead.
                raise RuntimeError(
                    f"Error setting permissions for user={user} vhost={vhost}")
    # Export and save rabbitMQ Config
    rabbit_mq_config_file = join(expanduser(config_path),
                                 "rabbit_mq_config.json") if config_path else None
    write_rabbit_config(api, rabbit_mq_config_file)
    # TODO: Generate config map DM
    if container:
        cleanup_docker_container(container)
    # Write out MQ Connector config file, filling in generated credentials
    for service in neon_mq_user_auth.values():
        service["password"] = credentials[service["user"]]
    neon_mq_config_file = join(expanduser(config_path),
                               "mq_config.json") if config_path else None
    write_neon_mq_config(neon_mq_user_auth, neon_mq_config_file)
def add_video(eplist, playlist, current_playlist, var):
    """Append the currently selected episode to the playlist under construction."""
    selection = eplist.get(ACTIVE)
    playlist.insert(END, selection)
    # Track the video in the playback list as well
    current_playlist.append(selection)
    var.set(1)
def avi_common_argument_spec():
    """Return the argument spec shared by all Avi Ansible modules.

    :return: dict suitable for AnsibleModule(argument_spec=...)
    """
    env = os.environ.get
    credentials_spec = {
        'controller': {'default': env('AVI_CONTROLLER', '')},
        'username': {'default': env('AVI_USERNAME', '')},
        'password': {'default': env('AVI_PASSWORD', ''), 'no_log': True},
        'api_version': {'default': '16.4.4', 'type': 'str'},
        'tenant': {'default': 'admin'},
        'tenant_uuid': {'default': '', 'type': 'str'},
        'port': {'type': 'int'},
        'token': {'default': '', 'type': 'str', 'no_log': True},
        'timeout': {'default': 300, 'type': 'int'},
        'session_id': {'default': '', 'type': 'str', 'no_log': True},
        'csrftoken': {'default': '', 'type': 'str', 'no_log': True},
    }
    return {
        'controller': {'default': env('AVI_CONTROLLER', '')},
        'username': {'default': env('AVI_USERNAME', '')},
        'password': {'default': env('AVI_PASSWORD', ''), 'no_log': True},
        'tenant': {'default': 'admin'},
        'tenant_uuid': {'default': ''},
        'api_version': {'default': '16.4.4', 'type': 'str'},
        'avi_credentials': {'default': None, 'type': 'dict',
                            'options': credentials_spec},
        'api_context': {'type': 'dict'},
        'avi_disable_session_cache_as_fact': {'default': False, 'type': 'bool'},
    }
def _get_functional_form_section(input_string):
    """Grab the job-keyword section describing the PIP functional form.

    Returns the text captured between the '$functional_form' and '$end'
    markers; asserts that the section is present.
    """
    header = escape('$functional_form') + LINE_FILL + NEWLINE
    body = capturing(one_or_more(WILDCARD, greedy=False))
    footer = escape('$end')
    section = first_capture(header + body + footer, input_string)
    assert section is not None
    return section
async def test_select_source(hass, remote):
    """Test for select_source."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    service_data = {ATTR_ENTITY_ID: ENTITY_ID, ATTR_INPUT_SOURCE: "HDMI"}
    assert await hass.services.async_call(
        DOMAIN, SERVICE_SELECT_SOURCE, service_data, True
    )
    # The source key and the update ("KEY") press were both sent
    assert remote.control.call_count == 2
    assert remote.control.call_args_list == [call("KEY_HDMI"), call("KEY")]
def rfftn(a, s=None, axes=None):
    """Multi-dimensional discrete Fourier transform for real input.

    Thin wrapper around :func:`pyfftw.interfaces.numpy_fft.rfftn` with an
    interface mirroring :func:`numpy.fft.rfftn`.

    Parameters
    ----------
    a : array_like
        Input array (taken to be real).
    s : sequence of ints, optional (default None)
        Shape of the output along each transformed axis (input is cropped
        or zero-padded to match).
    axes : sequence of ints, optional (default None)
        Axes over which to compute the DFT.

    Returns
    -------
    af : complex ndarray
        DFT of the input array.
    """
    fftw_options = dict(s=s, axes=axes, overwrite_input=False,
                        planner_effort='FFTW_MEASURE',
                        threads=pyfftw_threads)
    return pyfftw.interfaces.numpy_fft.rfftn(a, **fftw_options)
async def ChannelLogoAPI(
    channel_id:str = Path(..., description='チャンネル ID 。ex:gr011'),
):
    """
    Return the logo image for the specified channel.

    Lookup order: bundled logo file -> hard-coded nationwide/cable channels ->
    the main channel's logo (for subchannels) -> Mirakurun or EDCB backend ->
    bundled default logo.
    """
    # Fetch the channel record
    channel = await Channels.filter(channel_id=channel_id).get_or_none()
    # The specified channel ID does not exist
    if channel is None:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail='Specified channel_id was not found',
        )
    # Set headers so the browser caches the logo
    # ref: https://qiita.com/yuuuking/items/4f11ccfc822f4c198ab0
    header = {
        'Cache-Control': 'public, max-age=2592000', # 30 days
    }
    # ***** Use the bundled logo (if one exists) *****
    # Logos captured from the broadcast stream are low quality anyway and may
    # not have been captured at all, so prefer a bundled logo when available.
    # Bundled logos are PNG files (256x256) named like NID32736-SID1024.png.
    if pathlib.Path.exists(LOGO_DIR / f'{channel.id}.png'):
        return FileResponse(LOGO_DIR / f'{channel.id}.png', headers=header)
    # ***** Nationwide channels: the logo is shared, match by channel-name prefix *****
    ## NHK General (NHK総合)
    if channel.channel_type == 'GR' and channel.channel_name.startswith('NHK総合'):
        return FileResponse(LOGO_DIR / 'NID32736-SID1024.png', headers=header)
    ## NHK Educational (NHKEテレ)
    if channel.channel_type == 'GR' and channel.channel_name.startswith('NHKEテレ'):
        return FileResponse(LOGO_DIR / 'NID32737-SID1032.png', headers=header)
    # For cable TV operators serving multiple regions, the community channel's
    # NID and SID differ per region, so match by name here as well.
    # ref: https://youzaka.hatenablog.com/entry/2013/06/30/154243
    # J:COM TV
    if channel.channel_type == 'GR' and channel.channel_name.startswith('J:COMテレビ'):
        return FileResponse(LOGO_DIR / 'NID32397-SID23656.png', headers=header)
    # J:COM Channel
    if channel.channel_type == 'GR' and channel.channel_name.startswith('J:COMチャンネル'):
        return FileResponse(LOGO_DIR / 'NID32399-SID23672.png', headers=header)
    # eo Hikari Channel
    if channel.channel_type == 'GR' and channel.channel_name.startswith('eo光チャンネル'):
        return FileResponse(LOGO_DIR / 'NID32127-SID41080.png', headers=header)
    # ZTV
    if channel.channel_type == 'GR' and channel.channel_name.startswith('ZTV'):
        return FileResponse(LOGO_DIR / 'NID32047-SID46200.png', headers=header)
    # ***** Fall back to the main channel's logo for subchannels *****
    # Terrestrial subchannels only: use the main channel's logo if it exists
    if channel.channel_type == 'GR' and channel.is_subchannel is True:
        # Find the main channel: among channels sharing this network ID,
        # the one with the smallest service ID
        main_channel = await Channels.filter(network_id=channel.network_id).order_by('service_id').first()
        # The main channel exists and has a bundled logo
        if main_channel is not None and pathlib.Path.exists(LOGO_DIR / f'{main_channel.id}.png'):
            return FileResponse(LOGO_DIR / f'{main_channel.id}.png', headers=header)
    # BS subchannels only: use the main channel's logo if it exists
    if channel.channel_type == 'BS' and channel.is_subchannel is True:
        # Derive the main channel's service ID.
        # NHK BS1 (102) and NHK BS Premium (104) are special-cased; otherwise
        # build the service ID whose last digit is 1.
        if channel.service_id == 102:
            main_service_id = 101
        elif channel.service_id == 104:
            main_service_id = 103
        else:
            main_service_id = int(channel.channel_number[0:2] + '1')
        # Fetch the main channel record
        main_channel = await Channels.filter(network_id=channel.network_id, service_id=main_service_id).first()
        # The main channel exists and has a bundled logo
        if main_channel is not None and pathlib.Path.exists(LOGO_DIR / f'{main_channel.id}.png'):
            return FileResponse(LOGO_DIR / f'{main_channel.id}.png', headers=header)
    # ***** Fetch the logo from Mirakurun *****
    if CONFIG['general']['backend'] == 'Mirakurun':
        # Mirakurun-style service ID:
        # NID and SID each zero-padded to 5 digits, concatenated, then int
        mirakurun_service_id = int(str(channel.network_id).zfill(5) + str(channel.service_id).zfill(5))
        # Get the logo from the Mirakurun API
        # (only reached when no bundled logo exists)
        mirakurun_logo_api_url = f'{CONFIG["general"]["mirakurun_url"]}/api/services/{mirakurun_service_id}/logo'
        mirakurun_logo_api_response:requests.Response = await asyncio.to_thread(requests.get, mirakurun_logo_api_url)
        # Status code 200 means logo data was returned
        # (status code 503 means no logo data exists)
        if mirakurun_logo_api_response.status_code == 200:
            # Return the fetched logo data
            mirakurun_logo = mirakurun_logo_api_response.content
            return Response(content=mirakurun_logo, media_type='image/png', headers=header)
    # ***** Fetch the logo from EDCB *****
    if CONFIG['general']['backend'] == 'EDCB':
        # Initialize CtrlCmdUtil
        edcb = CtrlCmdUtil()
        # Get the logo from EDCB's LogoData folder
        logo = None
        files = await edcb.sendFileCopy2(['LogoData.ini', 'LogoData\\*.*']) or []
        if len(files) == 2:
            logo_data_ini = EDCBUtil.convertBytesToString(files[0]['data'])
            logo_dir_index = EDCBUtil.convertBytesToString(files[1]['data'])
            logo_id = EDCBUtil.getLogoIDFromLogoDataIni(logo_data_ini, channel.network_id, channel.service_id)
            if logo_id >= 0:
                # Try logo types in order of image quality (best first)
                for logo_type in [5, 2, 4, 1, 3, 0]:
                    logo_name = EDCBUtil.getLogoFileNameFromDirectoryIndex(logo_dir_index, channel.network_id, logo_id, logo_type)
                    if logo_name is not None:
                        files = await edcb.sendFileCopy2(['LogoData\\' + logo_name]) or []
                        if len(files) == 1:
                            logo = files[0]['data']
                            logo_media_type = 'image/bmp' if logo_name.upper().endswith('.BMP') else 'image/png'
                            break
        # Return the fetched logo data
        if logo is not None and len(logo) > 0:
            return Response(content=logo, media_type=logo_media_type, headers=header)
    # ***** Use the default logo image *****
    # Only when there is no bundled logo and no logo from Mirakurun or EDCB
    return FileResponse(LOGO_DIR / 'default.png', headers=header)
def ImportFromNpb(
    db: bytecode_database.Database, cmake_build_root: pathlib.Path
) -> int:
    """Import the bitcode files found under the given build root into `db`.

    Returns the enumerate index of the last bytecode written (0 both when
    nothing and when exactly one bytecode was imported — matching the
    original behaviour).
    """
    bitcode_paths = FindBitcodesToImport(cmake_build_root)
    last_index = 0
    with sqlutil.BufferedDatabaseWriter(db, max_buffer_length=10) as writer:
        processed = [ProcessBitcode(path) for path in bitcode_paths]
        for last_index, bytecode in enumerate(processed):
            app.Log(1, "%s:%s", bytecode.source_name, bytecode.relpath)
            writer.AddOne(bytecode)
    return last_index
def _partial_ema_scov_init(n_dim=None, r: float = 0.025, n_emp=None, target: float = None) -> dict:
    """Initialize state for tracking partial (per-quadrant) moments.

    r: weight given to the most recent data point
    n_emp: discouraged; really only used for tests. Number of samples for
        which the empirical estimate is used instead of running updates
        (defaults to ~1/r inside _ema_scov_init).
    """
    state = {quadrant: _ema_scov_init(n_dim=n_dim, r=r, n_emp=n_emp)
             for quadrant in QUADRANTS}
    # Copy shared bookkeeping fields from any one quadrant state
    reference = state[next(iter(state))]
    state['n_dim'] = reference['n_dim']
    state['n_emp'] = reference['n_emp']
    state['rho'] = reference['rho']
    state['target'] = target
    state['sma'] = sma({}, n_dim, r=r)
    return state
def handle_enterprise_cookies_for_logistration(request, response, context):
    """
    Set or delete enterprise cookies on the logistration response.

    Arguments:
        request (HttpRequest): The request for the logistration page.
        response (HttpResponse): The response for the logistration page.
        context (dict): Context for the logistration page.
    """
    # This cookie is fine for tests or minor features, but must never gate
    # payment or other critical behavior, since users can edit their cookies.
    _set_experiments_is_enterprise_cookie(request, response, context['enable_enterprise_sidebar'])
    # Drop the enterprise cookie so subsequent requests show the default login page.
    cookie_name = configuration_helpers.get_value(
        'ENTERPRISE_CUSTOMER_COOKIE_NAME', settings.ENTERPRISE_CUSTOMER_COOKIE_NAME)
    cookie_domain = configuration_helpers.get_value(
        'BASE_COOKIE_DOMAIN', settings.BASE_COOKIE_DOMAIN)
    response.delete_cookie(cookie_name, domain=cookie_domain)
def inner(thing):
    """Wrap a lone DataPackage in a 1-tuple; otherwise materialize as a list."""
    return (thing,) if isinstance(thing, DataPackage) else list(thing)
def MPC_SendNFC_WUP_RES(did: Union[int, bytes] = 0xFF,
                        crc: Union[int, bytes] = 0) -> None:
    """Sends WUP_RES response

    Parameters
    ----------
    did : int or bytes, optional
        1-byte DID
    crc : int or bytes, optional
        2-byte CRC
    """
    # Normalize DID to an int in [0, 255]
    if isinstance(did, bytes):
        if len(did) != 1:
            raise TypeError('did must be an instance of 1 byte')
        did_value = did[0]
    else:
        if not isinstance(did, int):
            raise TypeError('did must be an instance of int or 1 byte')
        _check_limits(c_uint8, did, 'did')
        did_value = did
    # Normalize CRC to an int in [0, 65535] (big-endian when given as bytes)
    if isinstance(crc, bytes):
        if len(crc) != 2:
            raise TypeError('crc must be an instance of 2 bytes')
        crc_value = (crc[0] << 8) | crc[1]
    else:
        if not isinstance(crc, int):
            raise TypeError('crc must be an instance of int or 2 bytes')
        _check_limits(c_uint16, crc, 'crc')
        crc_value = crc
    CTS3Exception._check_error(_MPuLib.MPC_SendNFC_WUP_RES(
        c_uint8(0),
        byref(c_uint8(did_value)),
        byref(c_uint16(crc_value))))
def load_dataset(data_name):
    """Load dataset.

    Args:
        data_name (str): The name of the dataset (case-insensitive).

    Returns:
        dataset (pgl.dataset): The corresponding dataset, containing graph
            information, features, etc.
        data_mode (str): 's' or 'm', meaning small or medium dataset.

    Raises:
        ValueError: if data_name is not one of the supported datasets.
    """
    data_name = data_name.lower()
    if data_name == 'reddit':
        data_mode = 'm'
        dataset = pgl.dataset.RedditDataset()
        # Assemble a single label vector covering all three splits
        y = np.zeros(dataset.graph.num_nodes, dtype="int64")
        y[dataset.train_index] = dataset.train_label
        y[dataset.val_index] = dataset.val_label
        y[dataset.test_index] = dataset.test_label
        dataset.y = y
    elif data_name == 'arxiv':
        data_mode = 'm'
        dataset = pgl.dataset.OgbnArxivDataset()
        dataset.graph = to_undirected(dataset.graph, copy_node_feat=False)
        dataset.graph = add_self_loops(dataset.graph, copy_node_feat=False)
    elif data_name == 'cora':
        data_mode = 's'
        dataset = pgl.dataset.CoraDataset()
    elif data_name == 'pubmed':
        data_mode = 's'
        dataset = pgl.dataset.CitationDataset("pubmed", symmetry_edges=True)
    elif data_name == 'citeseer':
        data_mode = 's'
        dataset = pgl.dataset.CitationDataset("citeseer", symmetry_edges=True)
    else:
        raise ValueError(data_name + " dataset doesn't exist currently.")
    if data_mode == 's':
        # Small datasets: row-normalize bag-of-words features and build
        # boolean masks for the train/val/test splits.
        # (Removed an unused `indegree = dataset.graph.indegree()` local
        # that was computed and never read.)
        def normalize(feat):
            return feat / np.maximum(np.sum(feat, -1, keepdims=True), 1)

        dataset.graph.node_feat["words"] = normalize(dataset.graph.node_feat[
            "words"])
        dataset.feature = dataset.graph.node_feat["words"]
        dataset.train_mask = generate_mask(dataset.graph.num_nodes,
                                           dataset.train_index)
        dataset.val_mask = generate_mask(dataset.graph.num_nodes,
                                         dataset.val_index)
        dataset.test_mask = generate_mask(dataset.graph.num_nodes,
                                          dataset.test_index)
    return dataset, data_mode
def require_dataset(hdf5_data, path, shape, dtype, maxshape=(None)):
    """
    Create or update a dataset, making sure that its shape is resized
    if needed.

    Args:
        hdf5_data: object, an already opened hdf5 file
        path: string, the path to the dataset
        shape: tuple of integers, the shape of the dataset
        dtype: string or int, the type of the dataset
        maxshape: unused; on creation the dataset is always made fully
            resizable (None along every axis).

    Returns:
        The dataset newly created or updated.
    """
    dset = hdf5_data.get(path, default=None)
    if dset is None:
        # Dataset does not exist yet: create it, resizable along every axis.
        # (Fixed: the original used Python-2-only `xrange`, a NameError on
        # Python 3.)
        unlimited = tuple(None for _ in range(len(shape)))
        dset = hdf5_data.create_dataset(path, shape, dtype, maxshape=unlimited)
    else:
        # Dataset already exists: resize it in place to the requested shape
        dset.resize(shape)
    return dset
def test_set_primary(email_factory):
    """
    Setting an email as the primary should update all of the user's
    other email addresses to not be the primary.
    """
    previous_primary = email_factory(is_primary=True)
    replacement = email_factory(is_primary=False, user=previous_primary.user)
    replacement.set_primary()
    previous_primary.refresh_from_db()
    assert replacement.is_primary
    assert not previous_primary.is_primary
def findquote(lrrbot, conn, event, respond_to, query):
    """
    Command: !findquote QUERY
    Section: quotes

    Search for a quote in the quote database.
    """
    quotes = lrrbot.metadata.tables["quotes"]
    with lrrbot.engine.begin() as pg_conn:
        # Full-text index over the quote text (English stemming)
        fts_column = sqlalchemy.func.to_tsvector('english', quotes.c.quote)
        # NOTE(review): this rebinds the `query` parameter to the SELECT
        # statement; the original search string is no longer accessible below.
        query = sqlalchemy.select([
            quotes.c.id, quotes.c.quote, quotes.c.attrib_name, quotes.c.attrib_date, quotes.c.context
        ]).where(
            # Match undeleted quotes against the user's search terms
            (fts_column.op("@@")(sqlalchemy.func.plainto_tsquery('english', query))) & (~quotes.c.deleted)
        )
        # Pick one matching row at random.
        # NOTE(review): presumably pick_random_elements returns [None] when
        # there are no matches (indexing [0] on an empty list would raise
        # before the None check below) — verify against its implementation.
        row = common.utils.pick_random_elements(pg_conn.execute(query), 1)[0]
    if row is None:
        return conn.privmsg(respond_to, "Could not find any matching quotes.")
    qid, quote, name, date, context = row
    conn.privmsg(respond_to, format_quote("Quote", qid, quote, name, date, context))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.