def remove_sus_from_Reff(strain, data_date):
"""
This removes the inferred susceptibility depletion from the Reff estimates out of EpyReff.
The inferred Reff = S(t) * Reff_1 where S(t) is the effect of susceptible depletion (i.e. a
factor between 0 and 1) and Reff_1 is the Reff without the effect of a reducing susceptibility
pool.
"""
from params import pop_sizes
data_date = pd.to_datetime(data_date)
# read in Reff samples
df_Reff = pd.read_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
# read in assumed CA
CA = pd.read_csv(
"results/"
+ "CA_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# read in cases by infection dates
cases = pd.read_csv(
"results/"
+ "cases_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date_inferred"]
)
# scale the local cases by the assumed CA
cases["local_scaled"] = cases["local"]
cases.loc[cases.date_inferred <= pd.to_datetime("2021-12-09"), "local_scaled"] *= 1 / 0.75
cases.loc[cases.date_inferred > pd.to_datetime("2021-12-09"), "local_scaled"] *= 1 / 0.50
# read in the inferred susceptibility depletion factor and convert to a simple array
samples = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
sus_dep_factor = samples["phi"][:2000]
sus_dep_factor.to_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "sampled_susceptible_depletion_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
sus_dep_factor = sus_dep_factor.to_numpy()
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
# init a dataframe to hold the Reff samples without susceptible depletion
df_Reff_adjusted = pd.DataFrame()
df_cases_adjusted = pd.DataFrame()
for state in states:
# filter cases by the state and after March 2020
cases_state = cases.loc[cases.STATE == state]
dates_complete = pd.DataFrame(
pd.date_range(
start=df_Reff.INFECTION_DATES.min(),
end=max(df_Reff.INFECTION_DATES)
),
columns=["date_inferred"],
)
# merging on date_inferred forces missing dates to be added into cases_state
cases_state = dates_complete.merge(right=cases_state, how='left', on='date_inferred')
cases_state.fillna(0, inplace=True)
cases_state.loc[cases_state.date_inferred <= "2021-06-25", "local_scaled"] = 0
cases_state["cum_local_scaled"] = cases_state["local_scaled"].cumsum()
df_cases_adjusted = pd.concat((df_cases_adjusted, cases_state), axis=0)
cases_state = cases_state.cum_local_scaled.to_numpy()
cases_state = np.tile(cases_state, (2000, 1)).T
# invert the susceptible depletion factor for the model
scaling_factor = 1 / (1 - sus_dep_factor * cases_state / pop_sizes[state])
df_Reff_state = df_Reff.loc[df_Reff.STATE == state]
df_Reff_state.iloc[:, :-2] = df_Reff_state.iloc[:, :-2] * scaling_factor
df_Reff_adjusted = pd.concat((df_Reff_adjusted, df_Reff_state), axis=0)
# save the unscaled Reff
df_Reff_adjusted.to_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
index=False,
)
df_cases_adjusted.to_csv(
"results/EpyReff/cases_adjusted_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
index=False,
)
return None
| 10,300
|
def test_get_notification_by_id_raise():
"""Will return information for notifications with id.
:return: Should catch the ValueError.
"""
syn = syncope.Syncope(syncope_url="http://192.168.1.145:9080", username="admin", password="password")
with pytest.raises(ValueError) as excinfo:
syn.get_notification_by_id()
    assert str(excinfo.value) == 'This search needs an id to work!'
| 10,301
|
def test_save_matplotlib_figures(gallery_conf, ext, req_mpl, req_pil):
"""Test matplotlib figure save."""
if ext == 'svg':
gallery_conf['image_scrapers'] = (matplotlib_svg_scraper(),)
import matplotlib.pyplot as plt # nest these so that Agg can be set
plt.plot(1, 1)
fname_template = os.path.join(gallery_conf['gallery_dir'], 'image{0}.png')
image_path_iterator = ImagePathIterator(fname_template)
block = ('',) * 3
block_vars = dict(image_path_iterator=image_path_iterator)
image_rst = save_figures(block, block_vars, gallery_conf)
assert len(image_path_iterator) == 1
fname = '/image1.{0}'.format(ext)
assert fname in image_rst
fname = gallery_conf['gallery_dir'] + fname
assert os.path.isfile(fname)
# Test capturing 2 images with shifted start number
image_path_iterator.next()
image_path_iterator.next()
plt.plot(1, 1)
plt.figure()
plt.plot(1, 1)
image_rst = save_figures(block, block_vars, gallery_conf)
assert len(image_path_iterator) == 5
for ii in range(4, 6):
fname = '/image{0}.{1}'.format(ii, ext)
assert fname in image_rst
fname = gallery_conf['gallery_dir'] + fname
assert os.path.isfile(fname)
| 10,302
|
def word2bytes(word, big_endian=False):
""" Converts a 32-bit word into a list of 4 byte values.
"""
return unpack_bytes(pack_word(word, big_endian))
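
# Usage sketch (added for illustration): pack_word/unpack_bytes are defined elsewhere
# in this project; a rough standalone equivalent of word2bytes using the standard
# struct module (an assumption, not the project's implementation) looks like this:
import struct

def word2bytes_sketch(word, big_endian=False):
    # Pack the 32-bit word, then expose its 4 bytes as a list of ints.
    fmt = '>I' if big_endian else '<I'
    return list(struct.pack(fmt, word & 0xFFFFFFFF))

print(word2bytes_sketch(0x12345678))                   # [120, 86, 52, 18]
print(word2bytes_sketch(0x12345678, big_endian=True))  # [18, 52, 86, 120]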
| 10,303
|
def statfcn(status, _id, _ret):
"""
Callback for libngspice to report simulation status like 'tran 5%'
"""
    logger.warning(status.decode('ascii'))
return 0
| 10,304
|
def decode_eventdata(sensor_type, offset, eventdata, sdr):
"""Decode extra event data from an alert or log
Provide a textual summary of eventdata per descriptions in
Table 42-3 of the specification. This is for sensor specific
offset events only.
:param sensor_type: The sensor type number from the event
:param offset: Sensor specific offset
:param eventdata: The three bytes from the log or alert
"""
if sensor_type == 5 and offset == 4: # link loss, indicates which port
return 'Port {0}'.format(eventdata[1])
elif sensor_type == 8 and offset == 6: # PSU cfg error
errtype = eventdata[2] & 0b1111
return psucfg_errors.get(errtype, 'Unknown')
elif sensor_type == 0xc and offset == 8: # Memory spare
return 'Module {0}'.format(eventdata[2])
elif sensor_type == 0xf:
if offset == 0: # firmware error
return firmware_errors.get(eventdata[1], 'Unknown')
elif offset in (1, 2):
return firmware_progress.get(eventdata[1], 'Unknown')
elif sensor_type == 0x10:
if offset == 0: # Correctable error logging on a specific memory part
return 'Module {0}'.format(eventdata[1])
elif offset == 1:
return 'Reading type {0:02X}h, offset {1:02X}h'.format(
eventdata[1], eventdata[2] & 0b1111)
elif offset == 5:
return '{0}%'.format(eventdata[2])
elif offset == 6:
return 'Processor {0}'.format(eventdata[1])
elif sensor_type == 0x12:
if offset == 3:
action = (eventdata[1] & 0b1111000) >> 4
return auxlog_actions.get(action, 'Unknown')
elif offset == 4:
sysactions = []
if eventdata[1] & 0b1 << 5:
sysactions.append('NMI')
if eventdata[1] & 0b1 << 4:
sysactions.append('OEM action')
if eventdata[1] & 0b1 << 3:
sysactions.append('Power Cycle')
if eventdata[1] & 0b1 << 2:
sysactions.append('Reset')
if eventdata[1] & 0b1 << 1:
sysactions.append('Power Down')
if eventdata[1] & 0b1:
sysactions.append('Alert')
return ','.join(sysactions)
elif offset == 5: # Clock change event, either before or after
if eventdata[1] & 0b10000000:
return 'After'
else:
return 'Before'
elif sensor_type == 0x19 and offset == 0:
        return 'Requested {0} while {1}'.format(eventdata[1], eventdata[2])
elif sensor_type == 0x1d and offset == 7:
return restart_causes.get(eventdata[1], 'Unknown')
elif sensor_type == 0x21:
return '{0} {1}'.format(slot_types.get(eventdata[1], 'Unknown'),
eventdata[2])
elif sensor_type == 0x23:
phase = eventdata[1] & 0b1111
return watchdog_boot_phases.get(phase, 'Unknown')
elif sensor_type == 0x28:
if offset == 4:
return 'Sensor {0}'.format(eventdata[1])
elif offset == 5:
islogical = (eventdata[1] & 0b10000000)
if islogical:
if eventdata[2] in sdr.fru:
return sdr.fru[eventdata[2]].fru_name
else:
return 'FRU {0}'.format(eventdata[2])
elif sensor_type == 0x2a and offset == 3:
return 'User {0}'.format(eventdata[1])
elif sensor_type == 0x2b:
return version_changes.get(eventdata[1], 'Unknown')
elif sensor_type == 0x2c:
cause = (eventdata[1] & 0b11110000) >> 4
cause = fru_states.get(cause, 'Unknown')
oldstate = eventdata[1] & 0b1111
if oldstate != offset:
try:
cause += '(change from {0})'.format(
ipmiconst.sensor_type_offsets[0x2c][oldstate]['desc'])
            except KeyError:
                pass
        return cause
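
# Illustrative calls (added): these two branches of decode_eventdata need no external
# lookup tables; the event byte values below are hypothetical.
print(decode_eventdata(5, 4, [0x00, 0x03, 0x00], sdr=None))     # -> 'Port 3'
print(decode_eventdata(0x10, 6, [0x00, 0x02, 0x00], sdr=None))  # -> 'Processor 2'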
| 10,305
|
def get_log_path():
"""
Requests the logging path to the external python library (that calls
the bindings-common).
:return: The path where to store the logs.
"""
if __debug__:
logger.debug("Requesting log path")
log_path = compss.get_logging_path()
if __debug__:
logger.debug("Log path received: %s" % log_path)
return log_path
| 10,306
|
def do_monitor_redis_check_create(client, args):
""" Create redis check monitor """
kwargs = {
'group_id': args.redis_id,
'metric': args.metric,
'threshold': args.threshold,
'operator': args.operator,
}
if args.tenant:
kwargs['tenant'] = args.tenant
if args.description:
kwargs['description'] = args.description
if args.system:
kwargs['is_system'] = True
item = client.redischeck.create(**kwargs)
utils.print_dict(item)
| 10,307
|
def about_incumbent(branch_df):
"""
number of incumbent updates
incumbent throughput: num_updates / num_nodes
max_improvement, min_improvement, avg_improvement
avg incumbent improvement / first incumbent value
max, min, avg distance between past incumbent updates
distance between last update and last node explored
"""
abs_improvement = pd.Series(abs(branch_df['best_integer'].diff(1)))
bool_updates = pd.Series((abs_improvement != 0))
avg_improvement = abs_improvement.sum() / bool_updates.sum() if bool_updates.sum() != 0 else None
    nnz_idx = branch_df['best_integer'].to_numpy().nonzero()
first_incumbent = branch_df['best_integer'].iloc[nnz_idx[0][0]] if len(nnz_idx[0]) != 0 else None
num_updates = bool_updates.sum() # real number of updates (could be 0)
second = float(num_updates) / branch_df['num_nodes'].iloc[-1] if branch_df['num_nodes'].iloc[-1] != 0 else None
sixth = avg_improvement / first_incumbent if avg_improvement and first_incumbent else None
# add dummy 1 (update) at the end of bool_updates
bool_updates[bool_updates.shape[0]] = 1.
non_zeros = bool_updates.values == 1
zeros = ~non_zeros
zero_counts = np.cumsum(zeros)[non_zeros]
zero_counts[1:] -= zero_counts[:-1].copy() # distance between two successive incumbent updates
zeros_to_last = zero_counts[-1]
zero_counts = zero_counts[:-1] # removes last count (to the end) to compute max, min, avg
try:
zeros_stat = [zero_counts.max(), zero_counts.min(), zero_counts.mean(), zeros_to_last]
except ValueError:
zeros_stat = [None]*4
incumbent_list = [
num_updates,
second,
abs_improvement.max(),
abs_improvement.min(),
abs_improvement.mean(),
sixth
]
incumbent_list.extend(zeros_stat)
if len(incumbent_list) != 10:
print("***len(incumbent_list): {}".format(len(incumbent_list)))
return incumbent_list, len(incumbent_list)
| 10,308
|
def unique_chars(texts: List[str]) -> List[str]:
"""
Get a list of unique characters from list of text.
Args:
texts: List of sentences
Returns:
A sorted list of unique characters
"""
return sorted(set("".join(texts)))
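
# Quick usage example (added), assuming unique_chars above is in scope:
print(unique_chars(["abc", "cab", "bad"]))  # -> ['a', 'b', 'c', 'd']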
| 10,309
|
def adaptive_max_pool1d(input, output_size):
"""Apply the 1d adaptive max pooling to input.
Parameters
----------
input : dragon.vm.torch.Tensor
The input tensor.
output_size : Union[int, Sequence[int]]
The target output size.
Returns
-------
dragon.vm.torch.Tensor
The output tensor.
See Also
--------
`torch.nn.AdaptiveMaxPool1d(...)`_
"""
args = utils._get_adaptive_pool_args(
input.size()[-1:], utils._single(output_size))
return _pool('MAX', utils._single, input, **args)
| 10,310
|
def test_is_card_useful_already_played(card_value, played_value):
"""
If a card's number is less than or equal to the played stack of its
color, it should no longer be useful.
"""
game = Game([])
game.stacks[cards.Colors.BLUE] = played_value
card = cards.Card(cards.Colors.BLUE, card_value)
expected = card_value > played_value
assert game.is_card_useful(card) == expected
| 10,311
|
def download_file(url, path):
"""Download a file.
:param string url: The URL of the remote file.
:param string path: The path to save the file to.
"""
r = requests.get(url, stream=True)
with open(path, 'wb') as f:
size = int(r.headers.get('content-length')) / 1024 + 1
for chunk in bar(r.iter_content(chunk_size=8192), expected_size=size):
if chunk:
f.write(chunk)
f.flush()
| 10,312
|
def cmd_log(ctx, *args):
"""
logs the args
"""
print(*args)
| 10,313
|
def get_return_nb(input_value, output_value):
"""Get return from input and output value."""
if input_value == 0:
if output_value == 0:
return 0.
return np.inf * np.sign(output_value)
return_value = (output_value - input_value) / input_value
if input_value < 0:
return_value *= -1
return return_value
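
# Illustrative checks (added), assuming numpy is imported as np and get_return_nb is in scope:
print(get_return_nb(100.0, 110.0))   # 0.1  -> a 10% gain
print(get_return_nb(-100.0, -90.0))  # 0.1  -> the sign is flipped for negative inputs
print(get_return_nb(0.0, 5.0))       # inf  -> np.inf * np.sign(5.0)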
| 10,314
|
def get_rocauc(val,num_iterations):
""" Trains a logistic regression and calculates the roc auc
for classifying products as >=4 stars """
recalls = np.zeros(num_iterations)
precisions = np.zeros(num_iterations)
f1s = np.zeros(num_iterations)
roc_aucs = np.zeros(num_iterations)
factory = lr_wrapper(val,feature_columns=['sim_score_db','sim_score_dm','rating_mean'],y_column='class')
for z in range(num_iterations):
# Slightly annoying thing here that each call to factory uses its own
# train_test_split, so y_test used for recalls will be different than
# y_test used in roc aucs
y_test,y_preds = factory.fit_and_return_preds()
recalls[z] = recall_score(y_test,y_preds)
precisions[z] = precision_score(y_test,y_preds)
f1s[z] = f1_score(y_test,y_preds)
y_test,y_probas = factory.fit_and_return_probas()
roc_aucs[z] = roc_auc_score(y_test, y_probas)
# print(roc_aucs)
return np.mean(recalls),np.mean(precisions),np.mean(f1s),np.mean(roc_aucs)
| 10,315
|
def split_expList(expList, max_nr_of_instr: int=8000,
verbose: bool=True):
"""
Splits a pygsti expList into sub lists to facilitate running on the CCL
and not running into the instruction limit.
Assumptions made:
- there is a fixed instruction overhead per program
- there is a fixed instruction overhead per kernel (measurement + init)
- every gate (in the gatestring) consists of a single instruction
"""
# FIXME: platform dependency on CClight
    fixed_program_overhead = 12 + 3  # declare registers + infinite loop
kernel_overhead = 4 # prepz wait and measure
instr_cnt = 0
    instr_cnt += fixed_program_overhead
# Determine where to split the expLists
cutting_indices = [0]
for i, gatestring in enumerate(expList):
instr_cnt += kernel_overhead
instr_cnt += len(gatestring)
if instr_cnt > max_nr_of_instr:
cutting_indices.append(i)
            instr_cnt = fixed_program_overhead
# Create the expSubLists, a list contain expList objects for each part
expSubLists = []
if len(cutting_indices) == 1:
expSubLists.append(expList)
else:
for exp_num, start_idx in enumerate(cutting_indices[:-1]):
stop_idx = cutting_indices[exp_num+1]
expSubLists.append(expList[start_idx:stop_idx])
# Final slice is not by default included in the experiment list
expSubLists.append(expList[cutting_indices[-1]:])
if verbose:
print("Splitted expList into {} sub lists".format(len(expSubLists)))
return expSubLists
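
# Toy sanity check (added): each "gatestring" is modelled as a plain list whose length
# equals its instruction count, matching the assumptions stated in the docstring.
toy_expList = [list(range(10))] * 5
sub_lists = split_expList(toy_expList, max_nr_of_instr=40)
print([len(s) for s in sub_lists])  # expected [1, 2, 2] with these toy numbers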
| 10,316
|
def __make_node_aliases(data: list[list[str]]):
    """Alias gene IDs to their families
    in order to build edges between them"""
famcom = {}
elems = [tokens for tokens in data if tokens[2] in ["FAMILY", "COMPLEX"]]
# Add all (gene) containers first
for tokens in elems:
famcom[tokens[1]] = AliasItem(tokens[3], [])
log.debug(famcom)
elems = [tokens for tokens in data if tokens[2] == "GENE"]
for tokens in elems:
# Add gene to its parent
famcom[tokens[3]].genes.append(GeneElem(tokens[0], tokens[1], tokens[3]))
return famcom
| 10,317
|
def poly_edges_min_length(P, T, distFcn=norm):
"""
Returns the per polygon min edge length
Parameters
----------
P : Tensor
a (N, D,) points set tensor
T : LongTensor
a (M, T,) topology tensor
Returns
-------
Tensor
        the (M, 1,) min edge length tensor
"""
return torch.min(poly_edges_length(P, T, distFcn=distFcn), dim=1, keepdim=True)[0]
| 10,318
|
def reportData_to_report(report_data: ReportData) -> Report:
"""Create a report object from the given thrift report data."""
main = {
"check_name": report_data.checkerId,
"description": report_data.checkerMsg,
"issue_hash_content_of_line_in_context": report_data.bugHash,
"location": {
"line": report_data.line,
"col": report_data.column,
"file": 0,
},
}
bug_path = None
files = {0: report_data.checkedFile}
    # TODO: Cannot reconstruct because only the analyzer name was stored;
    # it should be an analyzer_name and an analyzer_version
return Report(main, bug_path, files, metadata=None)
| 10,319
|
def new_order(sender, instance, created, **kwargs):
"""
Freeze the amount needed to fulfill the order when it is created.
Perform the trade if there are compatible orders to match.
"""
if created:
instance_wallet = get_object_or_404(Wallet, profile=instance.profile)
if instance.type == 'B':
# Freeze the dollar amount if it is a buy order
amount = instance.quantity * instance.price
instance_wallet.available_dollar -= amount
instance_wallet.frozen_dollar += amount
instance_wallet.save()
# Check sell orders to match
sell_orders = Order.objects.filter(type='S', status=True).exclude(profile=instance.profile)
for sell_order in sell_orders:
if instance.price >= sell_order.price and instance.quantity == sell_order.quantity:
sell_order_wallet = get_object_or_404(Wallet, profile=sell_order.profile)
perform_trade(instance, instance_wallet, sell_order, sell_order_wallet)
break
elif instance.type == 'S':
# Freeze the bitcoin amount if it is a sell order
amount = instance.quantity
instance_wallet.available_bitcoin -= amount
instance_wallet.frozen_bitcoin += amount
instance_wallet.save()
# Check buy orders to match
buy_orders = Order.objects.filter(type='B', status=True).exclude(profile=instance.profile)
for buy_order in buy_orders:
if buy_order.price >= instance.price and buy_order.quantity == instance.quantity:
buy_order_wallet = get_object_or_404(Wallet, profile=buy_order.profile)
perform_trade(buy_order, buy_order_wallet, instance, instance_wallet)
break
| 10,320
|
def play_against_human(
player, env_algorithm: Callable, opponent: str, env_algorithm_kwargs=None
):
"""Executes a function controlling the player while facing opponent.
The env_algorithm function is executed with the player environment as first
    argument. It exposes the OpenAI Gym API.
Additional arguments can be passed to the env_algorithm function with
env_algorithm_kwargs.
Battles against opponent will be launched as long as env_algorithm is running.
When env_algorithm returns, the current active battle will be finished randomly
if it is not already.
:param env_algorithm: A function that controls the player. It must accept the
player as first argument. Additional arguments can be passed with the
env_algorithm_kwargs argument.
:type env_algorithm: callable
    :param opponent: The player that the env player will play against.
:type opponent: Player
:param env_algorithm_kwargs: Optional arguments to pass to the env_algorithm.
Defaults to None.
"""
player._start_new_battle = True
async def launch_battles(player: EnvPlayer, opponent: str):
battles_coroutine = asyncio.gather(
player.send_challenges(
opponent=opponent,
n_challenges=1
)
)
await battles_coroutine
def env_algorithm_wrapper(player, kwargs):
env_algorithm(player, **kwargs)
player._start_new_battle = False
while True:
try:
player.complete_current_battle()
player.reset()
except OSError:
break
loop = asyncio.get_event_loop()
if env_algorithm_kwargs is None:
env_algorithm_kwargs = {}
thread = Thread(
target=lambda: env_algorithm_wrapper(player, env_algorithm_kwargs)
)
thread.start()
while player._start_new_battle:
loop.run_until_complete(launch_battles(player, opponent))
thread.join()
| 10,321
|
def softmax(x):
"""A softmax implementation."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
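
# Minimal usage example (added):
import numpy as np
scores = np.array([1.0, 2.0, 3.0])
probs = softmax(scores)
print(probs)        # approx. [0.09003057 0.24472847 0.66524096]
print(probs.sum())  # 1.0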
| 10,322
|
def get_cazy_class_fam_genbank_records(args, session, config_dict):
"""GenBank acc query results from the local CAZyme database for CAZyme from specific classes/fams
:param args: cmd-line argument parser
:param session: open SQLite db session
:param config_dict: dict, defines CAZy classes and families to get sequences for
Return CAZy class and CAZy family GenBank accession query results
"""
logger = logging.getLogger(__name__)
if args.update: # retrieve all GenBank accessions
if args.primary:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND\n"
"do not have a sequence in the db OR the sequence has been updated in NCBI"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_prim_gnbk_acc_from_clss_fams(
session,
config_dict,
)
else:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND\n"
"do not have a sequence in the db OR the sequence has been updated in NCBI"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_all_gnbk_acc_from_clss_fams(
session,
config_dict,
)
    else:  # retrieve GenBank accessions of records that don't have a sequence
if args.primary:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND do not have a sequence in the db"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_prim_gnbk_acc_from_clss_fams_no_seq(
session,
config_dict,
)
else:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND do not have a sequence in the db"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_all_gnbk_acc_from_clss_fams_no_seq(
session,
config_dict,
)
return genbank_query_class, genbank_query_family
| 10,323
|
def assign_bias_ID(data, bias_params=None, bias_name='bias_ID', key_name=None, bias_model=None):
"""
Assign a value to each data point that determines which biases are applied to it.
parameters:
data: pointCollection.data instance
bias_parameters: a list of parameters, each unique combination of which defines a different bias
bias_name: a name for the biases
key_name: an optional parameter which will be used as the dataset name, otherwise a key will be built from the parameter values
bias_model: a dict containing entries:
            E_bias: a dict of expected bias values for each bias_ID, determined from the sigma_corr parameter of the data
bias_ID_dict: a dict giving the parameter values for each bias_ID (or the key_name if provided)
bias_param_dict: a dict giving the mapping from parameter values to bias_ID values
"""
if bias_model is None:
bias_model={'E_bias':dict(), 'bias_param_dict':dict(), 'bias_ID_dict':dict()}
bias_ID=np.zeros(data.size)+-9999
p0=len(bias_model['bias_ID_dict'].keys())
if bias_params is None:
# assign all data the same bias
bias_model['bias_ID_dict'][p0+1]=key_name
bias_ID=p0+1
bias_model['E_bias'][p0+1]=np.nanmedian(data.sigma_corr)
else:
bias_ID=np.zeros(data.size)
temp=np.column_stack([getattr(data, bp) for bp in bias_params])
u_p, i_p=unique_by_rows(temp, return_index=True)
bias_model['bias_param_dict'].update({param:list() for param in bias_params})
bias_model['bias_param_dict'].update({'ID':list()})
for p_num, param_vals in enumerate(u_p):
this_mask=np.ones(data.size, dtype=bool)
param_vals_dict={}
#Identify the data that match the parameter values
for i_param, param in enumerate(bias_params):
this_mask = this_mask & (getattr(data, param)==param_vals[i_param])
param_vals_dict[param]=param_vals[i_param]
#this_name += '%s%3.2f' % (param, param_vals[i_param])
bias_model['bias_param_dict'][param].append(param_vals[i_param])
bias_model['bias_param_dict']['ID'].append(p0+p_num)
this_ind=np.where(this_mask)[0]
bias_ID[this_ind]=p0+p_num
bias_model['bias_ID_dict'][p0+p_num]=param_vals_dict
bias_model['E_bias'][p0+p_num]=np.nanmedian(data.sigma_corr[this_ind])
data.assign({bias_name:bias_ID})
return data, bias_model
| 10,324
|
def check(s):
"""
:param s:str. the input of letters
:return: bool.
"""
if len(s) == 7 and len(s.split(' ')) == 4:
for unit in s.split(' '):
if unit.isalpha():
return True
| 10,325
|
def pyeval(*args):
"""
.. function:: pyeval(expression)
Evaluates with Python the expression/s given and returns the result
>>> sql("pyeval '1+1'")
pyeval('1+1')
-------------
2
>>> sql("select var('test')") # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator VAR: Variable 'test' does not exist
>>> sql("select var('test', pyeval('1+1'))")
var('test', pyeval('1+1'))
--------------------------
2
>>> sql("select var('test')")
var('test')
-----------
2
>>> sql('''pyeval '1+1' '"-"' '3+1' ''')
pyeval('1+1','"-"','3+1')
-------------------------
2-4
>>> sql("var 'testvar' of select 5")
var('testvar',(select 5))
-------------------------
5
>>> sql("pyeval 'testvar+5'")
pyeval('testvar+5')
-------------------
10
>>> sql('''pyeval keywords('lala') ''')
pyeval('keywords(''lala'')')
----------------------------
lala
"""
if len(args) == 0:
return
r = ''
for i in args:
r = r + str(eval(i, functions.variables.__dict__, functions.rowfuncs.__dict__))
return r
| 10,326
|
def test_hinge_loss_backward():
"""
Tests the backward pass of the hinge loss function
"""
from your_code import HingeLoss
X = np.array([[-1, 2, 1], [-3, 4, 1]])
w = np.array([1, 2, 3])
y = np.array([1, -1])
loss = HingeLoss(regularization=None)
_true = np.array([-1.5, 2, 0.5])
_est = loss.backward(X, w, y)
assert np.allclose(_true, _est)
| 10,327
|
def is_buggy(battery: Dict) -> bool:
"""
    This method returns True in case an ACPI bug has occurred.
In this case the battery is flagged unavailable and has no capacity information.
:param battery: the battery dictionary
:return: bool
"""
return battery['design_capacity'] is None
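
# Example (added): a battery dict with no capacity reading is flagged as buggy.
print(is_buggy({'design_capacity': None}))  # True
print(is_buggy({'design_capacity': 5200}))  # False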
| 10,328
|
def with_hyperparameters(uri: Text):
"""Constructs an ImporterNode component that imports a `standard_artifacts.HyperParameters`
artifact to use for future runs.
Args:
uri (Text): Hyperparameter artifact's uri
Returns: ImporterNode
"""
return ImporterNode(
instance_name='with_hyperparameters',
source_uri=uri,
artifact_type=standard_artifacts.HyperParameters)
| 10,329
|
def update_user_controller(user_repository_spy): # pylint: disable=W0621
"""montagem de update_user_controller utilizando spy"""
usecase = UpdateUser(user_repository_spy, PasswordHash())
controller = UpdateUserController(usecase)
return controller
| 10,330
|
def blockList2Matrix(l):
""" Converts a list of matrices into a corresponding big block-diagonal one. """
dims = [m.shape[0] for m in l]
s = sum(dims)
res = zeros((s, s))
index = 0
for i in range(len(l)):
d = dims[i]
m = l[i]
res[index:index + d, index:index + d] = m
index += d
return res
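
# Usage sketch (added), assuming `zeros` above is numpy.zeros (e.g. via a pylab-style import)
# and numpy is available as np:
a = np.array([[1., 2.], [3., 4.]])
b = np.array([[5.]])
print(blockList2Matrix([a, b]))
# [[1. 2. 0.]
#  [3. 4. 0.]
#  [0. 0. 5.]]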
| 10,331
|
def log_new_fit(new_fit, log_gplus, mode='residual'):
"""Log the successful refits of a spectrum.
Parameters
----------
new_fit : bool
If 'True', the spectrum was successfully refit.
log_gplus : list
Log of all previous successful refits of the spectrum.
mode : str ('positive_residual_peak', 'negative_residual_peak', 'broad', 'blended')
Specifies the feature that was refit or used for a new successful refit.
Returns
-------
log_gplus : list
Updated log of successful refits of the spectrum.
"""
if not new_fit:
return log_gplus
modes = {'positive_residual_peak': 1, 'negative_residual_peak': 2, 'broad': 3, 'blended': 4}
log_gplus.append(modes[mode])
return log_gplus
| 10,332
|
def prepare_hr_for_compromised_credentials(hits: list) -> str:
"""
Prepare human readable format for compromised credentials
:param hits: List of compromised credentials
:return: Human readable format of compromised credentials
"""
hr = []
for hit in hits:
source = hit.get('_source', {})
created_date = source.get('breach', {}).get('created_at', {}).get('date-time')
created_date = arg_to_datetime(created_date)
if created_date:
created_date = created_date.strftime(READABLE_DATE_FORMAT) # type: ignore
first_observed_date = source.get('breach', {}).get('first_observed_at', {}).get('date-time')
first_observed_date = arg_to_datetime(first_observed_date)
if first_observed_date:
first_observed_date = first_observed_date.strftime(READABLE_DATE_FORMAT) # type: ignore
data = {
'FPID': source.get('fpid', ''),
'Email': source.get('email', ''),
'Breach Source': source.get('breach', {}).get('source'),
'Breach Source Type': source.get('breach', {}).get('source_type'),
'Password': source.get('password'),
'Created Date (UTC)': created_date,
'First Observed Date (UTC)': first_observed_date
}
hr.append(data)
return tableToMarkdown("Compromised Credential(s)", hr, ['FPID', 'Email', 'Breach Source', 'Breach Source Type',
'Password', 'Created Date (UTC)',
'First Observed Date (UTC)'], removeNull=True)
| 10,333
|
def test_walk_directory_artifacts():
"""Ensure walk_directory_artifacts works as expected."""
for filepath in archive.walk_directory_artifacts(
TEST_DIRECTORY_PATH, include={"test_*.py"}, exclude={"test_archive.py"}
):
assert filepath.is_file()
assert filepath.name.startswith("test_")
assert filepath.name != "test_archive.py"
| 10,334
|
def red_bg(text):
""" Adds a red background to the given text. """
return colorize(text, "\033[48;5;167m")
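
# `colorize` is defined elsewhere in this module; a plausible minimal stand-in
# (an assumption, not the project's implementation) wraps the text in the ANSI
# code and a reset sequence:
def colorize(text, escape_code):
    return "{}{}\033[0m".format(escape_code, text)

print(red_bg("ALERT"))  # shows "ALERT" on a red background in ANSI-capable terminals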
| 10,335
|
def update_inc_admins_statistics_row_name(row_name: str) -> None:
"""
row_name must only be in (orioks_scheduled_requests, orioks_success_logins, orioks_failed_logins)
"""
if row_name not in ('orioks_scheduled_requests', 'orioks_success_logins', 'orioks_failed_logins'):
raise Exception('update_inc_admins_statistics_row_name() -> row_name must only be in ('
'orioks_scheduled_requests, orioks_success_logins, orioks_failed_logins)')
db = sqlite3.connect(config.PATH_TO_DB)
sql = db.cursor()
with open(os.path.join(config.PATH_TO_SQL_FOLDER, 'update_inc_admins_statistics_row_name.sql'), 'r') as sql_file:
sql_script = sql_file.read()
sql.execute(sql_script.format(row_name=row_name), {
'row_name': row_name
})
db.commit()
db.close()
| 10,336
|
def model_utils(decoy: Decoy) -> ModelUtils:
"""Get mock ModelUtils."""
return decoy.mock(cls=ModelUtils)
| 10,337
|
def getLeftTopOfTile(tilex, tiley):
"""Remember from the comments in the getStartingBoard() function that we have two sets of coordinates in this program. The first set are the pixel coordinates, which on the x-axis ranges from 0 to WINDOWWIDTH - 1, and the y-axis ranges from 0 to WINDOWHEIGHT - 1.
Lembrando que a partir dos comentários na função getStartingBoard() temos dois conjuntos de coordenadas neste programa. O primeiro conjunto são as coordenadas dos pixels, que no intervalo do eixo-x vai de 0 até WINDOWWIDTH - 1 e no intervalo do eixo-y vai de 0 até WINDOWHEIGHT - 1.
The other coordinate system is used to refer to the tiles on the game board. The upper left tile is at 0, 0. The x-axis ranges from 0 to COLS - 1, and the y-axis ranges from 0 to ROWS - 1.
O outro sistema de coordenadas é usado para se referir as peças do jogo no tabuleiro. A peça superior esquerda está em 0,0. O intervalo do eixo-x vai de 0 até COLS -1, e o intervalo do eixo-y vai de 0 até ROWS -1."""
left = XMARGIN + (tilex * TILESIZE) + (tilex - 1)
top = YMARGIN + (tiley * TILESIZE) + (tiley - 1)
return (left, top)
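
# Worked example (added): with hypothetical constants XMARGIN = 10, YMARGIN = 10 and
# TILESIZE = 80 (assumptions -- the real values live elsewhere in this program),
# getLeftTopOfTile(2, 1) returns:
#   left = 10 + 2 * 80 + (2 - 1) = 171
#   top  = 10 + 1 * 80 + (1 - 1) = 90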
| 10,338
|
def count_str(text, sub, start=None, end=None):
"""
Computes the number of non-overlapping occurrences of substring ``sub`` in ``text[start:end]``.
Optional arguments start and end are interpreted as in slice notation.
:param text: The string to search
:type text: ``str``
:param sub: The substring to count
:type sub: ``str``
:param start: The start of the search range
:type start: ``int``
:param end: The end of the search range
:type end: ``int``
:return: The number of non-overlapping occurrences of substring ``sub`` in ``text[start:end]``.
:rtype: ``int``
"""
assert isinstance(text,str), '%s is not a string' % text
return text.count(sub,start,end)
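
# Examples (added):
print(count_str("banana", "an"))           # 2
print(count_str("banana", "na", start=3))  # 1 -> only the occurrence at index 4 is counted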
| 10,339
|
def test_alarm_set_get(monkeypatch):
"""
test set functions
"""
act = AlarmAction.RAISE
assert act is not None
sev = AlarmSeverity.CRITICAL
assert sev is not None
det = AlarmDetail("1", "2", 3, AlarmSeverity.MINOR, "4", "5")
assert det[alarm.KEY_MANAGED_OBJECT_ID] == "1"
assert det[alarm.KEY_APPLICATION_ID] == "2"
assert det[alarm.KEY_SPECIFIC_PROBLEM] == 3
assert det[alarm.KEY_PERCEIVED_SEVERITY] == AlarmSeverity.MINOR.name
assert det[alarm.KEY_IDENTIFYING_INFO] == "4"
assert det[alarm.KEY_ADDITIONAL_INFO] == "5"
# missing environment variables
with pytest.raises(InitFailed):
alarm.AlarmManager(MRC_SEND, "missing", "envvars")
# invalid environment variables
monkeypatch.setenv(ALARM_MGR_SERVICE_NAME_ENV, "0")
monkeypatch.setenv(ALARM_MGR_SERVICE_PORT_ENV, "a")
with pytest.raises(InitFailed):
alarm.AlarmManager(MRC_SEND, "bogus", "envvars")
# good environment variables
monkeypatch.setenv(ALARM_MGR_SERVICE_NAME_ENV, "127.0.0.1") # do NOT use localhost
monkeypatch.setenv(ALARM_MGR_SERVICE_PORT_ENV, "4567") # any int is ok here
mgr = alarm.AlarmManager(MRC_SEND, "moid2", "appid2")
assert mgr is not None
assert mgr.managed_object_id == "moid2"
assert mgr.application_id == "appid2"
| 10,340
|
async def test_password_status():
"""
Test password status.
"""
service = PasswordService(user)
res = await service.password_status()
assert isinstance(res, dict)
assert res.get("status") == "change"
| 10,341
|
def tanh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.tanh(x, out)
return Quantity(
np.tanh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
)
| 10,342
|
def epacems(
states: Optional[Sequence[str]] = None,
years: Optional[Sequence[int]] = None,
columns: Optional[Sequence[str]] = None,
epacems_path: Optional[Path] = None,
) -> dd.DataFrame:
"""Load EPA CEMS data from PUDL with optional subsetting.
Args:
states: subset by state abbreviation. Defaults to None (which gets all states).
years: subset by year. Defaults to None (which gets all years).
columns: subset by column. Defaults to None (which gets all columns).
epacems_path: path to parquet dir. By default it automatically loads the path
from :mod:`pudl.workspace`
Returns:
The requested epacems data
"""
all_states = pudl.constants.WORKING_PARTITIONS['epacems']['states']
if states is None:
states = all_states # all states
else:
nonexistent = [state for state in states if state not in all_states]
if nonexistent:
raise ValueError(
f"These input states are not in our dataset: {nonexistent}")
states = list(states)
all_years = pudl.constants.WORKING_PARTITIONS['epacems']['years']
if years is None:
years = all_years
else:
nonexistent = [year for year in years if year not in all_years]
if nonexistent:
raise ValueError(f"These input years are not in our dataset: {nonexistent}")
years = list(years)
# columns=None is handled by dd.read_parquet; gives all columns
if columns is not None:
# nonexistent columns are handled by dd.read_parquet; raises ValueError
columns = list(columns)
if epacems_path is None:
pudl_settings = pudl.workspace.setup.get_defaults()
epacems_path = Path(pudl_settings["parquet_dir"]) / "epacems"
epacems = dd.read_parquet(
epacems_path,
use_nullable_dtypes=True,
columns=columns,
filters=year_state_filter(
states=states,
years=years,
),
)
return epacems
| 10,343
|
def check_termination_criteria(
theta: Optional[float],
num_iterations: Optional[int]
) -> Tuple[float, int]:
"""
Check theta and number of iterations.
:param theta: Theta.
:param num_iterations: Number of iterations.
:return: Normalized values.
"""
# treat theta <= 0 as None, as the caller wants to ignore it.
if theta is not None and theta <= 0:
theta = None
# treat num_iterations <= 0 as None, as the caller wants to ignore it.
if num_iterations is not None and num_iterations <= 0:
num_iterations = None
if theta is None and num_iterations is None:
raise ValueError('Either theta or num_iterations (or both) must be provided.')
logging.info(f'Starting evaluation (theta={theta}, num_iterations={num_iterations}).')
return theta, num_iterations
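
# Example calls (added), assuming the function above is in scope:
print(check_termination_criteria(theta=-1.0, num_iterations=100))   # (None, 100); theta <= 0 is ignored
print(check_termination_criteria(theta=0.001, num_iterations=None)) # (0.001, None)
# check_termination_criteria(theta=None, num_iterations=0) raises ValueError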
| 10,344
|
def test_real_complex():
"""Test converting ScalarValue to float/complex"""
val = ScalarValue(1 - 2j)
with pytest.raises(TypeError):
float(val)
c = complex(val)
assert c == 1 - 2j
assert c == val
assert c.real == 1
assert c.imag == -2
assert isinstance(c, complex)
val = ScalarValue(1.25)
f = float(val)
assert f == 1.25
assert f == val
assert isinstance(f, float)
alpha = ScalarValue(symbols('alpha'))
with pytest.raises(TypeError):
assert float(alpha) == 0
with pytest.raises(TypeError):
assert complex(alpha) == 0
| 10,345
|
def _get_rank_info():
"""
get rank size and rank id
"""
rank_size = int(os.environ.get("RANK_SIZE", 1))
if rank_size > 1:
rank_size = int(os.environ.get("RANK_SIZE"))
rank_id = int(os.environ.get("RANK_ID"))
else:
rank_size = 1
rank_id = 0
return rank_size, rank_id
| 10,346
|
def verify_password(password, hash):
"""Verify if a hash was generated by the password specified.
:password: a string object (plaintext).
:hash: a string object.
:returns: True or False.
"""
method = get_hash_algorithm(flask.current_app.config['HASH_ALGORITHM'])
return method.verify(password, hash)
| 10,347
|
def _conversion_sample2v_from_meta(meta_data):
"""
Interpret the meta data to extract an array of conversion factors for each channel
so the output data is in Volts
Conversion factor is: int2volt / channelGain
For Lf/Ap interpret the gain string from metadata
For Nidq, repmat the gains from the trace counts in `snsMnMaXaDw`
:param meta_data: dictionary output from spikeglx.read_meta_data
:return: numpy array with one gain value per channel
"""
def int2volts(md):
""" :return: Conversion scalar to Volts. Needs to be combined with channel gains """
if md.get('typeThis', None) == 'imec':
return md.get('imAiRangeMax') / 512
else:
return md.get('niAiRangeMax') / 32768
int2volt = int2volts(meta_data)
# interprets the gain value from the metadata header:
if 'imroTbl' in meta_data.keys(): # binary from the probes: ap or lf
sy_gain = np.ones(int(meta_data['snsApLfSy'][-1]), dtype=np.float32)
        # imroTbl has 384 entries regardless of the number of channels saved, so we need to index by n_chn
n_chn = _get_nchannels_from_meta(meta_data) - 1
# the sync traces are not included in the gain values, so are included for broadcast ops
gain = re.findall(r'([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)', meta_data['imroTbl'])[:n_chn]
out = {'lf': np.hstack((np.array([1 / np.float32(g.split(' ')[-1]) for g in gain]) *
int2volt, sy_gain)),
'ap': np.hstack((np.array([1 / np.float32(g.split(' ')[-2]) for g in gain]) *
int2volt, sy_gain))}
elif 'niMNGain' in meta_data.keys(): # binary from nidq
gain = np.r_[
np.ones(int(meta_data['snsMnMaXaDw'][0],)) / meta_data['niMNGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][1],)) / meta_data['niMAGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][2], )) * int2volt, # no gain for analog sync
np.ones(int(np.sum(meta_data['snsMnMaXaDw'][3]),))] # no unit for digital sync
out = {'nidq': gain}
return out
| 10,348
|
def read_split_csv(input_files, delimiter='\t', names=['src', 'dst'],
dtype=['int32', 'int32']):
"""
Read csv for large datasets which cannot be read directly by dask-cudf
read_csv due to memory requirements. This function takes large input
split into smaller files (number of input_files > number of gpus),
reads two or more csv per gpu/worker and concatenates them into a
single dataframe. Additional parameters (delimiter, names and dtype)
can be specified for reading the csv file.
"""
client = default_client()
n_files = len(input_files)
n_gpus = get_n_gpus()
n_files_per_gpu = int(n_files/n_gpus)
worker_map = []
for i, w in enumerate(client.has_what().keys()):
files_per_gpu = input_files[i*n_files_per_gpu: (i+1)*n_files_per_gpu]
worker_map.append((files_per_gpu, w))
new_ddf = [client.submit(_read_csv, part, delimiter, names, dtype,
workers=[worker]) for part, worker in worker_map]
wait(new_ddf)
return new_ddf
| 10,349
|
def _gnurl( clientID ):
"""
Helper function to form URL to Gracenote_ API service.
:param str clientID: the Gracenote_ client ID.
:returns: the lower level URL to the Gracenote_ API.
:rtype: str
"""
clientIDprefix = clientID.split('-')[0]
return 'https://c%s.web.cddbp.net/webapi/xml/1.0/' % clientIDprefix
| 10,350
|
def test_pep8_conformance():
"""Test source code for PEP8 conformance"""
try:
import pep8
except:
print("Skipping pep8 Tests because pep8.py not installed.")
return
# Skip test if pep8 is not new enough
pep8_version = parse_version(get_distribution('pep8').version)
needed_version = parse_version('1.0')
if pep8_version < needed_version:
print("Skipping pep8 Tests because pep8.py is too old")
return
pep8style = pep8.StyleGuide(max_line_length=120)
report = pep8style.options.report
report.start()
pep8style.options.exclude.append('git_archive_all.py')
pep8style.input_dir(os.path.join('..', 'vcstools', 'src'))
report.stop()
assert report.total_errors == 0, "Found '{0}' code style errors (and warnings).".format(report.total_errors)
| 10,351
|
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. It is assumed that the keys do not need to be XML-escaped.
If the passed dictionary is empty, then return an empty string.
jsled: stolen from django newforms/util.py...
"""
return u''.join([u' %s="%s"' % (k, escape(v)) for k, v in attrs.items()])
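
# Standalone sketch (added): the same idea using html.escape instead of Django's escape
# helper -- an assumption, not the original module's import:
from html import escape

def flatatt_sketch(attrs):
    return u''.join([u' %s="%s"' % (k, escape(v)) for k, v in attrs.items()])

print(flatatt_sketch({'class': 'btn', 'id': 'save'}))  # ' class="btn" id="save"'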
| 10,352
|
def fetch_item_info(session, observations, claims, verbose=False):
"""
Fetches information about wikidata items.
:Parameters:
session : :class:`mwapi.Session`
An API session to use for querying
observations : `iterable`(`dict`)
A collection of observations to annotate
claims : `list` ( `str` )
A set of property names to look up claims for
verbose : `bool`
Print dots and stuff
:Returns:
An `iterator` of observations augmented with an `autolabel` field
containing the requested information. Note that observations that
can't be found will be excluded.
"""
batches = chunkify(observations, 25)
executor = ThreadPoolExecutor(max_workers=4)
_fetch_item_info = build_fetch_item_info(session, claims)
for annotated_batch in executor.map(_fetch_item_info, batches):
for annotated_item in annotated_batch:
yield annotated_item
if verbose:
sys.stderr.write(".")
sys.stderr.flush()
if verbose:
sys.stderr.write("\n")
| 10,353
|
def draw_bboxes(img,boxes,classes):
"""
Draw bounding boxes on top of an image
Args:
img : Array of image to be modified
boxes: An (N,4) array of boxes to draw, where N is the number of boxes.
classes: An (N,1) array of classes corresponding to each bounding box.
Outputs:
        A PIL Image of the same size as 'img' with bounding boxes
        and classes drawn
"""
source = Image.fromarray(img)
draw = ImageDraw.Draw(source)
w2,h2 = (img.shape[0],img.shape[1])
idx = 0
for i in range(len(boxes)):
xmin,ymin,xmax,ymax = boxes[i]
c = classes[i]
draw.text((xmin+15,ymin+15), str(c))
for j in range(4):
draw.rectangle(((xmin+j, ymin+j), (xmax+j, ymax+j)), outline="red")
return source
| 10,354
|
def main(dataset_name):
"""Clean the data from the clinical datasets.
    We exclude subjects outside the age range [47, 73] based on the UK Biobank data.
"""
# ----------------------------------------------------------------------------------------
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
ids_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
output_ids_filename = dataset_name + '_cleaned_ids.csv'
# ----------------------------------------------------------------------------------------
outputs_dir = PROJECT_ROOT / 'outputs'
dataset = load_demographic_data(participants_path, ids_path)
dataset = dataset.loc[(dataset['Age'] >= 47) & (dataset['Age'] <= 73)]
dataset = dataset.drop_duplicates(subset='participant_id')
output_ids_df = dataset[['Image_ID']]
assert sum(output_ids_df.duplicated()) == 0
output_ids_df.to_csv(outputs_dir / output_ids_filename, index=False)
| 10,355
|
def test_grdcut_file_in_file_out():
"""
grdcut an input grid file, and output to a grid file.
"""
with GMTTempFile(suffix=".nc") as tmpfile:
result = grdcut("@earth_relief_01d", outgrid=tmpfile.name, region="0/180/0/90")
assert result is None # return value is None
assert os.path.exists(path=tmpfile.name) # check that outgrid exists
result = grdinfo(tmpfile.name, C=True)
assert result == "0 180 0 90 -8182 5651.5 1 1 180 90 1 1\n"
| 10,356
|
def markdown(context, template_path):
""" {% markdown 'terms-of-use.md' %} """
return mark_safe(get_markdown(context, template_path)[0])
| 10,357
|
def investorMasterGetSubaccAssetDetails(email, recvWindow=""):
"""# Query managed sub-account asset details(For Investor Master Account)
#### `GET /sapi/v1/managed-subaccount/asset (HMAC SHA256)`
### Weight:
1
### Parameters:
Name |Type |Mandatory |Description
--------|--------|--------|--------
email |STRING |YES |
recvWindow |LONG |NO |
timestamp |LONG |YES |
"""
endpoint = '/sapi/v1/managed-subaccount/asset'
params = {
"email": email
}
if recvWindow: params["recvWindow"] = recvWindow
return getbinancedata_sig(endpoint, params)
| 10,358
|
def ema_indicator(close, n=12, fillna=False):
"""EMA
Exponential Moving Average via Pandas
Args:
close(pandas.Series): dataset 'Close' column.
        n(int): n period short-term.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(close, n, fillna)
return pd.Series(ema_, name='ema')
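
# The `ema` helper is defined elsewhere in this package; a rough standalone equivalent
# (an assumption -- NaN/edge handling may differ) uses pandas' ewm:
import pandas as pd

def ema_sketch(close: pd.Series, n: int = 12) -> pd.Series:
    return close.ewm(span=n, adjust=False).mean().rename('ema')

print(ema_sketch(pd.Series([10.0, 11.0, 12.0, 11.5, 12.5]), n=3))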
| 10,359
|
def connect(addr=None, proto=None, name=None, pgrok_config=None, **options):
"""
Establish a new ``pgrok`` tunnel for the given protocol to the given port, returning an object representing
the connected tunnel.
    If a tunnel definition in pgrok's config file matches the given ``name``, it will be loaded and used to
    start the tunnel. When ``name`` is ``None`` and a "pgrok_default" tunnel definition exists in ``pgrok``'s
    config, it will be loaded and used. Any ``kwargs`` passed as ``options`` will
override properties from the loaded tunnel definition.
If ``pgrok`` is not installed at :class:`~pgrok.PgrokConfig`'s ``pgrok_path``, calling this method
will first download and install ``pgrok``.
If ``pgrok`` is not running, calling this method will first start a process with
:class:`~pgrok.PgrokConfig`.
.. note::
``pgrok``'s default behavior for ``http`` when no additional properties are passed is to open *two* tunnels,
one ``http`` and one ``https``. This method will return a reference to the ``http`` tunnel in this case. If
only a single tunnel is needed, pass ``bind_tls=True`` and a reference to the ``https`` tunnel will be returned.
"""
if pgrok_config is None:
pgrok_config = get_default_config()
config = get_pgrok_config(pgrok_config.config_path) if os.path.exists(pgrok_config.config_path) else {}
# If a "pgrok-default" tunnel definition exists in the pgrok config, use that
tunnel_definitions = config.get("tunnels", {})
if not name and "pgrok_default" in tunnel_definitions:
name = "pgrok_default"
# Use a tunnel definition for the given name, if it exists
if name and name in tunnel_definitions:
tunnel_definition = tunnel_definitions[name]
proto_map = tunnel_definition.get("proto", {})
protocol = [k for k in proto_map.keys() if k in ['http', 'https', 'tcp']]
assert len(protocol) > 0, \
ValueError("Invalid proto in config should be http|https|tcp")
addr = proto_map[protocol[0]] if not addr else addr
proto = proto if proto else protocol[0]
# Use the tunnel definition as the base, but override with any passed in options
tunnel_definition.update(options)
options = tunnel_definition
addr = str(addr) if addr else "80"
if not proto:
proto = "http"
if not name:
if not addr.startswith("file://"):
name = "{}-{}-{}".format(proto, addr, uuid.uuid4())
else:
name = "{}-file-{}".format(proto, uuid.uuid4())
logger.info("Opening tunnel named: {}".format(name))
# Create a temporary config yml and if config_path not set earlier
if not os.path.exists(pgrok_config.config_path) or not validate_config(config):
with tempfile.NamedTemporaryFile(suffix='.yml') as tmp:
_default_config['tunnels'].pop('pgrok_default', None)
tunnel_name = {}
tunnel_name['proto'] = {proto: addr}
tunnel_name['proto'].update(options)
_default_config['tunnels'][name] = tunnel_name
pgrok_config.config_path = tmp.name
process = get_pgrok_process(pgrok_config, service_name=name)
# Set tunnel parameter
_tunnelcfg = {
"name": name,
"addr": addr,
"proto": proto
}
options.update(_tunnelcfg)
options['api_url'] = process.api_url
options['public_url'] = process.public_url
tunnel = PgrokTunnel(options, pgrok_config)
logger.debug("Creating tunnel with options: {}".format(options))
_current_tunnels[tunnel.public_url] = tunnel
return tunnel
| 10,360
|
def u_glob(U, elements, nodes, resolution_per_element=51):
"""
Compute (x, y) coordinates of a curve y = u(x), where u is a
finite element function: u(x) = sum_i of U_i*phi_i(x).
    Method: Run through each element and compute coordinates
over the element.
"""
x_patches = []
u_patches = []
for e in range(len(elements)):
Omega_e = (nodes[elements[e][0]], nodes[elements[e][-1]])
local_nodes = elements[e]
d = len(local_nodes) - 1
X = np.linspace(-1, 1, resolution_per_element)
x = affine_mapping(X, Omega_e)
x_patches.append(x)
u_element = 0
for r in range(len(local_nodes)):
i = local_nodes[r] # global node number
u_element += U[i]*phi_r(r, X, d)
u_patches.append(u_element)
x = np.concatenate(x_patches)
u = np.concatenate(u_patches)
return x, u
| 10,361
|
def date(value) -> DateValue:
"""Return a date literal if `value` is coercible to a date.
Parameters
----------
value
Date string
Returns
-------
DateScalar
A date expression
"""
raise NotImplementedError()
| 10,362
|
def aggregate_experiments_results(precomputed_name = "", multianno_only = True):
"""
Load all of the results from the SALAMI experiment and plot
the annotator agreements
"""
# Step 1: Extract feature-based agreements
names = ['MFCCs', 'Chromas', 'Tempogram', 'Crema', 'Fused Tgram/Crema', 'Fused MFCC/Chroma', 'Fused', 'interanno']
prls = {name:np.zeros((0, 3)) for name in names} # Dictionary of precison, recall, and l-scores
    idxs = []  # Indices (song numbers) corresponding to each result row
if len(precomputed_name) == 0:
for num in [int(s) for s in os.listdir(AUDIO_DIR)]:
matfilename = '%s/%i/results.mat'%(AUDIO_DIR, num)
if os.path.exists(matfilename):
res = sio.loadmat(matfilename)
thisnanno = 0
for name in names:
if name in res:
nres = res[name]
nres = np.reshape(nres, (int(nres.size/3), 3))
nanno = nres.shape[0]
thisnanno = max(thisnanno, nanno)
if (not (name == 'interanno')) and nanno < 2 and multianno_only:
continue
prls[name] = np.concatenate((prls[name], nres), 0)
idxs += [num]*thisnanno
idxs = np.array(idxs)
print("idxs.shape = ", idxs.shape)
res = {a:prls[a] for a in prls}
res['idxs'] = idxs
sio.savemat("allresults.mat", res)
else:
res = sio.loadmat(precomputed_name)
idxs = res['idxs'].flatten()
print("idxs.shape = ", idxs.shape)
counts = {}
for idx in idxs:
if not idx in counts:
counts[idx] = 0
counts[idx] += 1
to_keep = np.ones_like(idxs)
for i, idx in enumerate(idxs):
if counts[idx] < 2:
to_keep[i] = 0
print(to_keep.shape)
res.pop('idxs')
for name in names:
res[name] = res[name][to_keep == 1, :]
prls = res
print("Plotting statistics for %i examples"%(res['MFCCs'].shape[0]/2))
interanno = res['interanno']
names.remove('interanno')
# Step 2: Plot distribution and KS-score of feature-based agreements
# versus inter-annotator agreements
plt.figure(figsize=(15, 5))
for i, plotname in enumerate(['Precision', 'Recall', 'L-Measure']):
plt.subplot(1, 3, i+1)
legend = ['interanno']
sns.kdeplot(interanno[:, i], shade=True)
for name in names:
prl = prls[name]
sns.kdeplot(prl[:, i], shade=True)
k = stats.ks_2samp(interanno[:, i], prl[:, i])[0]
legend.append('%s, K=%.3g, Mean=%.3g'%(name, k, np.mean(prl[:, i])))
plt.legend(legend)
plt.title("Salami %s"%plotname)
plt.xlabel(plotname)
plt.ylabel("Probability Density")
plt.xlim([0, 1])
ymax = min(plt.gca().get_ylim()[1], 8)
plt.ylim([0, ymax])
plt.ylim([0, 5])
plt.savefig("Results.svg", bbox_inches='tight')
plt.clf()
interanno = prls['interanno']
## Step 3: Scatter inter-annotator scores against fused scores
for i, plotname in enumerate(['Precision', 'Recall', 'L-Measure']):
plt.subplot(1, 3, i+1)
prl = prls['Fused']
plt.scatter(prl[0::2, i], interanno[:, i])
plt.scatter(prl[1::2, i], interanno[:, i])
plt.title("Salami %s"%plotname)
plt.xlabel("Annotator-fused agreement")
plt.ylabel("Annotator-annotator agreement")
plt.savefig("Results_AnnotatorAgreement.png", bbox_inches='tight')
## Step 4: Report top 10 recall improvements of fusion over other features
improvement = np.ones(prls['Fused'].shape[0])
for name in names:
if name == 'Fused' or name == 'interanno':
continue
improvement += prls[name][:, i]
print("idxs.size = ", idxs.size)
print("improvement.size = ", improvement.size)
print(idxs[np.argsort(-improvement)][0:20])
| 10,363
|
def test_queue_enqueue_multiple_nodes():
"""
Can successfully enqueue multiple items into a queue
"""
grocery_checkout_queue = Queue()
grocery_checkout_queue.enqueue('Adam')
grocery_checkout_queue.enqueue('Sue')
grocery_checkout_queue.enqueue('Michael')
assert grocery_checkout_queue.front.value == 'Adam'
assert grocery_checkout_queue.rear.value == 'Michael'
| 10,364
|
def keyring_rgw_create(**kwargs):
"""
Create rgw bootstrap keyring for cluster.
Args:
**kwargs: Arbitrary keyword arguments.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
params = dict(kwargs)
params["keyring_type"] = "rgw"
return keyring_create(**params)
| 10,365
|
def read_tm224_data(filename: str, folder: str = None) -> pandas.DataFrame:
"""
Read data stored by Lakeshore TM224 temperature monitor software.
Args:
filename: string
name of ".xls" file on disk
folder: string
location of file on disk
Returns:
df : pandas.DataFrame
DataFrame with all .xls columns and converted matplotlib timestamps
"""
if not filename.endswith(".xls"):
filename += ".xls"
# Extract only the timestamp
timestamp = pd.read_excel(folder + filename, skiprows=1, nrows=1, usecols=[1], header=None)[1][0]
# Parse datetime object from timestamp
timestamp_dt = parser.parse(timestamp, tzinfos={"CET": 0 * 3600})
# Create DataFrame
df = pd.read_excel(folder + filename, skiprows=3)
# Add matplotlib datetimes to DataFrame
time_array = []
for milliseconds in df["Time"]:
time_array.append(timestamp_dt + datetime.timedelta(milliseconds=milliseconds))
# noinspection PyUnresolvedReferences
df["MPL_datetimes"] = matplotlib.dates.date2num(time_array)
return df
| 10,366
|
def create_db_system(**kwargs):
"""Creates a DbSystem with the given id
If no id is given, it will prompt the user for the id.
Args:
**kwargs: Optional parameters
Keyword Args:
db_system_name (str): The new name of the DB System.
description (str): The new description of the DB System.
availability_domain (str): The name of the availability_domain
shape (str): The compute shape name to use for the instance
subnet_id (str): The OCID of the subnet to use
configuration_id (str): The OCID of the MySQL configuration
data_storage_size_in_gbs (int): The data storage size in gigabytes
mysql_version (str): The MySQL version
admin_username (str): The name of the administrator user account
admin_password (str): The password of the administrator account
private_key_file_path (str): The file path to an SSH private key
par_url (str): The PAR url used for initial data import
perform_cleanup_after_import (bool): Whether the bucket and PARs should
be kept or deleted if an import took place
source_mysql_uri (str): The MySQL Connection URI if data should
be imported from an existing MySQL Server instance
        source_mysql_password (str): The password to use when data
should be imported from an existing MySQL Server instance
source_local_dump_dir (str): The path to a local directory that
contains a dump
source_bucket (str): The name of the source bucket that contains
a dump
host_image_id (str): OCID of the host image to use for this Instance.
Private API only.
defined_tags (dict): The defined_tags of the dynamic group.
freeform_tags (dict): The freeform_tags of the dynamic group
compartment_id (str): The OCID of the compartment
config (object): An OCI config object or None.
interactive (bool): Ask the user for input if needed
return_object (bool): Whether to return the object when created
Returns:
None or the new DB System object if return_object is set to true
"""
db_system_name = kwargs.get("db_system_name")
description = kwargs.get("description")
availability_domain = kwargs.get("availability_domain")
shape = kwargs.get("shape")
subnet_id = kwargs.get("subnet_id")
configuration_id = kwargs.get("configuration_id")
data_storage_size_in_gbs = kwargs.get("data_storage_size_in_gbs")
mysql_version = kwargs.get("mysql_version")
admin_username = kwargs.get("admin_username")
admin_password = kwargs.get("admin_password")
private_key_file_path = kwargs.get(
"private_key_file_path", "~/.ssh/id_rsa")
par_url = kwargs.get("par_url")
perform_cleanup_after_import = kwargs.get(
"perform_cleanup_after_import")
source_mysql_uri = kwargs.get("source_mysql_uri")
source_mysql_password = kwargs.get("source_mysql_password")
source_local_dump_dir = kwargs.get("source_local_dump_dir")
source_bucket = kwargs.get("source_bucket")
host_image_id = kwargs.get("host_image_id")
defined_tags = kwargs.get("defined_tags")
# Conversion from Shell Dict type
if defined_tags:
defined_tags = dict(defined_tags)
freeform_tags = kwargs.get("freeform_tags")
# Conversion from Shell Dict type
if freeform_tags:
freeform_tags = dict(freeform_tags)
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
interactive = kwargs.get("interactive", True)
return_object = kwargs.get("return_object", False)
try:
# Get the active config and compartment
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.mysql
from pathlib import Path
import mysqlsh
from mds_plugin import compartment, compute, network, object_store
import datetime
import time
# Set the import_source_type to 0 to default to a clean new DB System
import_source_type = 0
# Check if source_* parameters are given and if so, set the correct
# import_source_type
if source_mysql_uri is not None:
# Import from an existing MySQL Server instance
import_source_type = 1
elif source_local_dump_dir is not None:
# Import from a local data dir
import_source_type = 2
elif source_bucket is not None:
# Import from an existing bucket
import_source_type = 3
        # If the user did not specify a par_url or another source parameter,
        # let them choose whether to import data from a given source
if interactive and import_source_type == 0 and par_url is None:
print("Choose one of the following options of how to create the "
"MySQL DB System:\n")
import_sources = [
"Create a clean MySQL DB System",
("Create a MySQL DB System from an existing MySQL Server "
"instance"),
"Create a MySQL DB System from a local dump",
("Create a MySQL DB System from a dump stored on OCI "
"Object Storage")
]
import_source = core.prompt_for_list_item(
item_list=import_sources,
prompt_caption=("Please enter the index of an option listed "
"above: "),
prompt_default_value='', print_list=True)
if import_source == "":
print("Operation cancelled.")
return
import_source_type = import_sources.index(import_source)
# Get a name
if not db_system_name and interactive:
db_system_name = core.prompt(
"Please enter the name for the new DB System: ").strip()
if not db_system_name:
raise Exception("No name given. "
"Operation cancelled.")
# Get a description
if not description and interactive:
description = core.prompt(
"Please enter a description for the new DB System: ").strip()
# Get an admin_username
if not admin_username and interactive:
admin_username = core.prompt(
"MySQL Administrator account name [admin]: ",
{'defaultValue': 'admin'}).strip()
if not admin_username:
raise Exception("No admin username given. "
"Operation cancelled.")
# Get an admin_password
if not admin_password and interactive:
admin_password = get_validated_mysql_password(
password_caption="MySQL Administrator account")
if not admin_password:
raise Exception("No admin password given. "
"Operation cancelled.")
# Get data_storage_size_in_gbs
if not data_storage_size_in_gbs and interactive:
data_storage_size_in_gbs = core.prompt(
"Please enter the amount of data storage size in gigabytes "
"with a minimum of 50 GB [50]: ",
{'defaultValue': '50'}).strip()
try:
data_storage_size_in_gbs = int(data_storage_size_in_gbs)
            except ValueError:
                raise ValueError("Please enter a number for data storage size.")
if not data_storage_size_in_gbs:
raise Exception("No data storage size given. "
"Operation cancelled.")
# Get the availability_domain name
availability_domain_obj = compartment.get_availability_domain(
random_selection=not interactive,
compartment_id=compartment_id,
availability_domain=availability_domain,
config=config, interactive=interactive,
return_python_object=True)
if not availability_domain_obj:
raise Exception("No availability domain selected. "
"Operation cancelled.")
availability_domain = availability_domain_obj.name
if interactive:
print(f"Using availability domain {availability_domain}.")
# Get the shapes
shape_id = compute.get_shape_name(
shape_name=shape, limit_shapes_to=[
"VM.Standard.E2.1", "VM.Standard.E2.2",
"VM.Standard.E2.4", "VM.Standard.E2.8"],
compartment_id=compartment_id,
availability_domain=availability_domain, config=config,
interactive=interactive)
if shape_id is None or shape_id == "":
print("Compute Shape not set or found. Operation cancelled.")
return
if interactive:
print(f"Using shape {shape_id}.")
# Get private subnet
subnet = network.get_subnet(
subnet_id=subnet_id, public_subnet=False,
compartment_id=compartment_id, config=config,
interactive=interactive)
if subnet is None:
print("Operation cancelled.")
return
if interactive:
print(f"Using subnet {subnet.display_name}.")
# Get mysql_version
mysql_version = get_mysql_version(compartment_id=compartment_id,
config=config)
if mysql_version is None:
print("Operation cancelled.")
return
print(f"Using MySQL version {mysql_version}.")
# Get mysql_configuration
mysql_configuration = get_db_system_configuration(
configuration_id=configuration_id, shape=shape_id,
availability_domain=availability_domain,
compartment_id=compartment_id, config=config)
if mysql_configuration is None:
print("Operation cancelled.")
return
print(f"Using MySQL configuration {mysql_configuration.display_name}.")
# TODO Check Limits
# limits.list_limit_values(config["tenancy"], "mysql").data
# limits.get_resource_availability(
# service_name="mysql", limit_name="vm-standard-e2-4-count",
# compartment_id=config["tenancy"],
# availability_domain="fblN:US-ASHBURN-AD-1").data
# limits.get_resource_availability(
# service_name="compute", limit_name="standard-e2-core-ad-count",
# compartment_id=config["tenancy"],
# availability_domain="fblN:US-ASHBURN-AD-1").data
# If requested, prepare import
if import_source_type > 0:
# If a bucket needs to be created, define a name for it
if import_source_type == 1 or import_source_type == 2:
# Take all alphanumeric chars from the DB System name
# to create the bucket_name
bucket_name = (
f"{''.join(e for e in db_system_name if e.isalnum())}_import_"
f"{datetime.datetime.now():%Y%m%d%H%M%S}")
print(f"\nCreating bucket {bucket_name}...")
bucket = object_store.create_bucket(
bucket_name=bucket_name, compartment_id=compartment_id,
config=config, return_object=True)
if bucket is None:
print("Cancelling operation")
return
if perform_cleanup_after_import is None:
perform_cleanup_after_import = True
# Create a MySQL DB System from an existing MySQL Server instance
if import_source_type == 1:
# Start the dump process
if not util.dump_to_bucket(bucket_name=bucket.name,
connection_uri=source_mysql_uri,
connection_password=source_mysql_password,
create_bucket_if_not_exists=True,
object_name_prefix="",
interactive=interactive,
return_true_on_success=True):
print(f"Could not dump the given instance to the object "
f"store bucket {bucket.name}")
return
# Create a MySQL DB System from local dir
elif import_source_type == 2:
if interactive and source_local_dump_dir is None:
source_local_dump_dir = mysqlsh.globals.shell.prompt(
"Please specify the directory path that contains the "
"dump: ",
{'defaultValue': ''}).strip()
if source_local_dump_dir == "":
print("Operation cancelled.")
return
elif source_local_dump_dir is None:
print("No directory path given. Operation cancelled.")
return
# Upload the files from the given directory to the bucket
file_count = object_store.create_bucket_objects_from_local_dir(
local_dir_path=source_local_dump_dir,
bucket_name=bucket.name,
object_name_prefix="",
compartment_id=compartment_id, config=config,
interactive=False)
if file_count is None:
print("Cancelling operation")
return
elif import_source_type == 3:
# Create a MySQL DB System from a bucket
bucket = object_store.get_bucket(
bucket_name=source_bucket,
compartment_id=compartment_id,
config=config)
if bucket is None:
print("Cancelling operation")
return
bucket_name = bucket.name
if perform_cleanup_after_import is None:
perform_cleanup_after_import = False
# Create PAR for import manifest and progress files
par, progress_par = util.create_bucket_import_pars(
object_name_prefix="",
bucket_name=bucket.name,
db_system_name=db_system_name,
compartment_id=compartment_id,
config=config)
if par is None or progress_par is None:
return
# Build URLs
par_url_prefix = object_store.get_par_url_prefix(config=config)
par_url = par_url_prefix + par.access_uri
# progress_par_url = par_url_prefix + progress_par.access_uri
# Once the API supports the new PAR based import, build the
# import_details using the given par_url
# if par_url:
# import urllib.parse
# import_details = oci.mysql.models.\
# CreateDbSystemSourceImportFromUrlDetails(
# source_type=oci.mysql.models.
# CreateDbSystemSourceImportFromUrlDetails.
# SOURCE_TYPE_IMPORTURL,
# source_url=(f'{par_url}?progressPar='
# f'{urllib.parse.quote(progress_par_url)}'))
db_system_details = oci.mysql.models.CreateDbSystemDetails(
description=description,
admin_username=admin_username,
admin_password=admin_password,
compartment_id=compartment_id,
configuration_id=mysql_configuration.id,
data_storage_size_in_gbs=data_storage_size_in_gbs,
display_name=db_system_name,
mysql_version=mysql_version,
shape_name=shape_id,
availability_domain=availability_domain,
subnet_id=subnet.id,
defined_tags=defined_tags,
freeform_tags=freeform_tags,
host_image_id=host_image_id
# source=import_details
)
# Get DbSystem Client
db_sys = core.get_oci_db_system_client(config=config)
# Create DB System
new_db_system = db_sys.create_db_system(db_system_details).data
# If there was a PAR URL given, wait till the system becomes
# ACTIVE and then perform the clean up work
if par_url is not None:
print("Waiting for MySQL DB System to become active.\n"
"This can take up to 20 minutes or more...", end="")
# Wait until the lifecycle_state == ACTIVE, 20 minutes max
cycles = 0
while cycles < 240:
db_system = db_sys.get_db_system(new_db_system.id).data
if db_system.lifecycle_state == "ACTIVE" or \
db_system.lifecycle_state == "FAILED":
break
else:
time.sleep(10)
print(".", end="")
cycles += 1
print("")
# Until the API is ready to directly import at deployment time,
# also start the import from here
if db_system.lifecycle_state == "ACTIVE":
util.import_from_bucket(
bucket_name=bucket_name,
db_system_id=new_db_system.id,
db_system_name=db_system_name,
object_name_prefix="",
admin_username=admin_username,
admin_password=admin_password,
private_key_file_path=private_key_file_path,
perform_cleanup=perform_cleanup_after_import,
compartment_id=compartment_id,
config=config,
interactive=False
)
else:
if return_object:
return new_db_system
else:
if new_db_system.lifecycle_state == "CREATING":
print(f"\nMySQL DB System {db_system_name} is being created.\n"
f"Use mds.ls() to check it's provisioning state.\n")
else:
print(f"\nThe creation of the MySQL DB System {db_system_name} "
"failed.\n")
except oci.exceptions.ServiceError as e:
if not interactive:
raise
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except (ValueError, oci.exceptions.ClientError) as e:
if not interactive:
raise
print(f'ERROR: {e}')
return
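
# Hedged usage sketch (assumption, not part of the original source): every value
# below is a placeholder; anything omitted is prompted for interactively by
# default (interactive=True).
create_db_system(
    db_system_name="sales-db",
    description="DB System created via the MDS plugin",
    admin_username="admin",
    data_storage_size_in_gbs=50,
    compartment_id="ocid1.compartment.oc1..exampleuniqueid",
)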
| 10,367
|
def load_keypoints2d_file(file_path, njoints=17):
"""load 2D keypoints from keypoint detection results.
Only one person is extracted from the results. If there are multiple
persons in the prediction results, we select the one with the highest
detection score.
Args:
file_path: the json file path.
    njoints: number of joints in the keypoint definition.
Returns:
A `np.array` with the shape of [njoints, 3].
"""
keypoint = array_nan((njoints, 3), dtype=np.float32)
det_score = 0.0
try:
with open(file_path, 'r') as f:
data = json.load(f)
except Exception as e: # pylint: disable=broad-except
logging.warning(e)
return keypoint, det_score
det_scores = np.array(data['detection_scores'])
keypoints = np.array(data['keypoints']).reshape((-1, njoints, 3))
# The detection results may contain zero person or multiple people.
if det_scores.shape[0] == 0:
# There is no person in this image. We set NaN to this frame.
return keypoint, det_score
else:
        # There is at least one person in this image. We select the one with
        # the highest detection score.
idx = np.argmax(det_scores)
keypoint = keypoints[idx]
det_score = det_scores[idx]
return keypoint, det_score
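
# Hedged usage sketch (assumption, not part of the original source): the path is
# illustrative and assumes COCO-style 17-joint detection results; frames with no
# detection come back as NaN keypoints and a 0.0 score.
keypoint, det_score = load_keypoints2d_file("detections/frame_000001.json", njoints=17)
if det_score > 0.5:  # arbitrary confidence threshold for this sketch
    xy = keypoint[:, :2]  # pixel coordinates; column 2 holds per-joint scores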
| 10,368
|
def read_csv(filepath_or_buffer: str, usecols: List[int]):
"""
usage.modin: 2
"""
...
| 10,369
|
def filter_params(module, train_bn=True):
"""Yields the trainable parameters of a given module.
Args:
module: A given module
train_bn: If True, leave the BatchNorm layers in training mode
Returns:
Generator
"""
children = list(module.children())
if not children:
if not (isinstance(module, BN_TYPES) and train_bn):
for param in module.parameters():
if param.requires_grad:
yield param
else:
for child in children:
for param in filter_params(module=child, train_bn=train_bn):
yield param
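
# Hedged usage sketch (assumption, not part of the original source): feeds the
# generator straight into an optimizer; with train_bn=True the BatchNorm
# parameters are excluded from the yielded iterator.
import torch

model = torch.nn.Sequential(torch.nn.Linear(10, 10), torch.nn.BatchNorm1d(10))
optimizer = torch.optim.SGD(filter_params(model, train_bn=True), lr=1e-3)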
| 10,370
|
def utf8_bytes(string):
""" Convert 'string' to bytes using UTF-8. """
return bytes(string, 'UTF-8')
| 10,371
|
def line_search(f, xk, pk, old_fval=None, old_old_fval=None, gfk=None, c1=1e-4,
c2=0.9, maxiter=20):
"""Inexact line search that satisfies strong Wolfe conditions.
Algorithm 3.5 from Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-61
Args:
      f: function of the form f(x) where x is a flat ndarray and returns a real
        scalar. The function should be composed of operations with vjp defined.
      xk: initial guess.
      pk: direction to search in. Assumes the direction is a descent direction.
      old_fval, gfk: initial function value and gradient at xk, if already known.
      old_old_fval: function value at the previous point, used to seed the
        initial step size.
      maxiter: maximum number of iterations to search.
      c1, c2: Wolfe criteria constants, see ref.
Returns: LineSearchResults
"""
def restricted_func_and_grad(t):
phi, g = jax.value_and_grad(f)(xk + t * pk)
dphi = jnp.real(_dot(g, pk))
return phi, dphi, g
if old_fval is None or gfk is None:
phi_0, dphi_0, gfk = restricted_func_and_grad(0.)
else:
phi_0 = old_fval
dphi_0 = jnp.real(_dot(gfk, pk))
if old_old_fval is not None:
candidate_start_value = 1.01 * 2 * (phi_0 - old_old_fval) / dphi_0
start_value = jnp.where(candidate_start_value > 1, 1.0, candidate_start_value)
else:
start_value = 1
def wolfe_one(a_i, phi_i):
# actually negation of W1
return phi_i > phi_0 + c1 * a_i * dphi_0
def wolfe_two(dphi_i):
return jnp.abs(dphi_i) <= -c2 * dphi_0
state = _LineSearchState(
done=False,
failed=False,
# algorithm begins at 1 as per Wright and Nocedal, however Scipy has a
# bug and starts at 0. See https://github.com/scipy/scipy/issues/12157
i=1,
a_i1=0.,
phi_i1=phi_0,
dphi_i1=dphi_0,
nfev=1 if (old_fval is None or gfk is None) else 0,
ngev=1 if (old_fval is None or gfk is None) else 0,
a_star=0.,
phi_star=phi_0,
dphi_star=dphi_0,
g_star=gfk,
)
def body(state):
# no amax in this version, we just double as in scipy.
# unlike original algorithm we do our next choice at the start of this loop
a_i = jnp.where(state.i == 1, start_value, state.a_i1 * 2.)
phi_i, dphi_i, g_i = restricted_func_and_grad(a_i)
state = state._replace(nfev=state.nfev + 1,
ngev=state.ngev + 1)
star_to_zoom1 = wolfe_one(a_i, phi_i) | ((phi_i >= state.phi_i1) & (state.i > 1))
star_to_i = wolfe_two(dphi_i) & (~star_to_zoom1)
star_to_zoom2 = (dphi_i >= 0.) & (~star_to_zoom1) & (~star_to_i)
zoom1 = _zoom(restricted_func_and_grad,
wolfe_one,
wolfe_two,
state.a_i1,
state.phi_i1,
state.dphi_i1,
a_i,
phi_i,
dphi_i,
gfk,
~star_to_zoom1)
state = state._replace(nfev=state.nfev + zoom1.nfev,
ngev=state.ngev + zoom1.ngev)
zoom2 = _zoom(restricted_func_and_grad,
wolfe_one,
wolfe_two,
a_i,
phi_i,
dphi_i,
state.a_i1,
state.phi_i1,
state.dphi_i1,
gfk,
~star_to_zoom2)
state = state._replace(nfev=state.nfev + zoom2.nfev,
ngev=state.ngev + zoom2.ngev)
state = state._replace(
done=star_to_zoom1 | state.done,
failed=(star_to_zoom1 & zoom1.failed) | state.failed,
**_binary_replace(
star_to_zoom1,
state._asdict(),
zoom1._asdict(),
keys=['a_star', 'phi_star', 'dphi_star', 'g_star'],
),
)
state = state._replace(
done=star_to_i | state.done,
**_binary_replace(
star_to_i,
state._asdict(),
dict(
a_star=a_i,
phi_star=phi_i,
dphi_star=dphi_i,
g_star=g_i,
),
),
)
state = state._replace(
done=star_to_zoom2 | state.done,
failed=(star_to_zoom2 & zoom2.failed) | state.failed,
**_binary_replace(
star_to_zoom2,
state._asdict(),
zoom2._asdict(),
keys=['a_star', 'phi_star', 'dphi_star', 'g_star'],
),
)
state = state._replace(i=state.i + 1, a_i1=a_i, phi_i1=phi_i, dphi_i1=dphi_i)
return state
state = lax.while_loop(lambda state: (~state.done) & (state.i <= maxiter) & (~state.failed),
body,
state)
status = jnp.where(
state.failed,
jnp.array(1), # zoom failed
jnp.where(
state.i > maxiter,
jnp.array(3), # maxiter reached
jnp.array(0), # passed (should be)
),
)
# Step sizes which are too small causes the optimizer to get stuck with a
# direction of zero in <64 bit mode - avoid with a floor on minimum step size.
alpha_k = state.a_star
alpha_k = jnp.where((jnp.finfo(alpha_k).bits != 64)
& (jnp.abs(alpha_k) < 1e-8),
jnp.sign(alpha_k) * 1e-8,
alpha_k)
results = _LineSearchResults(
failed=state.failed | (~state.done),
nit=state.i - 1, # because iterations started at 1
nfev=state.nfev,
ngev=state.ngev,
k=state.i,
a_k=alpha_k,
f_k=state.phi_star,
g_k=state.g_star,
status=status,
)
return results
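
# Hedged usage sketch (assumption, not part of the original source): runs the
# strong-Wolfe line search on a simple quadratic along the steepest-descent
# direction; the exact minimiser along this ray is at a step of 0.5.
import jax.numpy as jnp

f = lambda x: jnp.sum(x ** 2)
xk = jnp.array([1.0, -2.0])
pk = -2.0 * xk  # negative gradient of f at xk
res = line_search(f, xk, pk)
print(res.a_k, res.f_k, res.failed)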
| 10,372
|
def _get_value(session_browser, field):
"""Get an input field's value."""
return session_browser.evaluate_script('$("#id_%s").val()' % field)
| 10,373
|
def search_2d(arr, target):
"""
TODO same func as in adfgvx
"""
for row in range(len(arr)):
for col in range(len(arr)):
if arr[row][col] == target:
return row, col
raise ValueError
| 10,374
|
def scale_large_images_landmarks(images, landmarks):
""" scale images and landmarks up to maximal image size
:param list(ndarray) images: list of images
:param list(ndarray) landmarks: list of landmarks
:return tuple(list(ndarray),list(ndarray)): lists of images and landmarks
>>> scale_large_images_landmarks([np.zeros((8000, 500, 3), dtype=np.uint8)],
... [None, None]) # doctest: +ELLIPSIS
([array(...)], [None, None])
"""
if not images:
return images, landmarks
scale = estimate_scaling(images)
if scale < 1.:
logging.debug(
            'One or more images are larger than the recommended size for visualisation,'
            ' a resize with factor %f will be applied', scale
)
# using float16 as image raise TypeError: src data type = 23 is not supported
images = [
resize(img, None, fx=scale, fy=scale, interpolation=INTER_LINEAR) if img is not None else None for img in images
]
landmarks = [lnds * scale if lnds is not None else None for lnds in landmarks]
return images, landmarks
| 10,375
|
def home():
"""Post-login page."""
if flask.request.method == 'POST':
rooms = get_all_open_rooms()
name = "anon"
if flask.request.form['name'] != "":
name = flask.request.form['name']
player_id = flask_login.current_user.id
game_id = ""
if flask.request.form['submit'] == 'create':
game_id, error_message = create_room(player_id, name)
if game_id is None:
flask.flash(error_message)
return flask.render_template('home.html', user=flask_login.current_user)
else:
game_id = flask.request.form['secret-key']
added, error_message = add_player_to_room(game_id, player_id, name)
if not added:
flask.flash(error_message)
return flask.render_template('home.html', user=flask_login.current_user)
else:
# notify all players that a new one has joined
update_players(game_id)
return flask.redirect(flask.url_for('.game_page'))
else:
# TODO: workout if noob or not - need DB field
return flask.render_template('home.html', user=flask_login.current_user, noob=True)
| 10,376
|
def ik(T, tf_base) -> IKResult:
""" TODO add base frame correction
"""
Rbase = tf_base[:3, :3]
Ree = T[:3, :3]
Ree_rel = np.dot(Rbase.transpose(), Ree)
# ignore position
# n s a according to convention Siciliano
n = Ree_rel[:3, 0]
s = Ree_rel[:3, 1]
a = Ree_rel[:3, 2]
A = np.sqrt(a[0] ** 2 + a[1] ** 2)
# solution with theta2 in (0, pi)
t1_1 = np.arctan2(a[1], a[0])
t2_1 = np.arctan2(A, a[2])
t3_1 = np.arctan2(s[2], -n[2])
# solution with theta2 in (-pi, 0)
t1_2 = np.arctan2(-a[1], -a[0])
t2_2 = np.arctan2(-A, a[2])
t3_2 = np.arctan2(-s[2], n[2])
q_sol = np.zeros((2, 3))
q_sol[0, 0], q_sol[0, 1], q_sol[0, 2] = t1_1, t2_1, t3_1
q_sol[1, 0], q_sol[1, 1], q_sol[1, 2] = t1_2, t2_2, t3_2
return IKResult(True, q_sol)
| 10,377
|
def standarize_ms(datas, val_index, max=(2 ** 32 - 1)):
    """
    Standardize the milliseconds elapsed in Arduino readings so that the
    first reading starts at 0.
    Note: only accounts for a single rollover of the Arduino millis() counter.
    datas:
        List of data readings
    val_index:
        Index of the ms value in a reading data entry
    max:
        Maximum ms value, since the Arduino outputs a value that wraps
        around from the time it starts.
        For the correct value, see https://www.arduino.cc/en/Reference/Millis.
    """
    def _standarize_value(initial_value, reading):
        reading[val_index] = int(reading[val_index]) - initial_value
        if reading[val_index] <= 0:
            reading[val_index] += max
        return reading
initial_value = int(datas[0][val_index])
    res = list(map(lambda x: _standarize_value(initial_value, x), datas))
    res[0][val_index] = 0
    return res
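
# Hedged usage sketch (assumption, not part of the original source): readings are
# [millis, value] pairs; the readings are adjusted in place and also returned.
readings = [[1200, 0.5], [1450, 0.6], [1700, 0.7]]
normalized = standarize_ms(readings, val_index=0)
# normalized == [[0, 0.5], [250, 0.6], [500, 0.7]]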
| 10,378
|
def test_list_int_min_length_2_nistxml_sv_iv_list_int_min_length_3_1(mode, save_output, output_format):
"""
Type list/int is restricted by facet minLength with value 7.
"""
assert_bindings(
schema="nistData/list/int/Schema+Instance/NISTSchema-SV-IV-list-int-minLength-3.xsd",
instance="nistData/list/int/Schema+Instance/NISTXML-SV-IV-list-int-minLength-3-1.xml",
class_name="NistschemaSvIvListIntMinLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 10,379
|
def corr_bias(x_data, y_data, yerr, pdz1_x, pdz1_y, pdz2_x, pdz2_y):
"""
Given a correlation measurement and associated PDZs, generate a model and
fit as a bias to the measurement. Return:
1) the model [unbiased] (x and y float arrays)
2) best fit bias (float)
3) the bias PDF (x and y float arrays)
@params
x_data - The central angles of the correlation measurements
y_data - The values of the correlation measurements
yerr - The errorbars of the correlation measurements
pdz1_x - PDZ 1 redshift range to generate models from
pdz1_y - PDZ 1 probability values to generate models from
pdz2_x - PDZ 2 redshift range to generate models from
pdz2_y - PDZ 2 probability values to generate models from
pdz1_x and pdz2_x, pdz1_y and pdz2_y should be the same for an autocorrelation
@returns
xmod - the angular range associated with the generated model
ymod - the value of the model at each angle
best - The best fit bias value
(i.e. square this and multiply it by the base model for
the best fitting model)
xbias - The range of bias values tested
ybias - The probability associated with each bias value
        chisq - The non-reduced chi-square value associated with the best
                fit bias value
"""
xmod, ymod = model(pdz1_x, pdz1_y, pdz2_x, pdz2_y)
xbias, ybias, chisq, best = bias_fit(x_data, y_data, yerr, xmod, ymod)
return xmod, ymod, best, xbias, ybias, chisq
| 10,380
|
def confidence_ellipse(
x=None, y=None, cov=None, ax=None, n_std=3.0, facecolor="none", **kwargs
):
"""
Create a plot of the covariance confidence ellipse of `x` and `y`
Parameters
----------
x, y : array_like, shape (n, )
Input data.
cov : array_like, shape (2, 2)
covariance matrix. Mutually exclusive with input data.
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
        The number of standard deviations to determine the ellipse's radii.
Returns
-------
matplotlib.patches.Ellipse
Other parameters
----------------
kwargs : `~matplotlib.patches.Patch` properties
"""
    if x is None and y is None:
        if cov is None:
            raise ValueError("Either (x, y) data or a covariance matrix must be provided")
    else:
        if x.size != y.size:
            raise ValueError("x and y must be the same size")
        cov = np.cov(x, y)
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
    # Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse(
(0, 0),
width=ell_radius_x * 2,
height=ell_radius_y * 2,
facecolor=facecolor,
**kwargs
)
    # Calculating the standard deviation of x from the square root of the
    # variance and multiplying by the given number of standard deviations.
    scale_x = np.sqrt(cov[0, 0]) * n_std
    mean_x = np.mean(x) if x is not None else 0.0
    # Calculating the standard deviation of y in the same way; when only a
    # covariance matrix is given, the ellipse is centred at the origin.
    scale_y = np.sqrt(cov[1, 1]) * n_std
    mean_y = np.mean(y) if y is not None else 0.0
transf = (
transforms.Affine2D()
.rotate_deg(45)
.scale(scale_x, scale_y)
.translate(mean_x, mean_y)
)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse)
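
# Hedged usage sketch (assumption, not part of the original source): draws a
# 2-sigma ellipse over correlated Gaussian samples; `edgecolor` is passed through
# to matplotlib.patches.Ellipse via **kwargs.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
x = rng.normal(size=500)
y = 0.8 * x + rng.normal(scale=0.5, size=500)
fig, ax = plt.subplots()
ax.scatter(x, y, s=3)
confidence_ellipse(x, y, ax=ax, n_std=2.0, edgecolor="red")
plt.show()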
| 10,381
|
def lsR(root: Path) -> Iterator[Path]:
"""Recursive list a directory and return absolute path"""
return filter(lambda p: ".git" not in p.parts, itertools.chain.from_iterable(
map(
lambda lsdir: list(map(lambda f: Path(lsdir[0]) / f, lsdir[2])),
os.walk(root),
)
))
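
# Hedged usage sketch (assumption, not part of the original source): prints every
# file under the current directory, skipping anything inside a .git directory.
from pathlib import Path

for file_path in lsR(Path(".")):
    print(file_path)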
| 10,382
|
def adapted_fields(type) -> List[Attribute]:
"""Return the attrs format of `fields()` for attrs and dataclasses."""
if is_dataclass(type):
return [
Attribute(
attr.name,
attr.default
if attr.default is not MISSING
else (
Factory(attr.default_factory)
if attr.default_factory is not MISSING
else NOTHING
),
None,
True,
None,
True,
attr.init,
True,
type=attr.type,
)
for attr in dataclass_fields(type)
]
else:
return attrs_fields(type)
| 10,383
|
def get_parms():
"""
Use get_args to get the args, and return a dictionary of the args ready for
use in pump software.
@see get_args()
:return: dict: parms
"""
parms = {}
args = get_args()
for name, val in vars(args).items():
if val is not None:
parms[name] = val
return parms
| 10,384
|
def mv_files(source, target, workflow):
"""Move files within workspace."""
absolute_source_path = os.path.join(workflow.workspace_path, source)
if not os.path.exists(absolute_source_path):
message = "Path {} does not exist".format(source)
raise REANAWorkflowControllerError(message)
    if not absolute_source_path.startswith(workflow.workspace_path):
        message = "Source path is outside user workspace"
        raise REANAWorkflowControllerError(message)
    absolute_target_path = os.path.join(workflow.workspace_path, target)
    if not absolute_target_path.startswith(workflow.workspace_path):
        message = "Target path is outside workspace"
        raise REANAWorkflowControllerError(message)
try:
reana_fs = fs.open_fs(workflow.workspace_path)
source_info = reana_fs.getinfo(source)
if source_info.is_dir:
reana_fs.movedir(src_path=source, dst_path=target, create=True)
else:
reana_fs.move(src_path=source, dst_path=target)
reana_fs.close()
except Exception as e:
reana_fs.close()
message = "Something went wrong:\n {}".format(e)
raise REANAWorkflowControllerError(message)
| 10,385
|
def report_missing(item_type, output, expect, filename, reverse=False):
"""Print a message indicating a missing file."""
if reverse:
sys.stderr.write('Extra %s %s\n'
% (item_type, os.path.join(expect, filename)))
else:
sys.stderr.write('Missing %s %s\n'
% (item_type, os.path.join(output, filename)))
| 10,386
|
def set_rating(request, rating_form):
"""
Checks if rating for books exists. If exists, changes it. If not, creates a new one.
"""
try:
book_rating = BookRating.objects.get(id_user=TheUser.objects.get(id_user=request.user),
id_book=Book.objects.get(id=rating_form.cleaned_data['book']))
book_rating.rating = rating_form.cleaned_data['rating']
book_rating.save()
except ObjectDoesNotExist:
BookRating.objects.create(id_user=TheUser.objects.get(id_user=request.user),
id_book=Book.objects.get(id=rating_form.cleaned_data['book']),
rating=rating_form.cleaned_data['rating'])
finally:
logger.info("User '{}' set rating '{}' to book with id: '{}'."
.format(request.user, rating_form.cleaned_data['rating'], rating_form.cleaned_data['book']))
| 10,387
|
def cloud_backup(backup_info: dict):
"""
Send latest backup to the cloud.
Parameters
----------
backup_info: dict
Dictionary containing information in regards to date of backup and batch number.
"""
session = ftplib.FTP_TLS("u301483.your-storagebox.de")
session.login(user="u301483", passwd="dI52PgdgGeB8js0v")
try:
folder_name = backup_info["folder_name"]
if folder_name == "0000-00-00":
for parquet_file in os.listdir("0000-00-00"):
path = f"{cwd}/0000-00-00/{parquet_file}"
file = open(path, "rb")
session.storbinary(f"STOR {folder_name}\\{parquet_file}", file)
file.close()
else:
path_to_date = f"{cwd}/{folder_name}"
for parquet_file in os.listdir(path_to_date):
priority = re.search(r"\d", parquet_file)
digit = int(priority.group())
if digit <= backup_info["batch"]:
path = f"{cwd}/{folder_name}/{parquet_file}"
file = open(path, "rb")
session.storbinary(f"STOR {folder_name}\\{parquet_file}", file)
file.close()
except TypeError:
pass
session.quit()
return "Backup completed"
| 10,388
|
def redshift_session(_session_scoped_redshift_engine):
"""
A redshift session that rolls back all operations.
The engine and db is maintained for the entire test session for efficiency.
"""
conn = _session_scoped_redshift_engine.connect()
tx = conn.begin()
RedshiftSession = sa.orm.sessionmaker()
session = RedshiftSession(bind=conn)
try:
yield session
finally:
session.close()
tx.rollback()
conn.close()
| 10,389
|
def make_led_sample(n_samples=200, irrelevant=0, random_state=None):
"""Generate random samples from the 7-segment problem.
Parameters
----------
n_samples : int, optional (default=200)
The number of samples to generate.
irrelevant : int, optional (default=0)
The number of irrelevant binary features to add.
Returns
-------
X, y
"""
random_state = check_random_state(random_state)
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
data = data[random_state.randint(0, 10, n_samples)]
    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
if irrelevant > 0:
X = np.hstack((X, random_state.rand(n_samples, irrelevant) > 0.5))
return X, y
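
# Hedged usage sketch (assumption, not part of the original source): 7 informative
# segment features plus 3 irrelevant random features gives a (100, 10) matrix.
X, y = make_led_sample(n_samples=100, irrelevant=3, random_state=0)
print(X.shape, y.shape)  # (100, 10) (100,)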
| 10,390
|
def handle_source_authorization_exception(e):
""" Error handler: the data source requires authorisation
This will be triggered when opening a private HDX dataset before
the user has supplied their authorisation token.
@param e: the exception being handled
"""
if e.message:
flask.flash(e.message)
# we're using flask.g.recipe_id to handle the case where a saved recipe
# points to a formerly-public dataset that has suddenly become private
# normally, it will be None (because there's no saved recipe yet)
recipe = recipes.Recipe(recipe_id=flask.g.recipe_id)
# add an extra parameter for the /data/save form to indicate that we
# want the user to provide an authorisation token
extras = {
'need_token': 'on'
}
# note whether the resource looked like it came from HDX
if e.is_ckan:
extras['is_ckan'] = 'on'
# redirect to the /data/save page to ask the user for a token
return flask.redirect(util.data_url_for('data_save', recipe=recipe, extras=extras), 302)
| 10,391
|
def RemoveExperimentFromManualList(experiment, knob):
"""Removes an experiment from the ManuallyDisabledExperiments knob.
Args:
experiment: str, the experiment name to remove.
knob: str, the manual knob to modify
Raises:
PlistError: if the plist can't be modified.
"""
knobs = KNOBS.Knobs()
if knob not in knobs:
Output('%s list is empty, nothing to remove.' % knob)
else:
current_value = knobs.get(knob, [])
if experiment in current_value:
current_value.remove(experiment)
Output('New value of %s is %s' % (knob, ','.join(current_value)))
if not gmacpyutil.SetMachineInfoForKey(knob, ','.join(current_value)):
raise PlistError('Problem writing to plist.')
else:
Output('%s is not in %s.' % (experiment, knob))
| 10,392
|
async def bird(ctx):
"""gives a bird image"""
birdimg = await alex_api.birb()
embed = discord.Embed(title= ('BIRDS 0_0'),timestamp=datetime.datetime.utcnow(),
color=discord.Color.green())
embed.set_footer(text=ctx.author.name , icon_url=ctx.author.avatar_url)
embed.set_image(url=f"{birdimg}")
await ctx.send(embed=embed)
#await ctx.send(birbimg)
| 10,393
|
def agd_reader_multi_column_pipeline(upstream_tensorz, control_ops=None, verify=False, buffer_pool=None, share_buffer_pool=True, buffer_pool_args=pool_default_args, repack=None, name="agd_reader_multi_column_pipeline"):
"""
Create an AGDReader pipeline for an iterable of columns. Each column group is assumed to have the same first ordinal, number of records, and record id.
:param upstream_tensorz: a list of list of tensors, each item being a column group
:param verify: whether or not to invoke the verification for AGD columns
:param buffer_pool: pass in a buffer_pool to reuse
:param share_buffer_pool: if buffer_pool is not passed in, create one to share among all the AGDReader instances
:param buffer_pool_args: special buffer pool args, if it's created
:param name:
:return: yield [output_buffer_handles], num_records, first_ordinal, record_id; in order, for each column group in upstream_tensorz
"""
upstream_tensorz = sanitize_generator(upstream_tensorz)
if control_ops is not None:
control_ops = sanitize_generator(control_ops)
if len(control_ops) != len(upstream_tensorz):
raise Exception("Control ops needs to be the same length as upstream tensors. len(tensors) = {lt}, len(control_ops) = {lc}".format(
lt=len(upstream_tensorz), lc=len(control_ops)
))
else:
control_ops = itertools.repeat([])
with ops.name_scope("agd_read_multi"):
if buffer_pool is None and share_buffer_pool:
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args, name="agd_reader_buffer_pool")
assert len(upstream_tensorz) > 0
def gen_groups():
reader = partial(agd_reader_pipeline, verify=verify, buffer_pool_args=buffer_pool_args, buffer_pool=buffer_pool, name=name, repack=repack)
for upstream_tensors, control_dep in zip(upstream_tensorz, control_ops):
with ops.control_dependencies(control_dep):
yield reader(upstream_tensors=upstream_tensors)
for processed_tensors in gen_groups():
output_buffers, num_recordss, first_ordinalss, record_ids = zip(*processed_tensors)
yield output_buffers, num_recordss[0], first_ordinalss[0], record_ids[0]
| 10,394
|
def findcosmu(re0, rp0, sublat, latc, lon):  # considers latc to be planetocentric latitudes, but sublat to be planetographic
    """Takes the equatorial and polar radius of Jupiter (re0, rp0 respectively), the sub-latitude of Jupiter, and the latitude and
    longitude (both in radians) to determine the "cos(mu)" of the photons. This effectively helps to identify where the limb
    of Jupiter occurs in the Chandra observations"""
    rfactor = (re0/rp0)**2  # ratio of the equatorial radius and polar radius...
lat = np.arctan(np.tan(latc)*rfactor) # and coordinate transformation from planetocentric latitude -> planetographic latitude
ans = (rfactor * (np.cos(lon)*np.cos(sublat)*np.cos(lat)) + (np.sin(sublat)*np.sin(lat))) / np.sqrt(rfactor*np.cos(sublat)**2 \
+ np.sin(lat)**2) / np.sqrt(rfactor * np.cos(lat)**2 + np.sin(lat)**2) # to return the value(s) of cos(mu)
return ans
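
# Hedged usage sketch (assumption, not part of the original source): Jupiter's
# equatorial and polar radii in km with a small sub-latitude; all angles are
# passed in radians, as the function expects.
import numpy as np

re0, rp0 = 71492.0, 66854.0
cosmu = findcosmu(re0, rp0, np.radians(3.0), np.radians(30.0), np.radians(45.0))
# cos(mu) close to 0 marks the limb; larger values lie on the visible disc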
| 10,395
|
def RMS_energy(frames):
"""Computes the RMS energy of frames"""
f = frames.flatten()
return N.sqrt(N.mean(f * f))
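
# Hedged usage sketch (assumption, not part of the original source): the RMS of a
# unit-amplitude sine wave is 1/sqrt(2) ~= 0.707, independent of how the samples
# are split into frames.
import numpy as N

frames = N.sin(N.linspace(0, 2 * N.pi, 1000, endpoint=False)).reshape(10, 100)
print(RMS_energy(frames))  # ~0.707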
| 10,396
|
def is_blacklisted_url(url):
"""
    Return whether the URL is blacklisted or not.
    The URL is first checked against WHITELIST_URL (whitelisted URLs are never
    blacklisted), then against the BLACKLIST_URLS matchers.
:param url: url string
:return: True if URL is blacklisted, else False
"""
url = urllib.parse.urlparse(url).netloc
for method in WHITELIST_URL:
for whitelist_url in WHITELIST_URL[method]:
if method(url, whitelist_url):
return False
for method in BLACKLIST_URLS:
for blacklist_url in BLACKLIST_URLS[method]:
if method(url, blacklist_url):
return True
return False
| 10,397
|
def heading(yaw):
"""A helper function to getnerate quaternions from yaws."""
q = euler2quat(0.0, 0.0, yaw)
quat = Quaternion()
quat.w = q[0]
quat.x = q[1]
quat.y = q[2]
quat.z = q[3]
return quat
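
# Hedged usage sketch (assumption, not part of the original source): a pure yaw of
# 90 degrees gives w ~= 0.707 and z ~= 0.707 with x = y = 0.
import math

quat = heading(math.pi / 2.0)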
| 10,398
|
def check_login_required(view_func):
"""
A decorator that checks whether login is required on this installation
and, if so, checks if the user is logged in. If login is required and
the user is not logged in, they're redirected to the login link.
"""
def _check(*args, **kwargs):
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get("auth_require_sitewide_login"):
return login_required(view_func)(*args, **kwargs)
else:
return view_func(*args, **kwargs)
return _check
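
# Hedged usage sketch (assumption, not part of the original source): applied as an
# ordinary view decorator; the view name and template are placeholders.
from django.shortcuts import render

@check_login_required
def dashboard(request):
    return render(request, "dashboard.html")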
| 10,399
|