| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def encode(number, base):
"""Encode given number in base 10 to digits in given base.
number: int -- integer representation of number (in base 10)
base: int -- base to convert to
return: str -- string representation of number (in given base)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# Handle unsigned numbers only for now
assert number >= 0, 'number is negative: {}'.format(number)
# TODO: Encode number in binary (base 2)
    # Digit characters for remainders 0-35
    value_digit = '0123456789abcdefghijklmnopqrstuvwxyz'
    if number == 0:
        return '0'
    numbers = []
    while number > 0:
        remainder = number % base
        number = number // base
        numbers.append(value_digit[remainder])
    numbers.reverse()
    numbers_string = ''.join(numbers)
    return numbers_string
# TODO: Encode number in hexadecimal (base 16)
# ...
# TODO: Encode number in any base (2 up to 36)
# ...
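# Usage sketch: a few spot checks of the encoder above.
assert encode(10, 2) == '1010'
assert encode(255, 16) == 'ff'
assert encode(0, 36) == '0'
print(encode(123456, 36))  # '2n9c'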
| 19,800
|
def Print(string, color, highlight=False):
"""
Colored print
colorlist:
red,green
"""
end="\033[1;m"
pstr=""
if color == "red":
if highlight:
pstr+='\033[1;41m'
else:
pstr+='\033[1;31m'
elif color == "green":
if highlight:
pstr+='\033[1;42m'
else:
pstr+='\033[1;32m'
elif color == "yellow":
if highlight:
pstr+='\033[1;43m'
else:
pstr+='\033[1;33m'
else:
print(("Error Unsupported color:"+color))
print((pstr+string+end))
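# Usage sketch: print text in color; highlight=True uses a colored background.
Print("operation succeeded", "green")
Print("disk almost full", "yellow", highlight=True)
Print("build failed", "red")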
| 19,801
|
def _parse_constants():
"""Read the code in St7API and parse out the constants."""
def is_int(x):
try:
_ = int(x)
return True
except ValueError:
return False
with open(St7API.__file__) as f_st7api:
current_comment = None
seen_comments = set()
f_stripped = (l.strip() for l in f_st7api)
for l in f_stripped:
is_comment_line = l.startswith("#")
is_blank_line = not l
is_constant_line = "=" in l and is_int(l.split("=")[1])
            if is_comment_line:
                if l in seen_comments:
                    raise ValueError(f"Duplicate comment {l}")
                seen_comments.add(l)
                current_comment = l
            elif is_blank_line:
                current_comment = None
            elif is_constant_line:
                if current_comment:
                    name, val = [x.strip() for x in l.split("=")]
                    yield current_comment, name, val
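# Usage sketch: group the yielded constants by the comment block they appear
# under (assumes the St7API module is importable so the generator can run).
from collections import defaultdict

groups = defaultdict(dict)
for comment, name, val in _parse_constants():
    groups[comment][name] = int(val)
for comment, constants in sorted(groups.items()):
    print(comment, len(constants))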
| 19,802
|
def verify_outcome(msg, prefix, lista):
"""
Compare message to list of claims: values.
:param prefix: prefix string
:param lista: list of claims=value
:return: list of possible strings
"""
assert msg.startswith(prefix)
qsl = ["{}={}".format(k, v[0]) for k, v in parse_qs(msg[len(prefix) :]).items()]
return set(qsl) == set(lista)
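# Usage sketch (hypothetical redirect URI): check that a callback carries
# exactly the expected query parameters.
msg = "https://example.com/cb?state=abc&code=xyz"
assert verify_outcome(msg, "https://example.com/cb?", ["state=abc", "code=xyz"])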
| 19,803
|
def equationMaker(congruency=None, beat_type=None, structure=None, n=None, perms=None, catch = False):
"""
Function to create equation stimuli, like in Landy & Goldstone, e.g. "b + d * f + y"
required inputs:
congruency: 'congruent' or 'incongruent'
beat_type : 'binary_beat' or 'ternary_beat'
structure : '+*+' or '*+*'
n : how many equations to generate
outputs:
a list of trial dictionaries of the length specified by n
"""
output_list = []
alphabet = list(string.ascii_lowercase)
letters_to_remove = ['i', 'l', 'o'] # because of similarity with other symbols
alphabet = [letter for letter in alphabet if letter not in letters_to_remove] # final letter list
    op = list(structure) # list of operators
#op = [x if x != "*" else "times" for x in op] # use this line for experimenting with speech stims
eq_per_perm = int(n / len(perms)) # number of equations per permutation
#assert eq_per_perm.is_integer(), "length of perms must be evenly divisble into n"
perms = perms * eq_per_perm
shuffle(perms)
for eq in range(n):
l = list(choice(alphabet, size=5, replace=False))
equation = [l[0],op[0],l[1],op[1],l[2],op[2],l[3]]
p = itemgetter(*perms[eq][0])(l) # creates permutation of letter ordering for this iteration
probe = [p[0],op[0],p[1],op[1],p[2],op[2],p[3]]
if catch:
cat_idx = 2 * randint(0,3) # chooses one of the 4 letter indices
probe[cat_idx] = l[4] # replace with other random letter not in stimulus
trial_type = 'catch'
else:
trial_type = 'main'
probe = ' '.join(probe)
# add info on 'validity' and 'sensitivity' based on permutation used
if perms[eq][1] <= 4:
sensitivity = 'insensitive'
else:
sensitivity = 'sensitive'
if structure == '+*+':
if ( (perms[eq][1] <= 2) or (5 <= perms[eq][1] <= 6) ):
validity = 'True'
else:
validity = 'False'
elif structure == '*+*':
if ( (perms[eq][1] <= 2) or (7 <= perms[eq][1] <= 8) ):
validity = 'True'
else:
validity = 'False'
elif structure == '+++':
sensitivity = 'neutral'
if catch:
validity = 'False'
else:
validity = 'True'
# assemble trial dictionary
trial_dict = {'stim':equation, 'beat_type':beat_type,
'congruency':congruency, 'structure': structure, 'stim_number': eq + 1, 'probe': probe,
'validity': validity, 'sensitivity': sensitivity, 'trial_type':trial_type}
output_list.append(trial_dict)
return output_list
| 19,804
|
def get_deadline_delta(target_horizon):
"""Returns number of days between official contest submission deadline date
and start date of target period
(14 for week 3-4 target, as it's 14 days away,
28 for week 5-6 target, as it's 28 days away)
Args:
target_horizon: "34w" or "56w" indicating whether target period is
weeks 3 & 4 or weeks 5 & 6
"""
if target_horizon == "34w":
deadline_delta = 14
elif target_horizon == "56w":
deadline_delta = 28
else:
raise ValueError("Unrecognized target_horizon "+target_horizon)
return deadline_delta
| 19,805
|
def perform_tensorflow_model_inference(model_name, sample):
""" Perform evaluations from model (must be configured)
Args:
model_name ([type]): [description]
sample ([type]): [description]
Returns:
[type]: [description]
"""
reloaded_model = tf.keras.models.load_model(model_name)
input_dict = {name: tf.convert_to_tensor(
[value]) for name, value in sample.items()}
predictions = reloaded_model.predict(input_dict)
    print('Prediction: ', predictions)
# prob = tf.nn.sigmoid(predictions[0])
return predictions
| 19,806
|
def check_ntru(f, g, F, G):
"""Check that f * G - g * F = 1 mod (x ** n + 1)."""
a = karamul(f, G)
b = karamul(g, F)
c = [a[i] - b[i] for i in range(len(f))]
return ((c[0] == q) and all(coef == 0 for coef in c[1:]))
| 19,807
|
def total_price(id: int):
"""Calculates the total price for the order
Args:
id (int): The order ID
Returns:
[int]: Total price
"""
    loop = asyncio.get_event_loop()
    order_data = loop.run_until_complete(fetch_data(id))
    # The structure returned by fetch_data is not shown; assuming it is an
    # iterable of items with a "price" field, the total is their sum.
    return sum(item["price"] for item in order_data)
| 19,808
|
def RaiseCommandException(args, returncode, output, error):
"""Raise an exception whose message describing a command failure.
Args:
args: shell command-line (as passed to subprocess.call())
returncode: status code.
error: standard error output.
Raises:
a new Exception.
"""
message = 'Command failed with status %d: %s\n' % (returncode, args)
if output:
message += 'Output:-------------------------------------------\n%s\n' \
'--------------------------------------------------\n' % output
if error:
message += 'Error message: -----------------------------------\n%s\n' \
'--------------------------------------------------\n' % error
raise Exception(message)
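# Usage sketch: run a command and raise a descriptive error on failure.
import subprocess

proc = subprocess.run(["ls", "/nonexistent"], capture_output=True, text=True)
if proc.returncode != 0:
    RaiseCommandException(proc.args, proc.returncode, proc.stdout, proc.stderr)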
| 19,809
|
def edit_user(user_id):
"""
TODO: differentiate between PUT and PATCH -> PATCH partial update
"""
user = User.from_dict(request.get_json())
user.id = user_id
session_id = request.headers.get('Authorization', None)
session = auth.lookup(session_id)
if session["user"].role != Role.admin:
# We must check if the user is editing themselves
if user_id != session["user"].id:
raise UserException("Insufficient privileges", status_code=401)
# Create basic query for user updating
query = dict()
# If the user updates their profile check for all fields to be updated
if user.first_name and user.first_name != "":
query["first_name"] = user.first_name
if user.last_name and user.last_name != "":
query["last_name"] = user.last_name
if user.email and user.email != "":
query["email"] = user.email
    if user.role is not None and user.role >= 0:
query["role"] = user.role
if user.settings and user.settings != {}:
query['settings'] = user.settings
    # In case of a password change, hash the new password before storing it
if user.password and user.password != "":
query["password"] = auth.create_hash(user.password)
if len(query.keys()) == 0:
raise UserException("Nothing to update", status_code=400)
# Update the user and return updated document
res = db.update("users", "id", user.id, query)
# Remove password hash from the response
del res['password']
return json.dumps(res)
| 19,810
|
def Search_tau(A, y, S, args, normalize=True, min_delta=0):
"""
Complete parameter search for sparse regression method S.
Input:
A,y : from linear system Ax=y
S : sparse regression method
args : arguments for sparse regression method
normalize : boolean. Normalize columns of A?
min_delta : minimum change in tau
Returns:
X : list of all possible outputs of S(A,y,tau)
Tau : list of values of tau corresponding to each x in X
"""
X = []
Tau =[]
tau = 0
# Normalize
if normalize:
normA = np.linalg.norm(A,axis=0)
A = A @ np.diag(normA**-1)
for j in range(2**A.shape[1]):
# Apply sparse regression
x, delta_tau = S(A, y, tau, args)
delta_tau = np.max([delta_tau, min_delta])
X.append(x)
Tau.append(tau)
# Break condition
if np.max(np.abs(x)) == 0 or delta_tau == np.inf: break
# Update tau
tau = tau+delta_tau
# Renormalize x
if normalize:
X = [np.diag(normA**-1) @ x for x in X]
return X,Tau
| 19,811
|
def get_entry_accounts(entry: Directive) -> list[str]:
"""Accounts for an entry.
Args:
entry: An entry.
Returns:
A list with the entry's accounts ordered by priority: For
transactions the posting accounts are listed in reverse order.
"""
if isinstance(entry, Transaction):
return list(reversed([p.account for p in entry.postings]))
if isinstance(entry, Custom):
return [val.value for val in entry.values if val.dtype == ACCOUNT_TYPE]
if isinstance(entry, Pad):
return [entry.account, entry.source_account]
account_ = getattr(entry, "account", None)
if account_ is not None:
return [account_]
return []
| 19,812
|
def default_evade():
"""
A catch-all method to try and evade suspension from Linkedin.
Currenly, just delays the request by a random (bounded) time
"""
sleep(random.uniform(0, 0))
| 19,813
|
def enter_fastboot(adb_serial, adb_path=None):
"""Enters fastboot mode by calling 'adb reboot bootloader' for the adb_serial provided.
Args:
adb_serial (str): Device serial number.
adb_path (str): optional alternative path to adb executable
Raises:
RuntimeError: if adb_path is invalid or adb executable was not found by
get_adb_path.
Returns:
str: Output from calling 'adb reboot' or None if call fails with
non-zero
return code.
Note:
If adb_path is not provided then path returned by get_adb_path will be
used instead. If adb returns a non-zero return code then None will be
returned.
"""
return _adb_command(("reboot", "bootloader"), adb_serial, adb_path=adb_path)
| 19,814
|
def thesaurus_manager_menu_header(context, request, view, manager): # pylint: disable=unused-argument
"""Thesaurus manager menu header"""
return THESAURUS_MANAGER_LABEL
| 19,815
|
def create_compiled_keras_model():
"""Create compiled keras model."""
model = models.create_keras_model()
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=utils.get_optimizer_from_flags('client'),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
| 19,816
|
def extraerWrfoutSerie(file_paths, latlong_estaciones, par, run):
""" extrae de los arechivos wrfout listados en file_paths
para la posicion (x, y) toda la serie de la variable
seleccionada"""
print(f'Processing: {file_paths}')
try:
wrf_temp = Dataset(file_paths)
except OSError:
return
for var_name in variables_list:
try:
var = wrf.getvar(wrf_temp, var_name, timeidx=wrf.ALL_TIMES)
except RuntimeError:
print("Corrupted file")
return
for key, estacion in latlong_estaciones.iterrows():
var_ema = var[:, int(estacion.y), int(estacion.x)]
dfVARema = pd.DataFrame(var_ema.to_pandas(), columns=[var_name])
if var_name == 'T2':
dfVARema['T2'] = dfVARema['T2'] - 273.15
tTSK = wrf.getvar(wrf_temp, 'TSK', timeidx=wrf.ALL_TIMES)
                tTSK_ema = tTSK[:, int(estacion.y), int(estacion.x)]
dfTSKema = pd.DataFrame(tTSK_ema.to_pandas(), columns=['TSK'])
dfTSKema['TSK'] = dfTSKema['TSK'] - 273.15
dfVARema = getT2product(dfVARema, dfTSKema)
var_name = 'T2P'
dfData = dfVARema[9:33]
dfData.to_csv(f'csv_output/{estacion.NOMBRE}_{var_name}_{run}_{par}.csv', mode='a', header=None)
dfData = 0
wrf_temp.close()
wrf_temp = 0
| 19,817
|
def top_mods(max_distance, top_n, min_len, stored_json):
"""Check top packages for typosquatters.
Prints top packages and any potential typosquatters
Args:
max_distance (int): maximum edit distance to check for typosquatting
top_n (int): the number of top packages to retrieve
min_len (int): a minimum length of characters
        stored_json (bool): a flag to denote whether to use stored top packages json
"""
# Get list of potential typosquatters
package_names = get_all_packages()
top_packages = get_top_packages(top_n=top_n, stored=stored_json)
filtered_package_list = filter_by_package_name_len(top_packages, min_len=min_len)
squat_candidates = create_suspicious_package_dict(
package_names, filtered_package_list, max_distance
)
post_whitelist_candidates = whitelist(squat_candidates)
store_squatting_candidates(post_whitelist_candidates)
print_suspicious_packages(post_whitelist_candidates)
| 19,818
|
def train_sedinet_cat(SM, train_df, test_df, train_idx, test_idx,
ID_MAP, vars, greyscale, name, mode, batch_size, valid_batch_size,
res_folder):
"""
This function trains an implementation of SediNet
"""
##================================
## create training and testing file generators, set the weights path,
## plot the model, and create a callback list for model training
train_gen = get_data_generator_1image(train_df, train_idx, True, ID_MAP,
vars[0], batch_size, greyscale, DO_AUG) ##BATCH_SIZE
valid_gen = get_data_generator_1image(test_df, test_idx, True, ID_MAP,
vars[0], valid_batch_size, greyscale, False) ##VALID_BATCH_SIZE
if SHALLOW is True:
if DO_AUG is True:
weights_path = name+"_"+mode+"_batch"+str(batch_size)+"_im"+str(IM_HEIGHT)+\
"_"+str(IM_WIDTH)+"_shallow_"+vars[0]+"_"+CAT_LOSS+"_aug.hdf5"
else:
weights_path = name+"_"+mode+"_batch"+str(batch_size)+"_im"+str(IM_HEIGHT)+\
"_"+str(IM_WIDTH)+"_shallow_"+vars[0]+"_"+CAT_LOSS+"_noaug.hdf5"
else:
if DO_AUG is True:
weights_path = name+"_"+mode+"_batch"+str(batch_size)+"_im"+str(IM_HEIGHT)+\
"_"+str(IM_WIDTH)+"_"+vars[0]+"_"+CAT_LOSS+"_aug.hdf5"
else:
weights_path = name+"_"+mode+"_batch"+str(batch_size)+"_im"+str(IM_HEIGHT)+\
"_"+str(IM_WIDTH)+"_"+vars[0]+"_"+CAT_LOSS+"_noaug.hdf5"
if os.path.exists(weights_path):
SM.load_weights(weights_path)
print("==========================================")
print("Loading weights that already exist: %s" % (weights_path) )
print("Skipping model training")
elif os.path.exists(res_folder+os.sep+weights_path):
weights_path = res_folder+os.sep+weights_path
SM.load_weights(weights_path)
print("==========================================")
print("Loading weights that already exist: %s" % (weights_path) )
print("Skipping model training")
else:
try:
plot_model(SM, weights_path.replace('.hdf5', '_model.png'),
show_shapes=True, show_layer_names=True)
except:
pass
callbacks_list = [
ModelCheckpoint(weights_path, monitor='val_loss', verbose=1,
save_best_only=True, mode='min',
save_weights_only = True)
]
print("=========================================")
print("[INFORMATION] schematic of the model has been written out to: "+\
weights_path.replace('.hdf5', '_model.png'))
print("[INFORMATION] weights will be written out to: "+weights_path)
##==============================================
## set checkpoint file and parameters that control early stopping,
## and reduction of learning rate if and when validation
## scores plateau upon successive epochs
# reduceloss_plat = ReduceLROnPlateau(monitor='val_loss', factor=FACTOR,
# patience=STOP_PATIENCE, verbose=1, mode='auto', min_delta=MIN_DELTA,
# cooldown=STOP_PATIENCE, min_lr=MIN_LR)
#
# earlystop = EarlyStopping(monitor="val_loss", mode="min", patience=STOP_PATIENCE)
model_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss',
verbose=1, save_best_only=True, mode='min',
save_weights_only = True)
#tqdm_callback = tfa.callbacks.TQDMProgressBar()
# callbacks_list = [model_checkpoint, reduceloss_plat, earlystop] #, tqdm_callback]
##==============================================
## train the model
# history = SM.fit(train_gen,
# steps_per_epoch=len(train_idx)//batch_size, ##BATCH_SIZE
# epochs=NUM_EPOCHS,
# callbacks=callbacks_list,
# validation_data=valid_gen, #use_multiprocessing=True,
# validation_steps=len(test_idx)//valid_batch_size) #max_queue_size=10 ##VALID_BATCH_SIZE
## with non-adaptive exponentially decreasing learning rate
exponential_decay_fn = exponential_decay(MAX_LR, NUM_EPOCHS)
lr_scheduler = LearningRateScheduler(exponential_decay_fn)
callbacks_list = [model_checkpoint, lr_scheduler]
## train the model
history = SM.fit(train_gen,
steps_per_epoch=len(train_idx)//batch_size, ##BATCH_SIZE
epochs=NUM_EPOCHS,
callbacks=callbacks_list,
validation_data=valid_gen, #use_multiprocessing=True,
validation_steps=len(test_idx)//valid_batch_size) #max_queue_size=10 ##VALID_BATCH_SIZE
###===================================================
## Plot the loss and accuracy as a function of epoch
plot_train_history_1var(history)
# plt.savefig(vars+'_'+str(IM_HEIGHT)+'_batch'+str(batch_size)+'_history.png', ##BATCH_SIZE
# dpi=300, bbox_inches='tight')
plt.savefig(weights_path.replace('.hdf5','_history.png'),dpi=300, bbox_inches='tight')
plt.close('all')
# serialize model to JSON to use later to predict
model_json = SM.to_json()
with open(weights_path.replace('.hdf5','.json'), "w") as json_file:
json_file.write(model_json)
return SM, weights_path
| 19,819
|
def query_top_python_repositories(stars_filter: Optional[str] = None): # FIXME: set stars_filter = None
"""
stars_filter examples:
>7
<42
42..420
"""
log.info("Querying top popular Python GitHub repositories...")
gql = GQL(endpoint=endpoint, headers=headers)
gql.load_query("top_python_repositories.gql")
if stars_filter:
gql.set_template_variables(
AFTER_CURSOR=f"after: {gql.paging.end_cursor}",
STARS_FILTER=f"stars:{stars_filter}"
)
gql.reload_query()
run = 1
try:
gql.run_query()
except ConnectionRefusedError as e:
log.error(e)
return
log.debug(
f"query_top_python_repositories(): "
f'repositoryCount={gql.query_results["repositoryCount"]} {{'
)
if "nodes" in gql.query_results and gql.query_results["nodes"]:
append_to_csv([repository(node) for node in gql.query_results["nodes"]])
while gql.paging.has_next_page:
run += 1
log.info(f"Running query #{run} (pageID: {gql.paging.end_cursor})")
try:
gql.next_page()
except ConnectionRefusedError as e:
log.error(e)
else:
append_to_csv([repository(node) for node in gql.query_results["nodes"]])
log.debug(
f"}} query_top_python_repositories(): "
f'repositoryCount={gql.query_results["repositoryCount"]}'
)
| 19,820
|
def psycopg2_string():
"""
Generates a connection string for psycopg2
"""
return 'dbname={db} user={user} password={password} host={host} port={port}'.format(
db=settings.DATABASES['default']['NAME'],
user=settings.DATABASES['default']['USER'],
password=settings.DATABASES['default']['PASSWORD'],
host=settings.DATABASES['default']['HOST'],
port=settings.DATABASES['default']['PORT'],
)
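# Usage sketch: open a connection with the generated DSN (assumes Django
# settings are configured and psycopg2 is installed).
import psycopg2

conn = psycopg2.connect(psycopg2_string())
with conn.cursor() as cur:
    cur.execute("SELECT 1")
    print(cur.fetchone())
conn.close()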
| 19,821
|
def get_phase_relation(protophase: np.ndarray, N: int = 0) -> np.ndarray:
"""
relation between protophase and phase
Parameters
----------
protophase : np.ndarray
    N : int, optional
        number of Fourier terms to use (defaults to the size of protophase)
Returns
-------
np.ndarray
phase (protophase from 0 to 2pi)
"""
phase = np.linspace(0, np.pi * 2, 5000) + np.zeros(5000) * 1j
new_phase = phase.copy()
if N == 0:
N = protophase.size
for n in range(1, N + 1):
Sn = fourier_coefficient(protophase, n)
new_phase = new_phase + 2 * Sn * (np.exp(1j * n * phase) - 1) / (1j *
n)
return new_phase
| 19,822
|
def handle_articlepeople(utils, mention):
"""
Handles #articlepeople functionality.
Parameters
----------
utils : `Utils object`
extends tweepy api wrapper
mention : `Status object`
a single mention
Returns
-------
None
"""
urls = re.findall(r'(https?://[^\s]+)', mention.text)
if not urls or len(urls) != 1:
utils.rundown.update_status(
"@%s to use the #articlepeople service, your message should be in the following format: @ rundown_bot #articlepeople url" %mention.user.screen_name,
mention.id)
else:
article = ArticleReader(url = urls[0])
people = article.get_people()
if not people:
utils.rundown.update_status(
"@%s Hi! I didn't find any people in that article :(" %mention.user.screen_name,
mention.id)
else:
people = ", ".join(people)
utils.rundown.update_status(
"@%s Hi! I found these people: %s" %(
mention.user.screen_name,
people),
mention.id)
return None
| 19,823
|
def bak_del_cmd(filename:Path, bakfile_number:int, quietly=False):
""" Deletes a bakfile by number
"""
console = Console()
_bakfile = None
bakfiles = db_handler.get_bakfile_entries(filename)
if not bakfiles:
console.print(f"No bakfiles found for {filename}")
return False
if not bakfile_number:
try:
_bakfile, bakfile_number = \
__do_select_bakfile(bakfiles,
select_prompt=(("Delete which .bakfile?"),
default_select_prompt[0]),
return_index=True)
bakfile_number += 1
except TypeError:
return True
confirm = input(
f"Confirming: Delete bakfile #{bakfile_number} for {filename}? "
f"(y/N) ").lower() == 'y' if not quietly else True
if confirm:
_bakfile = _bakfile or __get_bakfile_entry(filename,
bakfile_number=bakfile_number,
console=console)
if not _bakfile:
return False
__remove_bakfiles([_bakfile])
return True
| 19,824
|
def train_test_submissions(submissions=None, force_retrain_test=False,
is_parallelize=None):
"""Train and test submission.
If submissions is None, trains and tests all submissions.
"""
if is_parallelize is not None:
app.config.update({'RAMP_PARALLELIZE': is_parallelize})
if submissions is None:
submissions = Submission.query.filter(
Submission.is_not_sandbox).order_by(Submission.id).all()
for submission in submissions:
train_test_submission(submission, force_retrain_test)
score_submission(submission)
| 19,825
|
def fetch_county_data(file_reference):
"""The name of this function is displayed to the user when there is a cache miss."""
path = file_reference.filename
return (pd
.read_csv(path)
.assign(date = lambda d: pd.to_datetime(d.date))
)
| 19,826
|
def preston_sad(abund_vector, b=None, normalized = 'no'):
"""Plot histogram of species abundances on a log2 scale"""
    if b is None:
        q = np.exp2(list(range(0, 25)))
        b = q[(q <= max(abund_vector) * 2)]
if normalized == 'no':
hist_ab = np.histogram(abund_vector, bins = b)
if normalized == 'yes':
hist_ab_norm = np.histogram(abund_vector, bins = b)
hist_ab_norm1 = hist_ab_norm[0]/(b[0:len(hist_ab_norm[0])])
hist_ab_norm2 = hist_ab_norm[1][0:len(hist_ab_norm[0])]
hist_ab = (hist_ab_norm1, hist_ab_norm2)
return hist_ab
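# Usage sketch: bin species abundances into log2 (Preston octave) bins.
import numpy as np

abundances = np.array([1, 1, 2, 3, 5, 8, 13, 21, 34])
counts, bin_edges = preston_sad(abundances)
print(counts)     # species per octave
print(bin_edges)  # log2-spaced bin edges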
| 19,827
|
def exp_rearrangement():
"""Example demonstrating of Word-Blot for pairwise local similarity search on
two randomly generated sequencees with motif sequences violating
collinearity :math:`S=M_1M_2M_3, T=M'_1M'_1M'_3M'_2` where motif pairs
:math:`(M_i, M'_i)_{i=1,2,3}` have lengths 200, 400, 600 and are related by
match probabilities 0.95, 0.85, and 0.75, respectively.
.. figure::
https://www.dropbox.com/s/nsvsf5gaui6t9ww/rearrangement.png?raw=1
:target:
https://www.dropbox.com/s/nsvsf5gaui6t9ww/rearrangement.png?raw=1
:alt: lightbox
Dynamic programming scores of the forward pass of Smith Waterman are
shown in color code (*left*) with seeds (word length 6) grey intensity
coded according to the local match probability assigned by Word-Blot
(minimum similarity length 200). Similar segments reported by Word-Blot
are shown as grey diagonal strips (*left*) and schematically (*right*)
color coded by their Word-Blot estimated match probabilities (note
agreement with true match probabilities).
"""
# NOTE we are running whole table DP later here; be careful with size
K = 200
wordlen = 6
A = Alphabet('ACGT')
WB_kw = {'g_max': .2, 'sensitivity': .9, 'alphabet': A, 'wordlen': wordlen,
'path': ':memory:', 'log_level': logging.INFO}
# homologies
Hs = [rand_seq(A, i) for i in [i * K for i in range(1, 4)]]
ps = [.95, .85, .75]
Ms = []
for p_match in ps:
subst = gap = 1 - np.sqrt(p_match)
        print(subst, gap)
Ms.append(
MutationProcess(A, subst_probs=subst, ge_prob=gap, go_prob=gap)
)
# connector junk
def J(): return rand_seq(A, 2 * K)
S = J() + Hs[0] + J() + Hs[1] + J() + Hs[2] + J()
Hs = [M.mutate(hom)[0] for hom, M in zip(Hs, Ms)]
T = J() + Hs[0] + J() + Hs[0] + Hs[2] + J() + Hs[1] + J()
fig = plt.figure(figsize=(9, 6))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax_seeds = plt.subplot(gs[0])
ax_mapping = plt.subplot(gs[1])
WB = WordBlot(S, T, **WB_kw)
p_min = .95 * min(ps)
scored_seeds = WB.score_seeds(K)
scored_seeds = [(WB.to_ij_coordinates(*rec['seed']), rec['p'])
for rec in scored_seeds]
plot_seeds(ax_seeds, [x[0] for x in scored_seeds])
cmap = plt.cm.get_cmap('plasma')
sim_segments = list(WB.similar_segments(K_min=K, p_min=p_min))
min_p_obs = min(rec['p'] for rec in sim_segments)
max_p_obs = max(rec['p'] for rec in sim_segments)
for rec in sim_segments:
        print(rec)
seg = rec['segment']
(i_start, i_end), (j_start, j_end) = WB.to_ij_coordinates_seg(seg)
i_ctr, j_ctr = (i_start + i_end) / 2, (j_start + j_end) / 2
color = cmap((rec['p'] - min_p_obs) / (max_p_obs - min_p_obs))[:3]
plot_similar_segment(ax_seeds, seg, lw=5, alpha=.1, c='k')
ax_mapping.plot([1, 1], [i_start, i_end], lw=3, c=color, alpha=.7)
ax_mapping.plot([2, 2], [j_start, j_end], lw=3, c=color, alpha=.7)
ax_mapping.plot([1, 2], [i_ctr, j_ctr], marker='o', markersize=7, lw=2,
c=color, alpha=.4)
ax_mapping.set_xticks([1, 2])
ax_mapping.set_xticklabels(['S', 'T'])
ax_mapping.set_xlim(0, 3)
ax_mapping.set_ylim(0, None)
ax_c = make_axes_locatable(ax_mapping).append_axes('right', size='4%',
pad=0.05)
norm = matplotlib.colors.Normalize(vmin=min_p_obs, vmax=max_p_obs)
matplotlib.colorbar.ColorbarBase(ax_c, cmap=cmap, norm=norm,
orientation='vertical')
aligner_kw = {
'match_score': 1 / p_min - 1,
'mismatch_score': -1,
'ge_score': -1,
'go_score': 0,
'alnmode': STD_MODE,
'alntype': LOCAL,
}
    print(len(S), len(T))
with Aligner(S, T, **aligner_kw) as aligner:
aligner.solve()
scores = np.array(aligner.table_scores())
min_score = min(scores.flatten())
max_score = max(scores.flatten())
ax_seeds.imshow(scores, cmap='plasma', alpha=.3)
ax_c = make_axes_locatable(ax_seeds).append_axes('right', size='4%',
pad=0.05)
norm = matplotlib.colors.Normalize(vmin=min_score, vmax=max_score)
matplotlib.colorbar.ColorbarBase(ax_c, cmap='plasma', norm=norm,
orientation='vertical')
adjust_pw_plot(ax_seeds, len(S), len(T))
ax_seeds.set_xlabel('T')
ax_seeds.set_ylabel('S')
fig.tight_layout()
savefig(fig, 'rearrangement.png')
| 19,828
|
def package_install_site(name='', user=False, plat_specific=False):
"""pip-inspired, distutils-based method for fetching the
default install location (site-packages path).
Returns virtual environment or system site-packages, unless
`user=True` in which case returns user-site (typ. under `~/.local/
on linux).
If there's a distinction (on a particular system) between platform
specific and pure python package locations, set `plat_specific=True`
to retrieve the former.
"""
dist = Distribution({'name': name})
dist.parse_config_files()
inst = dist.get_command_obj('install', create=True)
# NOTE: specifying user=True will create user-site
if user:
inst.user = user
inst.prefix = ""
inst.finalize_options()
# platform-specific site vs. purelib (platform-independent) site
if plat_specific:
loc = inst.install_platlib
else:
loc = inst.install_purelib
# install_lib specified in setup.cfg has highest precedence
if 'install_lib' in dist.get_option_dict('install'):
loc = inst.install_lib
return loc
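# Usage sketch: report the default install locations.
print(package_install_site())                    # venv or system site-packages
print(package_install_site(user=True))           # user site, e.g. ~/.local/... on Linux
print(package_install_site(plat_specific=True))  # platform-specific site-packages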
| 19,829
|
def unregisterExcept(func):
"""
Un-registers a function from the except hook queue.
    Look at the sys.excepthook documentation for more information.
:param func | <callable>
"""
try:
_excepthooks.remove(weakref.ref(func))
except (AttributeError, ValueError):
pass
| 19,830
|
def atlas_slice(atlas, slice_number):
"""
A function that pulls the data for a specific atlas slice.
Parameters
----------
atlas: nrrd
Atlas segmentation file that has a stack of slices.
slice_number: int
The number in the slice that corresponds to the fixed image
for registration.
Returns
-------
sagittal: array
Sagittal view being pulled from the atlas.
coronal: array
Coronal view being pulled from the atlas.
    horizontal: array
Horizontal view being pulled from the atlas.
"""
epi_img_data2 = atlas.get_fdata()
sagittal = epi_img_data2[140, :, :]
coronal = epi_img_data2[:, slice_number, :]
horizontal = epi_img_data2[:, :, 100]
return sagittal, coronal, horizontal
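# Usage sketch (hypothetical file): load an atlas volume with nibabel and pull
# the three orthogonal views for a given coronal slice.
import nibabel as nib

atlas = nib.load("atlas_segmentation.nii.gz")
sagittal, coronal, horizontal = atlas_slice(atlas, slice_number=200)
print(sagittal.shape, coronal.shape, horizontal.shape)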
| 19,831
|
def enviar_cambio_estado(request):
"""
Cambio de estado de una nota técnica y avisar
por email al personal de stib
"""
if request.method == "POST" or request.POST.get("nota_tecnica"):
try:
nota_tecnica = get_object_or_404(NotasTecnicas, pk=request.POST.get("nota_tecnica"))
nota_tecnica.estado = request.POST.get("estado")
nota_tecnica.save()
            # -- send an email notifying the state change
subject = "Nota Técnica - Cambio de estado"
ctx = {
'administracion': nota_tecnica.edificio.user.perfil.nombre_comercial,
'edificio': nota_tecnica.edificio,
'estado': NotasTecnicas.ESTADOS[ int(request.POST.get("estado"))-1 ][1],
'descripcion': nota_tecnica.descripcion,
'fecha': nota_tecnica.creado,
'comentario': request.POST.get("comentario")
}
body = render_to_string('emails/email_cambio_estado_nota_tecnica_notificaciones.html', ctx)
_send_email(STIB_TO_EMAIL, subject, body)
            # -- / end of state-change email notification
messages.success(request, "Se ha cambiado el estado de la Nota Técnica.")
except:
messages.error(request, "Error al cambiar el estado de la Nota Técnica.")
return HttpResponseRedirect(reverse('notas-tecnicas:detail', args=[request.POST.get("nota_tecnica")]))
else:
messages.success(request, "Error.")
return HttpResponseRedirect("/")
| 19,832
|
def ha_inpgen(filename, outfile, is_list, ref, interface):
""" Input generator for (Quasi-)Harmonic Approximation calculations.
This command requires a file (FILENAME) that will be read to provide the
input data for the input generation.
"""
outbase = os.path.splitext(filename)[0]
    if outfile is None:
outfile = outbase + '_ha_input.yaml'
else:
outfile += '.yaml'
echo_highlight(quantas_title())
if not os.path.isfile(filename):
echo_error(quantas_error(), bold=True)
echo_error("'{}' is not a file".format(filename))
return
generator = QHAInputCreator(interface)
try:
completed, error = generator.read(filename, is_list, ref)
except KeyError:
echo_error(quantas_error(), bold=True)
echo_error(
"File '{}' does not appear as a valid input file".format(filename))
return
except UnicodeDecodeError:
echo_error(quantas_error(), bold=True)
echo_error(
"File '{}' is in binary format".format(filename))
return
if not completed:
echo_error(quantas_error(), bold=True)
echo_error(error)
return
echo("Preparing the input file for Quantas: '{}'".format(outfile))
if interface != 'crystal-qha':
if ref > len(generator.phondata):
echo_error(quantas_error(), bold=True)
echo_error('Invalid reference provided')
return
generator.write(outfile, ref)
else:
generator.write(outfile)
echo_highlight(biblio_header())
echo_highlight(quantas_citation())
echo_highlight(biblio_footer())
echo_highlight(quantas_finish())
return
| 19,833
|
def post_auth_logout(): # noqa: E501
"""Logout of the service
TODO: # noqa: E501
:rtype: None
"""
return 'do some magic!'
| 19,834
|
async def head(url: str) -> Dict:
"""Fetch headers returned http GET request.
:param str url:
        The URL to perform the HEAD request for.
:rtype: dict
:returns:
dictionary of lowercase headers
"""
async with aiohttp.request("HEAD", url) as res:
response_headers = res.headers
return {k.lower(): v for k, v in response_headers.items()}
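# Usage sketch: fetch headers for an example URL from synchronous code.
import asyncio

headers = asyncio.run(head("https://example.com"))
print(headers.get("content-type"))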
| 19,835
|
def send_recv_packet(packet, iface=None, retry=3, timeout=1, verbose=False):
"""Method sends packet and receives answer
Args:
packet (obj): packet
iface (str): interface, used when Ether packet is included
retry (int): number of retries
timeout (int): timeout to receive answer
verbose (bool): verbose mode
Returns:
        tuple: answered packets, unanswered packets
Raises:
event: inet_before_sendrecv_packet
event: inet_after_sendrecv_packet
"""
try:
from scapy.all import sr, srp
mh.demsg('htk_on_debug_info', mh._trn.msg('htk_inet_sending_recv_packet', iface, retry, timeout),
mh.fromhere())
ev = event.Event(
'inet_before_sendrecv_packet', packet, iface, retry, timeout, verbose)
if (mh.fire_event(ev) > 0):
packet = ev.argv(0)
iface = ev.argv(1)
retry = ev.argv(2)
timeout = ev.argv(3)
verbose = ev.argv(4)
if (ev.will_run_default()):
            if iface is not None:
ans, unans = srp(
packet, iface=iface, retry=retry, timeout=timeout, verbose=verbose)
else:
ans, unans = sr(
packet, retry=retry, timeout=timeout, verbose=verbose)
mh.demsg('htk_on_debug_info', mh._trn.msg(
'htk_inet_packet_sent_recv'), mh.fromhere())
ev = event.Event('inet_after_sendrecv_packet')
mh.fire_event(ev)
return ans, unans
    except (Scapy_Exception, error) as ex:
        mh.demsg('htk_on_error', ex, mh.fromhere())
        return None, None
| 19,836
|
def delete_by_image_name(image_name):
"""
Delete Image by their name
"""
Image.query.filter_by(image_name=image_name).delete()
db.session.commit()
| 19,837
|
def node_vectors(node_id):
"""Get the vectors of a node.
You must specify the node id in the url.
You can pass direction (incoming/outgoing/all) and failed
(True/False/all).
"""
exp = Experiment(session)
# get the parameters
direction = request_parameter(parameter="direction", default="all")
failed = request_parameter(parameter="failed",
parameter_type="bool", default=False)
for x in [direction, failed]:
if type(x) == Response:
return x
# execute the request
node = models.Node.query.get(node_id)
if node is None:
return error_response(error_type="/node/vectors, node does not exist")
try:
vectors = node.vectors(direction=direction, failed=failed)
exp.vector_get_request(node=node, vectors=vectors)
session.commit()
except Exception:
return error_response(error_type="/node/vectors GET server error",
status=403,
participant=node.participant)
# return the data
return success_response(vectors=[v.__json__() for v in vectors])
| 19,838
|
def newFlatDict(store, selectKeys=None, labelPrefix=''):
"""
Takes a list of dictionaries and returns a dictionary of 1D lists.
If a dictionary did not have that key or list element, then 'None' is put in its place
Parameters
----------
store : list of dicts
The dictionaries would be expected to have many of the same keys.
Any dictionary keys containing lists in the input have been split into multiple numbered keys
selectKeys : list of strings, optional
The keys whose data will be included in the return dictionary. Default ``None``, which results in all keys being returned
labelPrefix : string
An identifier to be added to the beginning of each key string.
Returns
-------
newStore : dict
The new dictionary with the keys from the keySet and the values as
1D lists with 'None' if the keys, value pair was not found in the
store.
Examples
--------
>>> store = [{'list': [1, 2, 3, 4, 5, 6]}]
>>> newFlatDict(store)
{'list_[0]': [1], 'list_[1]': [2], 'list_[2]': [3], 'list_[3]': [4], 'list_[4]': [5], 'list_[5]': [6]}
>>> store = [{'string': 'string'}]
>>> newFlatDict(store)
{'string': ["'string'"]}
>>> store = [{'dict': {1: {3: "a"}, 2: "b"}}]
>>> newFlatDict(store)
{'dict_1_3': ["'a'"], 'dict_2': ["'b'"]}
"""
keySet = flatDictKeySet(store, selectKeys=selectKeys)
newStore = {}
if labelPrefix:
labelPrefix += "_"
for key, loc in keySet.items():
newKey = labelPrefix + str(key)
if isinstance(loc, dict):
subStore = [s[key] for s in store]
keyStoreSet = newFlatDict(subStore, labelPrefix=newKey)
newStore.update(keyStoreSet)
elif isinstance(loc, (list, np.ndarray)):
for locCo in loc:
tempList = []
for s in store:
rawVal = s.get(key, None)
if rawVal is None:
tempList.append(None)
else:
tempList.append(listSelection(rawVal, locCo))
newStore.setdefault(newKey + "_" + str(locCo), tempList)
else:
vals = [repr(s.get(key, None)) for s in store]
newStore.setdefault(newKey, vals)
return newStore
| 19,839
|
async def get_races(
db: Any, token: str, raceplan_id: str
) -> List[Union[IndividualSprintRace, IntervalStartRace]]:
"""Check if the event has a races."""
races = await RacesService.get_races_by_raceplan_id(db, raceplan_id)
if len(races) == 0:
raise NoRacesInRaceplanException(
f"No races in raceplan {raceplan_id}. Cannot proceed."
)
return races
| 19,840
|
def _reshape_vectors(v1, v2, axis, dim, same_shape=True):
""" Reshape input vectors to two dimensions. """
# TODO v2 as DataArray with possibly different dimension order
v1, axis, _, _, _, _, coords, *_ = _maybe_unpack_dataarray(
v1, dim, axis, None, False
)
v2, *_ = _maybe_unpack_dataarray(v2, None, axis, None)
if v1.shape[axis] != 3 or v2.shape[axis] != 3:
raise ValueError(
f"Shape of v1 and v2 along axis {axis} must be 3, got "
f"{v1.shape[axis]} for v1 and {v2.shape[axis]} for v2"
)
if v1.ndim < 2:
raise ValueError("v1 must have at least two dimensions")
# flatten everything except spatial dimension
v1 = np.swapaxes(v1, axis, -1).reshape(-1, 3)
v2 = np.swapaxes(v2, axis, -1).reshape(-1, 3)
if same_shape and v1.shape != v2.shape:
raise ValueError("v1 and v2 must have the same shape")
return v1, v2, coords is not None
| 19,841
|
def yaml_load(data: str) -> Any:
"""Deserializes a yaml representation of known objects into those objects.
Parameters
----------
data : str
The serialized YAML blob.
Returns
-------
Any
The deserialized Python objects.
"""
yaml = yaml_import(raise_error=True)
return yaml.safe_load(data)
| 19,842
|
def _get_current_branch():
"""Retrieves the branch Git is currently in.
Returns:
(str): The name of the current Git branch.
"""
branch_name_line = _run_cmd(GIT_CMD_GET_STATUS).splitlines()[0]
return branch_name_line.split(' ')[2]
| 19,843
|
def PGAN(pretrained=False, *args, **kwargs):
"""
Progressive growing model
pretrained (bool): load a pretrained model ?
model_name (string): if pretrained, load one of the following models
celebaHQ-256, celebaHQ-512, DTD, celeba, cifar10. Default is celebaHQ.
"""
from models.progressive_gan import ProgressiveGAN as PGAN
if 'config' not in kwargs or kwargs['config'] is None:
kwargs['config'] = {}
model = PGAN(useGPU=kwargs.get('useGPU', True),
storeAVG=True,
**kwargs['config'])
checkpoint = {"celebAHQ-256": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaHQ_s6_i80000-6196db68.pth',
"celebAHQ-512": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaHQ16_december_s7_i96000-9c72988c.pth',
"DTD": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/testDTD_s5_i96000-04efa39f.pth',
"celeba": "https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaCropped_s5_i83000-2b0acc76.pth"}
if pretrained:
if "model_name" in kwargs:
if kwargs["model_name"] not in checkpoint.keys():
raise ValueError("model_name should be in "
+ str(checkpoint.keys()))
else:
print("Loading default model : celebaHQ-256")
kwargs["model_name"] = "celebAHQ-256"
state_dict = model_zoo.load_url(checkpoint[kwargs["model_name"]],
map_location='cpu')
model.load_state_dict(state_dict)
return model
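# Usage sketch, assuming this entry point is exposed via torch.hub as in
# facebookresearch/pytorch_GAN_zoo; buildNoiseData/test follow that repo's API.
import torch

model = PGAN(pretrained=True, model_name="celebAHQ-256", useGPU=False)
noise, _ = model.buildNoiseData(1)     # one latent vector
with torch.no_grad():
    image = model.test(noise)          # generated image tensor
print(image.shape)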
| 19,844
|
def compare_environment(team_env, master_env, jenkins_build_terms ):
"""
compare the versions replace compare_environment
Return types
1 - Matches Master
2 - Does not match master. Master is ahead(red)
3 - branch is ahead (yellow)
:param team_env:
:param master_env:
:param jenkins_build_terms:
:return:
"""
result = 0
team_hash = team_env['version'].split('-')[-1]
master_hash = master_env['version'].split('-')[-1]
service_name = team_env['servicename'].replace('_','-')
team_branch_name = team_env['version'].replace('_','-').split('-')[1:-1]
master_branch_name = master_env['version'].replace('_','-').split('-')[1:-1]
    # replace signiant-installer-service dashes with underscores
    # if there are more name changes in the future, a separate function can be created
if service_name == "signiant-installer-service":
service_name = service_name.replace('-','_')
if len(team_hash) == 7 and len(master_hash) == 7:
if team_hash == master_hash:
# if commit hash match result (green)
result = 1
elif len(team_branch_name) > 0:
# if a sub team branch exist and is currently deployed in the dev environment (yellow)
result = 3
else:
if team_env['build_date'] and master_env['build_date']:
# if build dates are available for both sections
if compare_bb_commit_parents(service_name, team_hash, master_hash):
result = 1
else:
# compare build time between two environment
result = compare_build_time(team_env, master_env)
else:
# if build date does not exist for either or both team/master service (red)
result = 2
elif (len(team_hash) == 7) ^ (len(master_hash) == 7):
        # if one is a Jenkins build number and the other is a Bitbucket hash (red), but not both
result = 2
elif 'master' in master_env['version'] and 'master' in team_env['version']:
# if hash len is not 7 for both master and team
# that means jenkin build master on both prod and dev comparison environment (not bitbucket way)
result = jenkins_compare_environment(team_env['version'], master_env['version'], jenkins_build_terms)
else:
# all other scenarios
result = 2
logging.debug("Bitbucket comparing %s and %s result is %s" % (team_env['version'], master_env['version'], result))
return result
| 19,845
|
def _runge_kutta_step(func,
y0,
f0,
t0,
dt,
tableau=_DORMAND_PRINCE_TABLEAU,
name=None):
"""Take an arbitrary Runge-Kutta step and estimate error.
Args:
func: Function to evaluate like `func(y, t)` to compute the time derivative
of `y`.
y0: Tensor initial value for the state.
f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
t0: float64 scalar Tensor giving the initial time.
dt: float64 scalar Tensor giving the size of the desired time step.
tableau: optional _ButcherTableau describing how to take the Runge-Kutta
step.
name: optional name for the operation.
Returns:
Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
calculating these terms.
"""
with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
y0 = ops.convert_to_tensor(y0, name='y0')
f0 = ops.convert_to_tensor(f0, name='f0')
t0 = ops.convert_to_tensor(t0, name='t0')
dt = ops.convert_to_tensor(dt, name='dt')
dt_cast = math_ops.cast(dt, y0.dtype)
k = [f0]
for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
ti = t0 + alpha_i * dt
yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
k.append(func(yi, ti))
if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
# This property (true for Dormand-Prince) lets us save a few FLOPs.
yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)
y1 = array_ops.identity(yi, name='%s/y1' % scope)
f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
y1_error = _scaled_dot_product(
dt_cast, tableau.c_error, k, name='%s/y1_error' % scope)
return (y1, f1, y1_error, k)
| 19,846
|
def _create_serialize(cls, serializers):
"""
Create a new serialize method with extra serializer functions.
"""
def serialize(self, value):
for serializer in serializers:
value = serializer(value)
value = super(cls, self).serialize(value)
return value
serialize.__doc__ = serializers[0].__doc__
return serialize
| 19,847
|
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix cm is such that cm[i, j] is equal
to the number of observations known to be in group i but predicted
to be in group j.
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
estimated targets
labels : array, shape = [n_classes]
        lists all labels occurring in the dataset.
If none is given, those that appear at least once
in y_true or y_pred are used.
Returns
-------
CM : array, shape = [n_classes, n_classes]
confusion matrix
References
----------
http://en.wikipedia.org/wiki/Confusion_matrix
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = np.asarray(coo_matrix((np.ones(y_true.shape[0]), (y_true, y_pred)),
shape=(n_labels, n_labels),
dtype=np.int).todense())
return CM
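# Usage sketch (assumes unique_labels and scipy's coo_matrix are available, as
# in the scikit-learn module this comes from).
y_true = [0, 1, 2, 2, 1]
y_pred = [0, 2, 2, 2, 1]
print(confusion_matrix(y_true, y_pred))
# [[1 0 0]
#  [0 1 1]
#  [0 0 2]]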
| 19,848
|
def dialect_selector(s):
"""Return a dialect given it's name."""
s = s or 'ansi'
lookup = {
'ansi': ansi_dialect
}
return lookup[s]
| 19,849
|
def main():
"""
"Neither agreeable nor disagreeable," I answered. "It just is."
--- ALDOUS HUXLEY
"""
bg_1()
bg_2()
bg_4()
bg_3()
bg_5()
bg_6()
bg_7()
bg_8()
bg_9()
bg_10()
bg_11()
bg_12()
bg_13()
bg_14()
bg_15()
bg_16()
dot()
| 19,850
|
def dump(line, index):
""" Command for printing the last item in stack """
global stacks
global current_stack
checkStack(1, line, index)
line = line.strip().split(' ')
line.pop(0) # Removing the command itself
line = ' '.join(line)
if len(line) == 0:
ending = ''
else:
ending = eval(line)
print(stacks[current_stack][-1], end=ending)
| 19,851
|
def importlib_only(fxn):
"""Decorator to skip a test if using __builtins__.__import__."""
return unittest.skipIf(using___import__, "importlib-specific test")(fxn)
| 19,852
|
def bf(x):
""" returns the given bitfield value from within a register
Parameters:
x: a pandas DataFrame line - with a column named BF_NUMBER which holds the definition of given bit_field
reg_val: integer
Returns:
--------
res: str
the bit field value from within the register
"""
try:
reg_val = int(x[REG_VALUE][2:],16)
except:
if isnan(x[REG_VALUE]):
return nan
else:
raise
if str(x[BF_NUMBER]).find("..")>0:
#Example
#BF=3..1 => min_bit =1 , max_bit = 3
#mask = 14 = 0xE
#(1<<4) - (1<<1)= 16 - 2 =14
min_bit = int(x[BF_NUMBER].split("..")[1])
max_bit = int(x[BF_NUMBER].split("..")[0])
mask = (1<<(max_bit+1)) -(1<<(min_bit))
res= mask & reg_val
res = res>>min_bit
res = "{:04x}".format(res).upper()
res = "0x"+res
else:
mask = (1<<int(x[BF_NUMBER]))
res = mask & reg_val
res = res >> int(x[BF_NUMBER])
res = "{:04x}".format(res).upper()
res = "0x"+res
return res
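# Usage sketch with hypothetical column names; the surrounding module is
# assumed to define REG_VALUE and BF_NUMBER.
import pandas as pd

REG_VALUE, BF_NUMBER = "reg_value", "bf_number"
row = pd.Series({REG_VALUE: "0x00F4", BF_NUMBER: "7..4"})
print(bf(row))  # bits 7..4 of 0x00F4 -> "0x000F"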
| 19,853
|
def get_mean_brightness(
frame: np.ndarray,
mask: Union[
np.ndarray,
None,
] = None,
) -> int:
"""Return the mean brightness of a frame.
Load the frame, calculate a histogram, and iterate through the bins until half or more of the pixels have been counted.
Args:
`frame`: A video data frame.
`mask`: An `np.ndarray` instance that represents a bit mask, or `None`. (See, *e.g.*, <https://docs.opencv.org/master/d1/db7/tutorial_py_histogram_begins.html>.)
Returns:
        An integer representing the mean brightness of the frame. (Note that this is defined relative to the number of bins in the histogram.)
"""
try:
grayscale_frame = cv.cvtColor(
frame,
cv.COLOR_RGB2GRAY,
)
except Exception as error:
print(f'Could not convert frame to grayscale. ({error})')
return False
num_pixels = frame.shape[0] * frame.shape[1]
histogram = cv.calcHist(
[grayscale_frame],
[0],
mask,
[RANGE],
[0, RANGE],
)
pixel_count = 0
bin_index = 0
while pixel_count / num_pixels <= 0.5:
pixel_count += histogram[bin_index]
bin_index += 1
return bin_index
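# Usage sketch on a synthetic frame; RANGE is assumed to be a module-level
# constant (e.g. 256 histogram bins).
import numpy as np

frame = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
print(get_mean_brightness(frame))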
| 19,854
|
def ban_sticker(msg, sticker_id):
"""
Банит стикер\n
:param msg:\n
:param sticker_id:\n
"""
with DataConn(db) as conn:
cursor = conn.cursor()
sql = 'SELECT * FROM `banned_stickers` WHERE `chat_id` = %s AND `sticker_id` = %s'
cursor.execute(sql, (msg.chat.id, sticker_id))
res = cursor.fetchone()
if res is None:
sql = 'INSERT INTO `banned_stickers`(`chat_id`, `chat_name`, `sticker_id`, `ban_time`) VALUES (%s, %s, %s, %s)'
try:
cursor.execute(sql, (msg.chat.id, msg.chat.title, sticker_id, int(time.time())))
conn.commit()
except Exception as e:
print(sql)
print(e)
else:
if res != msg.chat.title:
sql = 'SELECT * FROM `banned_stickers` WHERE `chat_id` = %s'
cursor.execute(sql, (msg.chat.id, ))
res = cursor.fetchall()
for i in res:
sql = 'UPDATE `banned_stickers` SET `chat_name` = %s WHERE `chat_id` = %s'
cursor.execute(sql, (msg.chat.title, msg.chat.id))
conn.commit()
| 19,855
|
def rearrange(s):
"""
Args:
s
Returns:
[]
"""
if not can_arrange_palindrome2(s):
return []
m = {}
for c in s:
if c in m:
m[c] += 1
else:
m[c] = 1
middle = ""
for k in m:
if m[k] % 2 == 0:
            m[k] //= 2  # keep counts as integers; each half builds one side of the palindrome
else:
middle = k
if middle:
del m[middle]
res = rec_rearrange("", m)
palindromes = []
for i in res:
palindromes.append(i + middle + "".join(list(i)[::-1]))
return palindromes
| 19,856
|
def get_org_memberships(user_id: str):
"""Return a list of organizations and roles where the input user is a member"""
query = (
model.Session.query(model.Group, model.Member.capacity)
.join(model.Member, model.Member.group_id == model.Group.id)
.join(model.User, model.User.id == model.Member.table_id)
.filter(
model.User.id == user_id,
model.Member.state == "active",
model.Group.is_organization == True,
)
.order_by(model.Group.name)
)
return query.all()
| 19,857
|
def standardize_concentration(df, columns, unit="nM"):
"""Make all concentrations match the given unit.
For a given DataFrame and column, convert mM, uM, nM, and pM concentration
values to the specified unit (default nM). Rename the column to include
({unit}).
Parameters
----------
df : a pandas DataFrame
columns : str or list
column name(s) to be converted to the given unit
unit : one of ["mM", "uM", "nM", "pM"], default "nM"
Returns
-------
A modified dataframe.
Examples
--------
>>> df.head()
experiment [DNA]
A 100 nM
B 1 uM
>>> standardize_concentration(df, columns="[DNA]", unit="nM").head()
experiment [DNA] (nM)
A 100.0
B 1000.0
"""
conversions_dict = {
"mM to mM": 1,
"mM to uM": 1000,
"mM to nM": 1000000,
"mM to pM": 1000000000,
"uM to mM": 1 / 1000,
"uM to uM": 1,
"uM to nM": 1000,
"uM to pM": 1000000,
"nM to mM": 1 / 1000000,
"nM to uM": 1 / 1000,
"nM to nM": 1,
"nM to pM": 1000,
"pM to mM": 1 / 1000000000,
"pM to uM": 1 / 1000000,
"pM to nM": 1 / 1000,
"pM to pM": 1,
}
# don't modify in place
df = df.copy().reset_index(drop=True)
if type(columns) == str:
columns = [columns]
for column in columns:
for i, row in df.iterrows():
            # variables that didn't exist in all concatenated dfs will be represented as NaN
if type(row[column]) is float:
if np.isnan(row[column]):
df.loc[i, column] = 0
continue
else:
                    raise RuntimeError(
                        f"Something has gone wrong in row {i}, column {column}. "
                        + f"Value is {row[column]}."
                    )
molar_index = row[column].find("M")
current_unit = row[column][molar_index - 1 : molar_index + 1]
if current_unit not in ["mM", "uM", "nM", "pM"]:
raise RuntimeError(
f"Unit {current_unit} not recognized in row {i}, column {column}."
)
value = float(row[column][: molar_index - 1])
df.loc[i, column] = value * conversions_dict[f"{current_unit} to {unit}"]
df = df.rename(columns={column: f"{column} ({unit})" for column in columns})
return df
| 19,858
|
def setup(hass, config):
""" Setup the Visonic Alarm component."""
from visonic import alarm as visonicalarm
global HUB
HUB = VisonicAlarmHub(config[DOMAIN], visonicalarm)
if not HUB.connect():
return False
HUB.update()
# Load the supported platforms
for component in ('sensor', 'alarm_control_panel'):
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
| 19,859
|
def moreparams():
""" Read list of json files or return one specific for specific time """
hour_back1 = request.args.get('hour_back1', default=1, type=int)
hour_back2 = request.args.get('hour_back2', default=0, type=int)
object_of_interest = request.args.get('object_of_interest', type=None)
#print("object_of_interest: " + str(object_of_interest)[1:-1])
cam = request.args.get('cam', default=0, type=str)
if hour_back1 != '':
hour_back1 = int(hour_back1)
else:
hour_back1 = 0 # default value: 60 min back
if hour_back2 != '':
hour_back2 = int(hour_back2)
else:
hour_back2 = 1 # default value: 60 min back
logger.debug("cam: {}, hour_back:{}, now_in_seconds:{}".format(cam, hour_back1, hour_back2))
params = gen_params(cam=cam, time1=hour_back1, time2=hour_back2 ,object_of_interest=object_of_interest)
return Response(params, mimetype='text/plain')
| 19,860
|
def generate_http_request_md_fenced_code_block(
language=None,
fence_string='```',
**kwargs,
):
"""Wraps [``generate_http_request_code``](#generate_http_request_code)
function result in a Markdown fenced code block.
    Args:
        language (str): Programming language for the code block. If not
            given, ``DEFAULT_LANGUAGE`` is used.
        fence_string (str): Code block fence string used wrapping the code.
            It does not perform any check about if the fenced string is a
            "valid" markdown code block fence string.
        **kwargs: All other optional arguments are passed to
            [``generate_http_request_code``](#generate_http_request_code)
            function.
Examples:
>>> generate_http_request_md_fenced_code_block(setup=False)
"```python\\nreq = requests.get('http://localhost')\\n```"
>>> generate_http_request_md_fenced_code_block(fence_string='~~~',
... setup=False)
"~~~python\\nreq = requests.get('http://localhost')\\n~~~"
Returns:
str: Fenced code block with HTTP request code snippet inside.
"""
return '{fence_string}{language}\n{render}\n{fence_string}'.format(
language=language if language else DEFAULT_LANGUAGE,
render=generate_http_request_code(language=language, **kwargs),
fence_string=fence_string,
)
| 19,861
|
def test_detectors_with_stats(test_video_file):
""" Test all detectors functionality with a StatsManager. """
for detector in [ContentDetector, ThresholdDetector, AdaptiveDetector]:
vm = VideoManager([test_video_file])
stats = StatsManager()
sm = SceneManager(stats_manager=stats)
sm.add_detector(create_detector(detector, vm))
try:
end_time = FrameTimecode('00:00:15', vm.get_framerate())
vm.set_duration(end_time=end_time)
vm.set_downscale_factor()
vm.start()
sm.detect_scenes(frame_source=vm)
initial_scene_len = len(sm.get_scene_list())
assert initial_scene_len > 0 # test case must have at least one scene!
# Re-analyze using existing stats manager.
sm = SceneManager(stats_manager=stats)
sm.add_detector(create_detector(detector, vm))
vm.release()
vm.reset()
vm.set_duration(end_time=end_time)
vm.set_downscale_factor()
vm.start()
sm.detect_scenes(frame_source=vm)
scene_list = sm.get_scene_list()
assert len(scene_list) == initial_scene_len
finally:
vm.release()
| 19,862
|
def confident_hit_ratio(y_true, y_pred, cut_off=0.1):
"""
This function return the hit ratio of the true-positive for confident molecules.
Confident molecules are defined as confidence values that are higher than the cutoff.
:param y_true:
:param y_pred:
:param cut_off: confident value that defines if a prediction are considered confident
:return:
"""
actual_indexes = np.where(y_true==1)[0]
confident_indexes = np.where(y_pred>cut_off)[0]
confident_hit = np.intersect1d(actual_indexes, confident_indexes)
ratio = 1.0 * len(confident_hit) / len(actual_indexes)
return ratio
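# Usage sketch: 2 of the 3 actual hits are predicted with confidence > 0.1.
import numpy as np

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([0.9, 0.05, 0.02, 0.3, 0.6])
print(confident_hit_ratio(y_true, y_pred, cut_off=0.1))  # 2/3 ~= 0.667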
| 19,863
|
def onion(ctx, port, onion_version, private_key, show_private_key, detach):
"""
Add a temporary onion-service to the Tor we connect to.
This keeps an onion-service running as long as this command is
running with an arbitrary list of forwarded ports.
"""
if len(port) == 0:
raise click.UsageError(
"You must use --port at least once"
)
if private_key is not None:
if onion_version == 3 and not private_key.startswith('ED25519-V3'):
raise click.UsageError(
"Private key type is not version 3"
)
if onion_version == 2 and not private_key.startswith('RSA1024'):
raise click.UsageError(
"Private key type is not version 2"
)
def _range_check(p):
try:
p = int(p)
if p < 1 or p > 65535:
raise click.UsageError(
"{} invalid port".format(p)
)
except ValueError:
raise click.UsageError(
"{} is not an int".format(p)
)
validated_ports = []
for p in port:
if ':' in p:
remote, local = p.split(':', 1)
_range_check(remote)
# the local port can be an ip:port pair, or a unix:/
# socket so we'll let txtorcon take care
validated_ports.append((int(remote), local))
else:
_range_check(p)
validated_ports.append(int(p))
try:
onion_version = int(onion_version)
if onion_version not in (2, 3):
raise ValueError()
except ValueError:
raise click.UsageError(
"--onion-version must be 2 or 3"
)
cfg = ctx.obj
return _run_command(
carml_onion.run,
cfg,
list(validated_ports),
onion_version,
private_key,
show_private_key,
detach,
)
| 19,864
|
def determine_if_pb_should_be_filtered(row, min_junc_after_stop_codon):
"""PB should be filtered if NMD, a truncation, or protein classification
is not likely protein coding (intergenic, antisense, fusion,...)
Args:
row (pandas Series): protein classification row
        min_junc_after_stop_codon (int): minimum number of junctions after stop
codon a protein can have. used in NMD determination
Returns:
int: 1 if should be filtered, 0 if should not be filtered
"""
# filter out pbs that are artifacts or noncoding
pclass = str(row['protein_classification'])
num_junc_after_stop_codon = int(row['num_junc_after_stop_codon'])
pclass_base_to_keep = ['pFSM','pNIC']
pclass_base = str(row['protein_classification_base'])
if pclass_base not in pclass_base_to_keep and num_junc_after_stop_codon > min_junc_after_stop_codon:
return 1
elif 'trunc' in pclass:
return 1
elif 'intergenic' in pclass:
return 1
elif 'antisense' in pclass:
return 1
elif 'fusion' in pclass:
return 1
elif 'orphan' in pclass:
return 1
elif 'genic' in pclass:
return 1
return 0
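A small illustration with a hypothetical classification row (assumes pandas is imported as pd; column names are taken from the function body):
row = pd.Series({
    'protein_classification': 'pNNC,trunc',
    'protein_classification_base': 'pNNC',
    'num_junc_after_stop_codon': 0,
})
# Not flagged as NMD (0 junctions after the stop codon), but 'trunc' marks a truncation.
print(determine_if_pb_should_be_filtered(row, min_junc_after_stop_codon=2))  # 1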
| 19,865
|
def sma(other_args: List[str], s_ticker: str, s_interval: str, df_stock: pd.DataFrame):
"""Plots simple moving average (SMA) over stock
Parameters
----------
other_args: List[str]
Argparse arguments
s_ticker: str
Ticker
s_interval: str
Data interval
df_stock: pd.DataFrame
Dataframe of dates and prices
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="sma",
description="""
Moving Averages are used to smooth the data in an array to
help eliminate noise and identify trends. The Simple Moving Average is literally
the simplest form of a moving average. Each output value is the average of the
previous n values. In a Simple Moving Average, each value in the time period carries
equal weight, and values outside of the time period are not included in the average.
This makes it less responsive to recent changes in the data, which can be useful for
filtering out those changes.
""",
)
parser.add_argument(
"-l",
"--length",
dest="l_length",
type=lambda s: [int(item) for item in s.split(",")],
default=[20, 50],
help="length of MA window",
)
parser.add_argument(
"-o",
"--offset",
action="store",
dest="n_offset",
type=check_positive,
default=0,
help="offset",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
fig, _ = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
if s_interval == "1440min":
plt.plot(df_stock.index, df_stock["Adj Close"].values, color="k")
else:
plt.plot(df_stock.index, df_stock["Close"].values, color="k")
l_legend = list()
l_legend.append(s_ticker)
for length in ns_parser.l_length:
if s_interval == "1440min":
df_ta = ta.sma(
df_stock["Adj Close"],
length=length,
offset=ns_parser.n_offset,
).dropna()
else:
df_ta = ta.sma(
df_stock["Close"], length=length, offset=ns_parser.n_offset
).dropna()
plt.plot(df_ta.index, df_ta.values)
l_legend.append(f"SMA{length} ")
plt.title(f"SMA on {s_ticker}")
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.xlabel("Time")
plt.ylabel(f"{s_ticker} Price ($)")
plt.legend(l_legend)
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.minorticks_on()
plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
if gtff.USE_ION:
plt.ion()
plt.gcf().autofmt_xdate()
fig.tight_layout(pad=1)
plt.show()
print("")
except Exception as e:
print(e, "\n")
| 19,866
|
def calculate_shortest_path(draw_func, grid, start, end):
"""https://en.wikipedia.org/wiki/A*_search_algorithm"""
count = 0
open_set = queue.PriorityQueue()
open_set.put((0, count, start))
open_set_hash = {start}
came_from = {}
# g_score: Distance from start to current node
g_score = {spot: float("inf") for row in grid for spot in row} # TODO: Using objects as dict keys, is it OK?
g_score[start] = 0
# f_score: Sum of g_score and ESTIMATED distance from current to end node
f_score = {spot: float("inf") for row in grid for spot in row} # TODO: Using objects as dict keys, is it OK?
f_score[start] = start.distance(end)
while not open_set.empty():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return None
current = open_set.get()[2]
open_set_hash.remove(current)
if current == end:
reconstruct_path(came_from, current, draw_func)
start.celltype = CellType.START
end.celltype = CellType.END
return True
for neighbor in current.neighbors:
new_g_score = g_score[current] + 1 # All edges are weighted equally
            # If this path to the neighbor is better than any previous one, record it
if new_g_score < g_score[neighbor]:
came_from[neighbor] = current
g_score[neighbor] = new_g_score
f_score[neighbor] = g_score[neighbor] + neighbor.distance(end)
if neighbor not in open_set_hash:
count += 1
open_set.put((f_score[neighbor], count, neighbor))
open_set_hash.add(neighbor)
neighbor.celltype = CellType.OPEN
draw_func()
if current != start:
current.celltype = CellType.CLOSED
return False
| 19,867
|
def _get_hashed_id(full_name: str, name_from_id: MutableMapping[int,
str]) -> int:
"""Converts the string-typed name to int-typed ID."""
  # Reduce the SHA-256 digest modulo _INT64_MAX so the result fits in the
  # int64 `id` field of the metadata artifact proto.
result = int(hashlib.sha256(full_name.encode('utf-8')).hexdigest(),
16) % _INT64_MAX
name_from_id[result] = full_name
return result
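A small usage sketch (the artifact name is hypothetical; _INT64_MAX and hashlib come from the surrounding module):
name_from_id = {}
artifact_id = _get_hashed_id('my_pipeline.my_component.output', name_from_id)
# The mapping lets callers recover the original name from the hashed ID.
assert name_from_id[artifact_id] == 'my_pipeline.my_component.output'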
| 19,868
|
def find_adjustment(tdata : tuple, xdata : tuple, ydata : tuple,
numstept=10,numstepx=10,tol=1e-6) -> tuple:
"""
Find best fit of data with temporal and spatial offset in range. Returns
the tuple err, dt, dx.
Finds a temporal and spatial offset to apply to the temporal and spatial
locations of the lif data such that the corresponding elevation data has
minimal absolute difference. find_adjustment takes a brute force approach,
and will compare the difference in ydata at overlapping tdata and xdata
locations for all offsets within plus or minus numstept and numstepx. By
default 400 possible offsets are evaluated. tdata and xdata must be
integer types in order to find the overlapping tdata and xdata locations.
Raises a TypeError for some inputs. Raises a ValueError if there is no
intersection in tdata & xdata,
"""
if not (isinstance(tdata,tuple) and len(tdata)==2):
raise TypeError("tdata must be a tuple with length 2")
elif not (tdata[0].dtype==int and tdata[1].dtype==int):
raise TypeError(f"t in tdata must have dtype int but has dtypes " \
f"{tdata[0].dtype} and {tdata[1].dtype}")
elif not (isinstance(xdata,tuple) and len(xdata)==2):
raise TypeError("xdata must be a tuple with length 2")
elif not (xdata[0].dtype==int and xdata[1].dtype==int):
raise TypeError(f"x in xdata must have dtype int but has dtypes " \
f"{xdata[0].dtype} and {xdata[1].dtype}")
elif not (isinstance(ydata,tuple) and len(ydata)==2):
raise TypeError("ydata must be a tuple with length 2")
    # create all possible pairs of offsets in the range
if numstept == 0:
dt = np.asarray([0],dtype=int)
else:
dt = np.arange(-numstept,numstept+1)
if numstepx == 0:
dx = np.asarray([0],dtype=int)
else:
dx = np.arange(-numstepx,numstepx+1)
DT, DX = tuple(np.meshgrid(dt,dx))
pos = np.transpose(np.stack([DT.ravel(),DX.ravel()]))
# for each possible offset in space and time, estimate the error
err = np.empty(DT.ravel().shape)
err[:] = np.nan # invalid by default
for idx, p in enumerate(pos):
dt, dx = p
_, tidx0, tidx1 = np.intersect1d(tdata[0],tdata[1]+dt,return_indices=True)
_, xidx0, xidx1 = np.intersect1d(xdata[0],xdata[1]+dx,return_indices=True)
# it is possible that dt and dx will push them out of overlapping
# skip in that case (err[idx] = np.nan by default)
if not ( tidx0.size==0 or xidx0.size==0
or tidx1.size==0 or xidx1.size==0 ):
yidx0 = tuple(np.meshgrid(tidx0,xidx0,indexing = 'ij'))
yidx1 = tuple(np.meshgrid(tidx1,xidx1,indexing = 'ij'))
#err[idx] = np.mean(np.abs(ydata[0][yidx0] - ydata[1][yidx1]))
err[idx] = np.mean((ydata[0][yidx0] - ydata[1][yidx1])**2)
# error out if there is no intersection of the data for any offset
if np.isnan(err).all():
raise ValueError("xdata and tdata have no intersection")
idx_min = np.nanargmin(err)
dt, dx = pos[idx_min]
return err[idx_min], dt, dx
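A synthetic check with hypothetical data (assumes numpy is imported as np): two copies of the same surface whose coordinate labels are offset by one time step and two spatial steps should be realigned at dt=1, dx=2 with zero error.
t = np.arange(0, 10)
x = np.arange(0, 20)
y = np.add.outer(t, x).astype(float)   # y[i, j] depends only on t[i] and x[j]
err, dt, dx = find_adjustment((t, t - 1), (x, x - 2), (y, y),
                              numstept=3, numstepx=3)
print(err, dt, dx)  # expect 0.0 1 2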
| 19,869
|
def get_params():
"""Loads ./config.yml in a dict and returns it"""
with open(HERE/'config.yml') as file:
        params = yaml.safe_load(file)
return params
| 19,870
|
def parse_args(args, repo_dirs):
"""
Extract the CLI arguments from argparse
"""
parser = argparse.ArgumentParser(description="Sweet branch creation tool")
parser.add_argument(
"--repo",
help="Repository to create branch in",
choices=repo_dirs,
required=False,
)
parser.add_argument(
"--parent",
help="Parent branch",
default="dev",
required=False,
)
parser.add_argument("ticket", help="Ticket to build branch name from")
parser.add_argument(
"--version",
action="version",
version="jolly_brancher {ver}".format(ver=__version__),
)
parser.add_argument(
"-v",
"--verbose",
dest="loglevel",
help="set loglevel to INFO",
action="store_const",
const=logging.INFO,
)
parser.add_argument(
"-vv",
"--very-verbose",
dest="loglevel",
help="set loglevel to DEBUG",
action="store_const",
const=logging.DEBUG,
)
return parser.parse_args(args)
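A quick usage sketch with hypothetical repository names:
args = parse_args(['--repo', 'my-service', 'TICKET-123'], repo_dirs=['my-service', 'billing'])
print(args.repo, args.parent, args.ticket)  # my-service dev TICKET-123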
| 19,871
|
def simplify():
"""Our standard simplification of logic routine. What it does depende on the problem size.
For large problems, we use the &methods which use a simple circuit based SAT solver. Also problem
size dictates the level of k-step induction done in 'scorr' The stongest simplification is done if
n_ands < 20000. Then it used the clause based solver and k-step induction where |k| depends
on the problem size """
# set_globals()
## print 'simplify initial ',
## ps()
#abc('w t.aig')
n=n_ands()
abc('scl')
if n > 40000:
abc('&get;&scl;&put')
n = n_ands()
if n < 100000:
abc("&dc2;&put;dr;&get;&lcorr;&dc2;&put;dr;&get;&scorr;&fraig;&dc2;&put;dr")
run_command('ps')
print '.',
n = n_ands()
if n<60000:
abc("&get;&scorr -F 2;&put;dc2rs")
print '.',
#ps()
else:
abc("dc2rs")
print '.',
#ps()
n = n_ands()
n = n_ands()
if n <= 40000:
print '.',
#ps()
if n > 30000:
abc("dc2rs")
print '.',
## else:
## abc("scorr -F 2;dc2rs")
## print '.',
## ps()
n = max(1,n_ands())
#ps()
if n < 30000:
abc('scl;rw;dr;lcorr;rw;dr')
m = int(min( 60000/n, 16))
#print 'm = %d'%m
if m >= 1:
j = 1
while j <= m:
set_size()
#print 'j - %d'%j
#abc('scl;dr;dc2;scorr -C 5000 -F %d'%j)
if j<8:
abc('dc2')
else:
abc('dc2rs')
abc('scorr -C 5000 -F %d'%j)
if check_size():
break
j = 2*j
#ps()
continue
print '.',
| 19,872
|
def update_location(new_fix):
"""Update current location, and publishes map if it has been a while"""
g['fix'] = new_fix
if published_long_ago(): publish_map()
| 19,873
|
def test_arr_to_mat_double_small():
"""Test arr_to_mat."""
sample = np.asarray(
np.random.normal(size=(3, 3)), dtype=np.float64, order='F'
)
flag = carma.arr_to_mat_double(sample, False)
assert flag == 0, test_flags[flag]
| 19,874
|
def check_args(args):
"""Checking user input
"""
pass
| 19,875
|
def get_textgrid(path_transcription):
"""Get data from TextGrid file"""
data = textgriddf_reader(path_file=path_transcription)
text_df = textgriddf_df(data, item_no=2)
sentences = textgriddf_converter(text_df)
return sentences
| 19,876
|
def accessible_required(f):
"""Decorator for an endpoint that requires a user have accessible or read permission in the
given room. The function must take a `room` argument by name, as is typically used with flask
endpoints with a `<Room:room>` argument."""
@wraps(f)
def required_accessible_wrapper(*args, room, **kwargs):
if not room.check_accessible(g.user):
abort(http.NOT_FOUND)
return f(*args, room=room, **kwargs)
return required_accessible_wrapper
| 19,877
|
def make_erb_cos_filters_nx(signal_length, sr, n, low_lim, hi_lim, sample_factor, padding_size=None, full_filter=True, strict=True, **kwargs):
"""Create ERB cosine filters, oversampled by a factor provided by "sample_factor"
Args:
signal_length (int): Length of signal to be filtered with the generated
filterbank. The signal length determines the length of the filters.
sr (int): Sampling rate associated with the signal waveform.
n (int): Number of filters (subbands) to be generated with standard
sampling (i.e., using a sampling factor of 1). Note, the actual number of
filters in the generated filterbank depends on the sampling factor, and
will also include lowpass and highpass filters that allow for
perfect reconstruction of the input signal (the exact number of lowpass
and highpass filters is determined by the sampling factor). The
number of filters in the generated filterbank is given below:
            +---------------+---------------+---+------------+---+---------------------+
            | sample factor | n_out         | = | bandpass   | + | highpass + lowpass  |
            +===============+===============+===+============+===+=====================+
            | 1             | n+2           | = | n          | + | 1 + 1               |
            +---------------+---------------+---+------------+---+---------------------+
            | 2             | 2*n+1+4       | = | 2*n+1      | + | 2 + 2               |
            +---------------+---------------+---+------------+---+---------------------+
            | 4             | 4*n+3+8       | = | 4*n+3      | + | 4 + 4               |
            +---------------+---------------+---+------------+---+---------------------+
            | s             | s*(n+1)-1+2*s | = | s*(n+1)-1  | + | s + s               |
            +---------------+---------------+---+------------+---+---------------------+
low_lim (int): Lower limit of frequency range. Filters will not be defined
below this limit.
hi_lim (int): Upper limit of frequency range. Filters will not be defined
above this limit.
sample_factor (int): Positive integer that determines how densely ERB function
will be sampled to create bandpass filters. 1 represents standard sampling;
adjacent bandpass filters will overlap by 50%. 2 represents 2x overcomplete sampling;
adjacent bandpass filters will overlap by 75%. 4 represents 4x overcomplete sampling;
adjacent bandpass filters will overlap by 87.5%.
padding_size (int, optional): If None (default), the signal will not be padded
before filtering. Otherwise, the filters will be created assuming the
waveform signal will be padded to length padding_size*signal_length.
full_filter (bool, default=True): If True (default), the complete filter that
is ready to apply to the signal is returned. If False, only the first
half of the filter is returned (likely positive terms of FFT).
strict (bool, default=True): If True (default), will throw an error if
sample_factor is not a power of two. This facilitates comparison across
sample_factors. Also, if True, will throw an error if provided hi_lim
is greater than the Nyquist rate.
Returns:
tuple:
A tuple containing the output:
* **filts** (*array*)-- The filterbank consisting of filters have
cosine-shaped frequency responses, with center frequencies equally
spaced on an ERB scale from low_lim to hi_lim.
            * **center_freqs** (*array*) -- center frequencies, in Hz, of each filter in the filterbank.
            * **freqs** (*array*) -- the frequency grid, in Hz, on which the filters are defined.
Raises:
ValueError: Various value errors for bad choices of sample_factor; see
description for strict parameter.
"""
if not isinstance(sample_factor, int):
raise ValueError('sample_factor must be an integer, not %s' % type(sample_factor))
if sample_factor <= 0:
raise ValueError('sample_factor must be positive')
if sample_factor != 1 and np.remainder(sample_factor, 2) != 0:
msg = 'sample_factor odd, and will change ERB filter widths. Use even sample factors for comparison.'
if strict:
raise ValueError(msg)
else:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
if padding_size is not None and padding_size >= 1:
signal_length += padding_size
if np.remainder(signal_length, 2) == 0: # even length
n_freqs = signal_length // 2 # .0 does not include DC, likely the sampling grid
max_freq = sr / 2 # go all the way to nyquist
else: # odd length
n_freqs = (signal_length - 1) // 2 # .0
max_freq = sr * (signal_length - 1) / 2 / signal_length # just under nyquist
# verify the high limit is allowed by the sampling rate
if hi_lim > sr / 2:
hi_lim = max_freq
msg = 'input arg "hi_lim" exceeds nyquist limit for max frequency; ignore with "strict=False"'
if strict:
raise ValueError(msg)
else:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
# changing the sampling density without changing the filter locations
# (and, thereby changing their widths) requires that a certain number of filters
# be used.
n_filters = sample_factor * (n + 1) - 1
n_lp_hp = 2 * sample_factor
freqs = utils.matlab_arange(0, max_freq, n_freqs)
filts = np.zeros((n_freqs + 1 , n_filters + n_lp_hp)) # ?? n_freqs+1
# cutoffs are evenly spaced on an erb scale -- interpolate linearly in erb space then convert back
# get the actual spacing use to generate the sequence (in case numpy does something weird)
center_freqs, erb_spacing = np.linspace(freq2erb(low_lim), freq2erb(hi_lim), n_filters + 2, retstep=True) # +2 for bin endpoints
# we need to exclude the endpoints
center_freqs = center_freqs[1:-1]
freqs_erb = freq2erb(freqs)
for i in range(n_filters):
i_offset = i + sample_factor
l = center_freqs[i] - sample_factor * erb_spacing
h = center_freqs[i] + sample_factor * erb_spacing
# the first sample_factor # of rows in filts will be lowpass filters
filts[(freqs_erb > l) & (freqs_erb < h), i_offset] = make_cosine_filter(freqs_erb, l, h, convert_to_erb=False)
    # there will be sample_factor lowpass filters and sample_factor highpass filters
for i in range(sample_factor):
# account for the fact that the first sample_factor # of filts are lowpass
i_offset = i + sample_factor
lp_h_ind = max(np.where(freqs < erb2freq(center_freqs[i]))[0]) # lowpass filter goes up to peak of first cos filter
lp_filt = np.sqrt(1 - np.power(filts[:lp_h_ind+1, i_offset], 2))
hp_l_ind = min(np.where(freqs > erb2freq(center_freqs[-1-i]))[0]) # highpass filter goes down to peak of last cos filter
hp_filt = np.sqrt(1 - np.power(filts[hp_l_ind:, -1-i_offset], 2))
filts[:lp_h_ind+1, i] = lp_filt
filts[hp_l_ind:, -1-i] = hp_filt
# ensure that squared freq response adds to one
filts = filts / np.sqrt(sample_factor)
# get center freqs for lowpass and highpass filters
cfs_low = np.copy(center_freqs[:sample_factor]) - sample_factor * erb_spacing
cfs_hi = np.copy(center_freqs[-sample_factor:]) + sample_factor * erb_spacing
center_freqs = erb2freq(np.concatenate((cfs_low, center_freqs, cfs_hi)))
# rectify
center_freqs[center_freqs < 0] = 1
# discard highpass and lowpass filters, if requested
if kwargs.get('no_lowpass'):
filts = filts[:, sample_factor:]
if kwargs.get('no_highpass'):
filts = filts[:, :-sample_factor]
# make the full filter by adding negative components
if full_filter:
filts = make_full_filter_set(filts, signal_length)
return filts, center_freqs, freqs
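A hypothetical call (parameter values are illustrative only): build a 4x-overcomplete ERB filterbank for a two-second signal sampled at 16 kHz.
sr = 16000
signal_length = 2 * sr
filts, center_freqs, freqs = make_erb_cos_filters_nx(
    signal_length, sr, n=38, low_lim=20, hi_lim=8000,
    sample_factor=4, padding_size=None, full_filter=True, strict=True)
# One column per filter; apply by multiplying with the FFT of the signal and inverting.
print(filts.shape, center_freqs.shape)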
| 19,878
|
def ecr_repo_permission_policy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECR.3] ECR repositories should be have a repository policy configured"""
response = describe_repositories(cache)
myRepos = response["repositories"]
for repo in myRepos:
repoArn = str(repo["repositoryArn"])
repoName = str(repo["repositoryName"])
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
try:
# this is a passing finding
response = ecr.get_repository_policy(repositoryName=repoName)
finding = {
"SchemaVersion": "2018-10-08",
"Id": repoArn + "/ecr-repo-access-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": repoArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECR.3] ECR repositories should be have a repository policy configured",
"Description": "ECR repository "
+ repoName
+ " has a repository policy configured.",
"Remediation": {
"Recommendation": {
"Text": "If your repository should be configured to have a repository policy refer to the Amazon ECR Repository Policies section in the Amazon ECR User Guide",
"Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcrRepository",
"Id": repoArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"RepositoryName": repoName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-6",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 AC-3",
"NIST SP 800-53 AC-16",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-24",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 PE-2",
"NIST SP 800-53 PS-3",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.7.1.1",
"ISO 27001:2013 A.9.2.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except:
finding = {
"SchemaVersion": "2018-10-08",
"Id": repoArn + "/ecr-repo-access-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": repoArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[ECR.3] ECR repositories should be have a repository policy configured",
"Description": "ECR repository "
+ repoName
+ " does not have a repository policy configured. Refer to the remediation instructions if this configuration is not intended",
"Remediation": {
"Recommendation": {
"Text": "If your repository should be configured to have a repository policy refer to the Amazon ECR Repository Policies section in the Amazon ECR User Guide",
"Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcrRepository",
"Id": repoArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"RepositoryName": repoName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-6",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 AC-3",
"NIST SP 800-53 AC-16",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-24",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 PE-2",
"NIST SP 800-53 PS-3",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.7.1.1",
"ISO 27001:2013 A.9.2.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
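A usage sketch for consuming the generator (requires live AWS credentials plus the module-level ecr client and describe_repositories helper; the account ID is a placeholder):
for finding in ecr_repo_permission_policy_check(
        cache={}, awsAccountId='111122223333', awsRegion='us-east-1', awsPartition='aws'):
    print(finding['Id'], finding['Compliance']['Status'])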
| 19,879
|
def t_COMMENT(t):
"""
//.*
"""
pass
| 19,880
|
def button_ld_train_first_day(criteria, min_reversal_number):
"""
This function creates a csv file for the LD Train test. Each row will be the first day the animal ran the
test. At the end, the function will ask the user to save the newly created csv file in a directory.
:param criteria: A widget that contains a string that represents the duration of the criteria as n days/n+1 days
    :param min_reversal_number: An entry widget that contains a value that represents the minimum required reversal
number for an animal
"""
# check that the inputs to the criteria widgets are valid
if ld_train_criteria_min_rev_check(criteria, min_reversal_number) is not None:
criteria_list, min_rev = ld_train_criteria_min_rev_check(criteria, min_reversal_number)
else:
mb.showerror('LD Train Criteria Error',
'button_ld_train_first_day() error: One of the three criteria is empty or invalid!')
print('button_ld_train_first_day() error: One of the three criteria is empty or invalid!')
return None
if ld_criteria_list_check(criteria_list) is not None:
criteria_value, criteria_max_days = ld_criteria_list_check(criteria_list)
else:
mb.showerror('LD Train Criteria Error',
'button_ld_train_first_day() error: The n/n+1 days criteria is empty or invalid!')
print('button_ld_train_first_day() error: The n/n+1 days criteria is empty or invalid!')
return None
df = data_setup('LD Train')
if df is not None:
ld_train_delete_other_difficulties(df)
get_ld_train_normal(df, criteria_value, criteria_max_days, min_rev)
save_file_message(df)
else:
mb.showerror('LD Train Criteria Error',
                     'button_ld_train_first_day() error: One of the criteria is invalid or you hit the cancel button!')
        print('button_ld_train_first_day() error: One of the criteria is invalid or you hit the cancel button!')
return None
| 19,881
|
def analyze_video(file, name, api):
"""
Call Scenescoop analyze with a video
"""
args = Namespace(video=file, name=name, input_data=None, api=True)
scene_content = scenescoop(args)
content = ''
maxframes = 0
for description in scene_content:
if(len(scene_content[description]) > maxframes):
content = description
maxframes = len(scene_content[description])
if(api):
return jsonify(status="200", scene_content=scene_content, content=content, maxframes=maxframes)
else:
return content
| 19,882
|
def validategeojson(data_input, mode):
"""GeoJSON validation example
>>> import StringIO
>>> class FakeInput(object):
... json = open('point.geojson','w')
... json.write('''{"type":"Feature", "properties":{}, "geometry":{"type":"Point", "coordinates":[8.5781228542328, 22.87500500679]}, "crs":{"type":"name", "properties":{"name":"urn:ogc:def:crs:OGC:1.3:CRS84"}}}''')
... json.close()
... file = 'point.geojson'
>>> class fake_data_format(object):
... mimetype = 'application/geojson'
>>> fake_input = FakeInput()
>>> fake_input.data_format = fake_data_format()
>>> validategeojson(fake_input, MODE.SIMPLE)
True
"""
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
_get_mimetypes()
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = (mtype == data_input.data_format.mimetype == FORMATS['GEOJSON'][0])
if mode >= MODE.STRICT:
from osgeo import ogr
data_source = ogr.Open(data_input.file)
if data_source:
passed = (data_source.GetDriver().GetName() == "GeoJSON")
else:
passed = False
if mode >= MODE.VERYSTRICT:
import jsonschema
import json
# this code comes from
# https://github.com/om-henners/GeoJSON_Validation/blob/master/geojsonvalidation/geojson_validation.py
schema_home = os.path.join(_get_schemas_home(), "geojson")
base_schema = os.path.join(schema_home, "geojson.json")
geojson_base = json.load(open(base_schema))
cached_json = {
"http://json-schema.org/geojson/crs.json":
json.load(open(os.path.join(schema_home, "crs.json"))),
"http://json-schema.org/geojson/bbox.json":
json.load(open(os.path.join(schema_home, "bbox.json"))),
"http://json-schema.org/geojson/geometry.json":
json.load(open(os.path.join(schema_home, "geometry.json")))
}
resolver = jsonschema.RefResolver(
"http://json-schema.org/geojson/geojson.json",
geojson_base, store=cached_json)
validator = jsonschema.Draft4Validator(geojson_base, resolver=resolver)
try:
validator.validate(json.loads(data_input.stream.read()))
passed = True
except jsonschema.ValidationError:
passed = False
return passed
| 19,883
|
def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
"""
Build and train an encoder-decoder model on x and y
:param input_shape: Tuple of input shape
:param output_sequence_length: Length of output sequence
:param english_vocab_size: Number of unique English words in the dataset
:param french_vocab_size: Number of unique French words in the dataset
:return: Keras model built, but not trained
"""
# OPTIONAL: Implement
return None
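One possible way to fill in this stub, shown as a separate sketch rather than a reference solution (assumes TensorFlow/Keras is installed; layer sizes are illustrative):
def encdec_model_sketch(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import GRU, RepeatVector, TimeDistributed, Dense
    model = Sequential([
        # Encoder: compress the input sequence into a single state vector.
        GRU(128, input_shape=input_shape[1:]),
        # Bridge: repeat the encoded state once per output timestep.
        RepeatVector(output_sequence_length),
        # Decoder: unroll the repeated state into an output sequence.
        GRU(128, return_sequences=True),
        # Project every timestep onto the French vocabulary.
        TimeDistributed(Dense(french_vocab_size, activation='softmax')),
    ])
    # english_vocab_size is unused in this simple sketch; a fuller model would add an Embedding layer.
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model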
| 19,884
|
def close_to_cron(crontab_time, time_struct):
    """For each field, return the value from the allowed cron range (crontab_time) that is closest to, without exceeding, the corresponding time_struct value."""
    close_time = list(time_struct)  # work on a copy so the input is not mutated
    cindex = 0
    for val_struct in time_struct:
        offset_min = val_struct
        val_close = val_struct
        for val_cron in crontab_time[cindex]:
            offset_tmp = val_struct - val_cron
            if offset_tmp > 0 and offset_tmp < offset_min:
                val_close = val_cron  # record the closest allowed value below val_struct
                offset_min = offset_tmp
        close_time[cindex] = val_close
        cindex = cindex + 1
    return close_time
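A small illustration with hypothetical cron ranges (allowed minutes and hours only):
crontab_time = [[0, 15, 30, 45], [0, 6, 12, 18]]  # allowed minutes, allowed hours
time_struct = [37, 14]                            # minute 37, hour 14
print(close_to_cron(crontab_time, time_struct))   # [30, 12]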
| 19,885
|
def remove_stop_words(words_list: list) -> list:
    """ Remove English stop words (via nltk.corpus.stopwords) from a list of words """
    en_stop_words = set(stopwords.words('english'))
    return [w for w in words_list if str(w).lower() not in en_stop_words]
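A quick example (requires the NLTK stopwords corpus, e.g. via nltk.download('stopwords')):
print(remove_stop_words(['This', 'is', 'a', 'simple', 'test']))  # ['simple', 'test']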
| 19,886
|
def build_jerry_data(jerry_path):
"""
Build up a dictionary which contains the following items:
- sources: list of JerryScript sources which should be built.
- dirs: list of JerryScript dirs used.
- cflags: CFLAGS for the build.
"""
jerry_sources = []
jerry_dirs = set()
for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]:
for file in find_sources(os.path.normpath(jerry_path), sub_dir):
path = os.path.join('jerryscript', file)
jerry_sources.append(path)
jerry_dirs.add(os.path.split(path)[0])
jerry_cflags = [
'-DJERRY_GLOBAL_HEAP_SIZE=10',
'-DJERRY_NDEBUG',
'-DJERRY_DISABLE_HEAVY_DEBUG',
'-DJERRY_BUILTIN_NUMBER=0',
'-DJERRY_BUILTIN_STRING=0',
'-DJERRY_BUILTIN_BOOLEAN=0',
#'-DJERRY_BUILTIN_ERRORS=0',
'-DJERRY_BUILTIN_ARRAY=0',
'-DJERRY_BUILTIN_MATH=0',
'-DJERRY_BUILTIN_JSON=0',
'-DJERRY_BUILTIN_DATE=0',
'-DJERRY_BUILTIN_REGEXP=0',
'-DJERRY_BUILTIN_ANNEXB=0',
'-DJERRY_ESNEXT=0',
'-DJERRY_LCACHE=0',
'-DJERRY_PROPERTY_HASHMAP=0',
]
return {
'sources': jerry_sources,
'dirs': jerry_dirs,
'cflags': jerry_cflags,
}
| 19,887
|
def min_geodesic_distance_rotmats_pairwise_tf(r1s, r2s):
"""Compute min geodesic distance for each R1 wrt R2."""
# These are the traces of R1^T R2
trace = tf.einsum('...aij,...bij->...ab', r1s, r2s)
# closest rotation has max trace
max_trace = tf.reduce_max(trace, axis=-1)
return tf.acos(tf.clip_by_value((max_trace - 1.0) / 2.0, -1.0, 1.0))
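A quick sanity check with hypothetical rotations (assumes TensorFlow is imported as tf and numpy as np):
theta = np.pi / 4
rz = tf.constant([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0,            0.0,           1.0]], dtype=tf.float32)
r1s = tf.stack([tf.eye(3), rz])   # identity and a 45-degree rotation about z
r2s = tf.eye(3, batch_shape=[1])  # reference set containing only the identity
print(min_geodesic_distance_rotmats_pairwise_tf(r1s, r2s))  # ~[0.0, pi/4]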
| 19,888
|
def project_to_2D(xyz):
"""Projection to (0, X, Z) plane."""
return xyz[0], xyz[2]
| 19,889
|
def Geom2dInt_Geom2dCurveTool_D2(*args):
"""
:param C:
:type C: Adaptor2d_Curve2d &
:param U:
:type U: float
:param P:
:type P: gp_Pnt2d
:param T:
:type T: gp_Vec2d
:param N:
:type N: gp_Vec2d
:rtype: void
"""
return _Geom2dInt.Geom2dInt_Geom2dCurveTool_D2(*args)
| 19,890
|
def osm_nodes(raw_elements: List[ELEMENT_RAW_TYPE]) -> Generator[Node, None, None]:
"""Converts dictionaries to Node class instances.
Returns a generator so use results in a for loop or convert to list.
Expects dictionary structure as returned by Overpass API."""
for element in raw_elements:
tags = element.get('tags')
metadata = {
'changeset': element.get('changeset'),
'user': element.get('user'),
'uid': element.get('uid'),
'timestamp': element.get('timestamp')
}
if element.get('type') == 'node':
yield Node(
element.get('id'),
element.get('version'),
element.get('lat'),
element.get('lon'),
tags,
metadata
)
| 19,891
|
def _content_length_rewriter(state):
"""Rewrite the Content-Length header.
Even though Content-Length is not a user modifiable header, App Engine
sends a correct Content-Length to the user based on the actual response.
If the response status code indicates that the response is not allowed to
contain a body, the body will be deleted instead. If the response body is
longer than the maximum response length, the response will be turned into a
500 Internal Server Error.
Args:
state: A RewriterState to modify.
"""
# Convert the body into a list of strings, to allow it to be traversed more
# than once. This is the only way to get the Content-Length before streaming
# the output.
state.body = list(state.body)
length = sum(len(block) for block in state.body)
if state.status_code in constants.NO_BODY_RESPONSE_STATUSES:
# Delete the body and Content-Length response header.
state.body = []
del state.headers['Content-Length']
elif state.environ.get('REQUEST_METHOD') == 'HEAD':
if length:
# Delete the body, but preserve the Content-Length response header.
logging.warning('Dropping unexpected body in response to HEAD request')
state.body = []
else:
if (not state.allow_large_response and
length > constants.MAX_RUNTIME_RESPONSE_SIZE):
# NOTE: This response is too small to be visible in IE, as it replaces any
# error page with <512 bytes with its own.
# http://en.wikipedia.org/wiki/HTTP_404#Custom_error_pages
logging.error('Response too large: %d, max is %d',
length, constants.MAX_RUNTIME_RESPONSE_SIZE)
new_response = ('HTTP response was too large: %d. The limit is: %d.\n' %
(length, constants.MAX_RUNTIME_RESPONSE_SIZE))
state.status = '500 Internal Server Error'
state.headers['Content-Type'] = 'text/html'
state.headers['Content-Length'] = str(len(new_response))
state.body = [new_response]
else:
state.headers['Content-Length'] = str(length)
| 19,892
|
def notfound(request):
"""
Common notfound return message
"""
msg = CustomError.NOT_FOUND_ERROR.format(request.url, request.method)
log.error(msg)
request.response.status = 404
return {'error': 'true', 'code': 404, 'message': msg}
| 19,893
|
def l2norm(a):
"""Return the l2 norm of a, flattened out.
Implemented as a separate function (not a call to norm() for speed)."""
return np.sqrt(np.sum(np.absolute(a)**2))
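A quick check (assumes numpy is imported as np, as the function body already requires):
print(l2norm(np.array([3.0, 4.0])))  # 5.0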
| 19,894
|
def create_container(request):
""" Creates a container (empty object of type application/directory) """
storage_url = get_endpoint(request, 'adminURL')
auth_token = get_token_id(request)
http_conn = client.http_connection(storage_url,
insecure=settings.SWIFT_INSECURE)
form = CreateContainerForm(request.POST or None)
if form.is_valid():
container = form.cleaned_data['containername']
try:
client.put_container(storage_url,
auth_token,
container,
http_conn=http_conn)
messages.add_message(request, messages.SUCCESS,
_("Container created."))
actionlog.log(request.user.username, "create", container)
except client.ClientException as err:
log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
messages.add_message(request, messages.ERROR, _('Access denied.'))
return redirect(containerview)
context = utils.update_default_context(request, {
'form': form,
})
return render_to_response('create_container.html', context,
context_instance=RequestContext(request))
| 19,895
|
def check_all_data_present(file_path):
"""Checks the data exists in location file_path"""
filenames = [
"t10k-images-idx3-ubyte",
"t10k-labels-idx1-ubyte",
"train-images-idx3-ubyte",
"train-labels-idx1-ubyte",
]
data_path = os.path.join(file_path, "data")
return tu.check_data_exists(data_path, filenames)
| 19,896
|
def get_searchable_models():
"""
Returns a list of all models in the Django project which implement ISearchable
"""
    app = AppCache()
    return [klass for klass in app.get_models() if implements(klass, ISearchable)]
| 19,897
|
def _worker_within(
worker_id,
global_t_nulls,
spotwise_t_nulls,
df_filt,
perms,
kernel_matrix,
null_corrs_filt,
keep_indices,
verbose=10,
compute_spotwise_pvals=False
):
"""
This function computes the test statistic on a chunk of permutations when
run in parallel mode.
"""
if verbose > 1:
print(f"Started worker {worker_id}...")
for perm_i, perm in enumerate(perms):
if verbose > 1 and perm_i % 25 == 0:
print(f"Worker {worker_id}, running permutation {perm_i}/{len(perms)}")
# Compute alternative likelihoods
perm_ll, perm_spot_lls = compute_llrts_within(
df_filt,
perm.T,
kernel_matrix,
null_corrs_filt,
keep_indices
)
# Record the test statistic for this null sample
global_t_nulls.append(perm_ll)
if compute_spotwise_pvals:
spotwise_t_nulls.append(perm_spot_lls)
| 19,898
|
def create_updated_alert_from_slack_message(payload, time, alert_json):
"""
Create an updated raw alert (json) from an update request in Slack
"""
values = payload['view']['state']['values']
for value in values:
for key in values[value]:
if key == 'alert_id':
continue
if key == 'severity':
if values[value][key].get('selected_option'):
alert_json[key] = \
values[value][key]['selected_option']['text']['text']
            elif key == 'active':
if values[value][key].get('selected_option'):
alert_json[key] = \
values[value][key]['selected_option']['text']['text']
else:
if values[value][key].get('value'):
alert_json[key] = values[value][key]['value']
alert_json['datetime'] = time
return alert_json
| 19,899
|