content
stringlengths 22
815k
| id
int64 0
4.91M
|
|---|---|
def boolean(entry, option_key="True/False", **kwargs):
    """Interpret user input as an on/off switch.

    Args:
        entry (str): A value such as True, On, Enabled, Disabled, False, 0, or 1.
        option_key (str): What kind of Boolean we are setting. What Option is this for?
    Returns:
        Boolean
    Raises:
        ValueError: If *entry* is not a string or not a recognized token.
    """
    truthy = {"1", "TRUE", "ON", "ENABLED", "ENABLE", "YES"}
    falsy = {"0", "FALSE", "OFF", "DISABLED", "DISABLE", "NO"}
    complaint = (
        f"Must enter 0 (false) or 1 (true) for {option_key}. "
        "Also accepts True, False, On, Off, Yes, No, Enabled, and Disabled"
    )
    if isinstance(entry, str):
        token = entry.upper()
        if token in truthy:
            return True
        if token in falsy:
            return False
    raise ValueError(complaint)
| 24,800
|
def verify_args(args, arg_infos, function_name):
    """Verify that a list of arguments matches a list of argument
    descriptions in a HAT file.

    Args:
        args: Sequence of numpy.ndarray arguments actually passed.
        arg_infos: Sequence of descriptions exposing ``numpy_dtype``,
            ``numpy_shape`` and ``numpy_strides`` attributes.
        function_name: Name of the called function, used in error messages.

    Exits the process with an explanatory message on any mismatch;
    returns None when everything matches.
    """
    # check number of args
    if len(args) != len(arg_infos):
        sys.exit(
            f"Error calling {function_name}(...): expected {len(arg_infos)} arguments but received {len(args)}")
    # check each arg against its description
    for i, (arg, arg_info) in enumerate(zip(args, arg_infos)):
        # confirm that the arg is a numpy ndarray
        if not isinstance(arg, np.ndarray):
            # BUG FIX: this message was missing its f-prefix and printed the
            # placeholders literally instead of the actual values.
            sys.exit(
                f"Error calling {function_name}(...): expected argument {i} to be <class 'numpy.ndarray'> but received {type(arg)}")
        # confirm that the arg dtype matches the description in the hat package
        if arg_info.numpy_dtype != arg.dtype:
            sys.exit(
                f"Error calling {function_name}(...): expected argument {i} to have dtype={arg_info.numpy_dtype} but received dtype={arg.dtype}")
        # confirm that the arg shape is correct
        if arg_info.numpy_shape != arg.shape:
            sys.exit(
                f"Error calling {function_name}(...): expected argument {i} to have shape={arg_info.numpy_shape} but received shape={arg.shape}")
        # confirm that the arg strides are correct
        if arg_info.numpy_strides != arg.strides:
            sys.exit(
                f"Error calling {function_name}(...): expected argument {i} to have strides={arg_info.numpy_strides} but received strides={arg.strides}")
| 24,801
|
def save(image, path):
    """Save and extract a docker image to a directory.
    Parameters
    ----------
    image : str
        A unique identifier for a docker image.
    path : str
        A directory to extract the image to.
    Raises
    ------
    OSError
        If *path* already exists and is not empty.
    """
    # Use a temporary file because docker save (or actually tar underneath)
    # complains that stdout needs to be redirected if we use Popen and PIPE.
    with tempfile.NamedTemporaryFile() as stream:
        sp.check_call(["docker", "save", "-o", stream.name, image])
        with tarfile.open(stream.name, mode="r:") as tar:
            if not op.exists(path):
                lgr.debug("Creating new directory at %s", path)
                os.makedirs(path)
            elif os.listdir(path):
                raise OSError("Directory {} is not empty".format(path))
            # NOTE(review): extractall() without a member filter trusts the
            # archive contents; docker-generated tars are presumably safe,
            # but confirm before accepting images from untrusted sources.
            tar.extractall(path=path)
            lgr.info("Saved %s to %s", image, path)
| 24,802
|
def remove_job(cursor, arguments):
    """Remove a job from the job table.

    Examples:
        python main.py --remove job --job_id 7
        python main.py -r job -j 7
        python main.py --remove job --job_title sales
        python main.py -r job -t sales

    :param cursor: Cursor for SQL command execution.
    :param arguments: All arguments passed to program.
    """
    # Delegate to the shared removal helper; a job may only be identified
    # by its title or its id.
    remove_helper(cursor, arguments, ('job_title', 'job_id'), TableName.job.value)
| 24,803
|
def edge_preserving_filter(ref_map: np.ndarray, guided_image: np.ndarray,
                           window_size: int, epsilon: float = 1e-10) -> np.ndarray:
    """
    Perform edge - preserving filtering on the newly created reference map.
    Guided-filter style pass: per window, linear coefficients (a_k, b_k) are
    fitted against the guided image, then averaged over all windows covering
    each pixel, and the filtered per-class scores are argmax-ed into labels.
    :param ref_map: Classification reference map. NOTE(review): indexed with
        three axes below, so this is presumably (rows, cols, classes) --
        confirm against the caller.
    :param guided_image: Guided image as a mean over all bands from hyperspectral data.
    :param window_size: Size of the convolving window.
    :param epsilon: Regularizer constant.
    :return: Improved classification map.
    """
    print("Window size = {}".format(window_size))
    # NOTE(review): the names look swapped -- col_indexes ranges over
    # ROW_AXIS and row_indexes over COLUMNS_AXIS. Behavior is unaffected
    # because both are only consumed together via product() below.
    col_indexes, row_indexes = \
        range(0, ref_map.shape[ROW_AXIS], window_size), range(0, ref_map.shape[COLUMNS_AXIS], window_size)
    print("Calculating coefficients:")
    # Per-pixel, per-class linear coefficients of the guided filter.
    a_k_map, b_k_map = np.empty(shape=ref_map.shape), np.empty(shape=ref_map.shape)
    for i in tqdm(range(ref_map.shape[SPECTRAL_AXIS]), total=ref_map.shape[SPECTRAL_AXIS]):
        for row, col in product(col_indexes, row_indexes):
            # p_k: window of the reference map; i_k: matching guided window.
            p_k = copy(ref_map[row:row + window_size, col:col + window_size, i])
            i_k = copy(guided_image[row:row + window_size, col:col + window_size])
            sum_ = np.sum(i_k * p_k - np.mean(i_k) * np.mean(p_k)) / (window_size ** 2)
            # epsilon prevents division by zero in flat (zero-variance) windows.
            a_k = sum_ / (np.var(i_k) + epsilon)
            b_k = np.mean(p_k) - a_k * np.mean(i_k)
            a_k_map[row:row + window_size, col:col + window_size, i] = a_k
            b_k_map[row:row + window_size, col:col + window_size, i] = b_k
    output_image = np.empty(shape=ref_map.shape)
    print("Calculating new \"improved\" classification map:")
    for i in tqdm(range(ref_map.shape[SPECTRAL_AXIS]), total=ref_map.shape[SPECTRAL_AXIS]):
        for row_index, col_index in product(range(ref_map.shape[ROW_AXIS]), range(ref_map.shape[COLUMNS_AXIS])):
            a_k_sum, b_k_sum = 0, 0
            # Clip the window around (row_index, col_index) to the map bounds.
            row_sub_indexes, col_sub_indexes = \
                list(filter(lambda x: 0 <= x < ref_map.shape[ROW_AXIS],
                            list(range(row_index - floor(window_size / 2),
                                       row_index + ceil(window_size / 2))))), \
                list(filter(lambda x: 0 <= x < ref_map.shape[COLUMNS_AXIS],
                            list(range(col_index - floor(window_size / 2),
                                       col_index + ceil(window_size / 2)))))
            # Average the coefficients of every window covering this pixel.
            for sub_row_idx, sub_col_idx in product(row_sub_indexes, col_sub_indexes):
                a_k_sum += a_k_map[sub_row_idx, sub_col_idx, i]
                b_k_sum += b_k_map[sub_row_idx, sub_col_idx, i]
            a_k_sum, b_k_sum = a_k_sum / (row_sub_indexes.__len__() * col_sub_indexes.__len__()), \
                               b_k_sum / (row_sub_indexes.__len__() * col_sub_indexes.__len__())
            output_image[row_index, col_index, i] = a_k_sum * guided_image[row_index, col_index] + b_k_sum
    # Collapse the per-class scores to a single label map.
    output_image = np.argmax(output_image, axis=-1) + BG_CLASS
    return output_image
| 24,804
|
def logout(ctx, provider):
    """Logout from a provider.

    :param ctx: CLI/application context forwarded to AuthCmd.
    :param provider: Identifier of the auth provider to log out from.
    """
    # AuthCmd encapsulates the provider-specific auth flow.
    AuthCmd(ctx, provider).logout()
| 24,805
|
def preprocess_annotated_utterance(
        annotated_utterance: str,
        not_entity: str = NOT_ENTITY,
    ) -> List[str]:
    """Character Level Entity Label Producer.

    Extract the named-entity of every character from XML-like annotations
    and return the labels in the same order as the characters of the clean
    (annotation-free) sentence.

    Args:
        annotated_utterance (a string):
            An utterance with annotations looks like <a>blabla</a>.
            It is a special format for labeling named-entity in an utterance.
        not_entity (a string, default = "DONT_CARE"):
            A representation of words that we don't care about.
    Returns:
        entities (a list of string):
            A list of named-entity labels in character level.
    Examples:
        >>> from ynlu.sdk.evaluation.utils import preprocess_annotated_utterance
        >>> preprocess_annotated_utterance(
                annotated_utterance="<drink>Coffee</drink>, please.",
                not_entity="n",
            )
        >>> ["drink", "drink", "drink", "drink", "drink", "drink", "n",
             "n", "n", "n", "n", "n", "n", "n", "n"]
    """
    plain = remove_annotation(annotated_utterance)
    # Default every character to the "don't care" label.
    labels = [not_entity for _ in plain]
    search_from = 0
    for entity, word in FINDALL_PROG.findall(annotated_utterance):
        # Locate each annotated word left-to-right so repeated words map
        # to their own occurrences.
        position = plain.find(word, search_from)
        if position < 0:
            raise ValueError(
                "Word {} can not be found in {}".format(word, plain),
            )
        for offset in range(len(word)):
            labels[position + offset] = entity
        search_from = position + len(word)
    return labels
| 24,806
|
def test_channel_tag_wrong_type(init_lst_proc):
    """Raise a type error when a wrong type is set for the channel number."""
    # A string is deliberately used where an integer is required.
    wrong_type = "42"
    with pytest.raises(TypeError) as exc_info:
        init_lst_proc.channel_tag = wrong_type
    # The setter must report exactly this message.
    exc_msg = exc_info.value.args[0]
    assert exc_msg == "Channel number must be given as an integer."
| 24,807
|
def invitation_code_created(sender, email, **kwargs):
    """Send confirmation email to user."""
    # Resolve the email module and confirmation function from settings so
    # projects can override the default hunger implementation.
    module_path = setting('BETA_EMAIL_MODULE', 'hunger.email')
    function_name = setting('BETA_EMAIL_CONFIRM_FUNCTION', 'beta_confirm')
    confirm = getattr(importlib.import_module(module_path), function_name)
    confirm(email, **kwargs)
| 24,808
|
def generate_constant_table(
    name: str,
    constants: List[Constant],
    *,
    data_type: str = "LREAL",
    guid: str = "",
    lookup_by_key: bool = False,
    **kwargs
) -> str:
    """
    Generate a GVL constant table, with no interpolation.
    Parameters
    ----------
    name : str
        The code block name.
    constants : list of Constant
        The constants to embed in the table.
    data_type : str, optional
        The data type. Defaults to LREAL.
    guid : str, optional
        The function block globally unique identifier / GUID. Derived from
        *name* when empty.
    lookup_by_key : bool, optional
        Select the key-lookup template instead of the plain GVL template.
    **kwargs :
        Additional keyword arguments to pass to or override in the template
        (e.g. table_prefix, lookup_input, lookup_index, row_delta_variable).
    Returns
    -------
    code : str
        The constant table source code.
    """
    template_kw = dict(
        name=name,
        guid=guid or guid_from_string(name),
        data_type=data_type,
        constants=constants,
    )
    # Caller-supplied kwargs may override the defaults above.
    template_kw.update(kwargs)
    template_fn = (
        CONSTANT_GVL_LOOKUP_TEMPLATE
        if lookup_by_key
        else CONSTANT_GVL_TEMPLATE
    )
    # BUG FIX: read the template through a context manager; the previous
    # open(...).read() leaked the file handle. The return annotation is
    # also corrected to ``str`` -- render() returns a single string, which
    # matches the documented return value.
    with open(template_fn, "rt") as template_file:
        template = jinja2.Template(template_file.read())
    return template.render(template_kw)
| 24,809
|
def filter_stop_words(text):
    """
    Filter all stop words from a string to reduce headline size.

    Keeps at most the first 20 non-stop words, joined by single spaces.

    :param text: text to filter
    :return: shortened headline
    """
    # `s` is the module-level stop-word collection. The previous version
    # counted words manually and stripped a trailing space; slicing the
    # filtered list and joining produces the same result directly.
    kept = [word for word in text.split() if word not in s]
    return " ".join(kept[:20])
| 24,810
|
def importPublicKey(publickey):
    """Import an RSA public key.

    Takes a serialized public key and returns the corresponding RSA key
    object. (The original French docstring said "export"; the code calls
    RSA.importKey, i.e. it imports.)
    """
    return RSA.importKey(publickey)
| 24,811
|
def _format_contact(resource, key):
    """
    Return the contact field with the correct values.
    This is mainly stripping out the unecessary fields from the telecom part of
    the response.
    """
    # Replace the raw contacts under `key` with cleaned-up copies.
    raw_contacts = resource.pop(key)
    formatted = []
    for entry in raw_contacts:
        entry["telecom"] = _format_telecom(
            entry,
            "telecom",
            add_textphone_extension=False,
            whitelist=["id", "use", "period", "extension"]
        )
        formatted.append(entry)
    resource[key] = formatted
    return formatted
| 24,812
|
def get_day_type(date):
    """
    Return whether a date falls on a weekday or on the weekend.
    :param date datetime:
    :return string:
    """
    # Reject anything that is not a datetime.date (datetime.datetime is a
    # subclass and passes this check too).
    if not isinstance(date, datetime.date):
        raise TypeError('date is not a datetime.date')
    # weekday() yields 0-4 for Monday-Friday and 5-6 for the weekend.
    return c.WEEKDAY if date.weekday() < 5 else c.WEEKEND
| 24,813
|
def case_insensitive_equals(name1: str, name2: str) -> bool:
    """
    Convenience method to check whether two strings match, irrespective of
    their case and any surrounding whitespace.

    Uses str.casefold() instead of lower() so that caseless matching also
    works for non-ASCII text (e.g. German "straße" vs "STRASSE"); behavior
    for ASCII input is unchanged.
    """
    return name1.strip().casefold() == name2.strip().casefold()
| 24,814
|
def plot3(distances, mean_fc_all,
          track_names, track_palette,
          p_adjs,
          output_fig,
          ylim=[-0.3, 0.6],
          yticks=[-0.3,0,0.3,0.6],
          p_th1=0.05,
          p_th2=0.001,
          ):
    """Plot mean log2 fold change vs. genomic distance for the mc/atac/both
    track groups, with paired t-test significance stars.

    :param distances: x-axis genomic distances.
    :param mean_fc_all: table iterated row-wise; each row carries 'celltype'
        and a 'mean_fc' mapping keyed by track name.
    :param track_names: track keys, e.g. 'linked_mc', 'correlated_mc', ...
    :param track_palette: mapping of track name to plot color.
    :param p_adjs: NOTE(review): this parameter is immediately shadowed by
        the local recomputation below and therefore ignored -- confirm intent.
    :param output_fig: path handed to snmcseq_utils.savefig.
    :param ylim: y-axis limits. (Mutable default; not modified here.)
    :param yticks: y-axis ticks. (Mutable default; not modified here.)
    :param p_th1: FDR threshold for a single star.
    :param p_th2: FDR threshold for a triple star.
    """
    # Collect per-celltype log2 fold changes, clipped to [-1, 1], per track.
    ys = {track_name: [] for track_name in track_names}
    for idx, row in mean_fc_all.iterrows():
        celltype = row['celltype']
        for track_name in track_names:
            ys[track_name].append(
                np.clip(np.log2(row['mean_fc'][track_name]), -1, 1)
            )
    for track_name in track_names:
        ys[track_name] = np.array(ys[track_name])
    # Mean and 95% CI across cell types (n=8, hence sqrt(8)).
    ys_mean = {track_name: np.nanmean(ys[track_name], axis=0) for track_name in track_names}
    ys_std = {track_name: np.nanstd(ys[track_name], axis=0) for track_name in track_names}
    ys_err = {track_name: ys_std[track_name]*1.96/np.sqrt(8) for track_name in track_names}
    p_adjs = {'mc': [], 'atac': [], 'both': [],}
    # t test compare linked vs correlated
    num_celltypes, num_dists = ys['linked_mc'].shape
    for catg in ['mc', 'atac', 'both']:
        for i in np.arange(num_dists):
            _a = ys['linked_{}'.format(catg)][:, i]
            _b = ys['correlated_{}'.format(catg)][:, i]
            _t, _pval = stats.ttest_rel(_a, _b)
            # multiple comparison
            # NOTE(review): multipletests is applied to one p-value at a
            # time here, so no cross-distance correction actually happens --
            # confirm this is intended.
            _, p_adj, _, _ = multipletests(_pval, alpha=0.05, method='hs', is_sorted=False, returnsorted=False)
            #
            p_adjs[catg].append(p_adj)
    fig, axs = plt.subplots(1, 3, figsize=(5*3,5), sharey=True, sharex=True)
    for i, (ax, _type) in enumerate(zip(axs, ['mc', 'atac', 'both'])):
        for track_name in track_names:
            color = track_palette[track_name]
            if track_name.endswith(_type):
                pvals = p_adjs[_type]
                # Star annotations at significant distances.
                for idx, dist in enumerate(distances):
                    if pvals[idx] < p_th2:
                        ax.text(dist, -0.3, '*\n*\n*', fontsize=15, linespacing=0.3, ha='center')
                    elif pvals[idx] < p_th1:
                        ax.text(dist, -0.3, '*', fontsize=15, ha='center')
                # Shaded CI band plus the mean curve.
                ax.fill_between(distances,
                                ys_mean[track_name]-ys_err[track_name],
                                ys_mean[track_name]+ys_err[track_name],
                                color=color,
                                alpha=0.2
                                )
                ax.plot(distances, ys_mean[track_name],
                        label=track_name, color=color,
                        linewidth=5,
                        )
        ax.axhline(0, linestyle='--', color='gray')
        ax.set_title(_type)
        ax.set_xlabel('Genomic distance')
        if i == 0:
            ax.set_ylabel('log2(FC)\n(+/- 95% CI; n=8 cell types)')
        ax.xaxis.set_major_formatter(mtick.EngFormatter())
        ax.set_xlim([-5e3, 1.05*1e5])
        ax.set_ylim(ylim)
        ax.set_yticks(yticks)
    axs[2].annotate("*: FDR < 0.05\n***: FDR<0.001", (1.05, 0.1), xycoords='axes fraction', fontsize=15)
    handles, labels = snmcseq_utils.combine_legends(axs.flat)
    # handles, labels = snmcseq_utils.dedup_legends(handles, labels)
    ax.legend(handles, labels, bbox_to_anchor=(1,1), loc='upper left')
    snmcseq_utils.savefig(fig, output_fig)
    plt.show()
| 24,815
|
def get_access_token(cmd, subscription=None, resource=None, scopes=None, resource_type=None, tenant=None):
    """
    get AAD token to access to a specified resource.
    Use 'az cloud show' command for other Azure resources

    :param cmd: CLI command context (provides cli_ctx and cloud endpoints).
    :param subscription: subscription to acquire the token for.
    :param resource: explicit resource URI; wins over resource_type.
    :param scopes: OAuth scopes forwarded to get_raw_token.
    :param resource_type: friendly name resolved to an endpoint URL via
        cloud_resource_type_mappings when no explicit resource is given.
    :param tenant: tenant to authenticate against.
    :returns: dict with tokenType, accessToken, expiresOn, tenant and,
        when available, subscription.
    """
    # Map a friendly resource_type onto the concrete cloud endpoint URL
    # only when the caller did not pass an explicit resource.
    if resource is None and resource_type:
        endpoints_attr_name = cloud_resource_type_mappings[resource_type]
        resource = getattr(cmd.cli_ctx.cloud.endpoints, endpoints_attr_name)
    profile = Profile(cli_ctx=cmd.cli_ctx)
    creds, subscription, tenant = profile.get_raw_token(subscription=subscription, resource=resource, scopes=scopes,
                                                        tenant=tenant)
    result = {
        'tokenType': creds[0],
        'accessToken': creds[1],
        # 'expires_on': creds[2].get('expires_on', None),
        'expiresOn': creds[2].get('expiresOn', None),
        'tenant': tenant
    }
    if subscription:
        result['subscription'] = subscription
    return result
| 24,816
|
def ensure_listable(obj):
    """Ensure *obj* is a list-like container type.

    Lists, tuples and sets pass through unchanged; any other value is
    wrapped in a single-element list.
    """
    if isinstance(obj, (list, tuple, set)):
        return obj
    return [obj]
| 24,817
|
def merge_dicts(*dicts: dict) -> dict:
    """Merge dictionaries into a new dict seeded from the first one.

    Args:
        *dicts: Dictionaries to merge; later dicts may only repeat a key
            with the exact same value.
    Returns:
        A new dict with the union of all entries ({} when called with no
        arguments).
    Raises:
        ValueError: If the same key appears with conflicting values.
    """
    if not dicts:
        return {}
    merged_dict = dicts[0].copy()
    for dict_to_merge in dicts[1:]:
        for key, value in dict_to_merge.items():
            if key not in merged_dict or value == merged_dict[key]:
                merged_dict[key] = value
            else:
                raise ValueError(
                    f"Test {key} already has a mark we don't want to overwrite: \n"
                    f"- existing: {merged_dict[key]} "
                    f"- new value: {value}"
                )
        # BUG FIX: removed the redundant merged_dict.update(dict_to_merge);
        # the loop above has already copied every key/value over.
    return merged_dict
| 24,818
|
def main():
    """Main script: train a RandomForest on the Kaggle house-prices data
    and write a submission CSV."""
    # Load data
    train_data = pd.read_csv('data/train.csv')
    test_data = pd.read_csv('data/test.csv')
    y_train = train_data.SalePrice
    x_train = train_data.drop(['SalePrice'], axis=1)
    x_test = test_data
    # Encoding data: one-hot encode, then align so train/test share the
    # exact same columns (missing test columns become NaN).
    x_train = pd.get_dummies(x_train)
    x_test = pd.get_dummies(x_test)
    x_train, x_test = x_train.align(x_test, join='left', axis=1)
    # Impute data
    # NOTE(review): sklearn's Imputer was removed in modern releases in
    # favor of SimpleImputer -- confirm the pinned sklearn version.
    my_imputer = Imputer()
    x_train = my_imputer.fit_transform(x_train)
    x_test = my_imputer.transform(x_test)
    print(x_train)
    # Get model
    model = RandomForestRegressor()
    model.fit(x_train, y_train)
    pred = model.predict(x_test)
    # Output
    submission = pd.DataFrame({'Id': test_data.Id, 'SalePrice': pred})
    submission.to_csv("hot_encoding/submission.csv", index=False)
| 24,819
|
def winner(board):
    """
    Returns the winner of the game, if there is one.
    """
    # Scan every winning line; report the first player that fully owns one.
    for line in _winner_moves():
        cells = [board[r][c] for (r, c) in line]
        if all(cell is X for cell in cells):
            return X
        if all(cell is O for cell in cells):
            return O
    return None
| 24,820
|
def specified_kwargs(draw, *keys_values_defaults: KVD):
    """Generate valid kwargs given expected defaults.

    When we can't realistically use hh.kwargs() and thus test whether xp
    in fact defaults correctly, this strategy lets us drop generated
    arguments that already hold their default value (unless a drawn
    boolean keeps them anyway).
    """
    # Short-circuit keeps the draw() call conditional, exactly as before.
    return {
        keyword: value
        for keyword, value, default in keys_values_defaults
        if value is not default or draw(booleans())
    }
| 24,821
|
def add_image():
    """User uploads a new landmark image, and inserts into db.

    Reads ``imageURL`` and ``landmark_id`` from the POSTed form, persists a
    new LandmarkImage row, and returns the plain string "Success".
    """
    imageURL = request.form.get("imageURL")
    landmark_id = request.form.get("landmark_id")
    # Build and persist the new image record.
    new_image = LandmarkImage(landmark_id=landmark_id,
                              imageurl=imageURL)
    db.session.add(new_image)
    db.session.commit()
    return "Success"
| 24,822
|
def merge(link1: Node, link2: Node) -> Node:
    """
    Merge two sorted linked lists into one sorted list.

    Parameters
    -----------
    link1: Node
        Head of the first sorted list (may be None).
    link2: Node
        Head of the second sorted list (may be None).

    Returns
    ---------
    out: Node
        Head of the merged list.

    Notes
    ------
    CONSISTENCY FIX: the original relinked nodes in place while both lists
    were non-empty but allocated *copies* for the remaining tail. This
    version consistently relinks the original nodes, which also avoids the
    extra allocations; the value sequence returned is identical.
    """
    head = Node(None)  # dummy head simplifies edge handling
    ptr = head
    while link1 and link2:
        # Take the smaller head; ties prefer link1, preserving stability.
        if link1.val <= link2.val:
            ptr.next = link1
            link1 = link1.next
        else:
            ptr.next = link2
            link2 = link2.next
        ptr = ptr.next
    # At most one list is non-empty now; attach the remainder wholesale.
    ptr.next = link1 or link2
    return head.next
| 24,823
|
def l1_norm_optimization(a_i, b_i, c_i, w_i=None):
    """Solve the (optionally weighted) l1-norm optimization problem.

    Minimizes sum |w_i * (a_i*x + b_i*y + c_i)| over (x, y); unweighted
    when w_i is None.

    :param a_i: x-coefficients.
    :param b_i: y-coefficients.
    :param c_i: constant terms.
    :param w_i: optional per-row weights.
    :return: the optimal point (x0, y0).
    """
    cvx.solvers.options['show_progress'] = not CVX_SUPRESS_PRINT
    # DEDUPLICATION: the weighted and unweighted branches previously
    # repeated the entire formulation and solve; scaling the inputs first
    # lets one code path handle both. Note w_i * (c_i * -1) equals
    # (w_i * c_i) * -1, so the result is unchanged.
    if w_i is not None:
        a_i = np.multiply(a_i, w_i)
        b_i = np.multiply(b_i, w_i)
        c_i = np.multiply(w_i, c_i)
    # Problem must be formulated as sum |P*x - q|
    P = cvx.matrix([[cvx.matrix(a_i)], [cvx.matrix(b_i)]])
    q = cvx.matrix(c_i * -1)
    # Solve the l1-norm problem
    u = cvx.l1.l1(P, q)
    # return resulting point
    x0, y0 = u[0], u[1]
    return (x0, y0)
| 24,824
|
def set_order(market, order_type, amount, price, keys, stop_price=None):
    """
    Create an order
    Arguments:
        market (str) : market name,
        order_type (str) : may be "limit", "market", "market_by_quote",
            "limit_stop_loss"
        amount (float) : positive if BUY order, and negative for SELL
        price (float) : price of 1 ask currency in a quoted currency. Necessary only
            when type is "limit"
        keys (dict): {
            "private" : "",
            "public" : ""
            }
    Optional arguments:
        stop_price (float) : price when activates "limit_stop_loss" type order. If
            None then the same as price
    Returns:
        (list) [
            [0] (int) order ID,
            [1] (NoneType) not in use,
            [2] (NoneType) not in use,
            [3] (str) name of the market,
            [4] (int) time stamp of the creation in ms,
            [5] (int) time stamp of the update in ms,
            [6] (str) initial volume,
            [7] (str) order volume,
            [8] (str) order type ("LIMIT" or "MARKET"),
            [9] (NoneType) not in use,
            [10] (NoneType) not in use,
            [11] (NoneType) not in use,
            [12] (NoneType) not in use,
            [13] (str) order status,
            [14] (NoneType) not in use,
            [15] (NoneType) not in use,
            [16] (str) order price,
            [17] (str) average price of deals in order,
            [18] (NoneType) not is use,
            [19] (str) for stop price but None for other orders,
            [20] (NoneType) not in use,
            [21] (NoneType) not in use,
            [22] (NoneType) not in use,
            [23] (NoneType) not in use,
            [24] (NoneType) not in use,
            [25] (NoneType) not in use,
            [26] (NoneType) not in use,
            [27] (NoneType) not in use,
            [28] (NoneType) not in use,
            [29] (NoneType) not in use,
            [30] (NoneType) not in use,
            [31] (NoneType) not in use,
        ]
    """
    body = {
        "symbol": market,
        "type": order_type,
        "amount": amount,
        "price": price,
        # BUG FIX: `price` was always sent here, silently ignoring the
        # stop_price argument. Per the docstring, stop_price falls back to
        # price only when not given.
        "stop_price": price if stop_price is None else stop_price,
    }
    return _request("auth/w/order/submit", body=body, keys=keys)
| 24,825
|
def convert_ts(tt):
    """Convert a time.struct_time (or 9-tuple) to a UTC POSIX timestamp.

    Returns 0 for pre-epoch dates and None for inputs calendar.timegm
    cannot handle.

    tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
        tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
    >>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
    >>> convert_ts(tt)
    1350950400
    tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
        tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
    >>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
    >>> convert_ts(tt)
    0
    >>> tt = 12
    >>> convert_ts(tt)
    """
    try:
        ts = calendar.timegm(tt)
    except TypeError:
        # Not a struct_time / 9-tuple.
        return None
    # As from the github issue
    # https://github.com/prashanthellina/rsslurp/issues/680, some dates
    # produce negative timestamps, so clamp those to 0. (This note was
    # previously a no-op string literal inside the try block.)
    return max(ts, 0)
| 24,826
|
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501
    """data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_name_post
    creates tapi.common.NameAndValue # noqa: E501
    :param uuid: Id of path-comp-service
    :type uuid: str
    :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
    :type tapi_common_name_and_value: dict | bytes
    :rtype: None
    """
    # Connexion-generated stub: deserialize the JSON body when one is sent.
    if connexion.request.is_json:
        tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json())  # noqa: E501
    # TODO: generated placeholder response -- real handler not implemented.
    return 'do some magic!'
| 24,827
|
def receive_github_hook(request):
    """a hook is sent on some set of events, specifically:
    push/deploy: indicates that the content for the repository changed
    pull_request: there is an update to a pull request.
    This function checks that (globally) the event is valid, and if
    so, runs a function depending on the event.

    :param request: the incoming HTTP request delivered by GitHub's
        webhook agent (GitHub-Hookshot).
    :returns: a JsonResponseMessage describing the action taken or the
        reason the hook was rejected.
    """
    # We do these checks again for sanity
    if request.method == "POST":
        if DISABLE_WEBHOOKS:
            return JsonResponseMessage(message="Webhooks disabled")
        # Only GitHub's hook agent may call this endpoint.
        if not re.search("GitHub-Hookshot", request.META["HTTP_USER_AGENT"]):
            return JsonResponseMessage(message="Agent not allowed")
        # Only allow application/json content type
        if request.META["CONTENT_TYPE"] != "application/json":
            return JsonResponseMessage(message="Incorrect content type")
        # Check that it's coming from the right place
        required_headers = ["HTTP_X_GITHUB_DELIVERY", "HTTP_X_GITHUB_EVENT"]
        if not check_headers(request, required_headers):
            return JsonResponseMessage(message="Agent not allowed")
        # Has to be a push, deployment, or pull_request
        event = request.META["HTTP_X_GITHUB_EVENT"]
        # Ping happens on setup
        if event == "ping":
            return JsonResponseMessage(
                message="Ping received, no action taken.", status=200
            )
        # But don't allow types beyond push, deploy, pr
        if event not in ["push", "deployment", "pull_request", "repository"]:
            return JsonResponseMessage(message="Incorrect delivery method.")
        # A signature is also required
        signature = request.META.get("HTTP_X_HUB_SIGNATURE")
        if not signature:
            return JsonResponseMessage(message="Missing credentials.")
        # Parse the body
        payload = load_body(request)
        repo = payload.get("repository")
        repo_name = repo["full_name"]
        # If it's a repository event, might be transferred or renamed
        if event == "repository":
            if payload.get("action") == "transferred":
                # Reconstruct the pre-transfer name so the lookup below
                # still finds the article.
                owner = payload["changes"]["owner"]["from"]["user"]["login"]
                repo_name = "%s/%s" % (owner, repo.get("name"))
        # Retrieve the article
        try:
            article = Article.objects.get(repo__full_name=repo_name)
        except Article.DoesNotExist:
            return JsonResponseMessage(message="Article not found", status=404)
        # Don't continue if the repository is archived (this shouldn't happen)
        if article.archived:
            return JsonResponseMessage(message="Repository is archived.")
        # Validate the payload with the collection secret
        status = validate_payload(
            secret=str(article.secret),
            payload=request.body,
            request_signature=signature,
        )
        if not status:
            return JsonResponseMessage(message="Invalid credentials.")
        # Branch must be master
        branch = payload.get("ref", "refs/heads/master").replace("refs/heads/", "")
        # Update repo metadata that might change
        article.repo = repo
        article.save()
        # Submit job with django_rq to update article
        if event == "pull_request":
            against_branch = payload["pull_request"]["base"]["ref"]
            branch = payload["pull_request"]["head"]["ref"]
            # Only term-update branches targeting master are processed.
            if not branch.startswith("update/term") or against_branch != "master":
                return JsonResponseMessage(message="Ignoring branch.", status=200)
            # Requesting user is derived from branch
            user = branch.replace("update/term-", "").split("-")[0]
            res = django_rq.enqueue(
                update_pullrequest,
                article_uuid=article.uuid,
                user=user,
                action=payload["action"],
                url=payload["pull_request"]["html_url"],
                number=payload["number"],
                merged_at=payload["pull_request"]["merged_at"],
            )
        elif event in ["push", "deployment"]:
            if branch != "master":
                return JsonResponseMessage(message="Ignoring branch.", status=200)
            article.commit = payload["after"]
            article.save()
            res = django_rq.enqueue(update_article, article_uuid=article.uuid)
        elif event == "repository":
            res = django_rq.enqueue(
                repository_change,
                article_uuid=article.uuid,
                action=payload["action"],
                repo=json.dumps(payload["repository"]),
            )
        return JsonResponseMessage(
            message="Hook received and parsing.", status=200, status_message="Received"
        )
    return JsonResponseMessage(message="Invalid request.")
| 24,828
|
def get_onewire_status(code):
    """
    Determine and display an XBee's OneWire sensor status, if enabled.
    Args:
        code (int): The status code included in the packet.
    """
    # (label, bitmask) pairs; a status is reported when every bit of its
    # mask is set in the code.
    flags = (("A/D sensor read", 0x01),
             ("temperature sensor read", 0x02),
             ("water present", 0x60))
    text = ""
    for label, mask in flags:
        if code & mask == mask:
            text += label + ", "
    # Capitalize the first letter and drop the trailing comma and space.
    text = text[0:1].upper() + text[1:-2]
    print(pad_text("OneWire sensor status") + text)
| 24,829
|
def main():
    """Parse dictionary for unique stems and save them as a file."""
    unique_stems = set()
    # Group 1: leading consonant cluster (the "stem"); group 2: the rest of
    # the word starting at the first vowel.
    pattern = '([^aeiou]*?)([aeoiu].*)'
    with open('../inputs/words.txt') as file:
        for word in file:
            stems = re.findall(pattern, word.lower().rstrip())
            if stems:
                unique_stems.add(stems[0][0])
    with open('../inputs/stems.txt', 'w') as file:
        # [1:] skips the first sorted entry -- presumably the empty stem
        # produced by vowel-initial words, which sorts first. Confirm.
        for word in list(sorted(list(unique_stems)))[1:]:
            file.writelines(word+'\n')
    print('Saved words to ../inputs/stems.txt')
| 24,830
|
def dice(y, t, normalize=True, class_weight=None,
         ignore_label=-1, reduce='mean', eps=1e-08):
    """ Differentiable Dice coefficient.
    See: https://arxiv.org/pdf/1606.04797.pdf
    Args:
        y (~torch.Tensor): Probability
        t (~torch.Tensor): Ground-truth label
        normalize (bool, optional): If True, calculate the dice coefficients for each class and take the average. Defaults to True.
        class_weight (list or ndarray, optional): Per-class weights. Defaults to None.
        ignore_label (int, optional): Class channel index to exclude; -1 keeps all. Defaults to -1.
        reduce (str, optional): Only 'mean' is supported. Defaults to 'mean'.
        eps (float, optional): Division-by-zero guard. Defaults to 1e-08.
    Returns:
        torch.Tensor: Scalar mean Dice score.
    Raises:
        NotImplementedError: If *reduce* is not 'mean'.
    """
    _check_type_forward(y, t)
    device = y.device
    dtype = y.dtype
    if class_weight is not None:
        class_weight = torch.as_tensor(class_weight, dtype=dtype, device=device)
    b, c = y.shape[:2]
    # One-hot encode the targets, then flatten spatial dims to (b, c, -1).
    t_onehot = to_onehot(t, n_class=c)
    y = y.view(b, c, -1)
    t_onehot = t_onehot.view(b, c, -1)
    if ignore_label != -1:
        # Drop the ignored class channel from both tensors.
        t_onehot = torch.cat( (t_onehot[:, :ignore_label], t_onehot[:, ignore_label + 1:]), dim=1)
        y = torch.cat( (y[:, :ignore_label], y[:, ignore_label + 1:]), dim=1)
    intersection = y * t_onehot
    cardinality = y + t_onehot
    if normalize: # NOTE: channel-wise
        # Per-sample, per-class Dice; averaged over classes afterwards.
        intersection = torch.sum(intersection, dim=-1)
        cardinality = torch.sum(cardinality, dim=-1)
        ret = (2. * intersection / (cardinality + eps))
        if class_weight is not None:
            ret *= class_weight
        ret = torch.mean(ret, dim=1)
    else:
        # Pooled over batch and spatial dims; one Dice value per class.
        intersection = torch.sum(intersection, dim=(0, 2))
        cardinality = torch.sum(cardinality, dim=(0, 2))
        ret = (2. * intersection / (cardinality + eps))
        if class_weight is not None:
            ret *= class_weight
    if reduce == 'mean':
        ret = torch.mean(ret)
    else:
        raise NotImplementedError('unsupported reduce type..')
    return ret
| 24,831
|
def read_start_params(path_or_database):
    """Load the start parameters DataFrame.

    Args:
        path_or_database (pathlib.Path, str or sqlalchemy.MetaData)
    Returns:
        params (pd.DataFrame): see :ref:`params`.
    """
    # Normalize the input into a database handle, then read the single row
    # that stores the optimization problem definition.
    db_kwargs = _process_path_or_database(path_or_database)
    database = load_database(**db_kwargs)
    problem = read_last_rows(
        database=database,
        table_name="optimization_problem",
        n_rows=1,
        return_type="dict_of_lists",
    )
    return problem["params"][0]
| 24,832
|
def load_single_rec_into_tables_obj(src_dbreq,
                                    schema_engine,
                                    psql_schema,
                                    rec_id):
    """ Return Tables obj loaded from postgres.

    Python 2 code (dict.iteritems, str.decode) -- keep in mind if porting.
    :param src_dbreq: source psql request wrapper exposing a .cursor.
    :param schema_engine: schema used to build the empty Tables object.
    :param psql_schema: schema name ('' selects the default search path).
    :param rec_id: id of the source mongo record to fetch.
    """
    if len(psql_schema):
        psql_schema += '.'
    tables = create_tables_load_bson_data(schema_engine, None)
    # fetch mongo rec by id from source psql
    ext_tables_data = {}
    for table_name, table in tables.tables.iteritems():
        # Quote the id value only when the parent-id column requires it.
        id_name, quotes = parent_id_name_and_quotes_for_table(table)
        if quotes:
            id_val = "'" + str(rec_id) + "'"
        else:
            id_val = rec_id
        # Order by the table's index columns for a deterministic row order.
        indexes = [name \
                   for name in table.sql_column_names \
                   if table.sql_columns[name].index_key()]
        idx_order_by = ''
        if len(indexes):
            idx_order_by = "ORDER BY " + ','.join(indexes)
        select_fmt = 'SELECT * FROM {schema}"{table}" \
        WHERE {id_name}={id_val} {idx_order_by};'
        select_req = select_fmt.format(schema=psql_schema,
                                       table=table_name,
                                       id_name=id_name,
                                       id_val=id_val,
                                       idx_order_by=idx_order_by)
        getLogger(__name__).debug("Get psql data: "+select_req)
        src_dbreq.cursor.execute(select_req)
        ext_tables_data[table_name] = []
        idx = 0
        for record in src_dbreq.cursor:
            # Decode byte strings to unicode (Python 2) before storing.
            record_decoded = []
            if type(record) is tuple:
                for titem in record:
                    if type(titem) is str:
                        record_decoded.append(titem.decode('utf-8'))
                    else:
                        record_decoded.append(titem)
                record = tuple(record_decoded)
            getLogger(__name__).debug("result[%d]=%s", idx, record)
            ext_tables_data[table_name].append(record)
            idx += 1
    # set external tables data to Tables
    tables.load_external_tables_data(ext_tables_data)
    return tables
| 24,833
|
def calcCovariance(modes):
    """Return covariance matrix calculated for given *modes*."""
    # Guard-clause dispatch; the isinstance order is kept as before.
    if isinstance(modes, Mode):
        vec = modes._getArray()
        return np.outer(vec, vec) * modes.getVariance()
    if isinstance(modes, ModeSet):
        arr = modes._getArray()
        return np.dot(arr, np.dot(np.diag(modes.getVariances()), arr.T))
    if isinstance(modes, NMA):
        return modes.getCovariance()
    raise TypeError('modes must be a Mode, NMA, or ModeSet instance')
| 24,834
|
def sparse_tensor_value_to_texts(value):
    """
    Given a :class:`tf.SparseTensor` ``value``, return an array of Python strings
    representing its values.
    This function has been modified from Mozilla DeepSpeech:
    https://github.com/mozilla/DeepSpeech/blob/master/util/text.py
    # This Source Code Form is subject to the terms of the Mozilla Public
    # License, v. 2.0. If a copy of the MPL was not distributed with this
    # file, You can obtain one at http://mozilla.org/MPL/2.0/.
    """
    # Unpack the sparse tensor into the tuple form the helper expects.
    sparse_tuple = (value.indices, value.values, value.dense_shape)
    return sparse_tuple_to_texts(sparse_tuple)
| 24,835
|
def test_query_avax_balances(rotkehlchen_api_server):
    """Test query the AVAX balances when multiple accounts are set up works as
    expected.
    """
    # Exercise both the sync and async API paths at random.
    async_query = random.choice([False, True])
    setup = setup_balances(
        rotki=rotkehlchen_api_server.rest_api.rotkehlchen,
        ethereum_accounts=None,
        btc_accounts=None,
        eth_balances=None,
        token_balances=None,
        btc_balances=None,
    )
    with ExitStack() as stack:
        # Patch out real blockchain queries for the duration of the request.
        setup.enter_blockchain_patches(stack)
        response = requests.get(
            api_url_for(
                rotkehlchen_api_server,
                'named_blockchain_balances_resource',
                blockchain=SupportedBlockchain.AVALANCHE.value,
            ),
            json={'async_query': async_query},
        )
        if async_query:
            task_id = assert_ok_async_response(response)
            result = wait_for_async_task_with_result(rotkehlchen_api_server, task_id)
        else:
            result = assert_proper_response_with_result(response)
    # Check per account
    account_1_balances = result['per_account']['AVAX'][AVALANCHE_ACC1_AVAX_ADDR]
    assert 'liabilities' in account_1_balances
    asset_avax = account_1_balances['assets']['AVAX']
    assert FVal(asset_avax['amount']) >= ZERO
    assert FVal(asset_avax['usd_value']) >= ZERO
    account_2_balances = result['per_account']['AVAX'][AVALANCHE_ACC2_AVAX_ADDR]
    assert 'liabilities' in account_2_balances
    asset_avax = account_2_balances['assets']['AVAX']
    assert FVal(asset_avax['amount']) >= ZERO
    assert FVal(asset_avax['usd_value']) >= ZERO
    # Check totals
    assert 'liabilities' in result['totals']
    total_avax = result['totals']['assets']['AVAX']
    assert FVal(total_avax['amount']) >= ZERO
    assert FVal(total_avax['usd_value']) >= ZERO
| 24,836
|
def coding_problem_45(rand5):
    """
    Given a callable ``rand5()`` returning uniform integers in ``[0, 5)`` (as in the doctest below), return a
    single near-uniform sample in ``[0, 7)``. (The classic problem statement uses the 1-based ranges 1..5 and
    1..7; this implementation and its doctest are 0-based.)
    The sample is built by folding 24 draws of rand5() into a base-5 number modulo 7, using the property
    (a + b) % n == ((a % n) + b) % n so that no big-integer arithmetic is required.
    NOTE(review): the result is only *approximately* uniform: 5**24 % 7 == 1 (5**24 has no factor of 7, so it
    can never be a multiple of 7), which leaves one residue very slightly over-represented — a bias of about
    1 part in 5**24, negligible in practice but not zero as the original comment claimed.
    >>> from random import randint
    >>> rand5 = lambda: randint(0, 4)
    >>> rand7 = coding_problem_45(rand5)
    >>> 0 <= rand7 < 7
    True
    """
    rand7 = 0
    for _ in range(24):
        rand7 = (rand7 * 5 + rand5()) % 7
    return rand7
| 24,837
|
def handle_rss_api(output, kwargs):
    """ Special handler for API-call 'set_config' [rss] """
    # The feed may be addressed either by 'keyword' or by 'name'.
    name = kwargs.get('keyword') or kwargs.get('name')
    if not name:
        return None
    # Update an existing feed configuration, or create a fresh one.
    feed = config.get_config('rss', name)
    if feed:
        feed.set_dict(kwargs)
    else:
        config.ConfigRSS(name, kwargs)
    action = kwargs.get('filter_action')
    if action in ('add', 'update', 'delete'):
        rss_page = sabnzbd.interface.ConfigRss('/')
        if action == 'delete':
            handler = rss_page.internal_del_rss_filter
        else:
            handler = rss_page.internal_upd_rss_filter
        kwargs['feed'] = name
        # Use the general function, but catch the redirect-raise
        try:
            handler(**kwargs)
        except cherrypy.HTTPRedirect:
            pass
    return name
| 24,838
|
def progress(job_id, user: User = Depends(auth_user), db: Session = Depends(get_db)):
    """
    Get a user's progress on a specific job.
    """
    job = _job(db, job_id)
    # Raises if the user is not associated with this job.
    check_job_user(db, user, job)
    return rules.get_progress_report(db, job, user)
| 24,839
|
def addToMap(eeobject, vis_params=None, *unused_args):
  """Adds a layer to the default map instance.

  Args:
    eeobject: the object to add to the map.
    vis_params: a dictionary of visualization parameters. See
        ee.data.getMapId().
    *unused_args: unused arguments, left for compatibility with the JS API.

  This call exists to be an equivalent to the playground addToMap() call.
  It uses a global MapInstance to hang on to "the map". If the MapInstance
  isn't initialized, this creates a new one.
  """
  # collections.Iterable was removed in Python 3.10; collections.abc.Iterable
  # exists since Python 3.3, so this import is backward-compatible.
  from collections.abc import Iterable

  # Flatten any lists to comma separated strings.
  if vis_params:
    vis_params = dict(vis_params)
    for key in vis_params:
      item = vis_params.get(key)
      # Strings are iterable too, but must be passed through unchanged.
      if isinstance(item, Iterable) and not isinstance(item, six.string_types):
        vis_params[key] = ','.join([str(x) for x in item])
  overlay = MakeOverlay(eeobject.getMapId(vis_params))
  global map_instance
  if not map_instance:
    map_instance = MapClient()
  map_instance.addOverlay(overlay)
| 24,840
|
def getRoom(borough):
    """Return a JSON dataset for property type of airbnb listing.

    Groups the listings of *borough* by room type and returns, per room type,
    the listing-count share ('percent') and average price/rating metrics.

    :param borough: borough name used to filter the listings
    :return: JSON string with one record per room type
    """
    prpt = db.session.query(data.Borough,
        data.Room_Type, data.Price, data.Review_Rating, data.review_scores_cleanliness,
        data.review_scores_value, data.host_response_rate).statement
    df = pd.read_sql_query(prpt, db.session.bind)
    df = df[df['Borough'] == borough]
    # BUG FIX: use literal (non-regex) replacement. Under regex semantics the
    # "." pattern matches *every* character, emptying the column and breaking
    # astype(float). regex=False also pins the behavior across pandas versions
    # (the default changed from regex=True to regex=False in pandas 2.0).
    df["host_response_rate"] = df["host_response_rate"].str.replace("%", "", regex=False).astype(float)
    df["review_scores_cleanliness"] = df["review_scores_cleanliness"].str.replace(".", "", regex=False).astype(float)
    df["review_scores_value"] = df["review_scores_value"].str.replace(".", "", regex=False).astype(float)
    df1 = df.groupby('Room_Type').count().reset_index()
    df2 = df.groupby('Room_Type').mean().reset_index().round(2)
    df = pd.merge(df1, df2, on='Room_Type')
    df = df[['Room_Type', 'Borough', 'Price_y', 'Review_Rating_y', 'review_scores_cleanliness_y', 'review_scores_value_y', 'host_response_rate_y']].rename(
        columns={'Price_y': 'Avg_price', 'Review_Rating_y':'RRate', 'review_scores_cleanliness_y':'RClean', 'review_scores_value_y':'RValue', 'host_response_rate_y':'HostResponseR' })
    # After groupby().count(), the 'Borough' column holds per-room-type row
    # counts; turn them into percentage shares.
    df['percent'] = round((df.Borough/df.Borough.sum())*100, 2)
    d = df.to_dict('records')
    return json.dumps(d)
| 24,841
|
def get_info(name_file, what='V', parent_folder='txt_files'):
    """Get data from txt file and convert to data list.
    :param name_file : name of the file, without txt extension
    :param what : V = vertices, E = edges, R = pose
    :param parent_folder : folder containing the txt file"""
    file_path = get_file_path(name_file, parent_folder)
    # Vertices and poses are floats; edge lists are integer indices.
    value_type = 'float' if what in ('V', 'R') else 'int'
    data_dict = read_data_txt(file_path, value_type)
    return as_list(data_dict)
| 24,842
|
def receive_message(
        sock, operation, request_id, max_message_size=MAX_MESSAGE_SIZE):
    """Receive a raw BSON message or raise socket.error."""
    # Wire-protocol header: 4 bytes length, 4 bytes request id,
    # 4 bytes response-to id, 4 bytes opcode.
    header = _receive_data_on_socket(sock, 16)
    length = _UNPACK_INT(header[:4])[0]
    actual_op = _UNPACK_INT(header[12:])[0]
    if operation != actual_op:
        raise ProtocolError(
            "Got opcode %r but expected %r" % (actual_op, operation))
    # No request_id for exhaust cursor "getMore".
    if request_id is not None:
        response_id = _UNPACK_INT(header[8:12])[0]
        if request_id != response_id:
            raise ProtocolError(
                "Got response id %r but expected %r" % (response_id, request_id))
    if length <= 16:
        raise ProtocolError(
            "Message length (%r) not longer than standard message header size "
            "(16)" % (length,))
    if length > max_message_size:
        raise ProtocolError(
            "Message length (%r) is larger than server max message size "
            "(%r)" % (length, max_message_size))
    # The declared length includes the 16-byte header already consumed.
    return _receive_data_on_socket(sock, length - 16)
| 24,843
|
def cycle_interval(starting_value, num_frames, min_val, max_val):
    """Cycles through the state space in a single cycle."""
    span = max_val - min_val
    # Normalize the starting point into [0, 1].
    normalized_start = (starting_value - min_val) / span
    # A ramp of total length 2 in normalized units covers one full cycle.
    positions = np.linspace(normalized_start, normalized_start + 2.,
                            num=num_frames, endpoint=False)
    # Fold the ramp into a triangle wave confined to [0, 1].
    positions -= np.maximum(0, 2 * positions - 2)
    positions += np.maximum(0, -2 * positions)
    return positions * span + min_val
| 24,844
|
def commit_ref_info(repos, skip_invalid=False):
    """
    Return a dict describing the commit that should be tagged in each repo.
    If the information in the passed-in dictionary is invalid in any way,
    this function will throw an error unless `skip_invalid` is set to True,
    in which case the invalid information will simply be logged and ignored.
    Arguments:
        repos (dict): A dict mapping Repository objects to openedx.yaml data.
        skip_invalid (bool): if true, log invalid data in `repos`, but keep going.
    Returns:
        A dict mapping Repositories to a dict about the ref to tag, like this::
            {
                Repository(<full_repo_name>): {
                    "ref": "name of tag or branch"
                    "ref_type": "tag", # or "branch"
                    "sha": "1234566789abcdef",
                    "message": "The commit message"
                    "author": {
                        "name": "author's name",
                        "email": "author's email"
                    }
                    "committer": {
                        "name": "committer's name",
                        "email": "committer's email",
                    }
                },
                Repository(<next_repo_name>): {...},
                ...
            }
    """
    ref_info = {}
    for repo, repo_data in nice_tqdm(repos.items(), desc='Find commits'):
        # Repos without an explicit ref are simply skipped.
        ref = repo_data["openedx-release"].get("ref")
        if not ref:
            continue
        try:
            ref_info[repo] = get_latest_commit_for_ref(repo, ref)
        except (GitHubError, ValueError):
            if not skip_invalid:
                raise
            msg = "Invalid ref {ref} in repo {repo}".format(
                ref=ref,
                repo=repo.full_name
            )
            log.error(msg)
    return ref_info
| 24,845
|
def function(x, axis=0, fast=False):
    """
    Estimate the autocorrelation function of a time series using the FFT.
    :param x:
        The time series. If multidimensional, set the time axis using the
        ``axis`` keyword argument and the function will be computed for every
        other axis.
    :param axis: (optional)
        The time axis of ``x``. Assumed to be the first axis if not specified.
    :param fast: (optional)
        If ``True``, only use the largest ``2^n`` entries for efficiency.
        (default: False)
    :returns:
        The autocorrelation function, normalized so the zero-lag value along
        ``axis`` equals 1.
    """
    x = np.atleast_1d(x)
    m = [slice(None), ] * len(x.shape)
    # For computational efficiency, crop the chain to the largest power of
    # two if requested.
    if fast:
        n = int(2**np.floor(np.log2(x.shape[axis])))
        m[axis] = slice(0, n)
        # BUG FIX: the series was never actually cropped before ("x = x"),
        # so fast=True silently computed the FFT on the uncropped series
        # with a mismatched n.
        x = x[tuple(m)]
    else:
        n = x.shape[axis]
    # Compute the FFT and then (from that) the auto-correlation function.
    # Zero-padding to 2*n avoids circular-correlation wrap-around artifacts.
    f = np.fft.fft(x - np.mean(x, axis=axis), n=2*n, axis=axis)
    m[axis] = slice(0, n)
    # Index with tuple(m): indexing with a plain list of slices is an error
    # in modern numpy.
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    return acf / acf[tuple(m)]
| 24,846
|
def find_binaries(*args, **kwargs):
    """Return details of all binaries in an image, given images data.
    The image is identified by image_id or image_tag; one of the two must be
    specified.
    :params: See `find_image`
    :exception: exceptions.ImageNotFound
    :exception: exceptions.ParameterError
    :exception: exceptions.NoPackages
    :return: A list of dicts:
        As per the Twistlock API, each dict takes the form:
        {
          name: 'binary name',
          path: 'full path to the binary including the name'
          md5: 'md5 hash for the binary'
          cveCount: 'Number of CVEs reported for the binary'
        }
    """
    # Delegate image lookup (and its validation errors) to find_image.
    image = find_image(*args, **kwargs)
    binaries = image['data']['binaries']
    return binaries
| 24,847
|
def _readFastaFile(filepath):
"""Read a FASTA file and yields tuples of 'header' and 'sequence' entries.
:param filepath: file path of the FASTA file
:yields: FASTA entries in the format ('header', 'sequence').
The 'header' string does not contain the '>' and trailing white spaces.
The 'sequence' string does not contain trailing white spaces, a '*' at
the end of the sequence is removed.
See also :func:`importProteinDatabase` and
:func:`maspy.peptidemethods.digestInSilico`.
"""
processSequences = lambda i: ''.join([s.rstrip() for s in i]).rstrip('*')
processHeaderLine = lambda line: line[1:].rstrip()
with io.open(filepath) as openfile:
#Iterate through lines until the first header is encountered
try:
line = next(openfile)
while line[0] != '>':
line = next(openfile)
header = processHeaderLine(line)
sequences = list()
except StopIteration:
errorText = 'File does not contain fasta entries.'
raise maspy.errors.FileFormatError(errorText)
for line in openfile:
if line[0] == '>':
yield header, processSequences(sequences)
header = processHeaderLine(line)
sequences = list()
else:
sequences.append(line)
#Yield last entry
if sequences:
yield header, processSequences(sequences)
| 24,848
|
def solve_version(d):
    """ solve version difference,
        argument map d is deepcopied.
    """
    # Work on a copy so the caller's mapping is never mutated.
    result = copy.deepcopy(d)
    version = result.get('version', 0)
    # Apply every migration step from the map's version onward.
    for updater in _update_chain[version:]:
        result = updater(result)
    return result
| 24,849
|
def start_elasticsearch_service(port=None, asynchronous=False):
    """
    Start the ElasticSearch management API (not the actual elasticsearch
    process).
    """
    # Imported lazily to avoid a hard dependency at module import time.
    from localstack.services.es import es_api
    effective_port = port or config.PORT_ES
    return start_local_api(
        "ES", effective_port, api="es", method=es_api.serve, asynchronous=asynchronous)
| 24,850
|
def regexp_ilike(expr, pattern):
    """
    ---------------------------------------------------------------------------
    Returns true if the string contains a match for the regular expression.
    Parameters
    ----------
    expr: object
        Expression.
    pattern: object
        A string containing the regular expression to match against the string.
    Returns
    -------
    str_sql
        SQL expression.
    """
    formatted_expr = format_magic(expr)
    formatted_pattern = format_magic(pattern)
    sql = "REGEXP_ILIKE({}, {})".format(formatted_expr, formatted_pattern)
    return str_sql(sql)
| 24,851
|
def extract_frames(width, height, video_filename, video_path, frames_dir, overwrite=False, start=-1, end=-1, every=1):
    """
    Extract frames from a video using decord's VideoReader.
    :param width: output image width in pixels
    :param height: output image height in pixels
    :param video_filename: filename prefix used for the saved frame images
    :param video_path: path of the video
    :param frames_dir: the directory to save the frames
    :param overwrite: to overwrite frames that already exist?
    :param start: start frame (defaults to 0 when negative)
    :param end: end frame (defaults to the last frame when negative)
    :param every: frame spacing
    :return: count of images saved
    """
    video_path = os.path.normpath(video_path)  # make the paths OS (Windows) compatible
    frames_dir = os.path.normpath(frames_dir)  # make the paths OS (Windows) compatible
    video_dir, _ = os.path.split(video_path)  # get the video path and filename from the path
    assert os.path.exists(video_path)  # assert the video file exists
    # load the VideoReader
    vr = VideoReader(video_path, ctx=cpu(0))  # can set to cpu or gpu .. ctx=gpu(0)
    if start < 0:  # if start isn't specified lets assume 0
        start = 0
    if end < 0:  # if end isn't specified assume the end of the video
        end = len(vr)
    frames_list = list(range(start, end, every))
    saved_count = 0

    def _save_frame(frame_array, index):
        # Resize, convert RGB->BGR for OpenCV and write one frame; returns
        # 1 if an image was written, 0 if it was skipped.
        save_path = os.path.join(frames_dir, "{}_{}.jpg".format(video_filename, index))
        if not os.path.exists(save_path) or overwrite:
            cv2.imwrite(
                save_path,
                cv2.resize(
                    cv2.cvtColor(frame_array, cv2.COLOR_RGB2BGR),
                    (width, height),
                    interpolation=cv2.INTER_CUBIC))
            return 1
        return 0

    if every > 25 and len(frames_list) < 1000:  # this is faster for every > 25 frames and can fit in memory
        # BUG FIX: get_batch(...).asnumpy() already yields numpy arrays, so the
        # per-frame .asnumpy() the original code applied below raised
        # AttributeError; the frames are passed through as-is here.
        frames = vr.get_batch(frames_list).asnumpy()
        for index, frame in zip(frames_list, frames):
            saved_count += _save_frame(frame, index)
    else:  # this is faster for every <= 25 and consumes little memory
        for index in range(start, end):
            if index % every == 0:  # only write frames selected by 'every'
                # vr[index] returns a decord NDArray, so convert it here.
                saved_count += _save_frame(vr[index].asnumpy(), index)
    return saved_count
| 24,852
|
def make_tree(path):
    """Higher level function to be used with cache.

    Thin public wrapper around :func:`_make_tree`; presumably kept separate so
    a caching layer keyed on ``path`` can wrap it — confirm against callers.
    """
    return _make_tree(path)
| 24,853
|
def cmd(f):
    """Mark *f* as a command by tagging it with a ``__command__`` attribute.

    Returns *f* unchanged so it can be used as a decorator.
    """
    setattr(f, '__command__', True)
    return f
| 24,854
|
def clean():
    """Remove all .py[co] files"""
    # Fabric-style shell call: deletes compiled Python bytecode (*.pyc/*.pyo)
    # anywhere under the current directory.
    local("find . -name '*.py[co]' -delete")
| 24,855
|
def small_prior():
    """Give string format of small uniform distribution prior"""
    # The returned value is a textual prior specification to be parsed by the
    # consumer, not an evaluated distribution object.
    return "uniform(0, 10)"
| 24,856
|
def retrieve_panelist_ranks(panelist_id: int,
                            database_connection: mysql.connector.connect
                            ) -> List[Dict]:
    """Retrieve a list of show dates and the panelist rank for the
    requested panelist ID.

    Returns None when the panelist has no scored, non-repeat shows.
    """
    query = ("SELECT s.showid, s.showdate, pm.showpnlrank "
             "FROM ww_showpnlmap pm "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "WHERE pm.panelistid = %s "
             "AND s.bestof = 0 AND s.repeatshowid IS NULL "
             "AND pm.panelistscore IS NOT NULL "
             "ORDER BY s.showdate ASC;")
    cursor = database_connection.cursor()
    cursor.execute(query, (panelist_id,))
    rows = cursor.fetchall()
    cursor.close()
    if not rows:
        return None
    # One ordered record per show, oldest first (matches the SQL ORDER BY).
    return [
        OrderedDict([("show_id", row[0]),
                     ("show_date", row[1].isoformat()),
                     ("rank", row[2])])
        for row in rows
    ]
| 24,857
|
def maxPixel(rpl):
    """maxPixel(rpl)
    Computes the max pixel spectrum for the specified ripple/raw spectrum object.

    NOTE(review): Jython/DTSA-II script — `epq`, `dt2` and Python-2 `xrange`
    are provided by the DTSA-II scripting environment.
    """
    xs = epq.ExtremumSpectrum()
    for r in xrange(0, rpl.getRows()):
        dt2.StdOut.append(".")  # progress indicator: one dot per row
        if dt2.terminated:
            break  # user aborted the script from the DTSA-II UI
        for c in xrange(0, rpl.getColumns()):
            rpl.setPosition(r, c)
            xs.include(rpl)
    return xs
| 24,858
|
def test_floordiv():
    """Ensures that floor division works correctly."""
    floordiv = _MathExpression() // 2
    assert floordiv(5) == 2
| 24,859
|
def skip_device(name):
    """ Decorator to mark a test to only run on certain devices

    Takes single device name or list of names as argument. The names are
    stored on the decorated function's ``skip_device`` attribute for the
    test runner to inspect.
    """
    def decorator(function):
        # isinstance instead of an exact type() comparison so list
        # subclasses are handled as lists too.
        name_list = name if isinstance(name, list) else [name]
        function.__dict__['skip_device'] = name_list
        return function
    return decorator
| 24,860
|
def conv1d(inputs,
           filters,
           kernel_size,
           strides=1,
           padding='valid',
           data_format='channels_last',
           dilation_rate=1,
           activation=None,
           use_bias=True,
           kernel_initializer=None,
           bias_initializer=init_ops.zeros_initializer(),
           kernel_regularizer=None,
           bias_regularizer=None,
           activity_regularizer=None,
           kernel_constraint=None,
           bias_constraint=None,
           trainable=True,
           name=None,
           reuse=None):
  """Functional interface for 1D convolution layer (e.g. temporal convolution).
  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.
  Arguments:
    inputs: Tensor input.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of a single integer, specifying the
      length of the 1D convolution window.
    strides: An integer or tuple/list of a single integer,
      specifying the stride length of the convolution.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, length, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, length)`.
    dilation_rate: An integer or tuple/list of a single integer, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any `strides` value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, no bias will
      be applied.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
        kernel after being updated by an `Optimizer` (e.g. used to implement
        norm constraints or value constraints for layer weights). The function
        must take as input the unprojected variable and must return the
        projected variable (which must have the same shape). Constraints are
        not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
        bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.
  Returns:
    Output tensor.
  Raises:
    ValueError: if eager execution is enabled.
  """
  # NOTE(review): legacy TF1-style functional layer API — graph mode only;
  # eager callers are directed to the object-oriented Conv1D layer instead.
  if context.in_eager_mode():
    raise ValueError(
        'Functional layers are currently not compatible with eager execution.'
        'Use tf.layers.Conv1D instead.')
  # Delegate all the work to the Conv1D layer object; variable sharing is
  # controlled through the private _reuse/_scope arguments.
  layer = Conv1D(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _reuse=reuse,
      _scope=name)
  return layer.apply(inputs)
| 24,861
|
def _transform(ctx):
    """Implementation for the transform rule."""
    if ctx.attr.command and not ctx.attr.transform:
        fail(("Target '%s' specifies 'command = ...', but this attribute is ignored when no pattern" +
              " is supplied with the 'transform' attribute") % (ctx.label.name))

    # Build the shell 'case' patterns consumed by the transformer template.
    lines = []
    for transform in ctx.attr.transform:
        lines.append("%s) transform;;" % to_glob(transform))
    for include in ctx.attr.include:
        lines.append("%s) include;;" % to_glob(include))
    transformer = ctx.actions.declare_file("%s-transformer.sh" % (ctx.label.name))
    ctx.actions.expand_template(template = ctx.file._transformer, output = transformer, substitutions = {
        "{root}": ctx.bin_dir.path,
        "{command}": ctx.attr.command,
        "{patterns}": "\n".join(lines),
        "{debug}": ["", "enabled"][int(ctx.attr.debug)],
    }, is_executable = True)

    outputs = []
    opaths = []
    for iattr in ctx.attr.inputs:
        for ifile in iattr.files.to_list():
            opath = ifile.short_path
            info = ("FROM", iattr.label, "PATH", opath, "DIR", ifile.is_directory, "ORIGIN", ifile.short_path)
            if not ifile.is_directory:
                debug(ctx.attr.debug, "FILE", *info)
                opaths.append((ifile, ifile.path, opath))
                continue
            if not ifile.short_path in ctx.attr.expand:
                # Whole tree artifact: let the transformer filter it as a unit.
                debug(ctx.attr.debug, "TREE-FILTER", *info)
                add = ctx.actions.declare_directory(opath)
                outputs.append(add)
                ctx.actions.run(inputs = [ifile], outputs = [add], executable = transformer, arguments = [
                    ifile.path,
                    add.path,
                ], tools = ctx.files.tools)
                continue
            debug(ctx.attr.debug, "TREE-EXPAND", *info)
            # BUG FIX: the original code re-initialized 'outputs = []' here,
            # silently discarding every output accumulated so far.
            for output in ctx.attr.expand[ifile.short_path]:
                if output.endswith("/"):
                    add = ctx.actions.declare_directory(output[:-1])
                    outputs.append(add)
                    # BUG FIX: 'outputs' for ctx.actions.run must be a list of
                    # Files, not a bare File.
                    ctx.actions.run(inputs = [ifile], outputs = [add], executable = transformer, arguments = [
                        ifile.path,
                        add.path,  # ctx.bin_dir.path + "/" + ctx.label.package + "/" + opath
                    ], tools = ctx.files.tools)
                    continue
                opaths.append((ifile, ifile.path + "/" + output, ifile.short_path + "/" + output))

    for ifile, ipath, opath in opaths:
        debug(ctx.attr.debug, "GENERATING FILE", opath, "- FROM TREE?", ifile.is_directory, "- SOURCE PATH", ifile.short_path)
        if matchany(opath, ctx.attr.transform, default = False):
            ofile = ctx.actions.declare_file(opath)
            outputs.append(ofile)
            _run(ctx, ifile, ipath, ofile)
            continue
        if matchany(opath, ctx.attr.include):
            ofile = ctx.actions.declare_file(opath)
            outputs.append(ofile)
            if not ifile.is_directory:
                ctx.actions.symlink(output = ofile, target_file = ifile, progress_message = _message(ctx, ofile))
            else:
                ctx.actions.run(outputs = [ofile], inputs = [ifile], executable = "cp", arguments = ["-f", ipath, ofile.path])
            continue

    for o in outputs:
        debug(ctx.attr.debug, "EXPECTING OUTPUT", o.short_path, "- TREE?", o.is_directory)
    return [DefaultInfo(files = depset(outputs))]
| 24,862
|
def check_constraints(
    df: pd.DataFrame, schema: dict
) -> List[Union[ConstraintError, ConstraintTypeError]]:
    """
    Check table field constraints.
    Arguments:
        df: Table.
        schema: Table schema (https://specs.frictionlessdata.io/table-schema).
    Returns:
        A list of errors (empty when every field satisfies its constraints).
    """
    all_errors = []
    for field in schema.get("fields", []):
        constraints = field.get("constraints", {})
        # Each field's column is validated independently; failures accumulate.
        field_errors = check_field_constraints(
            df[field["name"]], **constraints, field=field
        )
        if field_errors:
            all_errors.extend(field_errors)
    return all_errors
| 24,863
|
def consolidate_fully(
    inputs: Iterable[Tuple[core.Key, xarray.Dataset]],
    *,
    merge_kwargs: Optional[Mapping[str, Any]] = None,
    combine_kwargs: Optional[Mapping[str, Any]] = None,
) -> Tuple[core.Key, xarray.Dataset]:
  """Consolidate chunks via merge/concat into a single (Key, Dataset) pair.

  Args:
    inputs: iterable of (key, dataset-chunk) pairs to combine.
    merge_kwargs: optional overrides for the final ``xarray.merge`` call
      (defaults enforce exact joins with equal variables).
    combine_kwargs: forwarded to ``consolidate_chunks`` for the concatenation
      step.

  Returns:
    A single (key, dataset) pair covering all input chunks.

  Raises:
    ValueError: if consolidated chunks disagree on offsets, or if the final
      merge fails.
  """
  concatenated_chunks = []
  combined_offsets = {}
  combined_vars = set()
  for key, chunk in consolidate_chunks(inputs, combine_kwargs):
    # We expect all chunks to be fully combined in all dimensions and all chunks
    # to have the same offset (in each dimension). The chunks from
    # consolidate_chunks() should already have this property but we explicitly
    # check it here again in case consolidate_chunks changes.
    for dim, offset in key.offsets.items():
      if dim in combined_offsets and combined_offsets[dim] != offset:
        raise ValueError('consolidating chunks fully failed because '
                         f'chunk\n{chunk}\n has offsets {key.offsets} '
                         f'that differ from {combined_offsets}')
      combined_offsets[dim] = offset
    concatenated_chunks.append(chunk)
    combined_vars.update(chunk.keys())
  # Merge variables, but unlike consolidate_variables, we merge all chunks and
  # not just chunks per unique key.
  kwargs = dict(
      compat='equals',
      join='exact',
      combine_attrs='override',
  )
  if merge_kwargs is not None:
    kwargs.update(merge_kwargs)
  try:
    dataset = xarray.merge(concatenated_chunks, **kwargs)
  except (ValueError, xarray.MergeError) as original_error:
    # Keep the error readable: show at most two offending chunks.
    repr_string = '\n'.join(repr(ds) for ds in concatenated_chunks[:2])
    if len(concatenated_chunks) > 2:
      repr_string += '\n...'
    repr_string = textwrap.indent(repr_string, prefix='  ')
    raise ValueError(
        f'merging dataset chunks with variables {combined_vars} failed.\n'
        + repr_string
    ) from original_error
  return core.Key(combined_offsets, combined_vars), dataset
| 24,864
|
def first(items: Iterator[T]) -> Optional[T]:
    """Return the first item of the iterator, or ``None`` if it is empty."""
    for item in items:
        return item
    return None
| 24,865
|
def plot_precision_recall_curve(
        precisions: Sequence[float], recalls: Sequence[float],
        title: str = 'Precision/Recall curve'
) -> matplotlib.figure.Figure:
    """
    Plots the precision recall curve given lists of (ordered) precision
    and recall values.
    Args:
        precisions: list of float, precision for corresponding recall values,
            should have same length as *recalls*.
        recalls: list of float, recall for corresponding precision values,
            should have same length as *precisions*.
        title: str, plot title
    Returns: matplotlib.figure.Figure, reference to the figure
    """
    assert len(precisions) == len(recalls)
    fig, ax = plt.subplots(1, 1, tight_layout=True)
    ax.step(recalls, precisions, color='b', alpha=0.2, where='post')
    ax.fill_between(recalls, precisions, alpha=0.2, color='b', step='post')
    # BUG FIX: Axes.set() accepts matplotlib property names ('xlabel',
    # 'ylabel', 'xlim', 'ylim'); the previous 'x_label'/'x_lim' keywords
    # raise AttributeError.
    ax.set(xlabel='Recall', ylabel='Precision', title=title)
    ax.set(xlim=(0.0, 1.05), ylim=(0.0, 1.05))
    return fig
| 24,866
|
def Serialize(obj):
  """Serialize an object and return the result as a JSON string.
  Depending on the serialization method, some complex objects or input
  formats may not be serializable.
  UTF-8 strings (by themselves or in other structures e.g. lists) are always
  supported.
  Args:
    obj: any object
  Returns:
    str, possibly containing ascii values >127
  Raises:
    SerializeError: if an error occured during serialization
  """
  try:
    serialized = json.dumps(obj)
  except TypeError as err:
    raise SerializeError(err)
  return serialized
| 24,867
|
def getDragObject(parent: QWidget, item: Union['SourceListWidgetItem', 'DestTreeWidgetItem']) -> QDrag:
    """Build a QDrag whose mime data carries *item*'s value.
    Parameters
    ----------
    parent: QWidget
    item: Union['SourceListWidgetItem', 'DestTreeWidgetItem']
    Returns
    -------
    QDrag
        QDrag object holding item value as QMimeData
    """
    # Serialize the item's value into a byte buffer for the mime payload.
    payload = QByteArray()
    getData(payload, item.value)
    mime = QMimeData()
    mime.setData(LISTBOX_W_VALUE_MIMETYPE, payload)
    drag = QDrag(parent)
    drag.setHotSpot(QPoint(0, 0))
    drag.setMimeData(mime)
    return drag
| 24,868
|
def test_parse_xml_file(template_id, tmp_path, expected_mprage):
    """Test function `_parse_xml_file`."""
    from clinica.iotools.converters.adni_to_bids.adni_json import _parse_xml_file
    xml_file = _write_xml_example(tmp_path, template_id=template_id)
    scan_metadata = _parse_xml_file(xml_file)
    # The subject id is the template id without its "ADNI_" prefix.
    assert scan_metadata["id"] == template_id[5:]
    assert scan_metadata["acq_time"] == pd.Timestamp(2017, 1, 1, 12)
    assert scan_metadata["image_orig_id"] == 300
    assert scan_metadata["image_orig_seq"] == expected_mprage
    assert scan_metadata["MRAcquisitionType"] == "3D"
    # Scanner-dependent metadata differs for the GE template subject.
    if template_id == "ADNI_345_S_6789":
        expected_pulse, expected_manufacturer, expected_strength = (
            "RM", "GE MEDICAL SYSTEMS", "1.5")
    else:
        expected_pulse, expected_manufacturer, expected_strength = (
            "GR/IR", "SIEMENS", "3.0")
    assert scan_metadata["PulseSequenceType"] == expected_pulse
    assert scan_metadata["Manufacturer"] == expected_manufacturer
    assert scan_metadata["MagneticFieldStrength"] == expected_strength
| 24,869
|
def test_base_params():
    """Test default params object matches base params"""
    param_url = ('https://raw.githubusercontent.com/jonescompneurolab/'
                 'hnn-core/test_data/base.json')
    params_base_fname = op.join(hnn_core_root, 'param', 'base.json')
    # Download the reference file once; later runs reuse the cached copy.
    if not op.exists(params_base_fname):
        urlretrieve(param_url, params_base_fname)
    params_base = read_params(params_base_fname)
    assert Params() == params_base
    # Constructing from a params dict must round-trip, including added keys.
    params_base['spec_cmap'] = 'viridis'
    assert Params(params_base) == params_base
| 24,870
|
def test_higher_order():
    """`cwt` & `ssq_cwt` CPU & GPU outputs agreement."""
    # Silently skip when no GPU is available (no pytest.skip in this suite).
    if not CAN_GPU:
        return
    tsigs = TestSignals(N=256)
    x = tsigs.par_lchirp()[0]
    # Symmetrize the chirp so both sweep directions are exercised.
    x += x[::-1]
    kw = dict(order=range(3), astensor=False)
    for dtype in ('float32', 'float64'):
        # SSQ_GPU='0' selects the CPU path, '1' the GPU path; same transform
        # parameters otherwise.
        os.environ['SSQ_GPU'] = '0'
        Tx0, Wx0, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
        os.environ['SSQ_GPU'] = '1'
        Tx1, Wx1, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
        # Mean absolute difference between the CPU and GPU outputs.
        adiff_Tx = np.abs(Tx0 - Tx1).mean()
        adiff_Wx = np.abs(Wx0 - Wx1).mean()
        # less should be possible for float64, but didn't investigate
        th = 2e-7 if dtype == 'float64' else 1e-4
        assert adiff_Tx < th, (dtype, adiff_Tx, th)
        assert adiff_Wx < th, (dtype, adiff_Wx, th)
    # Restore the CPU default so later tests are unaffected.
    os.environ['SSQ_GPU'] = '0'
| 24,871
|
def get_soup(url):
    """
    Fetch the given url and return its body parsed as a BeautifulSoup
    instance, or None when the response body is empty.
    """
    response = requests.get(url)
    if not response.content:
        return None
    return BeautifulSoup(response.content, "lxml")
| 24,872
|
def spectrogam_pearson_correlation(template_name, template_list, data_list, args):
    """
    :param template_name: name of the templates
    :param template_list: list of the templates
    :param data_list: list of the data
    :param args: namespace providing at least ``nperseg``, ``overlap``,
        ``template_dir``, ``data_dir`` and ``output_dir``
    :return: pearson correlation evaluation of each generated syllables wrt the template elements
    """
    # Load the data
    n_template = np.size(template_list)
    n_data = np.size(data_list)
    # Precompute one spectrogram per template wav file.
    template_spectrogram = []
    for t in range(0,n_template):
        sr, samples = wav.read(template_list[t])
        freq, times, spectrogram = sp.signal.spectrogram(samples, sr, window='hann', nperseg=args.nperseg, noverlap=args.nperseg - args.overlap)
        template_spectrogram.append(spectrogram)
    # Correlation between the templates (test) with plot
    template_correlation = np.zeros((n_template, n_template))
    for t_1 in range(0, n_template):
        for t_2 in range(0, n_template):
            template_correlation[t_1, t_2] = stat.pearson_corr_coeff(template_spectrogram[t_1].flatten(), template_spectrogram[t_2].flatten())
    # NOTE(review): setting axis limits/ticks to (-1, 1) on a matshow of an
    # n_template x n_template matrix looks suspicious — the image is indexed
    # in matrix coordinates, not correlation values. Verify the intent.
    min_val = -1
    max_val = 1
    fig, ax = plt.subplots()
    plt.matshow(template_correlation, cmap=plt.cm.Blues)
    ax.set_xlim(min_val, max_val)
    ax.set_ylim(min_val, max_val)
    ax.set_xticks(np.arange(max_val))
    ax.set_yticks(np.arange(max_val))
    plt.xticks(range(n_template), template_name, rotation=90, fontsize=5)
    plt.yticks(range(n_template), template_name, fontsize=5)
    plt.colorbar()
    plt.tight_layout() # to avoid the cut of labels
    plt.title('Pearson correlation template')
    plt.savefig(args.template_dir + '/' + 'template_pearson_corr.png')
    # Correlate every data directory's wav files against all templates.
    for d in range(0, n_data):
        wav_list = glob.glob(data_list[d] + '/' + '*.wav')
        data_spectrogram = []
        for s in range(0,np.size(wav_list)):
            sr, samples = wav.read(wav_list[s])
            freq, times, spectrogram = sp.signal.spectrogram(samples, sr, window='hann', nperseg=args.nperseg, noverlap=args.nperseg - args.overlap)
            data_spectrogram.append(spectrogram)
        # Correlation between the template and the generations
        # Both spectrograms are truncated to the shorter time axis so the
        # flattened vectors have matching lengths.
        data_template_correlation = np.zeros((n_template, np.size(wav_list)))
        for t in range(0, n_template):
            for s in range(0, np.size(wav_list)):
                data_template_correlation[t, s] = stat.pearson_corr_coeff(template_spectrogram[t][:,0:np.min([np.shape(template_spectrogram[t][1]), np.shape(data_spectrogram[s][1])])].flatten(), data_spectrogram[s][:,0:np.min([np.shape(template_spectrogram[t][1]), np.shape(data_spectrogram[s][1])])].flatten())
        np.save(args.data_dir + '/' + args.output_dir + '/' + 'dataVStemplate_pearson_corr' + str(d) + '.npy', data_template_correlation)
    print('Done')
| 24,873
|
def _grow_segment(segment, addition):
    """Combine two segments into one, if possible.

    The addition is appended or prepended (reversing it when needed) so that
    the shared endpoint is not duplicated; raises ValueError when the two
    segments share no endpoint.
    """
    if _eq(segment[-1], addition[0]):
        # tail of segment meets head of addition: append
        return segment + addition[1:]
    if _eq(segment[-1], addition[-1]):
        # tail meets tail: append the addition reversed
        return segment + list(reversed(addition[:-1]))
    if _eq(segment[0], addition[-1]):
        # head meets tail: prepend the addition
        return addition[:-1] + segment
    if _eq(segment[0], addition[0]):
        # head meets head: prepend the addition reversed
        return list(reversed(addition[1:])) + segment
    raise ValueError("addition doesn't fit segment")
| 24,874
|
def test_postgres_index_setup_tables(index_driver, database_conn):
    """
    Tests that the postgres index database gets set up correctly.
    """
    # Collect the names of every ordinary table in the public schema.
    result = database_conn.execute("""
        SELECT table_name
        FROM information_schema.tables
        WHERE table_schema='public'
        AND table_type='BASE TABLE'
    """)
    table_names = [row[0] for row in result]
    for expected_table in INDEX_TABLES:
        assert expected_table in table_names, '{table} not created'.format(table=expected_table)
    # Compare each table's column layout against the expected schema:
    # (column name, data type, nullable, default value, constraint type).
    for expected_table, schema in INDEX_TABLES.items():
        result = database_conn.execute("""
            SELECT col.column_name, col.data_type, col.is_nullable,
                col.column_default, c.constraint_type
            FROM information_schema.columns col
            left JOIN (
                SELECT column_name, constraint_type
                FROM information_schema.table_constraints
                NATURAL JOIN information_schema.constraint_table_usage
                NATURAL JOIN information_schema.constraint_column_usage
                WHERE table_name = '{table}'
            ) c
            ON col.column_name = c.column_name
            WHERE table_name = '{table}'
        """.format(table=expected_table))
        assert schema == list(result)
| 24,875
|
def generate_headers(src_files, out_root, doc_root):
    """Generate headers with a Python methoddef array and html
    documentation tables for the listed source files.

    The list should contain tuples of names and paths:
    [(desired-method-def-name, cpp-file-path), ...]
    The name is used for the generated method-def:
    static PyMethodDef <name>_methods[].
    doc_root indicates the folder where the generated
    html-documentation should be stored.
    The generated header will be named the same as the source file,
    but with .cpp stripped and -method-def.hh appended.
    The html file will be named the same as the source-file but
    with .cpp stripped and -methods.txt appended.
    """
    os.makedirs(out_root, exist_ok=True)
    did_print_heading = False
    changed = False
    for (name, files) in src_files:
        # Each entry may be a single path or a sequence of paths; the
        # first path drives all generated file names.
        if isinstance(files, str):  # was: files.__class__ == str
            src = files
            files = (src,)
        else:
            src = files[0]
        # Output header path: <src>-method-def.hh under out_root.
        dst = src.replace(".hh", "-method-def.hh")
        dst = dst.replace(".cpp", "-method-def.hh")
        dst = os.path.join(out_root, os.path.split(dst)[1])
        # Method-documentation path: <src>-methods.txt under doc_root.
        dst_doc = src.replace(".hh", '-methods.txt')
        dst_doc = dst_doc.replace(".cpp", '-methods.txt')
        dst_doc_filename = os.path.join(doc_root, os.path.split(dst_doc)[1])
        # Property-documentation path: <src>-properties.txt under doc_root.
        dst_prop_doc = src.replace(".cpp", '-properties.txt')
        dst_doc_prop_filename = os.path.join(doc_root, os.path.split(dst_prop_doc)[1])
        # Regenerate only when util.changed reports the source differs
        # from the existing target.
        if util.changed(src, dst):
            if not did_print_heading:
                print("* Generating Python method definitions.")
                did_print_heading = True
            generate(files, dst, dst_doc_filename, dst_doc_prop_filename, name)
            changed = True
    if not changed:
        print("* Python method definitions up to date.")
| 24,876
|
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the Proliphix thermostats. """
    host = config.get(CONF_HOST)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    # Imported lazily so the dependency is only required when the
    # platform is actually set up.
    import proliphix
    device = proliphix.PDP(host, username, password)
    add_devices([ProliphixThermostat(device)])
| 24,877
|
def test_subscribe_to_queue(caplog):
    """Test subscribing to a queue (producer-consumer), callback functions and unsubscribe."""
    # Two independent callbacks so the log output can be told apart.
    mock_cb1 = mock.Mock()
    mock_cb2 = mock.Mock()
    offline = OfflineTransport()
    offline.connect()
    # Subscription 1 at INFO level: only the summary line is expected.
    with caplog.at_level(logging.INFO):
        offline._subscribe(
            1,
            str(mock.sentinel.channel1),
            mock_cb1,
            transformation=mock.sentinel.transformation,
        )
    message = f"Subscribing to messages on {str(mock.sentinel.channel1)}"
    assert caplog.record_tuples == [
        (
            "workflows.transport.offline_transport",
            logging.INFO,
            f"Offline Transport: {message}",
        )
    ]
    caplog.clear()
    # Same subscription at DEBUG level: the INFO summary plus a DEBUG line
    # detailing the subscription ID, callback and extra keyword arguments.
    with caplog.at_level(logging.DEBUG):
        offline._subscribe(
            1,
            str(mock.sentinel.channel1),
            mock_cb1,
            transformation=mock.sentinel.transformation,
        )
    message = f"Subscribing to messages on {str(mock.sentinel.channel1)}"
    debug = f"subscription ID 1, callback function {str(mock_cb1)}, further keywords: {{'transformation': {str(mock.sentinel.transformation)}}}"
    assert caplog.record_tuples == [
        (
            "workflows.transport.offline_transport",
            logging.INFO,
            f"Offline Transport: {message}",
        ),
        ("workflows.transport.offline_transport", logging.DEBUG, f"{debug}"),
    ]
    caplog.clear()
    # Subscription 2 with a richer keyword set, INFO level: again only the
    # summary line should be recorded.
    with caplog.at_level(logging.INFO):
        offline._subscribe(
            2,
            str(mock.sentinel.channel2),
            mock_cb2,
            retroactive=True,
            selector=mock.sentinel.selector,
            exclusive=True,
            transformation=True,
            priority=42,
        )
    message = f"Subscribing to messages on {str(mock.sentinel.channel2)}"
    assert caplog.record_tuples == [
        (
            "workflows.transport.offline_transport",
            logging.INFO,
            f"Offline Transport: {message}",
        )
    ]
    caplog.clear()
    # Subscription 2 at DEBUG level: the DEBUG record must render all the
    # extra keywords in insertion order.
    with caplog.at_level(logging.DEBUG):
        offline._subscribe(
            2,
            str(mock.sentinel.channel2),
            mock_cb2,
            retroactive=True,
            selector=mock.sentinel.selector,
            exclusive=True,
            transformation=True,
            priority=42,
        )
    message = f"Subscribing to messages on {str(mock.sentinel.channel2)}"
    debug = f"subscription ID 2, callback function {str(mock_cb2)}, further keywords: {{'retroactive': True, 'selector': {str(mock.sentinel.selector)}, 'exclusive': True, 'transformation': True, 'priority': 42}}"
    assert caplog.record_tuples == [
        (
            "workflows.transport.offline_transport",
            logging.INFO,
            f"Offline Transport: {message}",
        ),
        ("workflows.transport.offline_transport", logging.DEBUG, f"{debug}"),
    ]
| 24,878
|
def test_validate_extension_invalid() -> None:
    """It returns False when extension is invalid."""
    outcome = toml.validate_extension("file.xml")
    assert not outcome
| 24,879
|
def dsmoothlist_by_deform_exp(deform_exp, ag_mode):
    """
    Automatically extract the selected artificial generations for the
    training and validation sets.

    The artificial-generation modes map to deform methods as follows:
        'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'],
        'NoResp': ['single_frequency', 'mixed_frequency', 'zero'],
        'SingleOnly': ['single_frequency'],
        'MixedOnly': ['mixed_frequency'],
        'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'],
    please note that for validation set we do not need to select all of them
    :param deform_exp: name of the deformation experiment setting to load
    :param ag_mode: artificial generation mode: 'Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization'
    :return: (dsmoothlist_training, dsmoothlist_validation) index lists
    :raises ValueError: if ag_mode is unknown, or no validation indices are
        defined for the given deform_exp
    """
    # The keys of this mapping double as the set of valid modes, so the
    # list of modes is maintained in one place only.
    comp_dict = {'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'],
                 'NoResp': ['single_frequency', 'mixed_frequency', 'zero'],
                 'SingleOnly': ['single_frequency'],
                 'MixedOnly': ['mixed_frequency'],
                 'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'],
                 'Visualization': []
                 }
    if ag_mode not in comp_dict:
        # Fixed: the message previously said 'exp_mode', which is not the
        # name of any parameter of this function.
        raise ValueError("ag_mode should be in " + str(list(comp_dict.keys())))
    dsmoothlist_validation = []
    deform_exp_setting = load_deform_exp_setting(deform_exp)
    all_deform_methods = deform_exp_setting['DeformMethods']
    dsmoothlist_training = [i for i, deform_method in enumerate(all_deform_methods)
                            if deform_method in comp_dict[ag_mode]]
    # Hand-picked validation indices per mode for the known experiments.
    validation_by_mode = {'Resp': [0, 5, 10],
                          'NoResp': [5, 8, 10],
                          'SingleResp': [4, 8, 10],
                          'SingleOnly': [5, 6, 8],
                          'MixedOnly': [9, 10, 12]}
    if deform_exp in ['3D_max7_D14_K', '3D_max15_D14_K', '3D_max20_D14_K', '3D_max15_SingleFrequency_Visualization']:
        if ag_mode in validation_by_mode:
            dsmoothlist_validation = validation_by_mode[ag_mode]
        else:
            raise ValueError('dsmoothlist_validation not found for deform_exp='+deform_exp+', please add it manually')
    return dsmoothlist_training, dsmoothlist_validation
| 24,880
|
def test_stopwords_custom():
    """Custom stopwords must be removed from the corpus."""
    norm = Normalizer(stopwords=False, custom_stopwords=["b"])
    transformed = norm.transform([["a b"]])
    assert transformed["corpus"][0] == ["a"]
| 24,881
|
def test_file_listing_serialization(database, tmpdir):
    """Test serialization of file handles."""
    serializer = UploadFileSerializer()
    upload_name = 'data.json'
    with database.session() as session:
        manager = WorkflowGroupManager(
            session=session,
            fs=FileSystemStorage(basedir=tmpdir)
        )
        # Create a user, workflow and group to attach the upload to.
        user_id = model.create_user(session, active=True)
        workflow_id = model.create_workflow(session)
        group_id = model.create_group(session, workflow_id, users=[user_id])
        handle = manager.upload_file(
            group_id=group_id,
            file=io_file(data={'A': 1}),
            name=upload_name
        )
        # A single handle serializes with its original file name.
        doc = serializer.file_handle(group_id=group_id, fh=handle)
        assert doc[labels.FILE_NAME] == upload_name
        validator('FileHandle').validate(doc)
        # The full listing for the group validates against its schema.
        listing = serializer.file_listing(
            group_id=group_id,
            files=manager.list_uploaded_files(group_id=group_id)
        )
        validator('FileListing').validate(listing)
| 24,882
|
def RunHeuristicAnalysis(analysis):
    """Performs heuristic analysis on a MasterFlakeAnalysis.

    Args:
        analysis (MasterFlakeAnalysis): The analysis to run heuristic results on.
        Results are saved to the analysis itself as a list of FlakeCulprit
        urlsafe keys.
    """
    revisions = IdentifySuspectedRevisions(analysis)
    SaveFlakeCulpritsForSuspectedRevisions(
        analysis.key.urlsafe(), revisions)
| 24,883
|
def get_data_reader(header: Header) -> Callable[[BinaryIO], Tuple]:
    """Make a binary reader function for data.

    The struct format is derived from the channel names in *header*:
    each name token maps to a fixed-width big-endian field code.
    """
    # (substring, struct code) pairs, checked in this order.
    type_codes = (
        ("CH", "h"),
        ("Pulse", "L"),
        ("Logic", "H"),
        ("Alarm", "H"),
        ("AlOut", "H"),
        ("Status", "H"),
    )
    format_ = ""
    for name in get_data_names(header):
        for token, code in type_codes:
            if token in name:
                format_ += code
                break
        else:
            # No known token matched this channel name.
            raise ValueError(name)
    record = Struct(BIG_ENDIAN + format_)

    def reader(f: BinaryIO) -> Tuple:
        return record.unpack(f.read(record.size))

    return reader
| 24,884
|
def proximal_region_finder(readers, region, comments=True):
    """
    Returns an iterator that yields elements of the form [ <original_interval>, <closest_feature> ].
    Intervals are GenomicInterval objects.

    readers[0] is the primary interval reader; readers[1] supplies the
    feature intervals that are searched. `region` selects which side(s)
    of each primary interval to search: 'Upstream', 'Downstream',
    'Either' (closest of the two), or any other value for both sides.
    """
    primary = readers[0]
    features = readers[1]
    either = False
    # Translate the region keyword into strand-relative search directions.
    if region == 'Upstream':
        up, down = True, False
    elif region == 'Downstream':
        up, down = False, True
    else:
        up, down = True, True
    if region == 'Either':
        either = True
    # Read features into memory:
    rightTree = quicksect.IntervalTree()
    for item in features:
        if type( item ) is GenomicInterval:
            rightTree.insert( item, features.linenum, item )
    for interval in primary:
        # Pass headers (and optionally comments) through unchanged.
        if type( interval ) is Header:
            yield interval
        if type( interval ) is Comment and comments:
            yield interval
        elif type( interval ) == GenomicInterval:
            chrom = interval.chrom
            start = int(interval.start)
            end = int(interval.end)
            strand = interval.strand
            if chrom not in rightTree.chroms:
                continue
            else:
                root = rightTree.chroms[chrom] #root node for the chrom tree
                result_up = []
                result_down = []
                # "Upstream" is to the left on the + strand and to the
                # right on the - strand; query the tree accordingly.
                if (strand == '+' and up) or (strand == '-' and down):
                    #upstream +ve strand and downstream -ve strand cases
                    get_closest_feature (root, 1, start, None, lambda node: result_up.append( node ), None)
                if (strand == '+' and down) or (strand == '-' and up):
                    #downstream +ve strand and upstream -ve strand case
                    get_closest_feature (root, 0, None, end-1, None, lambda node: result_down.append( node ))
                if result_up:
                    if len(result_up) > 1: #The results_up list has a list of intervals upstream to the given interval.
                        ends = []
                        for n in result_up:
                            ends.append(n.end)
                        res_ind = ends.index(max(ends)) #fetch the index of the closest interval i.e. the interval with the max end from the results_up list
                    else:
                        res_ind = 0
                    if not(either):
                        yield [ interval, result_up[res_ind].other ]
                if result_down:
                    if not(either):
                        #The last element of result_down will be the closest element to the given interval
                        yield [ interval, result_down[-1].other ]
                if either and (result_up or result_down):
                    iter_val = []
                    # NOTE(review): res_ind is assigned above only when
                    # result_up is non-empty; every use of it below is
                    # guarded by result_up, so it cannot be unbound here.
                    if result_up and result_down:
                        # Pick whichever neighbour is closer to the interval.
                        if abs(start - int(result_up[res_ind].end)) <= abs(end - int(result_down[-1].start)):
                            iter_val = [ interval, result_up[res_ind].other ]
                        else:
                            #The last element of result_down will be the closest element to the given interval
                            iter_val = [ interval, result_down[-1].other ]
                    elif result_up:
                        iter_val = [ interval, result_up[res_ind].other ]
                    elif result_down:
                        #The last element of result_down will be the closest element to the given interval
                        iter_val = [ interval, result_down[-1].other ]
                    yield iter_val
| 24,885
|
def test_api_mediawiki(monkeypatch):
    """The api_mediawiki test using mocks."""
    expected = "OpenClassrooms est une école en ligne..."

    def fake_search(*args, **kwargs):
        # Stand-in for MediawikiApi.search; always returns the canned text.
        return expected

    monkeypatch.setattr(MediawikiApi, 'search', fake_search)
    api = MediawikiApi()
    assert api.search('openclassrooms') == expected
| 24,886
|
def check_fnr(fnr: str, d_numbers=True, h_numbers=False, logger: Callable = lambda _x: None) -> bool:
    """
    Check whether a number is a valid fødselsnummer.

    Thin wrapper around ``validate_fnr`` that converts its ValueError
    into a False return, logging the error text instead of raising.

    Args:
        fnr: A string containing the fodselsnummer to check
        d_numbers: True (the default) if d-numbers should be accepted
        h_numbers: False (the default) if h-numbers should be accepted
        logger: A function used to log things

    Returns:
        True if it is a valid fodselsnummer, False otherwise.
    """
    try:
        return validate_fnr(fnr=fnr, d_numbers=d_numbers, h_numbers=h_numbers)
    except ValueError as exc:
        logger(str(exc))
        return False
| 24,887
|
def test_should_raise_if_format_error():
    """
    Test exception is raised if docstring syntax error
    """
    malformed = '''
    :param p1
    '''
    with pytest.raises(MlVToolException) as excinfo:
        parse_docstring(malformed)
    # The wrapped cause must be the parser's own error type.
    assert isinstance(excinfo.value.__cause__, ParseError)
| 24,888
|
def get_case_list_from_cls(test_cls_list):
    """Convert test classes into loaded test suites.

    Args:
        test_cls_list: iterable of unittest.TestCase subclasses.

    Returns:
        list: one loaded ``unittest.TestSuite`` per input class.
    """
    # One loader is enough; the original created a new TestLoader per class.
    loader = unittest.TestLoader()
    return [loader.loadTestsFromTestCase(test_cls) for test_cls in test_cls_list]
| 24,889
|
def plot_image(pig_img_aug, label, rows):
    """Plots the augmented image"""
    log.info('Plotting augmented image...')
    global num
    # Advance the shared subplot counter before selecting the cell.
    num += 1
    plt.subplot(rows, 5, num)
    plt.title(label)
    plt.imshow(pig_img_aug)
| 24,890
|
def align_times(sync_behavioral, sync_neural, score_thresh=0.9999,
                ignore_poor_alignment=False, return_model=False, verbose=False):
    """Align times across different recording systems.
    Parameters
    ----------
    sync_behavioral : 1d array
        Sync pulse times from behavioral computer.
    sync_neural : 1d array
        Sync pulse times from neural computer.
    score_thresh : float, optional, default: 0.9999
        R^2 threshold value to check that the fit model is better than.
    ignore_poor_alignment : bool, optional, default: False
        Whether to ignore a bad alignment score.
    return_model : bool, optional, default: False
        Whether to return the model object. If False, the model intercept
        and coefficient are returned instead.
    verbose : bool, optional, default: False
        Whether to print out model information.
    Returns
    -------
    model : LinearRegression
        The fit model object. Returned (together with `score`) only if
        `return_model` is True.
    model_intercept : float
        Intercept of the model predicting differences between sync pulses.
        Returned if `return_model` is False.
    model_coef : float
        Learned coefficient of the model predicting differences between sync pulses.
        Returned if `return_model` is False.
    score : float
        R^2 score of the model, indicating how good a fit there is between sync pulses.
        Always returned, as the last element.
    Raises
    ------
    ValueError
        If the fit score is below `score_thresh` and
        `ignore_poor_alignment` is False.
    """
    # sklearn imports are weird, so re-import here
    # the sub-modules here aren't available from the global namespace
    from sklearn.metrics import r2_score
    from sklearn.linear_model import LinearRegression
    from sklearn.model_selection import train_test_split
    # Reshape to column arrays for scikit-learn
    sync_behavioral = sync_behavioral.reshape(-1, 1)
    sync_neural = sync_neural.reshape(-1, 1)
    # Linear model to predict alignment between time traces
    x_train, x_test, y_train, y_test = train_test_split(\
        sync_behavioral, sync_neural, test_size=0.50, random_state=42)
    model = LinearRegression()
    model.fit(x_train, y_train)
    y_pred = model.predict(x_test)
    score = r2_score(y_test, y_pred)
    bad_score_msg = 'This session has bad synchronization between brain and behavior'
    if score < score_thresh:
        if not ignore_poor_alignment:
            raise ValueError(bad_score_msg)
        else:
            print(bad_score_msg)
    if verbose:
        print('coef', model.coef_[0], '\n intercept', model.intercept_[0])
        print('score', score)
    if return_model:
        return model, score
    else:
        return model.intercept_[0], model.coef_[0][0], score
| 24,891
|
def check_media(url):
    """Check if something is available or has a new hash

    Checks if url is available, if yes, download and hash it, then see if it
    has changed.

    Args:
        url: A complete url to something

    Returns:
        0 if available and no change.
        1 if not available.
        2 if it has changed
    """
    # Fixed: the original line ended with a stray ':' (a SyntaxError).
    media = http.download_something(url)
    # If failed to download
    if not media:
        return 1
    # Hash media
    hashed_media = hashlib.sha512(media).hexdigest()
    # TODO(review): the function ends here without comparing hashed_media
    # against a previously stored hash, so it implicitly returns None
    # instead of the documented 0/2. Implement the comparison against the
    # persisted hash (or adjust the docstring) — no hash store is visible
    # in this file to base it on.
| 24,892
|
def breed(tree1, tree2):
    """Breed two trees by swapping sub-trees.

    Makes a copy of tree1 and attaches a sub-tree taken from tree2 at a
    randomly chosen depth, relying on the module's simple binary tree
    structure.

    Note: only left sub-trees are considered on each side, so the
    crossover is limited; a more general scheme would need a more
    complex node-selection traversal than is implemented here.
    """
    cpy = tree1.copy()
    # Choose the swap depth at least two levels above MAX_DEPTH so the
    # attached sub-tree cannot push the copy past the depth limit.
    start_depth = random.randint(0, MAX_DEPTH-2)
    node1_parent = cpy.get_left_node_at_depth(start_depth)
    node2 = tree2.get_left_node_at_depth(start_depth+1)
    # Graft tree2's node in as the left child of the chosen parent.
    node1_parent.left = node2
    return cpy
| 24,893
|
def tuple_from_iterable(val: Iterable[Any]) -> Tuple[Any, ...]:
    """Materialize any iterable as a tuple.

    Workaround for https://github.com/python-attrs/attrs/issues/519
    """
    return tuple(item for item in val)
| 24,894
|
def merge_vocabs(vocabs, vocab_size=None):
    """
    Merge individual vocabularies (assumed to be generated from disjoint
    documents) into a larger vocabulary.
    Args:
        vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
        vocab_size: `int` the final vocabulary size. `None` for no limit.
    Return:
        `torchtext.vocab.Vocab`
    """
    # Accumulate the per-vocabulary frequency counters into one Counter.
    combined = Counter()
    for vocab in vocabs:
        combined.update(vocab.freqs)
    return Vocab(combined,
                 specials=['<pad>','<unk>','<sep>','<sos>','<eos>'],
                 vectors = 'fasttext.en.300d')
| 24,895
|
def read_g_char(in_name, pop="ESP", debug=False):
    """
    Read charges and energy from a Gaussian log file.
    Parameters
    ----------
    in_name : str
        Name of the file to read
    pop : str, optional
        Kind of charge to read: "Mulliken", "ESP" or "RESP" (case-insensitive)
    debug : bool, optional
        Return extra energy information. Turn on with care
    Returns
    -------
    charges : list of floats
        Each partial charge value in the file
    energy : float
        Gaussian calculated energy in Hartree
    char_ener : float
        Self energy of the point charges (only returned when debug=True)
    n_char : float
        Nuclei-charge interaction energy (only returned when debug=True)
    Raises
    ------
    ValueError
        If pop is not one of the supported population analyses.
    """
    kind = pop.lower()
    # Fail fast on unknown kinds: previously an unsupported `pop` left
    # `last_mull` unbound and crashed later with a NameError.
    if kind not in ("mulliken", "esp", "resp"):
        raise ValueError("Unsupported population analysis: {}".format(pop))
    with open(in_name) as gauss_file:
        content = gauss_file.readlines()
    # find last occurrence of the requested charge section
    if kind == "mulliken":
        marker = " Mulliken charges:\n"
    else:  # esp / resp share the same section header
        marker = " ESP charges:\n"
    last_mull = len(content) - 1 - content[::-1].index(marker)
    charges = []
    # Charge lines follow two header lines and start with the atom index.
    for line in content[last_mull + 2:]:
        fields = line.split()
        # The emptiness guard prevents the IndexError a blank line caused
        # in the original version.
        if fields and fields[0].isdigit():
            charges.append(float(fields[2]))
        else:
            break
    # find each occurrence of Energy; the last match of each kind wins
    for line in content:
        if "SCF Done" in line:
            energy = float(line.split()[4])
        if "Total Energy" in line:
            energy = float(line.split()[4])
        if "Self energy of the charges" in line:
            char_ener = float(line.split()[6])
        if "Nuclei-charges interaction" in line:
            n_char = float(line.split()[3])
    if debug:
        return charges, energy, char_ener, n_char
    return charges, energy
| 24,896
|
def mrs(ctx, group_project_filter, label, merge):
    """
    List and manage merge requests of GitLab projects.
    Filter syntax:
    - foo/bar ... projects that have "bar" in their name,
    in groups that have "foo" in their name
    - foo/ ... filter for groups only, match any project
    - /bar ... filter for projects only, match any group
    """
    # Exactly one '/' splits into group and project parts; any other
    # shape is treated as a project-only filter (matches the original
    # try/except ValueError behavior, including multi-slash input).
    parts = group_project_filter.split('/')
    if len(parts) == 2:
        group_filter, project_filter = parts
    else:
        group_filter, project_filter = '', group_project_filter
    manager = MergeRequestManager(
        uri=ctx.obj.get('uri'),
        token=ctx.obj.get('token'),
        insecure=ctx.obj.get('insecure'),
        group_filter=group_filter,
        project_filter=project_filter,
        labels=list(label),
        merge_style=merge,
    )
    if merge in ['yes', 'automatic']:
        manager.merge_all()
    else:
        manager.show()
| 24,897
|
def count_nonzero(X, axis=None, sample_weight=None):
    """A variant of X.getnnz() with extension to weighting on axis 0
    Useful in efficiently calculating multilabel metrics.
    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_labels)
        Input data. It should be of CSR format.
    axis : {0, 1}, default=None
        The axis on which the data is aggregated.
    sample_weight : array-like of shape (n_samples,), default=None
        Weight for each row of X.
    Raises
    ------
    TypeError
        If X is not in CSR format.
    ValueError
        If axis is not one of None, 0, 1, -1, -2.
    """
    # Normalize negative axis values.
    if axis == -1:
        axis = 1
    elif axis == -2:
        axis = 0
    # Validate the format unconditionally. Previously this check was the
    # final branch of the elif chain above, so non-CSR input with axis in
    # (None, 0, 1) silently reached the CSR-specific logic below.
    if X.format != 'csr':
        raise TypeError('Expected CSR sparse format, got {0}'.format(X.format))
    # We rely here on the fact that np.diff(Y.indptr) for a CSR
    # will return the number of nonzero entries in each row.
    # A bincount over Y.indices will return the number of nonzeros
    # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
    if axis is None:
        if sample_weight is None:
            return X.nnz
        else:
            return np.dot(np.diff(X.indptr), sample_weight)
    elif axis == 1:
        out = np.diff(X.indptr)
        if sample_weight is None:
            # astype here is for consistency with axis=0 dtype
            return out.astype('intp')
        return out * sample_weight
    elif axis == 0:
        if sample_weight is None:
            return np.bincount(X.indices, minlength=X.shape[1])
        else:
            # Expand row weights to one weight per stored value.
            weights = np.repeat(sample_weight, np.diff(X.indptr))
            return np.bincount(X.indices, minlength=X.shape[1],
                               weights=weights)
    else:
        raise ValueError('Unsupported axis: {0}'.format(axis))
| 24,898
|
def isolate_shape_axis(base, target, axis_list = ['X','Y','Z']):
    """
    Given a base mesh, only keep target vertex movement along the axes named in axis_list.

    Args:
        base (str): The base mesh that has no targets applied.
        target (str): The target mesh with vertices moved relative to the base.
        axis_list (list): The axes of movement allowed. If axis_list = ['X'], only vertex movement on x will be present in the result.

    Returns:
        str: A new mesh whose verts move only on the isolated axes, or None
        when the target exposes no vertices.
    """
    verts = cmds.ls('%s.vtx[*]' % target, flatten = True)
    if not verts:
        return
    axis_name = '_'.join(axis_list)
    new_target = cmds.duplicate(target, n = '%s_%s' % (target, axis_name))[0]
    tolerance = 0.0001
    for index in range(len(verts)):
        base_pos = cmds.xform('%s.vtx[%s]' % (base, index), q = True, t = True, ws = True)
        target_pos = cmds.xform('%s.vtx[%s]' % (target, index), q = True, t = True, ws = True)
        if base_pos == target_pos:
            continue
        # Skip verts whose movement is negligible on every axis.
        if all(abs(b - t) < tolerance for b, t in zip(base_pos, target_pos)):
            continue
        # Reset movement on any axis that is not being isolated.
        for component, axis in enumerate(['X', 'Y', 'Z']):
            if axis not in axis_list:
                target_pos[component] = base_pos[component]
        cmds.xform('%s.vtx[%s]' % (new_target, index), ws = True, t = target_pos)
    return new_target
| 24,899
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.