| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def timeago(seconds=0, accuracy=4, format=0, lang="en", short_name=False):
"""Translate seconds into human-readable.
:param seconds: seconds (float/int).
:param accuracy: 4 by default (units[:accuracy]), determine the length of elements.
:param format: index of [led, literal, dict].
:param lang: en or cn.
:param units: day, hour, minute, second, ms.
>>> timeago(93245732.0032424, 5)
'1079 days, 05:35:32,003'
>>> timeago(93245732.0032424, 4, 1)
'1079 days 5 hours 35 minutes 32 seconds'
>>> timeago(-389, 4, 1)
'-6 minutes 29 seconds 0 ms'
"""
    assert format in (0, 1, 2), "format arg should be one of 0, 1, 2"
negative = "-" if seconds < 0 else ""
is_en = lang == "en"
seconds = abs(seconds)
if is_en:
if short_name:
units = ("day", "hr", "min", "sec", "ms")
else:
units = ("day", "hour", "minute", "second", "ms")
elif lang == "cn":
if short_name:
units = (u"日", u"时", u"分", u"秒", u"毫秒")
else:
units = (u"天", u"小时", u"分钟", u"秒", u"毫秒")
times = split_seconds(seconds)
if format == 2:
return dict(zip(units, times))
day, hour, minute, second, ms = times
if format == 0:
day_str = ("%d %s%s, " %
(day, units[0], "s" if day > 1 and is_en else "")
if day else "")
mid_str = ":".join(("%02d" % i for i in (hour, minute, second)))
if accuracy > 4:
mid_str += ",%03d" % ms
return negative + day_str + mid_str
elif format == 1:
if seconds:
# find longest valid fields index (non-zero for head and tail)
for index, item in enumerate(times):
if item != 0:
head_index = index
break
for index, item in enumerate(reversed(times)):
if item != 0:
tail_index = len(times) - index
break
result_str = [
"%d %s%s" %
(num, unit, "s" if is_en and num > 1 and unit != "ms" else "")
for num, unit in zip(times, units)
][head_index:tail_index][:accuracy]
result_str = " ".join(result_str)
else:
result_str = "0 %s" % units[-1]
return negative + result_str
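# Note: timeago relies on a split_seconds helper that is not part of this snippet.
# A minimal sketch, assuming it returns a (day, hour, minute, second, ms) tuple:
def split_seconds(seconds):
    """Split seconds into (days, hours, minutes, seconds, milliseconds)."""
    ms = int(round((seconds - int(seconds)) * 1000))
    seconds = int(seconds)
    if ms == 1000:  # guard against rounding up to a full second
        ms = 0
        seconds += 1
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    return days, hours, minutes, seconds, ms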
| 11,300
|
def copy_intervention_to_study(study, intervention):
"""
Copies the given Intervention to the given Study
"""
    # Grab the related settings before copying so they can be re-attached afterwards.
    setting = intervention.setting.all()
    i = intervention
    # Setting pk to None and saving inserts a new row (Django's copy idiom).
    i.pk = None
    i.version = 2  # Auto upgrade old versions
    i.study = study
    i.save()
    # Re-attach the many-to-many settings to the new copy.
    i.setting.set(setting)
    i.save()
| 11,301
|
def test_raise_from_itself(
assert_errors,
parse_ast_tree,
default_options,
code,
):
"""Testing that are not allowed to raise an exception from itself."""
tree = parse_ast_tree(code)
visitor = WrongRaiseVisitor(default_options, tree)
visitor.run()
assert_errors(visitor, [RaiseFromItselfViolation])
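# Note: the `code` fixture is supplied via parametrization elsewhere; an example snippet
# (an assumption, for illustration) that this visitor is expected to flag is:
#
#     try:
#         do_something()
#     except ValueError as ex:
#         raise ex from ex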
| 11,302
|
def generate_headline(ids=None):
"""Generate and return an awesome headline.
Args:
ids:
Iterable of five IDs (intro, adjective, prefix, suffix, action).
Optional. If this is ``None``, random values are fetched from the
database.
Returns:
Tuple of parts and permalink (intro, adjective, prefix, suffix, action,
permalink)
"""
print('[schlagzeilengenerator] Generating a headline...')
# Correct endings
adjective_endings = {
'm': 'r',
'f': '',
's': 's',
'p': '',
}
# Get random database entries
if ids is not None:
d_intro = get_by_id('intro', ids[0])
d_adjective = get_by_id('adjective', ids[1])
d_prefix = get_by_id('prefix', ids[2])
d_suffix = get_by_id('suffix', ids[3])
d_action = get_by_id('action', ids[4])
else:
d_intro = get_random('intro')
d_adjective = get_random('adjective')
d_prefix = get_random('prefix')
d_suffix = get_random('suffix')
d_action = get_random('action')
ids = (d_intro['id'], d_adjective['id'], d_prefix['id'], d_suffix['id'], d_action['id'])
# Get data from dictionaries
case = d_suffix['case']
intro = d_intro['text']
adjective = d_adjective['text'] + adjective_endings[case]
prefix = d_prefix['text']
suffix = d_suffix['text']
if case == 'p':
action = '%s %s' % (d_action['action_p'], d_action['text'])
else:
action = '%s %s' % (d_action['action_s'], d_action['text'])
# Build permalink
permalink = b64encode(b','.join(str(i).encode('ascii') for i in ids))
return intro, adjective, prefix, suffix, action.strip(), permalink
| 11,303
|
def test_directory_exists(value, fails, allow_empty):
"""Test the bytesIO validator."""
if not fails:
validated = validators.directory_exists(value, allow_empty = allow_empty)
if value:
assert validated is not None
else:
assert validated is None
else:
with pytest.raises((ValueError, TypeError, IOError)):
validated = validators.directory_exists(value, allow_empty = allow_empty)
| 11,304
|
def check_param_or_command_type_recursive(ctxt: IDLCompatibilityContext,
field_pair: FieldCompatibilityPair,
is_command_parameter: bool):
# pylint: disable=too-many-branches,too-many-locals
"""
Check compatibility between old and new command or param type recursively.
If the old type is a syntax.Type instance, check the compatibility between the old and new
command type or parameter type recursively.
"""
old_field = field_pair.old
new_field = field_pair.new
old_type = old_field.field_type
new_type = new_field.field_type
cmd_name = field_pair.cmd_name
param_name = field_pair.field_name
# If the old field is unstable, we only add errors related to the use of 'any' as the
# bson_serialization_type. For all other errors, we check that the old field is stable
# before adding an error.
if not isinstance(new_type, syntax.Type):
if not old_field.unstable:
ctxt.add_new_command_or_param_type_enum_or_struct_error(
cmd_name, new_type.name, old_type.name, new_field.idl_file_path, param_name,
is_command_parameter)
return
allow_name: str = cmd_name + "-param-" + param_name if is_command_parameter else cmd_name
# If bson_serialization_type switches from 'any' to non-any type.
if "any" in old_type.bson_serialization_type and "any" not in new_type.bson_serialization_type:
ctxt.add_old_command_or_param_type_bson_any_error(cmd_name, old_type.name, new_type.name,
old_field.idl_file_path, param_name,
is_command_parameter)
return
# If bson_serialization_type switches from non-any to 'any' type.
if "any" not in old_type.bson_serialization_type and "any" in new_type.bson_serialization_type:
ctxt.add_new_command_or_param_type_bson_any_error(cmd_name, old_type.name, new_type.name,
new_field.idl_file_path, param_name,
is_command_parameter)
return
if "any" in old_type.bson_serialization_type:
# If 'any' is not explicitly allowed as the bson_serialization_type.
if allow_name not in ALLOW_ANY_TYPE_LIST:
ctxt.add_old_command_or_param_type_bson_any_not_allowed_error(
cmd_name, old_type.name, old_field.idl_file_path, param_name, is_command_parameter)
return
# If cpp_type is changed, it's a potential breaking change.
if old_type.cpp_type != new_type.cpp_type:
ctxt.add_command_or_param_cpp_type_not_equal_error(
cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
# If serializer is changed, it's a potential breaking change.
if (not old_field.unstable) and old_type.serializer != new_type.serializer:
ctxt.add_command_or_param_serializer_not_equal_error(
cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
# If deserializer is changed, it's a potential breaking change.
if (not old_field.unstable) and old_type.deserializer != new_type.deserializer:
ctxt.add_command_or_param_deserializer_not_equal_error(
cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
if isinstance(old_type, syntax.VariantType):
if not isinstance(new_type, syntax.VariantType):
if not old_field.unstable:
ctxt.add_new_command_or_param_type_not_variant_type_error(
cmd_name, new_type.name, new_field.idl_file_path, param_name,
is_command_parameter)
else:
new_variant_types = new_type.variant_types
old_variant_types = old_type.variant_types
# Check that new variant types are a superset of old variant types.
for old_variant_type in old_variant_types:
for new_variant_type in new_variant_types:
# object->object_owned serialize to the same bson type. object_owned->object is
# not always safe so we only limit this special case to object->object_owned.
if (old_variant_type.name == "object" and new_variant_type.name == "object_owned") or \
old_variant_type.name == new_variant_type.name:
# Check that the old and new version of each variant type is also compatible.
old = FieldCompatibility(old_variant_type, old_field.idl_file,
old_field.idl_file_path, old_field.unstable,
old_field.optional)
new = FieldCompatibility(new_variant_type, new_field.idl_file,
new_field.idl_file_path, new_field.unstable,
new_field.optional)
check_param_or_command_type(
ctxt, FieldCompatibilityPair(old, new, cmd_name, param_name),
is_command_parameter)
break
else:
if not old_field.unstable:
# old_variant_type was not found in new_variant_types.
ctxt.add_new_command_or_param_variant_type_not_superset_error(
cmd_name, old_variant_type.name, new_field.idl_file_path, param_name,
is_command_parameter)
# If old and new types both have a struct as a variant type, compare old and new variant_struct_type.
# Since enums can't be part of variant types, we don't explicitly check for enums.
if old_type.variant_struct_type is not None:
if new_type.variant_struct_type is not None:
check_command_params_or_type_struct_fields(
ctxt, old_type.variant_struct_type, new_type.variant_struct_type, cmd_name,
old_field.idl_file, new_field.idl_file, old_field.idl_file_path,
new_field.idl_file_path, is_command_parameter)
# If old type has a variant struct type and new type does not have a variant struct type.
elif not old_field.unstable:
ctxt.add_new_command_or_param_variant_type_not_superset_error(
cmd_name, old_type.variant_struct_type.name, new_field.idl_file_path,
param_name, is_command_parameter)
elif not old_field.unstable:
check_superset(ctxt, cmd_name, new_type.name, new_type.bson_serialization_type,
old_type.bson_serialization_type, new_field.idl_file_path, param_name,
is_command_parameter)
| 11,305
|
def open_in_browser(path):
"""
Open directory in web browser.
"""
import webbrowser
return webbrowser.open(path)
| 11,306
|
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
"""
Implement a single forward step of the LSTM-cell as described in Figure (4)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
c_next -- next memory state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)
Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde),
c stands for the cell state (memory)
"""
# Retrieve parameters from "parameters"
Wf = parameters["Wf"] # forget gate weight
bf = parameters["bf"]
Wi = parameters["Wi"] # update gate weight (notice the variable name)
bi = parameters["bi"] # (notice the variable name)
Wc = parameters["Wc"] # candidate value weight
bc = parameters["bc"]
Wo = parameters["Wo"] # output gate weight
bo = parameters["bo"]
Wy = parameters["Wy"] # prediction weight
by = parameters["by"]
# Retrieve dimensions from shapes of xt and Wy
n_x, m = xt.shape
n_y, n_a = Wy.shape
### START CODE HERE ###
# Concatenate a_prev and xt (≈1 line)
concat = np.concatenate((a_prev,xt),axis=0)
# Compute values for ft (forget gate), it (update gate),
# cct (candidate value), c_next (cell state),
# ot (output gate), a_next (hidden state) (≈6 lines)
ft = sigmoid(np.dot(Wf,concat)+bf) # forget gate
it = sigmoid(np.dot(Wi,concat)+bi) # update gate
cct = np.tanh(np.dot(Wc,concat)+bc) # candidate value
c_next = ft*c_prev+it*cct # cell state
ot = sigmoid(np.dot(Wo,concat)+bo) # output gate
a_next = ot*np.tanh(c_next) # hidden state
# Compute prediction of the LSTM cell (≈1 line)
yt_pred = softmax(np.dot(Wy,a_next)+by)
### END CODE HERE ###
# store values needed for backward propagation in cache
cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
return a_next, c_next, yt_pred, cache
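# Note: the cell above calls sigmoid and softmax helpers that are not defined in this
# snippet; minimal NumPy sketches (assumed, not from the original assignment) could be:
import numpy as np

def sigmoid(z):
    # Element-wise logistic function.
    return 1.0 / (1.0 + np.exp(-z))

def softmax(z):
    # Column-wise softmax with max-subtraction for numerical stability.
    e = np.exp(z - np.max(z, axis=0, keepdims=True))
    return e / np.sum(e, axis=0, keepdims=True)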
| 11,307
|
def encode_letter(letter):
"""
This will encode a tetromino letter as a small integer
"""
value = None
if letter == 'i':
value = 0
elif letter == 'j':
value = 1
elif letter == 'l':
value = 2
elif letter == 'o':
value = 3
elif letter == 's':
value = 4
elif letter == 't':
value = 5
elif letter == 'z':
value = 6
return value
| 11,308
|
def test_eval_hparams(composer_trainer_hparams: TrainerHparams):
"""Test that `eval_interval` and `eval_subset_num_batches` work when specified via hparams."""
# Create the trainer from hparams
composer_trainer_hparams.eval_interval = "2ep"
composer_trainer_hparams.eval_subset_num_batches = 2
composer_trainer_hparams.evaluators = [
EvaluatorHparams(
label="eval1",
eval_interval='3ep', # will run, since eval_at_fit_end = True
subset_num_batches=1,
eval_dataset=RandomClassificationDatasetHparams(),
),
EvaluatorHparams(
label="eval2",
eval_dataset=RandomClassificationDatasetHparams(),
metric_names=['Accuracy'],
),
]
composer_trainer_hparams.val_dataset = None
composer_trainer_hparams.callbacks = [EventCounterCallbackHparams()]
composer_trainer_hparams.max_duration = "2ep"
trainer = composer_trainer_hparams.initialize_object()
# Validate that `subset_num_batches` was set correctly
assert trainer.state.evaluators[0].subset_num_batches == composer_trainer_hparams.evaluators[0].subset_num_batches
assert trainer.state.evaluators[1].subset_num_batches == composer_trainer_hparams.eval_subset_num_batches
# Train the model
trainer.fit()
    # Validate that `eval_interval` and `subset_num_batches` were set correctly for the
    # evaluator that actually ran
assert "eval1" in trainer.state.current_metrics
assert "eval2" in trainer.state.current_metrics
event_counter_callback = None
for callback in trainer.state.callbacks:
if isinstance(callback, EventCounterCallback):
event_counter_callback = callback
break
assert event_counter_callback is not None
assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == 2
# increment by one for the extra call to `Event.EVAL_BATCH_START` during the evaluation at FIT end.
assert event_counter_callback.event_to_num_calls[
Event.EVAL_BATCH_START] == composer_trainer_hparams.eval_subset_num_batches + 1
| 11,309
|
def test_events_api(mock_search_with_activitystream, authed_client, settings):
"""We mock the call to ActivityStream"""
document = {
"content": "The Independent Hotel Show is the only industry event ... in 2012 to support.",
"currency": "Sterling",
"enddate": "2020-03-18",
"foldername": "1920 Events",
"geocoordinates": {"lat": "49.83", "lon": "3.44"},
"id": "dit:aventri:Event:200198344",
"language": "eng",
"location": {
"address1": "Europaplein 22",
"address2": "",
"address3": "",
"city": "Amsterdam",
"country": "Netherlands",
"email": "",
"map": "",
"name": "RAI Amsterdam",
"phone": "",
"postcode": "1078 GZ",
"state": "",
},
"name": "Independent Hotel Show",
"price": "null",
"price_type": "null",
"published": "2020-03-05T12:39:18.438792",
"startdate": "2020-03-17",
"timezone": "[GMT] Greenwich Mean Time: Dublin, Edinburgh, Lisbon, London",
"type": ["Event", "dit:aventri:Event"],
"url": "https://eu.eventscloud.com/200198344",
}
mock_search_with_activitystream.return_value = create_response(
{
"took": 32,
"timed_out": "false",
"_shards": {"total": 3, "successful": 3, "skipped": 0, "failed": 0},
"hits": {
"total": 1,
"max_score": "null",
"hits": [
{
"_index": (
"objects__feed_id_aventri__date_2020-03-06__timestamp_1583508109__batch_id_hu6dz6lo__"
),
"_type": "_doc",
"_id": "dit:aventri:Event:200198344",
"_score": "null",
"_source": document,
"sort": [313.0059910186728],
}
],
},
}
)
response = authed_client.get(reverse('personalisation-events'))
assert response.status_code == 200
assert response.data == {'results': [document]}
""" What if there are no results? """
mock_search_with_activitystream.return_value = create_response(
{
'took': 17,
'timed_out': False,
'_shards': {'total': 4, 'successful': 4, 'skipped': 0, 'failed': 0},
'hits': {'total': 0, 'hits': []},
}
)
response = authed_client.get(reverse('personalisation-events'))
assert response.status_code == 200
assert response.data == {'results': []}
    # What if ActivityStream sends an error?
mock_search_with_activitystream.return_value = create_response('[service overloaded]', status_code=500)
with pytest.raises(requests.exceptions.HTTPError):
authed_client.get(reverse('personalisation-events'))
    # What if ActivityStream is down?
mock_search_with_activitystream.side_effect = requests.exceptions.ConnectionError
with pytest.raises(requests.exceptions.ConnectionError):
authed_client.get(reverse('personalisation-events'))
| 11,310
|
async def register_log_event(
registration: LogEventRegistration, db: Session = Depends(get_db)
):
"""
Log event registration handler.
:param db:
:param registration: Registration object
:return: None
"""
reg_id = str(uuid4())
# Generate message for registration topic
msg = LogEventRegistrationMessage(
to_address=registration.address,
keyword=registration.keyword,
position=registration.position,
)
# Produce message for registration topic
producer.produce(
topic=settings.REGISTRATIONS_TOPIC,
key=string_serializer(reg_id, key_context),
value=json_serializer(msg.dict(), value_context),
callback=acked,
)
retry_count = 0
while True:
if retry_count >= settings.MAX_CONFIRM_WAIT:
raise HTTPException(
500, "Registration not confirmed. Try again. (NOINSERT)"
)
try:
# Query the DB to check if insert was done correctly
row = crud.get_event_registration_by_id_no_404(db, reg_id)
if row:
break
else:
retry_count += 1
sleep(1)
        except Exception:
retry_count += 1
sleep(1)
# Check if query returned correct result
    if (
        row.to_address != registration.address
        or row.keyword != registration.keyword
        or row.position != registration.position
    ):
raise HTTPException(500, "Registration not confirmed. Try again. (NOMATCH)")
return {"reg_id": reg_id, "status": "registered"}
| 11,311
|
def set_math_mode(math_mode):
"""Set the math mode used by SUSPECT.
Parameters
----------
math_mode: MathMode
the math mode to use
"""
if math_mode == MathMode.ARBITRARY_PRECISION:
from suspect.math import arbitrary_precision as arb
for member in _COMMON_MEMBERS:
globals()[member] = getattr(arb, member)
for member in _ARBITRARY_PRECISION_MEMBERS:
globals()[member] = getattr(arb, member)
elif math_mode == MathMode.FLOATING_POINT:
from suspect.math import floating_point as fp
for member in _COMMON_MEMBERS:
globals()[member] = getattr(fp, member)
else:
raise RuntimeError('Invalid MathMode')
| 11,312
|
def cvAbsDiffS(*args):
"""cvAbsDiffS(CvArr src, CvArr dst, CvScalar value)"""
return _cv.cvAbsDiffS(*args)
| 11,313
|
def do_host_evacuate_live(cs, args):
"""Live migrate all instances of the specified host
to other available hosts.
"""
hypervisors = cs.hypervisors.search(args.host, servers=True)
response = []
migrating = 0
for hyper in hypervisors:
for server in getattr(hyper, 'servers', []):
response.append(_server_live_migrate(cs, server, args))
migrating = migrating + 1
if args.max_servers is not None and migrating >= args.max_servers:
break
utils.print_list(response, ["Server UUID", "Live Migration Accepted",
"Error Message"])
| 11,314
|
def tps_word_embeddings_correlation_plot(
tps_scores: np.ndarray,
y_values: np.ndarray,
y_label: str,
tps_vs_y_correlation: float,
output_plot_filepath: str,
neighbourhood_size: int,
) -> None:
"""
Saves a correlation plot between TPS scores and some y values.
Parameters
----------
tps_scores : np.ndarray
TPS scores.
y_values : np.ndarray
Y-values to plot against TPS scores.
y_label : str
Y-axis label.
tps_vs_y_correlation : float
Correlation between TPS scores and y values.
output_plot_filepath : str
Output plot filepath.
    neighbourhood_size : int
        Neighbourhood size used to compute TPS scores (shown in the x-axis label).
"""
# Plot TPS scores to GS
fig, ax = plt.subplots(figsize=(10, 5))
scatter_h = ax.scatter(x=tps_scores, y=y_values)
if len(tps_scores) > 1000:
scatter_h.set_rasterized(True)
ax.set_xlabel(f"TPS_{neighbourhood_size}")
ax.set_ylabel(y_label)
ax.set_title(f"Correlation: {tps_vs_y_correlation:.5f}")
plt.tight_layout()
plt.savefig(
output_plot_filepath,
backend="pgf",
)
plt.close(fig)
| 11,315
|
def create_template_app(**kwargs):
"""Create a template Flask app"""
app = create_app(**kwargs)
from . import views # this must be placed here, after the app is created
app.register_blueprints()
return app
| 11,316
|
def mse(predict, actual):
"""
Examples(rounded for precision):
>>> actual = [1,2,3];predict = [1,4,3]
>>> np.around(mse(predict,actual),decimals = 2)
1.33
>>> actual = [1,1,1];predict = [1,1,1]
>>> mse(predict,actual)
0.0
"""
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
square_diff = np.square(difference)
score = square_diff.mean()
return score
| 11,317
|
def train_generator(wav_list, feat_list, receptive_field,
batch_length=None,
batch_size=1,
feature_type="world",
wav_transform=None,
feat_transform=None,
pulse_transform=p_trans_binary_multi_channel,
shuffle=True,
upsampling_factor=80,
use_upsampling_layer=True,
use_speaker_code=False,
use_pulse=True):
"""GENERATE TRAINING BATCH.
Args:
wav_list (list): List of wav files.
feat_list (list): List of feat files.
receptive_field (int): Size of receptive filed.
batch_length (int): Batch length (if set None, utterance batch will be used.).
batch_size (int): Batch size (if batch_length = None, batch_size will be 1.).
feature_type (str): Auxiliary feature type.
wav_transform (func): Preprocessing function for waveform.
        feat_transform (func): Preprocessing function for aux feats.
        pulse_transform (func): Preprocessing function for the pulse signal.
        shuffle (bool): Whether to shuffle the file list.
        upsampling_factor (int): Upsampling factor.
        use_upsampling_layer (bool): Whether to use upsampling layer.
        use_speaker_code (bool): Whether to use speaker code.
        use_pulse (bool): Whether to use the pulse signal.
Returns:
generator: Generator instance.
"""
# shuffle list
if shuffle:
n_files = len(wav_list)
idx = np.random.permutation(n_files)
wav_list = [wav_list[i] for i in idx]
feat_list = [feat_list[i] for i in idx]
# check batch_length
if batch_length is not None and use_upsampling_layer:
batch_mod = (receptive_field + batch_length) % upsampling_factor
logging.warning("batch length is decreased due to upsampling (%d -> %d)" % (
batch_length, batch_length - batch_mod))
batch_length -= batch_mod
# show warning
if batch_length is None and batch_size > 1:
logging.warning("in utterance batch mode, batchsize will be 1.")
while True:
batch_x, batch_p, batch_h, batch_t = [], [], [], []
# process over all of files
for wavfile, featfile in zip(wav_list, feat_list):
# load waveform and aux feature
# x, fs = sf.read(wavfile, dtype=np.float32)
fs, data = wf.read(wavfile)
# print(data.shape)
            x = data.astype(np.float64) / 32768  # scale int16 PCM to [-1.0, 1.0)
h = read_hdf5(featfile, "/" + feature_type)
p = read_hdf5(featfile, "/" + 'world_pulse')
# p
if pulse_transform:
p = pulse_transform(p)
if not use_upsampling_layer:
h = extend_time(h, upsampling_factor)
if use_speaker_code:
sc = read_hdf5(featfile, "/speaker_code")
sc = np.tile(sc, [h.shape[0], 1])
h = np.concatenate([h, sc], axis=1)
# check both lengths are same
logging.debug("before x length = %d" % x.shape[0])
logging.debug("before h length = %d" % h.shape[0])
if use_upsampling_layer:
x, h = validate_length(x, h, upsampling_factor)
else:
x, h = validate_length(x, h)
logging.debug("after x length = %d" % x.shape[0])
logging.debug("after h length = %d" % h.shape[0])
# ---------------------------------------
# use mini batch without upsampling layer
# ---------------------------------------
if batch_length is not None and not use_upsampling_layer:
raise NotImplementedError
# ------------------------------------
# use mini batch with upsampling layer <-------This TODO
# ------------------------------------
elif batch_length is not None and use_upsampling_layer:
# make buffer array
if "x_buffer" not in locals():
x_buffer = np.empty((0), dtype=np.float32)
# p_buffer = np.empty((0), dtype=np.float32)
p_buffer = np.empty((0, p.shape[1]), dtype=np.float32)
h_buffer = np.empty((0, h.shape[1]), dtype=np.float32)
x_buffer = np.concatenate([x_buffer, x], axis=0)
p_buffer = np.concatenate([p_buffer, p], axis=0)
h_buffer = np.concatenate([h_buffer, h], axis=0)
while len(h_buffer) > (receptive_field + batch_length) // upsampling_factor:
# set batch size
h_bs = (receptive_field + batch_length) // upsampling_factor
x_bs = h_bs * upsampling_factor + 1
p_bs = h_bs * upsampling_factor + 1
# get pieces
h_ = h_buffer[:h_bs]
x_ = x_buffer[:x_bs]
p_ = p_buffer[:p_bs]
# perform pre-processing
if wav_transform is not None:
x_ = wav_transform(x_)
if feat_transform is not None:
h_ = feat_transform(h_)
if use_pulse:
h_ = np.concatenate([h_[:, 0:1], h_[:, 2:]],
axis=1) # remove cont_f0_lpf (vuv[1]+mcep[25]+ap_code[1])
# h_ = np.concatenate([h_[:, 0:1], h_[:, -1:]], axis=1) # remove cont_f0_lpf and mcep (vuv[1]+ap_code[1])
# mcep = h_[:, 1:-2] # extract mcep
# convert to torch variable
x_ = torch.from_numpy(x_).long()
p_ = torch.from_numpy(p_).float()
h_ = torch.from_numpy(h_).float()
# remove the last and first sample for training
batch_h += [h_.transpose(0, 1)] # (D x T)
batch_x += [x_[:-1]] # (T)
batch_p += [p_[:-1].transpose(0, 1)] # (C x T)
batch_t += [x_[1:]] # (T)
# set shift size
h_ss = batch_length // upsampling_factor
x_ss = h_ss * upsampling_factor
p_ss = h_ss * upsampling_factor
# update buffer
h_buffer = h_buffer[h_ss:]
x_buffer = x_buffer[x_ss:]
p_buffer = p_buffer[p_ss:]
# return mini batch
if len(batch_x) == batch_size:
batch_x = torch.stack(batch_x)
batch_p = torch.stack(batch_p)
batch_h = torch.stack(batch_h)
batch_t = torch.stack(batch_t)
# send to cuda
if torch.cuda.is_available():
batch_x = batch_x.cuda()
batch_p = batch_p.cuda()
batch_h = batch_h.cuda()
batch_t = batch_t.cuda()
yield (batch_x, batch_h, batch_p), batch_t
batch_x, batch_h, batch_p, batch_t, = [], [], [], []
# --------------------------------------------
# use utterance batch without upsampling layer
# --------------------------------------------
elif batch_length is None and not use_upsampling_layer:
raise NotImplementedError
# -----------------------------------------
# use utterance batch with upsampling layer
# -----------------------------------------
else:
raise NotImplementedError
# re-shuffle
if shuffle:
idx = np.random.permutation(n_files)
wav_list = [wav_list[i] for i in idx]
feat_list = [feat_list[i] for i in idx]
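# Note: validate_length (used above) is not included in this snippet. A minimal sketch,
# assuming it trims the waveform and features to consistent lengths (optionally tied
# together by the upsampling factor):
def validate_length(x, h, upsampling_factor=None):
    if upsampling_factor is None:
        length = min(len(x), len(h))
        return x[:length], h[:length]
    length = min(len(x) // upsampling_factor, len(h))
    return x[:length * upsampling_factor], h[:length]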
| 11,318
|
def make_demo_measurements(num_measurements, extra_tags=frozenset()):
"""Make a measurement object."""
return [
make_flexural_test_measurement(
my_id=__random_my_id(),
deflection=random.random(),
extra_tags=extra_tags
) for _ in range(num_measurements)
]
| 11,319
|
def _nrc_coron_rescale(self, res, coord_vals, coord_frame, siaf_ap=None, sp=None):
"""
Function for better scaling of NIRCam coronagraphic output for sources
that overlap the image masks.
"""
if coord_vals is None:
return res
nfield = np.size(coord_vals[0])
psf_sum = _nrc_coron_psf_sums(self, coord_vals, coord_frame, siaf_ap=siaf_ap)
if psf_sum is None:
return res
# Scale by countrate of observed spectrum
if (sp is not None) and (not isinstance(sp, list)):
nspec = 1
obs = S.Observation(sp, self.bandpass, binset=self.bandpass.wave)
sp_counts = obs.countrate()
elif (sp is not None) and (isinstance(sp, list)):
nspec = len(sp)
if nspec==1:
obs = S.Observation(sp[0], self.bandpass, binset=self.bandpass.wave)
sp_counts = obs.countrate()
else:
sp_counts = []
for i, sp_norm in enumerate(sp):
obs = S.Observation(sp_norm, self.bandpass, binset=self.bandpass.wave)
sp_counts.append(obs.countrate())
sp_counts = np.array(sp_counts)
else:
nspec = 0
sp_counts = 1
if nspec>1 and nspec!=nfield:
_log.warn("Number of spectra should be 1 or equal number of field points")
# Scale by count rate
psf_sum *= sp_counts
# Re-scale PSF by total sums
if isinstance(res, fits.HDUList):
for i, hdu in enumerate(res):
hdu.data *= (psf_sum[i] / hdu.data.sum())
elif nfield==1:
res *= (psf_sum[0] / res.sum())
else:
for i, data in enumerate(res):
data *= (psf_sum[i] / data.sum())
return res
| 11,320
|
def main():
"""main function
"""
if " !!!" in sys.argv[1]:
print("Result: p=0.9 --> WUT")
| 11,321
|
def re_list(request):
""" Returns the available relation tasks for a specific user
Accessed through a JSON API endpoint
"""
from .serializers import DocumentRelationSerializer
cmd_str = ""
with open('mark2cure/api/commands/get-relations.sql', 'r') as f:
cmd_str = f.read()
# Start the DB Connection
c = connection.cursor()
c.execute('SET @user_work_max = {rel_work_size};'.format(rel_work_size=20))
c.execute('SET @k_max = {completions};'.format(completions=settings.ENTITY_RECOGNITION_K))
c.execute('SET @user_id = {user_id};'.format(user_id=request.user.pk))
c.execute('SET @rel_ann_content_type_id = 56;')
c.execute(cmd_str)
queryset = [{'id': x[0],
'document_id': x[1],
'title': x[2],
'total_document_relationships': x[3],
'user_document_relationships': x[4],
'community_answered': x[5],
'community_completed': x[6],
'community_progress': x[7],
'user_completed': x[8],
'user_progress': x[9],
'user_answered': x[10],
'user_view_completed': x[11]} for x in c.fetchall()]
# Close the connection
c.close()
    # Prevent documents from being shown that have since been completed
    # by the community before the request.user could complete everything
    completed_items = [item for item in queryset if int(item['user_document_relationships']) <= 0]
    queryset = [item for item in queryset if int(item['user_document_relationships']) > 0]
    for item in completed_items:
        document = get_object_or_404(Document, pk=item['id'])
        first_section = document.section_set.first()
        view = View.objects.filter(task_type='re', section=first_section, user=request.user).last()
        # (TODO) Why is there no View on these sections?
        if view:
            Point.objects.create(user=request.user,
                                 amount=settings.RELATION_DOC_POINTS,
                                 content_type=ContentType.objects.get_for_model(view),
                                 object_id=view.id)
            view.completed = True
            view.save()
serializer = DocumentRelationSerializer(queryset, many=True)
return Response(serializer.data)
| 11,322
|
def run_tc(discover):
"""
    Generate a test report using the BeautifulReport module.
    :param discover: the test suite to run
:return:
"""
if not os.path.exists(path_conf.REPORT_PATH):
os.makedirs(path_conf.REPORT_PATH)
fileName = path_conf.PROJECT_NAME + '_' + time.strftime('%Y-%m-%d %H_%M_%S') + '.html'
try:
result = BeautifulReport(discover)
        # The four available themes: theme_default, theme_cyan, theme_candy, theme_memories
result.report(filename=fileName,
description=path_conf.PROJECT_NAME + '_testreport',
report_dir=path_conf.REPORT_PATH,
theme='theme_cyan')
except Exception as e:
log.exception('Failed to generate test report')
raise e
else:
log.info('Test report generated successfully [%s]' % fileName)
return fileName
| 11,323
|
def test_majority_label():
"""Verifies that we correctly select the most common element"""
def checkme(expected_val, values):
"""Utility"""
if not hasattr(expected_val, '__iter__'):
expected_val = {expected_val}
nose.tools.assert_in(majority_label(values), expected_val)
yield checkme, 0, [0] * 30 + [1] * 10 + [2] * 20
yield checkme, 1, [0] * 10 + [1] * 30 + [2] * 20
yield checkme, 2, [0] * 10 + [1] * 20 + [2] * 30
# Multiple most frequent values
yield checkme, {0, 2}, [0] * 30 + [1] * 20 + [2] * 30
yield checkme, {0, 1, 2}, [0] * 30 + [1] * 30 + [2] * 30
| 11,324
|
def maps_subcommand(args, api_config):
"""
Maps Subcommand
"""
print("Generating maps")
builder = MapBuilder(args.height, api_config)
builder.run()
| 11,325
|
def csv_to_blob_ref(csv_str, # type: str
blob_service, # type: BlockBlobService
blob_container, # type: str
blob_name, # type: str
blob_path_prefix=None, # type: str
charset=None # type: str
):
# type: (...) -> AzmlBlobTable
"""
Uploads the provided CSV to the selected Blob Storage service, and returns a reference to the created blob in
case of success.
    :param csv_str: the CSV content to upload, as a string
:param blob_service: the BlockBlobService to use, defining the connection string
:param blob_container: the name of the blob storage container to use. This is the "root folder" in azure blob
storage wording.
:param blob_name: the "file name" of the blob, ending with .csv or not (in which case the .csv suffix will be
appended)
:param blob_path_prefix: an optional folder prefix that will be used to store your blob inside the container.
For example "path/to/my/"
:param charset:
    :return: a reference to the created blob
"""
# setup the charset used for file encoding
if charset is None:
charset = 'utf-8'
elif charset != 'utf-8':
print("Warning: blobs can be written in any charset but currently only utf-8 blobs may be read back into "
"DataFrames. We recommend setting charset to None or utf-8 ")
# validate inputs (the only one that is not validated below)
validate('csv_str', csv_str, instance_of=str)
# 1- first create the references in order to check all params are ok
blob_reference, blob_full_name = create_blob_ref(blob_service=blob_service, blob_container=blob_container,
blob_path_prefix=blob_path_prefix, blob_name=blob_name)
# -- push blob
blob_stream = BytesIO(csv_str.encode(encoding=charset))
# noinspection PyTypeChecker
blob_service.create_blob_from_stream(blob_container, blob_full_name, blob_stream,
                                         content_settings=ContentSettings(content_type='text/csv',
content_encoding=charset))
# (For old method with temporary files: see git history)
return blob_reference
| 11,326
|
def set_default_file_handler(logger, log_path, log_level=logging.DEBUG):
"""
:type logger: logging.Logger
:param log_path: str
    :param log_level: int
"""
logger.setLevel(log_level)
formatter = logging.Formatter(constants.log.DEFAULT_FORMAT)
fh = logging.FileHandler(filename=log_path, mode="w")
fh.setFormatter(formatter)
logger.addHandler(fh)
| 11,327
|
def col_rev_reduce(matrix, col, return_ops=False):
"""
Reduces a column into reduced echelon form by transforming all numbers above the pivot position into 0's
:param matrix: list of lists of equal length containing numbers
:param col: index of column
:param return_ops: performed operations are returned
:return: list of lists of equal length containing numbers
"""
ops = []
pivot_row = 0 # Defaults to top row
# Find pivot row of the column
for row in range(len(matrix)-1, -1, -1):
if matrix[row][col] != 0:
pivot_row = row
break
# Transform all numbers above the pivot to 0
if matrix[pivot_row][col] != 0 and matrix[pivot_row][col] != 1:
factor = 1 / matrix[pivot_row][col]
matrix = row_multiply(matrix, pivot_row, factor)
ops.append(['multiplication', pivot_row, factor])
if pivot_row != 0:
for row in range(pivot_row):
if matrix[row][col] != 0:
factor = matrix[row][col] / matrix[pivot_row][col]
matrix = row_subtract(matrix, pivot_row, row, factor)
ops.append(['subtract', pivot_row, row, factor])
if return_ops:
return matrix, ops
else:
return matrix
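# Note: col_rev_reduce depends on row_multiply and row_subtract, which are not shown in
# this snippet. Minimal sketches, assuming the semantics implied by the call sites
# (scale a row; subtract factor * source row from a target row):
def row_multiply(matrix, row, factor):
    matrix[row] = [value * factor for value in matrix[row]]
    return matrix

def row_subtract(matrix, source_row, target_row, factor):
    matrix[target_row] = [t - factor * s
                          for t, s in zip(matrix[target_row], matrix[source_row])]
    return matrix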
| 11,328
|
def calculate_correlations(tetra_z: Dict[str, Dict[str, float]]) -> pd.DataFrame:
"""Return dataframe of Pearson correlation coefficients.
:param tetra_z: dict, Z-scores, keyed by sequence ID
Calculates Pearson correlation coefficient from Z scores for each
tetranucleotide. This is done longhand here, which is fast enough,
but for robustness we might want to do something else... (TODO).
Note that we report a correlation by this method, rather than a
percentage identity.
"""
orgs = sorted(tetra_z.keys())
correlations = pd.DataFrame(index=orgs, columns=orgs, dtype=float).fillna(1.0)
for idx, org1 in enumerate(orgs[:-1]):
for org2 in orgs[idx + 1 :]:
if not sorted(tetra_z[org1].keys()) == sorted(tetra_z[org2].keys()):
raise AssertionError()
tets = sorted(tetra_z[org1].keys())
zscores = [
[tetra_z[org1][t] for t in tets],
[tetra_z[org2][t] for t in tets],
]
zmeans = [sum(zscore) / len(zscore) for zscore in zscores]
zdiffs = [
[z - zmeans[0] for z in zscores[0]],
[z - zmeans[1] for z in zscores[1]],
]
diffprods = sum(
[zdiffs[0][i] * zdiffs[1][i] for i in range(len(zdiffs[0]))]
)
zdiffs2 = [sum([z * z for z in zdiffs[0]]), sum([z * z for z in zdiffs[1]])]
correlations[org1][org2] = diffprods / math.sqrt(zdiffs2[0] * zdiffs2[1])
correlations[org2][org1] = correlations[org1][org2]
return correlations
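# Note: the docstring's TODO suggests replacing the longhand Pearson computation for
# robustness; an equivalent per-pair sketch (an assumption, not part of the original
# module) using NumPy:
import numpy as np

def pearson_from_zscores(z1, z2) -> float:
    # np.corrcoef returns the 2x2 correlation matrix; the off-diagonal entry is r.
    return float(np.corrcoef(np.asarray(z1, dtype=float), np.asarray(z2, dtype=float))[0, 1])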
| 11,329
|
def rowfuncbynumber(tup,othertable, number):
"""tup is the tuple of row labels for the current row. By default it is passed back unmodified.
You can supply your own rowfunc to transform it when the tables being merged do not have the
same structure or if you want to prevent the merging of certain rows. Note that the tuple
starts with "row" or "column", so the first normally visible element is tup[1].
othertable is True if the function was called while processing the "other" table
and False if processing the main table.
number is the row number of the table. If the join is just by position, you can use
this function to align the tables even if the labels are not unique
to use this function, specify ROWFUNCTION=SPSSINC_MERGE_TABLES.rowfuncbynumber"""
if debug:
print(("row:", (othertable and "other:" or "main:"), number, tup))
tup = (str(number),)
return tup
| 11,330
|
def test_build_url_filter_for_get_list_entries():
"""
Given:
Arguments To filter with.
When:
The function builds a URL filter from these arguments.
Then:
We check that the URL filter matches what the command asks for.
"""
from CiscoEmailSecurity import build_url_filter_for_get_list_entries
res = build_url_filter_for_get_list_entries({"list_type": "safelist", "view_by": "bla", "order_by": "bla"})
assert res == "?action=view&limit=20&offset=0&quarantineType=spam&orderDir=desc&viewBy=bla&orderBy=bla"
| 11,331
|
def populate_stations(path='/opt/code/vvs_data/HaltestellenVVS_simplified_utf8_stationID.csv'):
"""
parse simplified csv, add elements to database
"""
with open(path, 'r') as f:
reader = csv.reader(f, delimiter=',')
# skip first row
next(reader, None)
for row in reader:
obj, created = Station.objects.update_or_create(
station_id=row[0], defaults={
'name': row[1],
'full_name': row[2]
}
)
if created:
logging.info("Created station %s" % row[0])
else:
logging.info("Updated station %s" % row[0])
| 11,332
|
def test__get_features(first_fileGDB_path: str) -> None:
"""All fields from the 0th row of the 0th layer."""
data_source = Open(first_fileGDB_path)
layer = data_source.GetLayer()
features_generator = _get_features(layer)
*properties, geometry = next(features_generator)
shapely_object = loads(bytes(geometry))
assert tuple(properties) == (2, "C")
assert shapely_object == Point(1, 1)
| 11,333
|
def save_book_metadata() -> FlaskResponse:
"""
XHR request. Update the information about a book.
Raises:
404: if the user is not admin or the request is not POST.
"""
if not is_admin():
abort(404) # pragma: no cover
if not (
all(
x in request.form
for x in [
"input-book-id", # hidden field
"input-book-url", # hidden field
"input-book-filename", # hidden field, this is the current file before update
"input-title",
"input-description",
"input-period",
"input-status",
"input-status",
"input-crowdfunding-goal",
"input-access-level",
]
)
and (all(x in request.files for x in ["input-book-file", "input-thumbnail"]))
):
return basic_json(False, "Bad request, missing data!")
book_id = int(request.form["input-book-id"])
book_url = secure_filename(escape(request.form["input-book-url"]))
book_filename = secure_filename(escape(request.form["input-book-filename"]))
title = escape(request.form["input-title"].strip())
if not title:
return basic_json(False, "Missing title!")
description_md = escape(request.form["input-description"].strip())
if not description_md:
return basic_json(False, "Missing description!")
description_html = markdown.Markdown(
extensions=current_app.config["MD_EXT"]
).convert(description_md)
period = escape(request.form["input-period"].strip())
status = escape(request.form["input-status"]).lower()
if status not in ["released", "crowdfunding"]:
status = "draft" # reset unknown or empty status to 'draft'
try:
crowdfunding_goal = float(request.form["input-crowdfunding-goal"])
except (ValueError, TypeError):
crowdfunding_goal = 0
if status == "crowdfunding" and crowdfunding_goal <= 0:
return basic_json(False, "Crowdfunding goal required or change status!")
access_level = int(request.form["input-access-level"])
if not check_access_level_range(access_level):
return basic_json(False, "Invalid access level!")
book_dir_path = os.path.join(current_app.config["SHELF_FOLDER"], book_url)
file = request.files["input-book-file"]
new_book = file.filename != ""
if new_book:
if not file_extension(file.filename, "book"):
return basic_json(False, "Wrong book file extension!")
new_book_filename = secure_filename(
file.filename.rsplit("/", 1)[1] if "/" in file.filename else file.filename
)
if book_filename != new_book_filename: # replace the old file with the new one
old_path_file = os.path.join(book_dir_path, book_filename)
if os.path.isfile(old_path_file):
os.remove(old_path_file)
book_filename = new_book_filename
file.save(os.path.join(book_dir_path, new_book_filename))
thumbnail = request.files["input-thumbnail"]
new_thumbnail = thumbnail.filename != ""
if new_thumbnail:
thumbnail_ext = file_extension(thumbnail.filename, "any")
if thumbnail_ext != "jpg":
return basic_json(
False, "Thumbnail extension must be jpg!"
) # changes had been done if new_book anyway!
thumbnail_path = os.path.join(book_dir_path, "card.jpg")
thumbnail.save(thumbnail_path)
preview_card = preview_image(thumbnail_path).decode()
cursor = mysql.cursor()
cursor.execute(
"""UPDATE shelf SET file_name='{file_name}', title='{title}',
period='{period}', description_md='{description_md}',
description_html='{description_html}', access_level={access_level},
date_modified=CURRENT_TIMESTAMP,status='{status}',
crowdfunding_goal={crowdfunding_goal},
preview_card={preview_card}
WHERE book_id={book_id}""".format(
file_name=book_filename,
title=title,
period=period,
description_md=description_md,
description_html=description_html,
access_level=access_level,
status=status,
crowdfunding_goal=crowdfunding_goal if crowdfunding_goal > 0 else "NULL",
book_id=book_id,
preview_card="'" + preview_card + "'" if new_thumbnail else "preview_card",
)
)
mysql.commit()
return basic_json(True, "Changes saved!")
| 11,334
|
def query_category_members(category, language='en', limit=100):
"""
    action=query, list=categorymembers
Returns all the members of a category up to the specified limit
"""
url = api_url % (language)
query_args = {
'action': 'query',
'list': 'categorymembers',
'cmtitle': category,
'format': 'json',
'cmlimit': min(limit, 500)
}
members = []
while True:
json = _run_query(query_args, language)
for member in json['query']['categorymembers']:
members.append(member['title'])
if 'query-continue' in json and len(members) <= limit:
continue_item = json['query-continue']['categorymembers']['cmcontinue']
query_args['cmcontinue'] = continue_item
else:
break
return members[0:limit]
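# Note: api_url and _run_query are defined elsewhere in the module. A minimal sketch of
# what they might look like (the URL pattern and behaviour are assumptions):
import requests

api_url = "https://%s.wikipedia.org/w/api.php"

def _run_query(query_args, language):
    response = requests.get(api_url % (language), params=query_args)
    response.raise_for_status()
    return response.json()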
| 11,335
|
def _tokenize_text(text: str, language: str) -> List[str]:
"""Splits text into individual words using the correct method for the given language.
Args:
text: Text to be split.
language: The configured language code.
Returns:
The text tokenized into a list of words.
"""
if language == constants.LANGUAGE_CODE_JA:
return _split_words_in_japanese(text)
else:
return text.split()
| 11,336
|
def ml64_sort_order(c):
"""
Sort function for measure contents.
Items are sorted by time and then, for equal times, in this order:
    * Measure marker
    * Patch (program) change
    * Tempo
    * Notes and rests
"""
if isinstance(c, chirp.Note):
return (c.start_time, 10)
elif isinstance(c, Rest):
return (c.start_time, 10)
elif isinstance(c, MeasureMarker):
return (c.start_time, 1)
elif isinstance(c, TempoEvent):
return (c.start_time, 3)
elif isinstance(c, ProgramEvent):
return (c.start_time, 2)
else:
return (c.start_time, 5)
| 11,337
|
def binary_logistic_loss_grad(linear_o, y):
"""Derivative of the binary_logistic_loss w.r.t. the linear output"""
# Sometimes denom overflows, but it's OK, since if it's very large, it would
# be set to INF and the output correctly takes the value of 0.
# TODO: Fix overflow warnings.
denom = 1 + np.exp(y.flatten() * linear_o.flatten())
return -y / (denom * linear_o.size)
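# Note: for reference, a binary_logistic_loss sketch consistent with the gradient above
# (mean of log(1 + exp(-y * f)) over all outputs); this is an assumption, not code from
# the original module:
import numpy as np

def binary_logistic_loss(linear_o, y):
    margins = y.flatten() * linear_o.flatten()
    # np.logaddexp(0, -m) == log(1 + exp(-m)), computed without the overflow
    # mentioned in the TODO above.
    return np.mean(np.logaddexp(0.0, -margins))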
| 11,338
|
def electron_mass_MeVc2():
"""The rest mass of the electron in MeV/c**2
https://en.wikipedia.org/wiki/Electron
"""
return 0.5109989461
| 11,339
|
def test_workflow_docker_build_error():
"""
This is a test for what happens when the docker build fails.
"""
flexmock(DockerfileParser, content='df_content')
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder(failed=True)
flexmock(InsideBuilder).new_instances(fake_builder)
watch_pre = Watcher()
watch_buildstep = Watcher(raise_exc=Exception())
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
workflow = DockerBuildWorkflow(source=MOCK_SOURCE,
prebuild_plugins=[{'name': 'pre_watched',
'args': {
'watcher': watch_pre
}}],
buildstep_plugins=[{'name': 'buildstep_watched',
'args': {
'watcher': watch_buildstep,
}}],
prepublish_plugins=[{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}],
postbuild_plugins=[{'name': 'post_watched',
'args': {
'watcher': watch_post
}}],
exit_plugins=[{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
with pytest.raises(Exception):
workflow.build_docker_image()
# No subsequent build phases should have run except 'exit'
assert watch_pre.was_called()
assert watch_buildstep.was_called()
assert not watch_prepub.was_called()
assert not watch_post.was_called()
assert watch_exit.was_called()
| 11,340
|
def test_pipette_settings_update_none(mutable_config: str):
"""Should accept none values."""
s = settings.PipetteSettingsUpdate(fields={mutable_config: None})
assert s.setting_fields[mutable_config] is None
| 11,341
|
def opensearch_plugin(request):
"""Render an OpenSearch Plugin."""
host = "%s://%s" % ("https" if request.is_secure() else "http", request.get_host())
# Use `render_to_response` here instead of `render` because `render`
# includes the request in the context of the response. Requests
    # often include the session, which can include picklable things.
    # `render_to_response` doesn't include the request in the context.
return render_to_response(
"search/plugin.html",
{"host": host, "locale": request.LANGUAGE_CODE},
content_type="application/opensearchdescription+xml",
)
| 11,342
|
def serialize_time(output_value: datetime.time) -> str:
""" Serializes an internal value to include in a response. """
return output_value.isoformat()
| 11,343
|
def find_posix_python(version):
"""Find the nearest version of python and return its path."""
from . import persist
if version:
# Try the exact requested version first
path = find_executable('python' + version)
persist.debug('find_posix_python: python{} => {}'.format(version, path))
# If that fails, try the major version
if not path:
path = find_executable('python' + version[0])
persist.debug('find_posix_python: python{} => {}'.format(version[0], path))
# If the major version failed, see if the default is available
if not path:
path = find_executable('python')
persist.debug('find_posix_python: python =>', path)
else:
path = find_executable('python')
persist.debug('find_posix_python: python =>', path)
return path
| 11,344
|
def asg_sync_cmdb():
"""最小和所需都为0的ASG数据同步"""
asg_list = get_asg_list()
with DBContext('w') as session:
        session.query(Asg).delete(synchronize_session=False)  # Clear all existing records from the table
for asg in asg_list:
asg_name = asg.get("AutoScalingGroupName", "")
asg_arn = asg.get("AutoScalingGroupARN", "")
launch_template = str(asg.get("LaunchTemplate", ""))
min_size = asg.get("MinSize", "")
max_size = asg.get("MaxSize", "")
desirced_capacity = asg.get("DesiredCapacity", "")
availability_zones = asg.get("AvailabilityZones", "")[0]
health_check_type = asg.get("HealthCheckType", "")
asg_created_time = str(asg.get("CreatedTime", ""))
new_asg = Asg(
asg_name=asg_name, asg_arn=asg_arn, launch_template=launch_template,
min_size=min_size, max_size=max_size, desirced_capacity=desirced_capacity,
availability_zones=availability_zones, health_check_type=health_check_type,
asg_created_time=asg_created_time)
session.add(new_asg)
session.commit()
| 11,345
|
def decode_binary(state_int):
"""
Decode binary representation into the list view
:param state_int: integer representing the field
:return: list of GAME_COLS lists
"""
assert isinstance(state_int, int)
bits = int_to_bits(state_int, bits=GAME_COLS*GAME_ROWS + GAME_COLS*BITS_IN_LEN)
res = []
len_bits = bits[GAME_COLS*GAME_ROWS:]
for col in range(GAME_COLS):
vals = bits[col*GAME_ROWS:(col+1)*GAME_ROWS]
lens = bits_to_int(len_bits[col*BITS_IN_LEN:(col+1)*BITS_IN_LEN])
if lens > 0:
vals = vals[:-lens]
res.append(vals)
return res
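# Note: int_to_bits, bits_to_int and the GAME_COLS/GAME_ROWS/BITS_IN_LEN constants are
# defined elsewhere; minimal sketches of the two helpers (the most-significant-bit-first
# order is an assumption) could be:
def int_to_bits(num, bits):
    # Most significant bit first.
    return [(num >> shift) & 1 for shift in range(bits - 1, -1, -1)]

def bits_to_int(bits):
    res = 0
    for bit in bits:
        res = (res << 1) | bit
    return res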
| 11,346
|
def clean_features(vgsales):
"""
This function cleans up some of the dataset's features. The dataset is
quite messy as many values are missing from both categorical and numerical
features. Many of these features are difficult to impute in a reasonable
manner.
<class 'pandas.core.frame.DataFrame'>
Index: 16719 entries, Wii Sports to Winning Post 8 2016
Data columns (total 9 columns):
Platform 16719 non-null category
Release 16450 non-null Int64
Genre 16717 non-null category
Publisher 16665 non-null category
Sales 16719 non-null float64
Metacritic 8137 non-null float64
Metacritic_N 8137 non-null Int64
Developer 10096 non-null category
ESRB 9950 non-null category
dtypes: Int64(2), category(5), float64(2)
memory usage: 1.5+ MB
Some of the hardest features to impute (genre or platform, for example)
don't have many nulls. Others, like the review averages, can be imputed.
    :param vgsales: A DataFrame loaded from a Video_Games_Sales_as_at_22_Dec_2016.csv
        compatible dataset.
"""
# A few of the release years are set to 2020 or other years past 2016.
# Just setting them to 2016 here. They're not a lot of them anyway.
    vgsales.loc[vgsales.Release > 2016, "Release"] = 2016
# =============================================================================
# https://en.wikipedia.org/wiki/Entertainment_Software_Rating_Board
#
# The ESRB feature will be converted to an ordinal variable for machine
# learning during preprocessing later. Thus, we organize them here and
# add an NA for missing values.
# =============================================================================
esrb_ordinal = ["NA", "RP", "EC", "E", "E10+", "T", "M", "AO"]
    vgsales["ESRB"] = vgsales["ESRB"].cat.set_categories(esrb_ordinal, ordered=True)
return vgsales
| 11,347
|
def get_object(proposition):
"""[75]
Returns the object of a given proposition
"""
return proposition[2][0]
| 11,348
|
def __verify_hmac(data: bytes, ohmac: bytes, key: bytes) -> bool:
"""
This function verifies that a provided HMAC matches a computed HMAC for
the data given a key.
Args:
data: the data to HMAC and verify
ohmac: the original HMAC, normally appended to the data
key: the key to HMAC with for verification
Returns:
a boolean value denoting whether or not the HMAC's match
"""
return compare_digest(ohmac, hmac(key, data, HMAC_HS).digest())
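# Note: a complementary signing sketch under the same assumptions (the `hmac` constructor
# and `HMAC_HS` hash used above); not part of the original module:
def __sign(data: bytes, key: bytes) -> bytes:
    """Return the data with its HMAC digest appended, the form __verify_hmac describes."""
    return data + hmac(key, data, HMAC_HS).digest()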
| 11,349
|
def create_block_option_from_template(text: str, value: str):
"""Helper function which generates the option block for modals / views"""
return {"text": {"type": "plain_text", "text": str(text), "emoji": True}, "value": str(value)}
| 11,350
|
def get_token_payload(token: str) -> Any:
"""Extract the payload from the token.
Args:
token (str):
A JWT token containing the session_id and other data.
Returns:
dict
"""
decoded = json.loads(_base64_decode(token.split('.')[0]))
del decoded['session_id']
return decoded
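# Note: _base64_decode is referenced above but not shown. A minimal sketch, assuming a
# URL-safe base64 segment with the padding stripped (as in JWT-style tokens):
import base64

def _base64_decode(encoded: str) -> bytes:
    padded = encoded + "=" * (-len(encoded) % 4)
    return base64.urlsafe_b64decode(padded)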
| 11,351
|
def create_mock_target(number_of_nodes, number_of_classes):
"""
Creating a mock target vector.
"""
return torch.LongTensor([np.random.randint(0, number_of_classes-1) for node in range(number_of_nodes)])
| 11,352
|
def _i2045(node: ast.ImportFrom) -> Iterable[Tuple[int, int, str, type]]:
"""
The from syntax should not be used.
"""
yield _error_tuple(2045, node)
| 11,353
|
def initFindAndFit(parameters):
"""
Initialize and return a SplinerFISTAFinderFitter object.
"""
# Create spline object.
spline_fn = splineToPSF.loadSpline(parameters.getAttr("spline"))
# Create peak finder.
finder = SplinerFISTAPeakFinder(parameters = parameters,
psf_object = spline_fn)
# Create cubicFitC.CSplineFit object.
mfitter = findPeaksStd.initFitter(finder, parameters, spline_fn)
# Create peak fitter.
fitter = fitting.PeakFitterArbitraryPSF(mfitter = mfitter,
parameters = parameters)
# Specify which properties we want from the analysis.
properties = ["background", "error", "height", "sum", "x", "y", "z"]
return fitting.PeakFinderFitter(peak_finder = finder,
peak_fitter = fitter,
properties = properties)
| 11,354
|
def UpdateDatabase(asset, images, status):
"""Update the database entries of the given asset with the given data."""
return {'asset': asset}
| 11,355
|
def get_max_word_length(days: dict, keys: list) -> int:
"""
    Finds the length of the longest word.
"""
max_word_len = 0
for key in keys:
if days.get(key):
for _, data in days.get(key).items():
value = data.split(" ")
for word in value:
if len(word) > max_word_len:
max_word_len = len(word)
return int(max_word_len)
| 11,356
|
def rateCBuf(co2: float, par: float, params: dict,
rates: dict, states: dict) -> float:
"""
Rate of increase of carbohydrates in the buffer
During the light period, carbohydrates produced by
photosynthesis are stored in the buffer and, whenever
carbohydrates are available in the buffer, carbohydrates flow
to the plant organs. This carbohydrate flow stops when the
buffer approaches its lower limit. When the buffer approaches
its upper limit, further carbohydrates cannot be stored and
photosynthesis will be inhibited.
Parameters
----------
co2 : float
Carbon dioxide concentration on air [μmol {CO2} mol-1 {air}]
par : float
Photosynthetic active radiation [μmol {photons} m-2 s-1]
params : dict
Parameters saved as model constants
rates : dict
Auxiliary variable including rates and
flows for the different fruit development stages
states : dict
State variables of the model
Returns
-------
float
Rate of accumulation of carbohydrates in the buffer [mg m-2 s-1]
"""
# These rates relate to the carbs available in the buffer by the maximum
# value available for the buffer. So in theory even if all of them
# are maximum, they would be compatible. However, the buffer is not always
# in the maximum. So they could reach their potential and demand more
# carbs than are available in the buffer.
# If there are not enough, there is the inhibition phenomena, but right
# now they don't seem compatible, as there is growth without
# enough carbs because of the different treatment of the first fruit
# stage.
rates["MCBufLeaf"] = mcBufOrg(organ="leaf", params=params, states=states)
rates["MCBufFruit"] = mcBufOrg(organ="fruit", params=params, states=states)
rates["MCBufStem"] = mcBufOrg(organ="stem", params=params, states=states)
co2_st = co2Stom(co2=co2, params=params)
# Photosynthesis Rate
mcAirBuf_ = mcAirBuf(co2=co2_st, par=par, params=params, states=states)
# Growth respiration
mcBufAir_ = mcBufAir(params=params, states=states)
cBuf_ = (mcAirBuf_ - rates["MCBufLeaf"] - rates["MCBufFruit"] - rates["MCBufStem"] -
mcBufAir_)
return cBuf_
| 11,357
|
def get_config_tag(config):
"""Get configuration tag.
Whenever configuration changes making the intermediate representation
incompatible the tag value will change as well.
"""
# Configuration attributes that affect representation value
config_attributes = dict(frame_sampling=config.proc.frame_sampling)
sha256 = hashlib.sha256()
sha256.update(json.dumps(config_attributes).encode("utf-8"))
return sha256.hexdigest()[:40]
| 11,358
|
def ctf_to_pickle(trace_directory: str, target: Pickler) -> int:
"""
Load CTF trace, convert events, and dump to a pickle file.
:param trace_directory: the trace directory
:param target: the target file to write to
:return: the number of events written
"""
ctf_events = get_trace_ctf_events(trace_directory)
count = 0
count_written = 0
for event in ctf_events:
count += 1
pod = event_to_dict(event)
target.dump(pod)
count_written += 1
return count_written
| 11,359
|
def list_lines(lines):
"""Returns the list of trimmed lines.
@param lines Multi-line string
"""
return list(filter(None, (x.strip() for x in lines.splitlines())))
| 11,360
|
def test_can_log_in_returns_200(user, testapp):
"""Login successful (irrespective of casing)."""
# Goes to homepage.
res = testapp.get('/')
# Fills out login form.
username_with_different_casing = user.email.upper() # Default would be userN@example.com.
form = res.forms['loginForm']
form['username'] = username_with_different_casing
form['password'] = 'myPrecious'
# Submits.
res = form.submit().follow()
assert res.status_code == 200
| 11,361
|
def _histogram_2d_vectorized(
*args, bins=None, weights=None, density=False, right=False, block_size=None
):
"""Calculate the histogram independently on each row of a 2D array"""
N_inputs = len(args)
a0 = args[0]
    # consistency checks for input
for a, b in zip(args, bins):
assert a.ndim == 2
assert b.ndim == 1
assert a.shape == a0.shape
if weights is not None:
assert weights.shape == a0.shape
nrows, ncols = a0.shape
nbins = [len(b) for b in bins]
hist_shapes = [nb + 1 for nb in nbins]
# a marginally faster implementation would be to use searchsorted,
# like numpy histogram itself does
# https://github.com/numpy/numpy/blob/9c98662ee2f7daca3f9fae9d5144a9a8d3cabe8c/numpy/lib/histograms.py#L864-L882
# for now we stick with `digitize` because it's easy to understand how it works
# Add small increment to the last bin edge to make the final bin right-edge inclusive
# Note, this is the approach taken by sklearn, e.g.
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py#L592
# but a better approach would be to use something like _search_sorted_inclusive() in
# numpy histogram. This is an additional motivation for moving to searchsorted
bins = [np.concatenate((b[:-1], b[-1:] + 1e-8)) for b in bins]
    # the maximum possible value of digitize is nbins
    # for right=False:
    # - 0 corresponds to a < b[0]
    # - i corresponds to b[i-1] <= a < b[i]
    # - nbins corresponds to a >= b[-1]
each_bin_indices = [digitize(a, b) for a, b in zip(args, bins)]
# product of the bins gives the joint distribution
if N_inputs > 1:
bin_indices = ravel_multi_index(each_bin_indices, hist_shapes)
else:
bin_indices = each_bin_indices[0]
# total number of unique bin indices
N = reduce(lambda x, y: x * y, hist_shapes)
bin_counts = _dispatch_bincount(
bin_indices, weights, N, hist_shapes, block_size=block_size
)
# just throw out everything outside of the bins, as np.histogram does
# TODO: make this optional?
slices = (slice(None),) + (N_inputs * (slice(1, -1),))
bin_counts = bin_counts[slices]
return bin_counts
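# Worked sketch of the bin-index convention described in the comments above,
# using plain NumPy digitize (the `digitize` wrapper used here is assumed to
# match it for in-memory arrays): 0 means below the first edge, len(bins)
# means at or above the last edge.
import numpy as np
assert list(np.digitize([-1.0, 0.5, 2.0], bins=[0.0, 1.0, 2.0])) == [0, 1, 3]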
| 11,362
|
def carbon_offset_cost(kWh):
"""
Donation to Cool Earth (in USD) needed to offset carbon emssions.
"""
return KG_CO2_PER_KWH * USD_PER_KG_CO2 * kWh
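# Worked sketch: with assumed illustrative constants KG_CO2_PER_KWH = 0.4 and
# USD_PER_KG_CO2 = 0.01 (not necessarily the module's real values), offsetting
# 100 kWh would cost 0.4 * 0.01 * 100 = 0.40 USD.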
| 11,363
|
def run(
duration: int, runtime_mode: str, connection_mode: str
) -> List[Tuple[str, Union[int, float]]]:
"""Test memory usage."""
# pylint: disable=import-outside-toplevel,unused-import
# import manually due to some lazy imports in decision_maker
import aea.decision_maker.default # noqa: F401
resources = Resources()
if connection_mode not in CONNECTION_MODES:
raise ValueError(
f"bad connection mode {connection_mode}. valid is one of {list(CONNECTION_MODES.keys())}"
)
base_cls = CONNECTION_MODES[connection_mode]
conn_cls = type("conn_cls", (TestConnectionMixIn, base_cls), {})
connection = conn_cls.make() # type: ignore # pylint: disable=no-member
resources.add_connection(connection)
agent = make_agent(runtime_mode=runtime_mode, resources=resources)
agent.resources.add_skill(make_skill(agent, handlers={"test": TestHandler}))
t = Thread(target=agent.start, daemon=True)
t.start()
wait_for_condition(lambda: agent.is_running, timeout=5)
connection.enable()
time.sleep(duration)
connection.disable()
time.sleep(0.2) # possible race condition in stop?
agent.stop()
t.join(5)
latency = mean(
map(
lambda x: x[1] - x[0],
zip(
connection.sends,
connection.recvs,
),
)
)
total_amount = len(connection.recvs)
rate = total_amount / duration
return [
("envelopes received", len(connection.recvs)),
("envelopes sent", len(connection.sends)),
("latency(ms)", 10**6 * latency),
("rate(envelopes/second)", rate),
]
| 11,364
|
def tf_fermion_massmatrix(t_A3, t_potential, tc_masses_factor):
"""Computes the spin-1/2 mass matrix from the A3-tensor."""
# The extra factor 2.0 relative to https://arxiv.org/abs/1906.00207
# makes the fermion masses align with the way particle states are
# grouped into SUSY multiplets in appendix (B.2) of:
# https://arxiv.org/abs/1909.10969
return mu.tfc128(2.0) * tf.einsum(
'ij,ik->jk',
t_A3, tf.math.conj(t_A3)) * (
tc_masses_factor /
tf.cast(t_potential, tf.complex128))
| 11,365
|
def filter_nsa_catalog_to_approximate_sky_area(nsa, bricks, visualise=False):
"""
DECALS is only in a well-defined portion of sky (which depends on the data release version). Filter the NSA catalog
so that it only includes galaxies in that approximate area. This saves time matching later.
Args:
nsa (astropy.Table): NSA catalog of SDSS galaxies
bricks (astropy.Table): catalog of DECALS imaging bricks
visualise (bool): if True, plot and save sky footprint of NSA catalog
Returns:
(astropy.Table) NSA catalog filtered to galaxies within the approximate sky area imaged by DECALS
"""
if visualise:
fig, ((ul, ur), (ll, lr)) = plt.subplots(2, 2)
ul.hist(bricks['dec'])
ul.set_title('brick dec')
ur.hist(nsa['dec'])
ur.set_title('nsa dec')
ll.hist(bricks['ra'])
ll.set_title('brick ra')
lr.hist(nsa['ra'])
lr.set_title('nsa ra')
plt.tight_layout()
plt.savefig('nsa_catalog_sky_coverage.png')
brick_maxdec = max(bricks['dec2'])
brick_mindec = min(bricks['dec1'])
# ra spans 0 through 360, do not filter
declim = (nsa['dec'] >= brick_mindec) & (nsa['dec'] <= brick_maxdec) # approximately -25 to +30 degrees
nsa_in_decals_area = nsa[declim]
return nsa_in_decals_area
| 11,366
|
def apply_variants(variants: Dict[Union[str, List[str]], int], parameters: Optional[Dict[Any, Any]] = None, variant=DEFAULT_VARIANT_VARIANTS) -> Tuple[PetriNet, Marking, Marking]:
"""
Apply the chosen IM algorithm to a dictionary/list/set of variants obtaining a Petri net along with an initial and final marking
Parameters
-----------
variants
Dictionary/list/set of variants in the log
variant
Variant of the algorithm to apply, possible values:
- Variants.IMd
parameters
Parameters of the algorithm, including:
Parameters.ACTIVITY_KEY -> attribute of the log to use as activity name
(default concept:name)
Returns
-----------
net
Petri net
initial_marking
Initial marking
final_marking
Final marking
"""
return exec_utils.get_variant(variant).apply_variants(variants, parameters=parameters)
| 11,367
|
def test_skip_include_tags(
_create_ecr_repository, _docker_build, _docker_tag, _docker_push, ys3
):
"""Only includes the stage with the corresponding tag."""
with patch("builtins.open", mock_open(read_data=ys3)) as mock_file:
pipeline = process_image(
image_name="image0",
skip_tags=["tag0"],
include_tags=["tag1"],
build_args={},
build_options={"pipeline": True},
)
assert pipeline["image0"]["stage0"] == {"skipping-stage": "stage0"}
assert "skipping-stage" not in pipeline["image0"]["stage1"]
| 11,368
|
def test_unionfind_compression():
"""
Test path compression and the union by rank.
"""
# Test the ranking
elements = list(range(100))
u = UnionFind(elements)
for i in range(len(elements) - 1):
u.union(elements[i], elements[i + 1])
assert max(u._rank.values()) == 1
# Test path compression
parent_nodes = list(u._parent.values())
assert all(parent == parent_nodes[0] for parent in parent_nodes)
| 11,369
|
def query_update(request: HttpRequest, **kwargs: str) -> str:
"""Update the query string with new values."""
updated = request.GET.copy()
for key, value in kwargs.items():
updated[key] = value
return updated.urlencode()
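# Usage sketch (assumes a configured Django settings module, as in a test run;
# the path '/items/' is hypothetical): existing keys are overwritten and the
# rest of the query string is preserved.
from django.test import RequestFactory
_request = RequestFactory().get("/items/?page=1&sort=name")
assert set(query_update(_request, page="2").split("&")) == {"page=2", "sort=name"}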
| 11,370
|
def get_user_info(user_id):
""" Fetches User Info Based On User ID
:param user_id:
:return: user
"""
user = session.query(User).filter_by(id=user_id).one_or_none()
return user
| 11,371
|
def test_load_db_data_from_json_content(fxtr_setup_logger_environment):
"""Test Load Database Data - disallowed database table."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
initial_database_data_path = pathlib.Path(cfg.glob.setup.initial_database_data)
initial_database_data_path_directory = os.path.dirname(initial_database_data_path)
initial_database_data_path_file_name = os.path.basename(initial_database_data_path)
initial_database_data_path_file_name_test = "initial_database_data_content.json"
# copy test file
shutil.copy(
utils.get_full_name(pytest.helpers.get_test_inbox_directory_name(), initial_database_data_path_file_name_test),
utils.get_full_name(initial_database_data_path_directory, initial_database_data_path_file_name),
)
with pytest.raises(SystemExit) as expt:
cfg.glob.db_core = db.cls_db_core.DBCore(is_admin=True)
cfg.glob.db_core.create_database()
assert expt.type == SystemExit
assert expt.value.code == 1
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
| 11,372
|
def gatk_version(request) -> GATKVersion:
"""Given a version number, return a GATKVersion."""
return GATKVersion(request.param)
| 11,373
|
def load_objs(name_obj_dat, sim, obj_ids, auto_sleep=True):
"""
    - name_obj_dat: List[Tuple[str, List]], where each inner list holds the
      transformation (a 4x4 list of lists of floats) and an int representing
      the motion type
"""
static_obj_ids = []
for i, (name, obj_dat) in enumerate(name_obj_dat):
if len(obj_ids) == 0:
obj_id = add_obj(name, sim)
else:
obj_id = obj_ids[i]
trans = obj_dat[0]
obj_type = obj_dat[1]
use_trans = mn.Matrix4(trans)
sim.set_transformation(use_trans, obj_id)
sim.set_linear_velocity(mn.Vector3(0, 0, 0), obj_id)
sim.set_angular_velocity(mn.Vector3(0, 0, 0), obj_id)
sim.set_object_motion_type(MotionType(obj_type), obj_id)
static_obj_ids.append(obj_id)
if len(obj_ids) != 0:
return obj_ids
return static_obj_ids
| 11,374
|
def test_quickSort_with_list():
"""To test quickSort method with a list."""
s = Sort([3, 2, 7, 4, 6, 5])
s.quickSort()
assert s.lst == [2, 3, 4, 5, 6, 7]
| 11,375
|
def test_refresh_repositories_error(nexus_mock_client):
"""
    Ensure the method doesn't modify the existing repositories attribute when
the client request fails.
"""
nexus_mock_client._request.return_value.status_code = 400
nexus_mock_client.repositories._repositories_json = None
with pytest.raises(exception.NexusClientAPIError):
nexus_mock_client.repositories.refresh()
assert nexus_mock_client.repositories._repositories_json is None
| 11,376
|
def get_mod_metadata(module: Module):
"""
Get descriptions for produced dependencies.
"""
meta = {}
has_meta = hasattr(module, 'prod_meta')
for prod in module.produces:
prod = prod.replace('?', '').replace('!', '')
if not has_meta:
            meta[prod] = '<no description>'
continue
prod_meta = module.prod_meta.get(prod)
meta[prod] = prod_meta if prod_meta else '<no description>'
return meta
| 11,377
|
def register_module():
"""Registers this module in the registry."""
dashboard.dashboard.DashboardRegistry.add_analytics_section(
dashboard.analytics.QuestionScoreHandler)
global_handlers = []
for path, handler_class in mapreduce_main.create_handlers_map():
# The mapreduce and pipeline libraries are pretty casual about
# mixing up their UI support in with their functional paths.
# Here, we separate things and give them different prefixes
# so that the only-admin-access patterns we define in app.yaml
# can be reasonably clean.
if path.startswith('.*/pipeline'):
if 'pipeline/rpc/' in path or path == '.*/pipeline(/.+)':
path = path.replace('.*/pipeline', '/mapreduce/ui/pipeline')
else:
path = path.replace('.*/pipeline', '/mapreduce/worker/pipeline')
else:
if '_callback' in path:
path = path.replace('.*', '/mapreduce/worker', 1)
elif '/list_configs' in path:
# This needs mapreduce.yaml, which we don't distribute. Not
# having this prevents part of the mapreduce UI front page
# from loading, but we don't care, because we don't want
# people using the M/R front page to relaunch jobs anyhow.
continue
else:
path = path.replace('.*', '/mapreduce/ui', 1)
# The UI needs to be guarded by a config so that casual users aren't
# exposed to the internals, but advanced users can investigate issues.
if '/ui/' in path or path.endswith('/ui'):
if (hasattr(handler_class, 'dispatch') and
not hasattr(handler_class, 'real_dispatch')):
handler_class.real_dispatch = handler_class.dispatch
handler_class.dispatch = ui_access_wrapper
global_handlers.append((path, handler_class))
# Wrap worker handlers with check that request really is coming
# from task queue.
else:
if (hasattr(handler_class, 'dispatch') and
not hasattr(handler_class, 'real_dispatch')):
handler_class.real_dispatch = handler_class.dispatch
handler_class.dispatch = authorization_wrapper
global_handlers.append((path, handler_class))
# Tell map/reduce internals that this is now the base path to use.
mapreduce_parameters.config.BASE_PATH = '/mapreduce/worker'
global custom_module
custom_module = custom_modules.Module(
MODULE_NAME,
'Provides support for analysis jobs based on map/reduce',
global_handlers, [])
return custom_module
| 11,378
|
def theta_b(wlen, d, n=1):
"""return the Bragg angle, $\theta_{B}$, (deg) for a given wavelength
(\AA$^{-1}$) and d-spacing (\AA)"""
if not (d == 0):
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_thb = np.rad2deg(np.arcsin(((wlen * n) / (2 * d))))
return _thb
except Exception:
return 0
else:
return 0
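# Worked sketch: for Cu K-alpha radiation (wavelength ~1.5406 \AA, an assumed
# illustrative value) and the Si(111) d-spacing of ~3.1356 \AA, Bragg's law
# n*wlen = 2*d*sin(theta) gives theta of roughly 14.22 deg.
assert abs(theta_b(1.5406, 3.1356) - 14.22) < 0.01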
| 11,379
|
def replace_newlines(s, replacement=' / ', newlines=(u"\n", u"\r")):
"""
Used by the status message display on the buddy list to replace newline
characters.
"""
    # turn all carriage returns into newlines
for newline in newlines[1:]:
s = s.replace(newline, newlines[0])
# while there are pairs of newlines, turn them into one
while s.find(newlines[0] * 2) != -1:
        s = s.replace(newlines[0] * 2, newlines[0])
# replace newlines with the newline_replacement above
return s.strip().replace(newlines[0], replacement)
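# Usage sketch: runs of CR/LF collapse into a single " / " separator.
assert replace_newlines(u"out to lunch\r\n\r\nback at 2") == u"out to lunch / back at 2"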
| 11,380
|
def get_user_data(prs, client_id, client_secret):
"""Get user data from PR data."""
users = {}
for owner, repo, number, pr in prs:
username = pr.username
# Initialize the User if needed
if username not in users:
print(pr.user_url, file=sys.stderr)
payload = {
'client_id': client_id,
'client_secret': client_secret
}
resp = requests.get(pr.user_url, params=payload)
# Abort if the return is an error
out = resp.json()
if 'message' in out:
                pprint.pprint(out, stream=sys.stderr)
raise Exception(resp.text)
user = User(out)
users[username] = user
users[username].add_pr(pr)
return users
| 11,381
|
def unassign_images(project, image_names):
"""Removes assignment of given images for all assignees.With SDK,
the user can be assigned to a role in the project with the share_project
function.
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param image_names: list of image unassign
:type image_names: list of str
"""
if not image_names:
return
project, folder = get_project_and_folder_metadata(project)
folder_name = 'root'
if folder:
folder_name = folder['name']
    logs = _unassign_images(folder_name=folder_name, image_names=image_names,
                            project_id=project['id'], team_id=project['team_id'])
    for log in logs:
        logger.warning(log)
| 11,382
|
def default_bucket_name():
"""Obtain the default Google Storage bucket name for this application.
Returns:
A string that is the name of the default bucket.
"""
return files._default_gs_bucket_name()
| 11,383
|
def hold_out_validation(train_data, test_data, ratio):
"""
    :param train_data: full training data set
    :param test_data: test data set
    :param ratio: fraction of the data reserved for validation
    """
    num_validation_samples = round(len(train_data) * ratio)
    # Shuffle the training data to ensure randomness and prevent any
    # characteristic of the data from being concentrated in one region
    np.random.shuffle(train_data)
    # Define the training and validation data sets
    training_data = train_data[num_validation_samples:]
    validation_data = train_data[:num_validation_samples]
    # Train on the training split and evaluate on the validation split
    model = get_model()
    model.train(training_data)
    validation_score = model.evaluate(validation_data)
    # Train on all training data, then evaluate on the test set
    model = get_model()
    model.train(train_data)
    test_score = model.evaluate(test_data)
| 11,384
|
def LineGaussSeidel_i(Uo, Beta):
"""Return the numerical solution of dependent variable in the model eq.
This routine uses the Line-Gauss Seidel method along constant i
direction (parallel to y-axis)
to obtain the solution of the Poisson's equation.
Call signature:
LineGaussSeidel_i(Uo, Beta)
Parameters
----------
Uo : 2D array
The dependent variable obtained from the previous iteration
level, n.
Beta : float
Coefficient in the Poissons finite difference approximation.
Beta = dX/dY
Returns
-------
U : 2D array
The dependent variable calculated at time level (n+1) within the
entire domain.
"""
shapeU = Uo.shape # Obtain Dimension
if len(shapeU) == 1:
raise DimensionError("1D", "POISSONS")
# Proceed to numerical solution
U = Uo.copy() # Initialize U
iMax, jMax = shapeU
B2 = Beta*Beta
A = [B2 for j in range(jMax)]
B = [-2.0*(1.0 + B2) for j in range(jMax)]
C = [B2 for j in range(jMax)]
D = [0 for j in range(jMax)]
UU = [0 for j in range(jMax)]
    # NOTE: in the Poisson's solver formulation, the dependent variable U is
    # used on the RHS of the discretized equation instead of Uo (as in other
    # models), because the formulation requires values of the dependent
    # variable from advanced time steps (k+1) at points (i-1,j) or (i,j-1).
for i in range(1, iMax-1):
UU[0] = U[i][0] # Convert U to 1-D array for Tridiagonal solver
UU[-1] = U[i][jMax-1]
for j in range(1, jMax-1):
D[j] = -(U[i+1][j] + U[i-1][j])
UU = TridiagonalSolver(jMax, A, B, C, D, UU)
for j in range(1, jMax-1):
U[i][j] = UU[j]
return U
| 11,385
|
def add_padding_to_grid(
component,
grid_size=127,
x=10,
y=10,
bottom_padding=5,
layers=[pp.LAYER.PADDING],
suffix="p",
):
""" returns component width a padding layer on each side
matches a minimum size
"""
c = pp.Component(name=f"{component.name}_{suffix}")
c << component
c.ports = component.ports
if c.size_info.height < grid_size:
y_padding = grid_size - c.size_info.height
else:
n_grids = np.ceil(c.size_info.height / grid_size)
y_padding = n_grids * grid_size - c.size_info.height
if c.size_info.width < grid_size:
x_padding = grid_size - c.size_info.width
else:
n_grids = np.ceil(c.size_info.width / grid_size)
x_padding = n_grids * grid_size - c.size_info.width
x_padding -= x
y_padding -= y
points = [
[c.xmin - x_padding / 2, c.ymin - bottom_padding],
[c.xmax + x_padding / 2, c.ymin - bottom_padding],
[c.xmax + x_padding / 2, c.ymax + y_padding - bottom_padding],
[c.xmin - x_padding / 2, c.ymax + y_padding - bottom_padding],
]
for layer in layers:
c.add_polygon(points, layer=layer)
return c
| 11,386
|
def _find_role(oneandone_conn, role):
"""
Given a name, validates that the role exists
whether it is a proper ID or a name.
Returns the role if one was found, else None.
"""
for _role in oneandone_conn.list_roles(per_page=1000):
if role in (_role['id'], _role['name']):
return _role
| 11,387
|
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# (DONE) Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag],vgg_path)
graph = tf.get_default_graph()
w1 = graph.get_tensor_by_name(vgg_input_tensor_name)
# w1.set_shape((None, 160, 576, 3))
keep = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
if DEBUG_MODEL:
print ("VGG:")
print ("\tInput: ", w1.shape)
print ("\tKeep: ", keep.shape)
print ("\tLayer 3: ", layer3_out.shape)
print ("\tLayer 4: ", layer4_out.shape)
print ("\tLayer 7: ", layer7_out.shape)
return w1, keep, layer3_out, layer4_out, layer7_out
| 11,388
|
def created_median_mask(disparity_map, valid_depth_mask, rect=None):
"""生成掩模,使得矩形中不想要的区域的掩模值为0,想要的区域的掩模值为1"""
if rect is not None:
x, y, w, h = rect
disparity_map = disparity_map[y:y + h, x:x + w]
valid_depth_mask = valid_depth_mask[y:y + h, x:x + w]
    # Compute the median of the disparity map
median = np.median(disparity_map)
    # Treat a pixel as noise when its valid disparity differs from the median
    # by 12 or more; the threshold of 12 is an empirical value.
return np.where((valid_depth_mask == 0) | (abs(disparity_map - median) < 12), 1.0, 0.0)
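# Usage sketch with synthetic NumPy arrays: a valid pixel whose disparity is
# 40 away from the median (>= 12) is masked out with 0.0, the rest stay 1.0.
_disp = np.array([[50., 50., 90.], [50., 50., 50.]])
_valid = np.ones_like(_disp)
assert created_median_mask(_disp, _valid)[0, 2] == 0.0
assert created_median_mask(_disp, _valid)[0, 0] == 1.0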
| 11,389
|
def l2_loss(pred_traj, pred_traj_gt, mode='sum'):
"""
Input:
- pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
- pred_traj_gt: Tensor of shape (seq_len, batch, 2). Groud truth
predictions.
- mode: Can be one of sum, average, raw
Output:
- loss: l2 loss depending on mode
"""
seq_len, batch, _ = pred_traj.size()
loss = (pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2))**2
    if mode == 'sum':
        return torch.sum(loss)
    elif mode == 'average':
        # assumed normalization for the 'average' mode documented above:
        # mean over all loss elements
        return torch.sum(loss) / torch.numel(pred_traj_gt.data)
    elif mode == 'raw':
        return loss.sum(dim=2).sum(dim=1)
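# Shape sketch with synthetic tensors: mode='sum' returns a scalar while
# mode='raw' returns one value per sample in the batch.
_pred = torch.zeros(8, 4, 2)
_gt = torch.ones(8, 4, 2)
assert l2_loss(_pred, _gt, mode='raw').shape == torch.Size([4])
assert l2_loss(_pred, _gt, mode='sum').item() == 64.0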
| 11,390
|
def test_member_access():
"""Check .field functionality of namedtuple."""
t = Task('buy milk', 'brian')
assert t.summary == 'buy milk'
assert t.owner == 'brian'
assert (t.done, t.id) == (False, None)
| 11,391
|
def printt(*args):
"""printt(*args)
Same exact functionality as print *args, except output will be written to log
file as well as stdout. Similar to Unix command "tee", hence the extra t.
If the logfile has not been initialized, same as print.
"""
printed = ''
for s in args:
printed += str(s) + ' '
printed = printed[:-1]
    print(printed)
    if opts != {} and opts['log']:
        if logfile is not None:
logfile.write(printed + '\n')
logfile.flush()
| 11,392
|
def findpeer(port = None, os = None):
"""Args: port (defaults to any port)
Finds a socket, which is connected to the specified port.
Leaves socket in ESI."""
if os == 'linux':
code = """
findpeer:
push -1
push SYS_socketcall_getpeername
mov ebp, esp
pop ebx
pop esi
.loop:
push SYS_socketcall
pop eax
inc esi
lea ecx, [esp-32]
push 4
pushad
int 0x80
"""
        if port is None:
return code + """
test eax, eax
popad
pop edx
jnz .loop
"""
else:
return code + """
popad
pop edx
shr eax, 16
cmp ax, 0x%04x
jne .loop
""" % htons(int(port))
elif os == 'freebsd':
code = """
findpeer:
push -1
pop esi
push SYS_getpeername
pop eax
mov ebp, esp
pushad
.loop:
inc esi
pushad
int 0x80
"""
        if port is None:
return code + """
test eax, eax
popad
jnz .loop
"""
else:
return code + """
popad
cmp word [ebp+2], 0x%04x
jne .loop
""" % htons(int(port))
else:
bug('OS was neither linux nor freebsd')
| 11,393
|
def api_wowlight_version_check(version: str) -> bool:
"""
Checks incoming wow-lite wallet version, returns False when the version is too old and needs to be upgraded.
:param version:
:return: bool
"""
url = "https://raw.githubusercontent.com/wownero/wow-lite-wallet/master/src/renderer/components/Landing/LandingPage.vue"
try:
resp = requests.get(url, headers={"User-Agent": "Mozilla 5.0"})
resp.raise_for_status()
content = resp.content.decode()
    except Exception:
        return True  # default to true
# parse latest version
current = next(re.finditer(r"wowlite\?version=(\d+.\d+.\d+)", content), None)
if not current:
return False
return version == current.group(1)
| 11,394
|
def fruit_growth(jth: int, last_24_canopy_t):
"""
    Equation 9.38 (a Gompertz-type growth curve):
        fruit_growth_rate_j = POTENTIAL_FRUIT_DRY_WEIGHT
            * exp(-exp(-curve_steepness * (days_after_fruit_set - fruit_development_time)))
    Returns: fruit growth rate [mg {CH2O} fruit^-1 d^-1]
"""
fruit_development_rate = fruit_development(last_24_canopy_t)
Fruit_Growth_Period = 1/(fruit_development_rate*86400)
fruit_development_time = -93.4 + 548.0 * Fruit_Growth_Period
curve_steepness = 1/(2.44 + 403.0 * fruit_development_time)
days_after_fruit_set = ((jth-1)+0.5)*Fruit_Growth_Period/FRUIT_DEVELOPMENT_STAGES_NUM
return POTENTIAL_FRUIT_DRY_WEIGHT*math.exp(-math.exp(-curve_steepness*(days_after_fruit_set - fruit_development_time)))
| 11,395
|
def _pickled_cache_s(filepath: str) -> Callable[[Callable], Callable]:
"""Store the last result of the function call
in a pickled file (string version)
Args:
filepath (str): The path of the file to read/write
Returns:
Callable[[Callable], Callable]: function decorator.
The decorated function will also have an attribute
function 'forced', that calls the function forcing
cache overwriting"""
return _pickled_cache_m(lambda *args, **kwargs: filepath)
| 11,396
|
def home(request):
"""Index page view
:param request: HTTP request
:return: index page render
"""
today = datetime.date.today()
return render(request, 'taskbuster/index.html',
{'today': today, 'now': now()})
| 11,397
|
def get_trials_for_drug(
drug: Tuple[str, str], *, client: Neo4jClient
) -> Iterable[Node]:
"""Return the trials for the given drug.
Parameters
----------
client :
The Neo4j client.
drug :
The drug to query.
Returns
-------
:
The trials for the given drug.
"""
return client.get_targets(
drug,
relation="tested_in",
source_type="BioEntity",
target_type="ClinicalTrial",
)
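# Usage sketch (the client construction and the namespace/identifier pair are
# hypothetical; both depend on the deployment):
# client = Neo4jClient()
# trials = list(get_trials_for_drug(("chebi", "CHEBI:28487"), client=client))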
| 11,398
|
def convert_features_to_dataset(all_features: List[InputFeaturesTC],
dataset_type: str = 'pytorch'
) -> TensorDataset:
"""Converts a list of features into a dataset.
Args:
all_features (:obj:`list` of :obj:`InputFeatureTC`): the list of
``InputFeatureTC`` originating from a list of ``InputExampleTC``
that will constitute the dataset.
        dataset_type (str): the type of dataset, currently only `pytorch` is
supported.
Returns:
A pytorch TensorDataset.
Raises:
ValueError if `dataset_type` is not supported.
"""
if dataset_type == 'pytorch':
all_input_ids = torch.tensor([x.input_ids for x in all_features],
dtype=torch.long)
all_attention_mask = torch.tensor([x.attention_mask
for x in all_features],
dtype=torch.long)
all_token_type_ids = torch.tensor([x.token_type_ids
for x in all_features],
dtype=torch.long)
all_label_ids = torch.tensor([x.label_ids
for x in all_features],
dtype=torch.long)
# Create Tensor dataset
dataset = TensorDataset(all_input_ids, all_attention_mask,
all_token_type_ids, all_label_ids)
else:
raise ValueError(f'Invalid return dataset type: {dataset_type}')
return dataset
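# Usage sketch: the returned TensorDataset plugs straight into a DataLoader
# (`features` here stands for a list of InputFeaturesTC built upstream).
# from torch.utils.data import DataLoader
# dataset = convert_features_to_dataset(features)
# loader = DataLoader(dataset, batch_size=32, shuffle=True)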
| 11,399
|