content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes | 36,100 |
def gen_api_url(endpoint):
    """Build the full Wger API URL for the given endpoint path."""
    # type: (str) -> str
    parts = (WGER["host_name"], WGER["api_ext"], endpoint)
    return "".join(parts)
def send_mail(receivers, subject, message, html=None):
    """
    Sends an email to the recipients. Must be called from an EngineThread. This method will not raise any exception
    if it fails to send a message to the recipients (failures are logged instead), but it does raise immediately
    for an invalid ``receivers`` argument.
    :param list(str) receivers: list of recipient email addresses
    :param str subject: subject of the email
    :param str message: plain text message
    :param str html: HTML message; derived from ``message`` when omitted
    :raises ValueError: if ``receivers`` is not a list
    """
    if not isinstance(receivers, list):
        raise ValueError('Invalid recipients. Must be a list of email addresses.')
    try:
        if not subject or not message:
            raise ValueError('subject and message body are required to send the email')
        # Sender identity and master endpoint are attached to the current
        # EngineThread by the engine (presumably — confirm EngineThread attrs).
        sender = threading.current_thread().username
        master = threading.current_thread().master
        if html is None:
            # Naive plain-text -> HTML fallback: newlines become <br/>.
            html = re.sub(r"\r?\n", "<br/>", message)
        request = messaging.Request(messaging.RequestType.EMAIL, messaging.WorkerEmailRequest(
            sender=sender, receivers=receivers, subject=subject, plain=message, html=html))
        messaging.push(master, request)
    except Exception as e:
        # BUG FIX: Exception.message does not exist on Python 3; use str(e).
        logger.error(str(e))
        logger.error(traceback.format_exc())
def create_inputs(
    data: np.ndarray,
    input_type_name: Literal[
        "data",
        "data_one_column",
        "one_in_one_out_constant",
        "one_in_one_out",
        "one_in_batch_out",
        "sequentions",
    ],
    input_type_params: dict,
    mode: Literal["validate", "in_sample"] = "validate",
    predicts: int = 7,
    repeatit: int = 10,
    predicted_column_index: int = 0,
) -> Inputs:
    """Build configured model inputs from time-series data.

    For ``"data"`` the array is returned as-is; for ``"data_one_column"`` only the
    predicted column is kept. Every other input type goes through
    ``make_sequences`` (see its documentation), producing X / y sequence inputs
    in the sklearn convention plus an ``x_input`` used for the actual
    prediction. Types containing ``constant`` insert a bias of 1 into every
    sample (handled inside ``make_sequences`` via ``input_type_params``).

    Args:
        data (np.ndarray): Time series data.
        input_type_name: One of 'data', 'data_one_column',
            'one_in_one_out_constant', 'one_in_one_out', 'one_in_batch_out',
            'sequentions'.
        input_type_params (dict): Params forwarded to ``make_sequences``,
            e.g. {'n_steps_in': ..., 'n_steps_out': ...,
            'default_other_columns_length': ..., 'constant': 0}.
        mode: 'validate' uses just the last input for testing (same as the
            predict input); 'in_sample' generates ``repeatit`` in-sample test
            inputs. Test output is produced earlier, in the test/train split.
            Defaults to 'validate'.
        predicts (int, optional): Number of predicted values. Defaults to 7.
        repeatit (int, optional): Number of generated test sequences.
            Defaults to 10.
        predicted_column_index (int, optional): Predicted column index.
            Defaults to 0.

    Returns:
        Inputs: (model_train_input, model_predict_input, model_test_inputs).
    """
    single_column_types = (
        "one_in_one_out_constant",
        "one_in_one_out",
        "one_in_batch_out",
    )

    # Derived inputs are slices of one source array, which keeps memory low.
    if input_type_name == "data":
        sequences = data
    elif input_type_name == "data_one_column":
        sequences = data[:, predicted_column_index]
    else:
        if input_type_name in single_column_types:
            source = data[:, predicted_column_index : predicted_column_index + 1]
        else:
            source = data
        sequences = make_sequences(
            source, predicts=predicts, repeatit=repeatit, **input_type_params
        )

    if isinstance(sequences, tuple):
        # make_sequences returned (X, y, x_input, test_inputs).
        train_input = (sequences[0], sequences[1])
        predict_input = sequences[2]
        test_inputs = [predict_input] if mode == "validate" else sequences[3]
    else:
        train_input = predict_input = sequences
        if mode == "validate":
            test_inputs = [predict_input]
        else:
            # Build repeatit progressively longer in-sample prefixes.
            test_inputs = []
            for i in range(repeatit):
                end = -predicts - repeatit + i + 1
                if sequences.ndim == 1:
                    test_inputs.append(sequences[:end])
                else:
                    test_inputs.append(sequences[:, :end])

    return Inputs(train_input, predict_input, test_inputs)
def predictions(
    dataset_path: str,
    predictions_path: str,
    output_path: str,
    label_scores: bool,
    calibration_factor: float
) -> None:
    """Analyze classification performance and write a report.
    Read in the dataset from DATASET_PATH, as well as predictions from
    PREDICTIONS_PATH, then analyze the predictions and write the
    results to OUTPUT_PATH. PREDICTIONS_PATH should be a JSON Lines file
    in which each object has "id", "label", and optionally
    "label_scores" keys, corresponding to the ID for the instance, the
    predicted label, and the predicted probabilities for each class.

    :param dataset_path: JSON Lines file whose rows carry "id",
        "gold_label" and "gold_annotations" keys.
    :param predictions_path: JSON Lines predictions file (see above).
    :param output_path: destination for the textual report.
    :param label_scores: whether predicted probabilities are available, which
        enables the probability-based metrics (cross entropy, calibration).
    :param calibration_factor: temperature for the calibrated cross entropy;
        when None it is estimated from the data.
    """
    # Step 1: Read in the dataset.
    with click.open_file(dataset_path, 'r') as dataset_file:
        id_to_dataset_label_and_label_scores = {}
        for ln in dataset_file:
            row = json.loads(ln)
            id_to_dataset_label_and_label_scores[row['id']] = (
                row['gold_label'],
                row['gold_annotations']
            )
    # Step 2: Read in the predictions.
    with click.open_file(predictions_path, 'r') as predictions_file:
        id_to_predicted_label_and_label_scores = {}
        for ln in predictions_file:
            row = json.loads(ln)
            id_to_predicted_label_and_label_scores[row['id']] = (
                row['label'],
                row.get('label_scores')
            )
    # Step 3: Extract the dataset and predictions on the relevant
    # subset. Only instances present in BOTH files are evaluated.
    dataset_labels_and_label_scores, predicted_labels_and_label_scores = (
        *zip(*[
            (
                id_to_dataset_label_and_label_scores[id_],
                id_to_predicted_label_and_label_scores[id_]
            )
            for id_ in id_to_predicted_label_and_label_scores.keys()
            if id_ in id_to_dataset_label_and_label_scores
        ]),
    )
    dataset_labels = [
        label
        for label, _ in dataset_labels_and_label_scores
    ]
    predicted_labels = [
        label
        for label, _ in predicted_labels_and_label_scores
    ]
    if label_scores:
        # Normalize raw per-class scores/counts into probabilities.
        dataset_label_scores = [
            [count / sum(scores) for count in scores]
            for _, scores in dataset_labels_and_label_scores
        ]
        predicted_label_scores = [
            [count / sum(scores) for count in scores]
            for _, scores in predicted_labels_and_label_scores
        ]
    # Step 4: Write the report.
    with click.open_file(output_path, 'w') as output_file:
        # create the metrics report
        # Probability-based metrics are skipped unless label_scores is set;
        # the lazy conditional below never touches predicted_label_scores
        # for metrics with needs_proba == False.
        metric_name_to_value = {
            name:
            metric(
                y_true=dataset_labels,
                y_pred=predicted_label_scores
                if scorer_kwargs['needs_proba']
                else predicted_labels)
            for name, metric, scorer_kwargs in METRICS.values()
            if label_scores or not scorer_kwargs['needs_proba']
        }
        if label_scores:
            if 'xentropy' in metric_name_to_value:
                raise ValueError(
                    'METRICS should not have a key named'
                    ' "xentropy". This issue is a bug in the library,'
                    ' please notify the maintainers.')
            metric_name_to_value['xentropy'] = utils.xentropy(
                y_true=dataset_label_scores,
                y_pred=predicted_label_scores)
            if 'calibrated_xentropy' in metric_name_to_value:
                raise ValueError(
                    'METRICS should not have a key named'
                    ' "calibrated_xentropy". This issue is a bug in the'
                    ' library, please notify the maintainers.')
            # Temperature-scale the logits; estimate the temperature from the
            # data unless one was supplied explicitly.
            logits = np.log(predicted_label_scores)
            temperature = (
                calibration_factor
                if calibration_factor is not None else
                utils.calibration_factor(
                    logits=logits,
                    targets=dataset_label_scores)
            )
            logger.info(f'Calibrating temperature: {temperature}')
            metric_name_to_value['calibrated_xentropy'] = utils.xentropy(
                y_true=dataset_label_scores,
                y_pred=softmax(logits / temperature, axis=-1))
        metric_name_width = 1 + max(
            len(name)
            for name in metric_name_to_value.keys())
        metrics_report = '\n'.join(
            f'{name: <{metric_name_width}}: {value:.4f}'
            for name, value in metric_name_to_value.items())
        if label_scores:
            calibration_factor_report = (
                f'\n\nCalibration Factor: {temperature}\n'
            )
        else:
            calibration_factor_report = ''
        # create the classification report
        # NOTE(review): labels are hard-coded to binary '0'/'1' — confirm
        # this tool is only used for binary tasks.
        label_names = ['0', '1']
        classification_report = metrics.classification_report(
            y_true=[str(label) for label in dataset_labels],
            y_pred=[str(label) for label in predicted_labels],
            labels=label_names)
        # create the confusion matrix
        confusion_matrix = utils.make_confusion_matrix_str(
            y_true=[str(label) for label in dataset_labels],
            y_pred=[str(label) for label in predicted_labels],
            labels=label_names)
        output_file.write(
            REPORT_TEMPLATE.format(
                metrics_report=metrics_report,
                calibration_factor_report=calibration_factor_report,
                classification_report=classification_report,
                confusion_matrix=confusion_matrix))
def CDLKICKINGBYLENGTH(df):
    """
    CDLKICKINGBYLENGTH
    Name: Kicking - bull/bear determined by the longer marubozu.
    Description: two-day candlestick pattern, similar to the Kicking pattern;
    the longer marubozu determines whether the signal is bullish or bearish.
    python API:
        integer = CDLKICKINGBYLENGTH(open, high, low, close)
    :param df: DataFrame with 'open', 'high', 'low' and 'close' columns.
    :return: TA-Lib integer series (pattern recognition output).
    """
    # Renamed local to avoid shadowing the builtin ``open``.
    open_ = df['open']
    high = df['high']
    low = df['low']
    close = df['close']
    return talib.CDLKICKINGBYLENGTH(open_, high, low, close)
def report_count_table_sort(s1, s2):
    """Comparator ordering report rows by class, species, size class and trophy.

    Compares columns 0, 2, 3 and 6 in priority order. Empty strings sort
    after non-empty values. Returns -1, 0 or 1 (classic ``cmp`` convention).
    """
    for column in (0, 2, 3, 6):
        left, right = s1[column], s2[column]
        if left == right:
            continue
        # Empty strings always sort last.
        if right == '':
            return -1
        if left == '':
            return 1
        return -1 if left < right else 1
    return 0
def test_none_stream_fails(capsys):
    """
    Verify that monitoring a ``None`` stream (flush should not be called
    directly) prints the configured warning to stdout without raising, and
    writes nothing to stderr.
    """
    streamer = Streamer(verbose=True)
    streamer.monitor(stream=None)
    captured_out, captured_err = capsys.readouterr()
    assert streamer._messages["stream_none"] in captured_out
    assert captured_err == ""
def serch_handler(msg):
    """
    Handle a music search request against the Ximalaya search API.
    :param msg: search keywords
    :return: response from ``get_url_response``
    """
    base_url = 'https://www.ximalaya.com/revision/search?kw={0}&page=1&spellchecker=false&condition=relation&rows=50&device=iPhone&core=track&fq=category_id%3A2&paidFilter=false'
    # URL-encode the keywords before substituting into the query string.
    return get_url_response(base_url.format(urllib.parse.quote(msg)))
def test_workflow_cancel(hl7_message):
    """Validates the cancel transition from every prior workflow state."""
    # Cancelling a fresh processor yields an "empty" result.
    edi = EdiProcessor(hl7_message)
    actual_result = edi.cancel()
    assert actual_result.metadata is None
    assert actual_result.metrics.analyzeTime == 0.0
    assert actual_result.inputMessage == hl7_message
    assert actual_result.operations == ["CANCEL"]
    assert len(actual_result.errors) == 0
    assert edi.operations == ["CANCEL"]
    # Cancel is valid after any prefix of the standard pipeline.
    pipeline = [
        ("analyze", "ANALYZE"),
        ("enrich", "ENRICH"),
        ("validate", "VALIDATE"),
        ("translate", "TRANSLATE"),
    ]
    for steps in range(1, len(pipeline) + 1):
        edi = EdiProcessor(hl7_message)
        for method_name, _ in pipeline[:steps]:
            getattr(edi, method_name)()
        edi.cancel()
        expected = [op for _, op in pipeline[:steps]] + ["CANCEL"]
        assert edi.operations == expected
def random(start: int, end: int) -> int:
    """Same as `random.randint(start, end)`: a random int N, start <= N <= end.

    NOTE(review): this function shadows the stdlib ``random`` module name in
    this module's namespace — callers importing * should beware.
    """
    return randint(start, end)
def zero_check(grid):
    """Return the number of entries equal to zero in a 2-d grid."""
    return sum(1 for row in grid for element in row if element == 0)
def get_channel_clips(channel: Channel) -> List[Clip]:
    """
    Uses a (blocking) HTTP request to retrieve Clip info for a specific channel.
    Pages through the GQL clip listing until the cursor runs out.
    :param channel: A Channel object.
    :returns: A list of Clip objects.
    """
    clips = []
    pagination = ""
    while True:
        query = gql.GET_CHANNEL_CLIPS_QUERY.format(
            channel_id=channel.login,
            after=pagination, first=100
        )
        resp = gql.gql_query(query=query).json()
        resp = resp["data"]["user"]["clips"]
        if not resp or not resp["edges"]:
            break
        pagination = resp["edges"][-1]["cursor"]
        for clip in resp["edges"]:
            c = clip["node"]
            b = c["broadcaster"]
            w = c["curator"]
            g = c["game"]
            v = c["video"]
            # The source video may have been deleted; fall back to a sentinel.
            v_id = "unknown"
            if v is not None:
                v_id = v["id"]
            # Default the clipper to the broadcaster when no curator is set.
            w_id = b["id"]
            w_login = b["login"]
            w_name = b["displayName"]
            if w is not None:
                w_id = w["id"]
                w_login = w["login"]
                w_name = w["displayName"]
            g_id = ""
            g_name = ""
            if g is not None:
                g_id = g["id"]
                g_name = g["name"]
            clips.append(
                Clip(
                    id=c["id"], slug=c["slug"], created_at=c["createdAt"],
                    user_id=b["id"], user_login=b["login"], user_name=b["displayName"],
                    clipper_id=w_id, clipper_login=w_login, clipper_name=w_name,
                    game_id=g_id, game_name=g_name, title=c["title"],
                    view_count=c["viewCount"], length=c["durationSeconds"],
                    offset=c["videoOffsetSeconds"] or 0, video_id=v_id
                )
            )
        # FIX: use a truthiness check instead of `== None` (PEP 8 E711);
        # an empty or missing cursor means there are no further pages.
        if not pagination:
            break
    return clips
def scrollLevelData(playerList, level, goldCount, time, levelCount, highScore):
    """Draw the level data to the screen as it scrolls off-screen.
    This includes the time remaining, gold remaining, players' lives, black hole sprites, gold sprites, text
    sprites, trap sprites, and the level image.
    After the level scrolls off-screen, the end-of-level data is drawn to the screen, which includes the current
    high score and level count.
    Args:
        playerList: A list of all PlayerSprite objects in the game.
        level: A Level object representing the current level being played.
        goldCount: An integer representing how many gold sprites are currently unrevealed (either invisible or
        face-down).
        time: An integer representing the time the players have remaining to complete the level.
        levelCount: An integer storing how many levels the player has currently played.
        highScore: An integer showing the current high score.
    """
    playerLivesData = []
    playerTextData = []
    playerScoreData = []
    scrollCount = 0
    # Presumably resets the level's sprite/image state before the scroll —
    # TODO confirm against Level.initialize.
    level.initialize()
    # Lives are capped at a single displayed digit (9).
    for num, player in enumerate(playerList):
        playerLivesData.append([c.FONT.render("<", False, playerFontColors[num]),
                                c.FONT.render("{}".format(min(player.lives, 9)), False, c.WHITE),
                                c.FONT.render(">", False, playerFontColors[num])])
    timeText = c.FONT.render("TIME,{:03d}".format(time), False, c.WHITE)
    # The location of where the players' lives are shown depends on the number of players.
    # If there are one or two players, player one's lives are displayed on the left and player two's on the right
    # If there are three or four players, player one's lives are displayed on the far left, player two's on the
    # mid-left, player three's on the mid-right, and player four's on the far right.
    if len(playerList) < 3:
        livesDataCoordinates = [(42, 16), (428, 16)]
    else:
        livesDataCoordinates = [(5, 16), (62, 16), (408, 16), (467, 16)]
    highScore = compareHighScore(playerList, highScore)
    highScoreText = c.FONT.render("TOP,{:06d}".format(highScore), False, c.WHITE)
    levelText = c.FONT.render("<<<<< CLU,CLU,LAND,,{:02d} >>>>>".format(levelCount % 100), False, c.WHITE)
    if len(playerList) < 3:
        playerTextCoordinates = [(162, 497), (162, 721)]
        scoreDataCoordinates = [(240, 545), (240, 769)]
    else:
        playerTextCoordinates = [(37, 496), (292, 496), (37, 721), (292, 721)]
        scoreDataCoordinates = [(55, 524), (309, 524), (55, 748), (309, 748)]
    for num, player in enumerate(playerList):
        playerTextData.append(c.FONT.render("< PLAYER {} >".format(num + 1), False, c.WHITE))
        playerScoreData.append(c.FONT.render("{:06d}PTS.".format(player.score % 1000000), False, c.WHITE))
        # Which displays are used in the end-of-level animation depends on the number of players.
        # If there are one or two players, the full-sized displays are used, with player one on top and player two
        # on the bottom.
        # If there are three or four players, the half-sized displays are used, with player one on the top-left, player
        # two on the top-right, player three on the bottom-left, and player four on the bottom-right.
        if len(playerList) < 3:
            FullDisplaySprite(num + 1)
        else:
            HalfDisplaySprite(num + 1)
    # Every sprite scrolls upwards 6 pixels per frame until it is all completely off-screen.
    # This takes 75 frames in total.
    while scrollCount < 448:
        c.SCREEN.fill(level.backgroundColor)
        checkQuitGame()
        checkPauseGameWithInput(playerList)
        c.SCREEN.blit(level.image, (0, 0 - scrollCount))
        GoldSprite.globalFrameCount += 1
        # Bonus levels blit the time count in a different location, and blit the word 'BONUS!' instead of the gold
        # count (Also in a different location from the standard gold count location).
        if isinstance(level, BonusLevel):
            bonusWordText = c.FONT.render("BONUS!", False, c.WHITE)
            c.SCREEN.blit(bonusWordText, (210, 210 - scrollCount))
            c.SCREEN.blit(timeText, (192, 242 - scrollCount))
        else:
            goldText = c.FONT.render("LAST,{:02d}".format(goldCount), False, c.WHITE)
            c.SCREEN.blit(goldText, (132, 16 - scrollCount))
            c.SCREEN.blit(timeText, (262, 16 - scrollCount))
        # The highScoreText, levelText, and another copy of timeText begin in the proper location off-screen so
        # that they scroll up to the proper location in the end-of-level screen.
        c.SCREEN.blit(highScoreText, (254, 674 - scrollCount))
        c.SCREEN.blit(timeText, (82, 674 - scrollCount))
        c.SCREEN.blit(levelText, (38, 642 - scrollCount))
        # The gold, black hole, and text sprites still update every frame as they scroll, so they continue being
        # animated, as the main playLevel function is not called during this loop.
        for hole in c.blackHoleGroup:
            hole.update()
            c.SCREEN.blit(hole.image, (hole.coordinates[0], hole.coordinates[1] - scrollCount))
        for gold in c.goldGroup:
            gold.update()
            c.SCREEN.blit(gold.image, (gold.coordinates[0], gold.coordinates[1] - scrollCount))
        for textSprite in c.textGroup:
            textSprite.update()
            c.SCREEN.blit(textSprite.image, (textSprite.coordinates[0], textSprite.coordinates[1] - scrollCount))
        for display in c.displayGroup:
            c.SCREEN.blit(display.image, (display.coordinates[0], display.coordinates[1] - scrollCount))
        for trap in c.rubberGroup:
            trap.update()
            c.SCREEN.blit(trap.image, (trap.coordinates[0], trap.coordinates[1] - scrollCount))
        # Because the < > symbols should be slightly closer to the number of lives than the standard text width would
        # allow, the life count is placed 13 pixels after the <, and the > is placed 15 frames after the life count.
        # NOTE(review): coords is reassigned with scrollCount already
        # subtracted, and the blit subtracts scrollCount again for the later
        # glyphs — looks like a double subtraction; confirm intended layout.
        for fontData, coords in zip(playerLivesData, livesDataCoordinates):
            for num, text in enumerate(fontData):
                c.SCREEN.blit(text, (coords[0], coords[1] - scrollCount))
                coords = (coords[0] + 13, coords[1] - scrollCount) if num == 0 else\
                    (coords[0] + 15, coords[1] - scrollCount)
        for text, coords in zip(playerTextData, playerTextCoordinates):
            c.SCREEN.blit(text, (coords[0], coords[1] - scrollCount))
        for text, coords in zip(playerScoreData, scoreDataCoordinates):
            c.SCREEN.blit(text, (coords[0], coords[1] - scrollCount))
        scrollCount += 6
        pg.display.update()
        c.CLOCK.tick(c.FPS)
def tensorflow2xp(tf_tensor: "tf.Tensor") -> ArrayXd:  # pragma: no cover
    """Convert a Tensorflow tensor to a numpy (CPU) or cupy (GPU) array."""
    assert_tensorflow_installed()
    device_type = "CPU"
    if tf_tensor.device is not None:
        # Device strings end with ":<type>:<num>", e.g. ".../device:GPU:0".
        _, device_type, _ = tf_tensor.device.rsplit(":", 2)
    if device_type != "CPU" and has_cupy:
        # Zero-copy hand-off to cupy via DLPack.
        dlpack_tensor = tensorflow.experimental.dlpack.to_dlpack(tf_tensor)
        return cupy.fromDlpack(dlpack_tensor)
    return tf_tensor.numpy()
def test_gradient_blur_ascend():
    """
    Feature: Test gradient blur.
    Description: Add gradient blur to an image.
    Expectation: success.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    image = np.random.random((32, 32, 3))
    height, width = image.shape[:2]
    # Blur radiates from a point one fifth of the way into the image.
    focal_point = (int(height / 5), int(width / 5))
    transform = GradientBlur(focal_point, 10, False)
    blurred = transform(image)
    print(blurred)
def test_execute_query_flow():
    """
    Test flow with Compose handle (non DB native handle).
    Compose Editor API (non native DB api)
    """
    interpreter = _get_interpreter(None)

    with patch("compose.editor.query.engines.SqlAlchemyInterface.execute") as execute:
        execute.return_value = {"guid": "abc"}
        data = Executor(username="test", interpreter=interpreter).execute(
            statement="SELECT 1, 2, 3"
        )
        assert data["guid"] == "abc"

    # Status polling: first still running, then ready (covers get_logs path).
    for expected_status in ("running", "ready"):
        with patch(
            "compose.editor.query.engines.SqlAlchemyInterface.check_status"
        ) as check_status:
            check_status.return_value = {"status": expected_status}
            data = Executor(username="test", interpreter=interpreter).check_status(
                query_id="abc"
            )
            assert data["status"] == expected_status

    with patch(
        "compose.editor.query.engines.SqlAlchemyInterface.fetch_result"
    ) as fetch_result:
        fetch_result.return_value = {"data": [[1]], "meta": [["C1"]]}
        data = Executor(username="test", interpreter=interpreter).fetch_result(
            query_id="abc"
        )
        assert data == {"data": [[1]], "meta": [["C1"]]}
def __str__(self, indent=0, func_role="obj"):
    """Render the parsed docstring back into a single text block."""
    # Sections are emitted in this fixed order; each helper returns a list
    # of lines (possibly empty).
    sections = [
        self._str_signature(),
        self._str_index() + [''],
        self._str_summary(),
        self._str_extended_summary(),
        self._str_param_list('Parameters'),
        self._str_options('Options'),
        self._str_returns(),
        self._str_param_list('Other Parameters'),
        self._str_param_list('Raises'),
        self._str_param_list('Warns'),
        self._str_warnings(),
        self._str_see_also(func_role),
        self._str_section('Notes'),
        self._str_references(),
        self._str_examples(),
        self._str_member_list('Attributes'),
        self._str_member_list('Methods'),
    ]
    lines = [line for section in sections for line in section]
    return '\n'.join(self._str_indent(lines, indent))
def validate_contests_2006_general():
    """Check that there are the correct number of Contest records for the 2006 general election"""
    # Delegates to the election-specific validator; presumably raises or
    # reports on a count mismatch — confirm Election2006General.validate_contests.
    Election2006General().validate_contests()
def cmd():
    """Return the Juju command to invoke.

    There are times when multiple versions of Juju may be installed or
    unpacked, requiring testing. Two environment variables select the
    correct binary so switching versions is easy:

    JUJU_BINARY: full path to a Juju binary that may not be in $PATH,
        e.g. /home/ubuntu/Downloads/juju/usr/lib/juju-2.1-rc1/bin/juju.
    JUJU_VERSION: version string used to build a binary name that is in
        $PATH, e.g. "2.1-rc1" yields "juju-2.1".

    If neither variable is set, the plain ``juju`` in $PATH is used.

    @returns string Juju command
    """
    binary = os.environ.get('JUJU_BINARY')
    if binary:
        return binary
    version = os.environ.get('JUJU_VERSION')
    if version:
        # Keep only the major.minor portion, dropping any "-rcN" suffix.
        major_minor = ".".join(version.split('-')[0].split('.')[:2])
        return 'juju-{}'.format(major_minor)
    return 'juju'
def worker_years_6_download(request):
    """
    Textile-category communication: run the conditional query, then download
    the matching result file.
    :param request: incoming HTTP request carrying the filter parameters
        (presumably a Django request object — confirm against the URL conf).
    :return: download response produced by ``download.worker_years_6_download``
    """
    return download.worker_years_6_download(request)
def generate_unique_id(mapper, connection, target):
    """Generates a firebase fancy unique Id for the instance being persisted.

    Args:
        mapper (obj): The current model class
        connection (obj): The current database connection
        target (obj): The current model instance

    Returns:
        None
    """
    target.id = PushID().next_id()
def set_autoscaler_location(autoscaler, is_regional, location):
    """ Sets location-dependent properties of the autoscaler. """
    location_key = 'region' if is_regional else 'zone'
    autoscaler['type'] = REGIONAL_LOCAL_AUTOSCALER_TYPES[is_regional]
    autoscaler['properties'][location_key] = location
    # Expose the resolved location as an output referencing the resource.
    return {
        'name': location_key,
        'value': '$(ref.{}.{})'.format(autoscaler['name'], location_key),
    }
def test_check_if_metrics_file_are_same():
    """
    General tests for checking if the metrics file is the same
    """
    banner = (
        "### Check if files are the same from alignment metrics calculation ###"
    )
    logging.info(banner)
def test_rdkit_possible_fail():
    """RDKit can't generate structures for some SMILES, make sure they can
    be generated in other ways"""
    rh_complex = Molecule(smiles='C[Rh](=C=O)(=C=O)(=C=O)=C=O')
    assert are_coords_reasonable(coords=rh_complex.coordinates)
    # Trying to parse with RDKit should revert to RR structure
    rh_complex_rdkit_attempt = Molecule()
    init_organic_smiles(rh_complex_rdkit_attempt,
                        smiles='O=[Rh]([H])([H])([H])=O')
    # FIX: previously re-checked rh_complex here instead of the molecule
    # built via the RDKit fallback path, so the fallback was never verified.
    assert are_coords_reasonable(coords=rh_complex_rdkit_attempt.coordinates)
    # RDKit also may not parse CH5+
    ch5 = Molecule(smiles='[H]C([H])([H])([H])[H+]')
    assert are_coords_reasonable(coords=ch5.coordinates)
def _define_deformation_axes() -> Dict[str, Iterable[str]]:
    """Defines object sets for each axis of deformation."""
    # Some value/axis combinations are excluded from the full set, so each
    # candidate id is filtered through membership in RGB_OBJECTS_FULL_SET.
    return {
        axis: [
            f'{value}{axis}'
            for value in _DEFORMATION_VALUES
            if f'{value}{axis}' in RGB_OBJECTS_FULL_SET
        ]
        for axis in DEFORMATION_AXES
    }
def ldns_rr2buffer_wire_canonical(*args):
    """LDNS buffer.

    Thin wrapper delegating to the generated ``_ldns`` binding; see the ldns
    C library documentation of ``ldns_rr2buffer_wire_canonical`` for the
    argument and return semantics.
    """
    return _ldns.ldns_rr2buffer_wire_canonical(*args)
def copy_snapshot(client, rds_instance, debug=True):
    """ Copy a snapshot the latest automated snapshot """
    instance_id = rds_instance['DBInstanceIdentifier']
    latest_snap = get_latest_snap(client, rds_instance, debug)
    try:
        # Target name embeds the instance id and a timestamp for uniqueness.
        target_id = '%s-final-snapshot-%s' % (
            instance_id, datetime.today().strftime('%Y%m%d-%H%M%S'))
        resp = client.copy_db_snapshot(
            SourceDBSnapshotIdentifier=latest_snap['DBSnapshotIdentifier'],
            TargetDBSnapshotIdentifier=target_id,
            CopyTags=True
        )
        print("Copied final snapshot for %s, %s --> %s"
              % (instance_id,
                 latest_snap['DBSnapshotIdentifier'],
                 resp['DBSnapshot']['DBSnapshotIdentifier']))
    except botocore.exceptions.ClientError as exception:
        # Best-effort: report the failure rather than aborting the caller.
        print("Unable to take a snapshot of instance: %s" % instance_id)
        print(exception)
def get_stretch_factor(folder_name, indices, **kwargs):
    """ Computes the stretch factor using the (16-50-84) percentile estimates
    of x0 - x1 for each restframe wavelength assuming orthogonality

    Parameters:
        folder_name: folder containing the individual likelihoods and their
            percentile estimates
        indices: which restframe wavelengths to use

    Returns:
        stretch_x0, stretch_x1: the stretch factors along x0 and x1
    """
    x0_cen, x0_err, x1_cen, x1_err = [], [], [], []
    for index in indices:
        _, est_x0, est_x1 = np.loadtxt(
            folder_name + 'xx_percentile_est_%d.dat' % index)
        x0_cen.append(est_x0[0])
        # Symmetrize the 16/84 percentile half-widths into one error bar.
        x0_err.append((est_x0[1] + est_x0[2]) / 2.)
        x1_cen.append(est_x1[0])
        x1_err.append((est_x1[1] + est_x1[2]) / 2.)
    res0 = get_corrfunc(np.array(x0_cen), np.array(x0_err), model=True,
                        est=True, sfx=folder_name + "x0_corr")
    res1 = get_corrfunc(np.array(x1_cen), np.array(x1_err), model=True,
                        est=True, sfx=folder_name + "x1_corr")
    return res0[3] / res0[1], res1[3] / res1[1]
def zonal_length(lat, nlon):
    """ length of zonal 1/nlon segment at latitude lat (degrees)"""
    segment_fraction = 2 * np.pi / nlon
    return R_earth * segment_fraction * np.cos(np.deg2rad(lat))
def map_request_attrs(name_request, **kwargs):
    """
    Used internally by map_request_data.
    :param name_request:
    :key nr_id: int
    :key nr_num: str
    :key request_entity: str
    :key request_action: str
    :key request_type: str
    :return: the updated name_request
    """
    try:
        # The ID, NR number and source always come from this application's
        # own values; payload callers must not override them.
        name_request.id = kwargs.get('nr_id')
        name_request.nrNum = kwargs.get('nr_num')
        name_request._source = NAME_REQUEST_SOURCE

        # Fall back to whatever entity / action / type is already set.
        entity = kwargs.get('request_entity', name_request.entity_type_cd)
        action = kwargs.get('request_action', name_request.request_action_cd)
        req_type = kwargs.get('request_type', name_request.requestTypeCd)

        if entity:
            name_request.entity_type_cd = entity
        if action:
            name_request.request_action_cd = action

        # TODO: Throw exceptions for invalid combos?
        if not req_type and entity and action:
            # No explicit requestTypeCd in the payload: derive it from the
            # entity / action combination.
            req_type = get_mapped_request_type(entity, action)
            name_request.requestTypeCd = req_type[0]
        elif req_type is not None:
            # An explicit requestTypeCd was provided: use it verbatim.
            name_request.requestTypeCd = req_type
    except Exception as err:
        raise MapRequestAttributesError(err)

    return name_request
def create_gw_response(app, wsgi_env):
    """Create an api gw response from a wsgi app and environ.

    Invokes ``app`` with ``wsgi_env``, collects the WSGI status, headers and
    body chunks, and packages them into the dict shape API Gateway expects,
    supplying default Content-Length / Content-Type headers when missing.
    """
    status_and_headers = []
    chunks = []

    def start_response(status, headers, exc_info=None):
        status_and_headers[:] = [status, headers]
        # Per WSGI, start_response returns a write callable.
        return chunks.append

    app_iter = app(wsgi_env, start_response)
    try:
        chunks.extend(list(app_iter))
    finally:
        close = getattr(app_iter, 'close', None)
        if close is not None:
            close()
    body = ''.join(chunks)
    headers = dict(status_and_headers[1])
    headers.setdefault('Content-Length', str(len(body)))
    headers.setdefault('Content-Type', 'text/plain')
    return {
        'body': body,
        'statusCode': status_and_headers[0].split(' ', 1)[0],
        'headers': headers,
    }
def Squeeze(parent, axis=-1, name=""):
    """\
    Dimension of size one is removed at the specified position (batch
    dimension is ignored).
    :param parent: parent layer
    :param axis: squeeze only along this dimension
    (default: -1, squeeze along all dimensions)
    :param name: name of the output layer
    :return: Squeeze layer
    """
    # Thin wrapper delegating to the native _eddl binding.
    return _eddl.Squeeze(parent, axis, name)
def parse_namespace(tt):
    """
    Parse a NAMESPACE element and return its NAME attribute value.

    DTD:
    <!ELEMENT NAMESPACE EMPTY>
    <!ATTLIST NAMESPACE
        %CIMName;>
    """
    # Validate the node shape first; check_node presumably raises on
    # mismatch — confirm its contract.
    check_node(tt, 'NAMESPACE', ['NAME'], [], [])
    return attrs(tt)['NAME']
def test_get_asset_info_fii() -> None:
    """Return FII."""
    # DOVL11B is a real-estate fund (FII) ticker; check category and CNPJ.
    asset_info = b3.get_asset_info("DOVL11B")
    assert asset_info.category == "FII"
    assert asset_info.cnpj == "10.522.648/0001-81"
def clean(val, floor, ceiling):
    """Make sure RH values are always sane"""
    # Missing and out-of-range observations are discarded outright.
    if pd.isna(val) or not floor <= val <= ceiling:
        return None
    # Unit-bearing quantities are reduced to their bare magnitude.
    if isinstance(val, munits.Quantity):
        return float(val.magnitude)
    return float(val)
def count_swaps_in_row_order(row_order: Union[List[int], Tuple[int]]) -> int:
    """
    Counts the number of swaps in a row order.
    Args:
        row_order (Union[List[int], Tuple[int]]): A list or tuple
            of ints representing a permutation of range(len(row_order)).
    Returns:
        int: The minimum number of swaps it takes for a
            range(len(row_order)) to reach row_order.
    """
    # Work on a copy so tuples are accepted (as the annotation promises)
    # and the caller's list is not mutated.
    order = list(row_order)
    count = 0
    for i in range(len(order)):
        # Fix: the original used a single `if`, which performs at most one
        # swap per position and undercounts cycles of length > 3 (e.g.
        # [1, 2, 3, 0] needs 3 swaps, not 2). Repeatedly place the element
        # that belongs at position i; each cycle of length k costs k-1 swaps.
        while order[i] != i:
            target = order[i]
            order[i], order[target] = order[target], order[i]
            count += 1
    return count
def evaluate(ast, env):
    """Evaluate an Abstract Syntax Tree in the specified environment.

    Atoms (booleans, integers, strings) evaluate to themselves, symbols
    are looked up in ``env``, and lists are dispatched on their head:
    special forms first, then closure application.

    Fix: removed a leftover debug ``print(ast)`` that fired on every
    (recursive) evaluation and polluted stdout.
    """
    # --- atoms -----------------------------------------------------------
    if is_boolean(ast):
        return ast
    if is_integer(ast):
        return ast
    if is_string(ast):
        return ast
    if is_symbol(ast):
        return env.lookup(ast)
    if is_list(ast):
        if len(ast) == 0:
            raise DiyLangError("Empty list")
        # --- special forms -----------------------------------------------
        if ast[0] == "quote":
            if len(ast[1:]) != 1:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            return ast[1]
        if ast[0] == "atom":
            if len(ast[1:]) != 1:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            result = evaluate(ast[1], env)
            return is_atom(result)
        if ast[0] == "eq":
            if len(ast[1:]) != 2:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            left = evaluate(ast[1], env)
            right = evaluate(ast[2], env)
            # Lists are never eq, even to themselves.
            if not is_atom(left) or not is_atom(right):
                return False
            return left == right
        if ast[0] in ["+", "-", "/", "*", "mod", ">"]:
            if len(ast[1:]) != 2:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            left = evaluate(ast[1], env)
            right = evaluate(ast[2], env)
            if not is_integer(left) or not is_integer(right):
                raise DiyLangError(f"{left} or {right} is not a number")
            if ast[0] == "+":
                return left + right
            if ast[0] == "-":
                return left - right
            if ast[0] == "/":
                # Integer division by language design.
                return left // right
            if ast[0] == "*":
                return left * right
            if ast[0] == "mod":
                return left % right
            if ast[0] == ">":
                return left > right
        if ast[0] == "if":
            if len(ast[1:]) != 3:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            predicate = evaluate(ast[1], env)
            if predicate:
                return evaluate(ast[2], env)
            else:
                return evaluate(ast[3], env)
        if ast[0] == "define":
            if len(ast[1:]) != 2:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            left = ast[1]
            if not is_symbol(left):
                raise DiyLangError(f"{left} is not a symbol")
            right = evaluate(ast[2], env)
            env.set(left, right)
            return
        if ast[0] == "cons":
            head = evaluate(ast[1], env)
            tail = evaluate(ast[2], env)
            if is_list(tail):
                return [head] + tail
            if is_string(tail):
                return String(head.val + tail.val)
            raise DiyLangError("Can't use cons on a non list/string")
        if ast[0] == "head":
            list_ = evaluate(ast[1], env)
            if is_list(list_):
                if len(list_) == 0:
                    raise DiyLangError("Can't use head on empty list")
                return list_[0]
            if is_string(list_):
                if len(list_.val) == 0:
                    raise DiyLangError("Can't use head on empty string")
                return String(list_.val[0])
            raise DiyLangError("Can't use head on a non list/string")
        if ast[0] == "tail":
            list_ = evaluate(ast[1], env)
            if is_list(list_):
                if len(list_) == 0:
                    raise DiyLangError("Can't use tail on empty list")
                return list_[1:]
            if is_string(list_):
                if len(list_.val) == 0:
                    raise DiyLangError("Can't use tail on empty string")
                return String(list_.val[1:])
            raise DiyLangError("Can't use tail on a non list/string")
        if ast[0] == "empty":
            list_ = evaluate(ast[1], env)
            if is_list(list_):
                return len(list_) == 0
            if is_string(list_):
                return len(list_.val) == 0
            raise DiyLangError("Can't use empty on a non list/string")
        if ast[0] == "cond":
            cases = ast[1]
            # First truthy condition wins; no match yields False.
            for (condition, value) in cases:
                if evaluate(condition, env):
                    return evaluate(value, env)
            return False
        if ast[0] == "let":
            # Each binding sees the ones before it (let* semantics).
            new_env = env
            for (key, value) in ast[1]:
                evaluated_value = evaluate(value, new_env)
                new_env = new_env.extend({
                    key: evaluated_value
                })
            return evaluate(ast[2], new_env)
        if ast[0] == "defn":
            # Sugar: (defn name params body) == (define name (lambda ...)).
            return evaluate(["define", ast[1], ["lambda", ast[2], ast[3]]], env)
        if ast[0] == "lambda":
            if len(ast[1:]) != 2:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            params = ast[1]
            if not is_list(params):
                raise DiyLangError(f"{params} is not a list")
            for param in params:
                if not is_symbol(param):
                    raise DiyLangError(f"{param} is not a symbol")
            body = ast[2]
            return Closure(env, params, body)
        # --- application -------------------------------------------------
        if is_closure(ast[0]):
            closure = ast[0]
            args = ast[1:]
            return evaluate_closure(closure, args, env)
        if is_list(ast[0]):
            # Head is itself an expression, e.g. ((lambda (x) x) 42).
            closure = evaluate(ast[0], env)
            args = ast[1:]
            return evaluate_closure(closure, args, env)
        function_name = ast[0]
        if not is_symbol(function_name):
            raise DiyLangError(f"{function_name} is not a function")
        closure = env.lookup(function_name)
        if not is_closure(closure):
            raise DiyLangError(f"{closure} is not a function")
        args = ast[1:]
        return evaluate_closure(closure, args, env)
def create_eulerian_circuit(graph_augmented, graph_original, start_node=None):
    """
    networkx.eulerian_circuit only returns the order in which we hit each node.  It does not return the attributes of the
    edges needed to complete the circuit.  This is necessary for the postman problem where we need to keep track of which
    edges have been covered already when multiple edges exist between two nodes.
    We also need to annotate the edges added to make the eulerian to follow the actual shortest path trails (not
    the direct shortest path pairings between the odd nodes for which there might not be a direct trail)
    Args:
        graph_augmented (networkx graph): graph w links between odd degree nodes created from `add_augmenting_path_to_graph`.
        graph_original (networkx graph): orginal graph created from `create_networkx_graph_from_edgelist`
        start_node (str): name of starting (and ending) node for CPP solution.
    Yields:
        (node_from, node_to, key, edge_attrs) tuples — real edges pass
        through unchanged; each "augmented" edge is expanded into the
        shortest-path trail of real edges it stands for.
    """
    euler_circuit = list(nx.eulerian_circuit(graph_augmented, source=start_node, keys=True))
    assert len(graph_augmented.edges()) == len(euler_circuit), 'graph and euler_circuit do not have equal number of edges.'
    for edge in euler_circuit:
        # Shortest real trail between the two endpoints, by distance.
        aug_path = nx.shortest_path(graph_original, edge[0], edge[1], weight='distance')
        edge_attr = graph_augmented[edge[0]][edge[1]][edge[2]]
        if not edge_attr.get('augmented'):
            # A real edge: yield it as-is with its attributes.
            yield edge + (edge_attr,)
        else:
            # An artificial odd-node pairing: walk the real shortest-path
            # trail edge by edge instead.
            for edge_aug in list(zip(aug_path[:-1], aug_path[1:])):
                # find edge with shortest distance (if there are two parallel edges between the same nodes)
                edge_aug_dict = graph_original[edge_aug[0]][edge_aug[1]]
                edge_key = min(edge_aug_dict.keys(), key=(lambda k: edge_aug_dict[k]['distance']))  # index with min distance
                edge_aug_shortest = edge_aug_dict[edge_key]
                # NOTE(review): this mutates graph_original's edge attrs in
                # place; 'id' is re-assigned from the same dict (a no-op).
                edge_aug_shortest['augmented'] = True
                edge_aug_shortest['id'] = edge_aug_dict[edge_key]['id']
                yield edge_aug + (edge_key, edge_aug_shortest, )
def load_data(
        assets: tp.Union[None, tp.List[tp.Union[str,dict]]] = None,
        min_date: tp.Union[str, datetime.date, None] = None,
        max_date: tp.Union[str, datetime.date, None] = None,
        dims: tp.Tuple[str, str] = (ds.TIME, ds.ASSET),
        forward_order: bool = True,
        tail: tp.Union[datetime.timedelta, int, float] = DEFAULT_TAIL,
) -> tp.Union[None, xr.DataArray]:
    """
    Loads index time series.

    :param assets: asset ids, either strings or dicts with an 'id' key;
        None loads all assets listed for the date range.
    :param min_date: earliest date (inclusive); defaults to
        ``max_date - parse_tail(tail)``.
    :param max_date: latest date (inclusive); passed through ``parse_date``.
    :param dims: dimension order of the returned array.
    :param forward_order: if True, return the time axis in ascending order.
    :param tail: lookback window used when ``min_date`` is omitted.
    :return: xr.DataArray of index values named "indexes"
        (empty when the server returns no data).
    """
    track_event("DATA_INDEX_SERIES")
    max_date = parse_date(max_date)

    if min_date is not None:
        min_date = parse_date(min_date)
    else:
        min_date = max_date - parse_tail(tail)

    if assets is not None:
        # Normalize dict entries to their ids (isinstance also accepts
        # dict subclasses, unlike the previous `type(a) == dict` check).
        assets = [a['id'] if isinstance(a, dict) else a for a in assets]

    if assets is None:
        assets_array = load_list(min_date, max_date)
        assets_arg = [i['id'] for i in assets_array]
    else:
        assets_arg = assets

    params = {"ids": assets_arg, "min_date": min_date.isoformat(), "max_date": max_date.isoformat()}
    params = json.dumps(params)
    params = params.encode()
    raw = request_with_retry("idx/data", params)

    if raw is None or len(raw) < 1:
        # Build an empty (0 x 0) array with the expected dims/coords.
        arr = xr.DataArray(
            [[np.nan]],
            dims=[ds.TIME, ds.ASSET],
            coords={
                ds.TIME: pd.DatetimeIndex([max_date]),
                ds.ASSET: ['ignore']
            }
        )[1:,1:]
    else:
        arr = xr.open_dataarray(raw, cache=True, decode_times=True)
        arr = arr.compute()

    if forward_order:
        # Server delivers newest-first; reverse to ascending time.
        arr = arr.sel(**{ds.TIME: slice(None, None, -1)})

    if assets is not None:
        # Restrict/align to the unique requested assets, sorted.
        assets = list(set(assets))
        assets = sorted(assets)
        assets = xr.DataArray(assets, dims=[ds.ASSET], coords={ds.ASSET:assets})
        arr = arr.broadcast_like(assets).sel(asset=assets)

    arr = arr.dropna(ds.TIME, 'all')

    arr.name = "indexes"
    return arr.transpose(*dims)
def test_popleft_empty_throws_error(deque_fixture):
    """Popping from the left of an empty deque must raise IndexError."""
    pytest.raises(IndexError, deque_fixture.popleft)
def create_model(values):
    """Create the model basing on the calculated values.

    Builds the pretrained backbone, swaps its output for a BatchNorm1d
    head, then attaches the classifier matching the configured loss.

    Args:
        values (dict): values from the get_values_from_path function
    Raises:
        ValueError: if the loss function doesnt excist
    Returns:
        torch.nn.Module: model the network originally was trained with.
    """
    backbone = get_model(
        values["model"], num_classes=values["embs"], in_channels=values["num_chan"])
    backbone.output = nn.BatchNorm1d(512)

    loss_name = values["loss"]
    if loss_name == "softmax":
        head = nn.Linear(values["embs"], values["num_cl"], bias=False)
        return Model(backbone, head)
    if loss_name == "arcface":
        return ModelArc(backbone, AAML(values["embs"], values["num_cl"]))
    if loss_name == "circle":
        return ModelArc(backbone, CircleLoss(values["embs"], values["num_cl"]))
    if loss_name == "rbf":
        head = RBFClassifier(
            values["embs"], values["num_cl"], scale=3, gamma=1)
        return Model(backbone, head)
    raise ValueError("That loss function doesn't exist!")
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.
    Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size].
    Returns:
        loss: Loss tensor of type float.
    """
    # Fix: labels are class indices ([batch_size], per the docstring), so
    # the *sparse* cross-entropy op is required; the dense variant expects
    # one-hot labels. Also pass by keyword — in TF 1.x+ the positional
    # order (logits, labels) is rejected/misinterpreted.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    return tf.reduce_mean(cross_entropy, name='xentropy_mean')
def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
    """
    Provides project, station, year and DOY, asking as needed.
    It follows one of several possible paths to get to the session::
        proj - path through /usr/local/projects/<project>
        hdf5 - path through /usr/local/RA_data/HDF5
        fits - path through /usr/local/RA_data/FITS
        wvsr - path through /data
    @param project : optional name as defined in /usr/local/projects
    @type  project : str
    @param dss : optional station number
    @type  dss : int
    @param date : optional YYYY/DDD
    @type  date : str
    @return: project, DSS, year, DOY.
    """
    def get_directory(path):
        """List subdirectories of `path` and prompt the user to pick one.

        Returns the user's input string, or [] when `path` has no entries.
        """
        # only one trailing /
        path = path.rstrip('/')+"/*"
        logger.debug("get_obs_session:get_directory: from %s", path)
        names = glob.glob(path)
        if names:
            dirs = []
            for name in names:
                if os.path.isdir(name):
                    dirs.append(os.path.basename(name))
            dirs.sort()
            for name in dirs:
                print((name), end=' ')
            return input('\n>')
        else:
            return []
    def from_wvsr_dir():
        """
        this needs to be completed and tested on crab14 or an auto host
        """
        session = get_directory(local_dirs.wvsr_dir)
        return session
    # Remember the caller's cwd; restored before returning.
    cwd = os.getcwd()
    # get the project
    if project:
        pass
    else:
        os.chdir(local_dirs.projects_dir)
        project = get_directory(local_dirs.projects_dir)
    logger.debug("from_wvsr_dir: project is %s", project)
    projectpath = local_dirs.projects_dir+project
    # get the station
    if path[:4].lower() == 'wvsr':
        # special call
        # NOTE(review): this only *prints* the text "from_wvsr_dir()";
        # the helper is never actually invoked — confirm intent.
        print("from_wvsr_dir()")
    if path[:4].lower() == 'proj':
        os.chdir(projectpath+"/Observations/")
    elif path[:4].lower() == 'hdf5':
        os.chdir(local_dirs.hdf5_dir)
    elif path[:4].lower() == 'fits':
        os.chdir(local_dirs.fits_dir)
    # get the station
    if dss:
        pass
    else:
        # This seems odd but get_directory() needs '/' and int does not
        station = get_directory(os.getcwd()+"/").rstrip('/')
        dss = int(station[-2:])
    stationpath = os.getcwd()+"/dss"+str(dss)
    # get the date
    if date:
        items = date.split('/')
        year = int(items[0])
        DOY = int(items[1])
    else:
        year = int(get_directory(stationpath))
        yearpath = stationpath+"/"+str(year)
        DOY = int(get_directory(yearpath))
    os.chdir(cwd)
    return project, dss, year, DOY
def create_pattern_neighbors_ca2d(width, height, n_states=2):
    """
    Returns a list with the weights for 'neighbors' and 'center_idx' parameters
    of evodynamic.connection.cellular_automata.create_conn_matrix_ca1d(...).
    The weights are responsible to calculate an unique number for each different
    neighborhood pattern.
    Parameters
    ----------
    width : int
        Neighborhood width.
    height : int
        Neighborhood height.
    n_states : int
        Number of discrete state in a cell.
    Returns
    -------
    out1 : list
        List of weights of the neighbors.
    out2 : int
        Index of the center of the neighborhood.
    """
    # Each cell gets a distinct power of n_states, so every neighborhood
    # pattern maps to a unique base-n_states number.
    exponents = np.arange(width * height).reshape(width, height)
    weights = n_states ** exponents
    center_idx = [width // 2, height // 2]
    return weights, center_idx
def exception_logging(exctype, value, tb):
    """
    Print an unhandled exception with its location (intended as a
    ``sys.excepthook`` replacement).

    Fix: removed the unused ``write_val`` dict (it was built from the
    formatted traceback but never written anywhere) and corrected the
    docstring, which claimed the root logger was used.

    Parameters
    ----------
    exctype : type
        Exception class.
    value : BaseException
        The exception instance.
    tb : traceback
        Traceback object of the exception.
    """
    print('Error: %s \n in "%s", line %d' %
          (value, tb.tb_frame.f_code.co_filename, tb.tb_lineno))
def by_colname_like(colname, colname_val):
    """
    Query to handle the cases in which somebody has the correct
    words within their query, but in the incorrect order (likely
    to be especially relevant for professors).

    SECURITY NOTE: the query is built by string interpolation. Single
    quotes in `colname_val` are now escaped (doubled) so user input can
    no longer break out of the LIKE literal; `colname` is still
    interpolated verbatim and must come from a trusted, fixed set of
    column names. `%`/`_` wildcards in the input are intentionally kept.
    """
    def like_clause_constructor(colname, colname_val):
        """
        Helper function for constructing like clause.
        Emits one "lower(col) like lower('%word%') and " unit per word.
        """
        like_list = colname_val.split(' ')
        like_unit = "lower({colname}) like lower('%{word}%') and "
        like_clause = ""
        for word in like_list:
            # Escape single quotes per SQL convention to avoid breaking
            # out of the quoted literal (injection hardening).
            safe_word = word.replace("'", "''")
            like_clause += like_unit.format(colname=colname, word=safe_word)
        return like_clause
    return """
    select
        title,
        section,
        instructor,
        time,
        building,
        hours,
        interesting,
        recommend
    from
        {table_name}
    where
        {where_clause}
        recommend > 0
    limit 3
    """.format(where_clause=like_clause_constructor(colname=colname, colname_val=colname_val), table_name=TABLE_NAME)
def export(template,
           values,
           out,
           verbose):
    """
    Export OSM data using overpass query templates.
    TEMPLATE is a file containing a template string with '$'-based substitution. Defines the overpass query and substitution keys.
    VALUES is an yaml-file defining the values for each substition key.

    Fix: converted Python 2 ``print`` statements to the ``print()``
    function (the originals were syntax errors under Python 3).
    """
    values_dict = yaml.safe_load(values)
    query = Template(template.read())
    query = query.substitute(values_dict)
    # Query time
    url = "http://overpass-api.de/api/interpreter?data=" + query
    if verbose:
        print(url)
        print("\n")
    print("\nDownloading OSM data to file '{}'\n...".format(out))
    download_file(url, out)
    print("OK")
    return
def decode_classnames_json(preds, top=5):
    """
    Returns class code, class name and probability for each class amongst top=5 for each prediction in preds
    e.g.
    [[('n01871265', 'tusker', 0.69987053), ('n02504458', 'African_elephant', 0.18252705), ... ]]

    NOTE(review): reads 'imagenet_class_index.json' from the current
    working directory on every call — confirm that is intended.
    """
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_classnames_json` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    with open('imagenet_class_index.json') as data_file:
        data = json.load(data_file)
    results = []
    for pred in preds:
        # Indices of the `top` largest scores, highest first.
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(data[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
def MCMC(prob_model):
    """
    Samples exact posterior samples from the probabilistic model, which has been fit with VI before.

    NOTE(review): unimplemented stub — currently returns None without
    performing any sampling.

    :param VI_optimized prob_model: the probabilistic model to sample from
    """
    return
def mock_create_schedule_request() -> Generator[MagicMock, Any, None]:
    """Fixture for mocking the create_schedule response.
    Yields:
        Mocked ``SwitcherV2CreateScheduleResponseMSG`` object.
    """
    # Pre-configure the mock as a successful CREATE_SCHEDULE response.
    mock_response = MagicMock(messages.SwitcherV2CreateScheduleResponseMSG)
    mock_response.successful = True
    mock_response.msg_type = messages.ResponseMessageType.CREATE_SCHEDULE
    # Patch the message class for the duration of the test.
    with patch(
        "request_handlers.messages.SwitcherV2CreateScheduleResponseMSG",
        new=mock_response,
    ) as patcher:
        yield patcher
def validate_future(value: date):
    """Validate value >= today; raise ValidationError for past dates."""
    if value >= date.today():
        return
    raise ValidationError(f'{value} est déjà passé')
def _GetSupplementalColumns(build_dir, supplemental_colummns_file_name):
    """Reads supplemental columns data from a file.
    Args:
        build_dir: Build dir name.
        supplemental_columns_file_name: Name of a file which contains the
            supplemental columns data (in JSON format).
    Returns:
        A dict of supplemental data to send to the dashboard
        (empty when the file does not exist).
    """
    # NOTE: the parameter name keeps its historical typo ("colummns") so
    # keyword callers are not broken.
    supplemental_columns = {}
    supplemental_columns_file = os.path.join(build_dir,
                                             results_dashboard.CACHE_DIR,
                                             supplemental_colummns_file_name)
    if os.path.exists(supplemental_columns_file):
        # Fix: the Python 2 `file()` builtin no longer exists — use open().
        with open(supplemental_columns_file, 'r') as f:
            supplemental_columns = json.load(f)
    return supplemental_columns
def prime_decomposition(number):
    """Returns a dictionary with the prime decomposition of n.

    Keys are prime factors (ints), values their multiplicities.
    Returns {} for numbers below 2.

    Fix: the division step used `/=` (true division), which turned
    `number` into a float — producing float dict keys for any leftover
    prime factor and risking precision loss on large inputs. Integer
    floor division preserves exactness.
    """
    decomposition = {}
    number = int(number)
    if number < 2:
        return decomposition
    gen = primes_gen()
    # Once the candidate prime exceeds sqrt(original), any remainder > 1
    # must itself be prime (it divides the original number).
    break_condition = int(math.sqrt(number))
    while number > 1:
        current_prime = next(gen)
        if current_prime > break_condition:
            decomposition[number] = 1
            return decomposition
        while number % current_prime == 0 or number == current_prime:
            if current_prime in decomposition:
                decomposition[current_prime] += 1
            else:
                decomposition[current_prime] = 1
            number //= current_prime
    return decomposition
def remove_office_metadata(file_name):
    """
    Remove all metadata from Microsoft Office 2007+ file types such as docx,
    pptx, and xlsx.

    The document is rewritten to a sibling temporary file with the
    contents of docProps/core.xml stripped to an empty root element, then
    atomically swapped into place.

    :param str file_name: The path to the file whose metadata is to be removed.
    """
    ns = {
        'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties',
        'dc': 'http://purl.org/dc/elements/1.1/',
        'dcterms': 'http://purl.org/dc/terms/',
        'dcmitype': 'http://purl.org/dc/dcmitype/',
        'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
    }
    # Register prefixes so serialized XML keeps the conventional names.
    for prefix, uri in ns.items():
        ElementTree.register_namespace(prefix, uri)
    _, file_ext = os.path.splitext(file_name)
    # Same directory + same extension so the final rename stays on one
    # filesystem and the temp file is recognizable if left behind.
    tmpfd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name), suffix=file_ext)
    os.close(tmpfd)
    try:
        with zipfile.ZipFile(file_name, 'r') as zin:
            with zipfile.ZipFile(tmpname, 'w') as zout:
                zout.comment = zin.comment
                for item in zin.infolist():
                    data = zin.read(item.filename)
                    if item.filename == 'docProps/core.xml':
                        # Drop every child/attribute of the properties root.
                        root = ElementTree.fromstring(data)
                        root.clear()
                        data = ElementTree.tostring(root, 'UTF-8')
                    zout.writestr(item, data)
    except Exception:
        # Don't leave the partially-written temp file behind.
        os.remove(tmpname)
        raise
    # Fix: os.replace is atomic and, unlike remove()+rename(), also works
    # when the destination exists on Windows.
    os.replace(tmpname, file_name)
def test_coalesce_ioresult():
    """Ensures that `coalesce` is always returning the correct type."""
    # Presumably the converter maps a success to IO(value) and a failure
    # to IO(0) — that asserted contract is what is pinned here.
    assert _ioresult_converter(IOSuccess(1)) == IO(1)
    assert _ioresult_converter(IOFailure(1)) == IO(0)
def org_client(aws_credentials):
    """Organizations Mock Client.

    Yields a boto3 Organizations client backed by moto's in-memory mock,
    so tests never touch real AWS. Depends on the aws_credentials fixture
    for fake credentials.
    """
    with mock_organizations():
        connection = boto3.client("organizations", region_name="us-east-1")
        yield connection
def load_10x(
    celltype: str = "CD8_healthy", exclude_singles: bool = True
) -> pd.DataFrame:
    """
    Load 10x data. Columns of interest are TRA_aa and TRB_aa.

    Reads every clonotype CSV for the given cell type, splits the
    combined cdr3s_aa/cdr3s_nt strings into TRA/TRB columns, optionally
    drops rows missing either chain, and concatenates all files.
    """
    def split_to_tra_trb(s: Iterable[str]):
        """Split into two lists of TRA and TRB"""
        # TODO this does NOT correctly handle cases where there are say
        # multiple TRA sequences in a single row
        tra_seqs, trb_seqs = [], []
        for entry in s:
            # Entries look like "TRA:<seq>;TRB:<seq>" (semicolon-joined
            # "chain:sequence" pairs); missing chains become "".
            sdict = dict([part.split(":") for part in entry.split(";")])
            tra = sdict["TRA"] if "TRA" in sdict else ""
            trb = sdict["TRB"] if "TRB" in sdict else ""
            tra_seqs.append(tra)
            trb_seqs.append(trb)
        return tra_seqs, trb_seqs
    dirname = os.path.join(LOCAL_DATA_DIR, "10x", celltype)
    assert os.path.isdir(dirname), f"Unrecognized celltype: {celltype}"
    # CD8_healthy uses the donor-aggregated naming scheme; other cell
    # types use per-sample "*_t_clonotypes.csv" files.
    if celltype == "CD8_healthy":
        fnames = glob.glob(
            os.path.join(dirname, "vdj_v1_hs_aggregated_donor*_clonotypes.csv")
        )
    else:
        fnames = glob.glob(os.path.join(dirname, "*_t_clonotypes.csv"))
    assert fnames
    fnames = sorted(fnames)
    dfs = []
    for fname in fnames:
        df = pd.read_csv(fname)
        tra_seqs, trb_seqs = split_to_tra_trb(df["cdr3s_aa"])
        df["TRA_aa"] = tra_seqs
        df["TRB_aa"] = trb_seqs
        tra_nt, trb_nt = split_to_tra_trb(df["cdr3s_nt"])
        df["TRA_nt"] = tra_nt
        df["TRB_nt"] = trb_nt
        if exclude_singles:
            # Rows with only one of TRA/TRB are unmatched "singles".
            is_single_idx = np.where(
                np.logical_or(df["TRA_aa"] == "", df["TRB_aa"] == "")
            )
            logging.info(
                f"Dropping {len(is_single_idx[0])} entries for unmatched TRA/TRB"
            )
            df.drop(index=is_single_idx[0], inplace=True)
        dfs.append(df)
    retval = pd.concat(dfs, axis=0)
    return retval
def levenshtein(s1, s2):
    """
    Levenstein distance, or edit distance, taken from Wikibooks:
    http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python

    Fix: replaced the Python 2-only ``xrange`` with ``range`` (NameError
    under Python 3; behavior is otherwise unchanged).
    """
    # Ensure s1 is the longer string so the DP rows track the shorter one.
    if len(s1) < len(s2):
        return levenshtein(s2, s1)
    if not s1:
        return len(s2)
    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1  # j+1 instead of j since previous_row and current_row are one character longer
            deletions = current_row[j] + 1        # than s2
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
def test_dump_file_name_without_timestamp():
    """
    pgdump.dump_file_name returns the name of the database.
    """
    # `url` is a module-level fixture URL whose database name is db_one.
    assert pgdump.dump_file_name(url) == 'db_one.sql'
def configure_mpi_node() -> Tuple[RabbitConfig, Celery]:
    """Will configure and return a celery app targetting GPU mode nodes.

    Registers the "comp.task.mpi" task on the module-level MPI celery app
    (3 retries, 2s countdown), switches the boot mode to MPI, and returns
    the rabbit config together with the configured app.
    """
    log.info("Initializing celery app...")
    app = _celery_app_mpi
    # pylint: disable=unused-variable
    @app.task(
        name="comp.task.mpi",
        bind=True,
        autoretry_for=(Exception,),
        retry_kwargs={"max_retries": 3, "countdown": 2},
        on_failure=_on_task_failure_handler,
        on_success=_on_task_success_handler,
        track_started=True,
    )
    def pipeline(
        self, user_id: str, project_id: str, node_id: Optional[str] = None
    ) -> None:
        # Delegates the actual work to the shared dispatcher.
        shared_task_dispatch(self, user_id, project_id, node_id)
    set_boot_mode(BootMode.MPI)
    log.info("Initialized celery app in %s", get_boot_mode())
    return (_rabbit_config, app)
def customizations(record):
    """
    Use some functions delivered by the library
    @type record: record
    @param record: a record
    @rtype: record
    @returns: -- customized record

    Fix: removed ``record = type(record)``, which replaced the record
    with its *class* before any customization ran — convert_to_unicode
    would then receive a type object instead of the record.
    """
    # record = author(record)
    record = convert_to_unicode(record)
    # record = editor(record)
    # record = journal(record)
    # record = keyword(record)
    # record = link(record)
    # record = page_double_hyphen(record)
    # record = doi(record)
    return record
def test_all_permission_string():
    """ test the string representation of the "All" constan """
    # Imported lazily so the module only loads inside the test run.
    from fastapi_permissions import All
    assert str(All) == "permissions:*"
def enrichs_to_gv(G, enrich_fhs, ofh, **kwargs):
    """
    Read the next line in each file handle in <enrich_fhs> and parse the significant GO:BP
    annotation in that line. Then, write the GO:BP annotations, along with the original graph,
    to a graphviz open file handle <ofh>.
    Parameters
    ----------
    G : nx.Graph
    enrich_fhs : list of io-like
        GO:BP enrichment results from GProfiler, written by write_enrich
    ofh : io-like
    Returns
    -------
    exhausted
    TODO
    ----
    - handle case where any fh does not have next
    - this could have used the data from GProfiler python API directly to be faster but
      this may be more reusable later

    NOTE(review): as written, `G`, `ofh` and `**kwargs` are never used —
    the graphviz-writing step described above is not implemented here;
    the function only parses one line per handle and returns the
    exhausted indices plus the parsed (cluster, label) tuples.
    """
    # parse annotation data to prepare graphviz "clusters"
    cluster_members = []
    # NOTE(review): enrich_ind is incremented but never read afterwards.
    enrich_ind = 0
    exhausted_inds = []
    for i in range(len(enrich_fhs)):
        enrich_fh = enrich_fhs[i]
        line = None
        try:
            line = enrich_fh.__next__().rstrip()
        except StopIteration as err:
            # Handle ran out of lines; remember its index.
            exhausted_inds.append(i)
        if line is not None:
            words = line.split('\t')
            p_val = float(words[FIELD_TO_INDEX['p-value']])
            t_name = words[FIELD_TO_INDEX['t name']]
            anno_hits = words[FIELD_TO_INDEX['Q&T list']].split(",")
            cluster = anno_hits
            label = "{}\\np = {}".format(t_name, p_val)
            tpl = (cluster, label)
            cluster_members.append(tpl)
        enrich_ind += 1
    return exhausted_inds, cluster_members
def download_yeast_model(rawDir):
    """Download FBA model.

    Fetches the yeast 7.6 archive into `rawDir`, unzips it, and deletes
    the archive. Skips the download when the target directory exists.

    Fix: converted the Python 2 ``print`` statement to the ``print()``
    function (syntax error under Python 3).
    """
    url = 'https://pilotfiber.dl.sourceforge.net/project/yeast/yeast_7.6.zip'
    outPath = os.path.join(rawDir, 'yeast_7.6')
    if os.path.exists(outPath):
        print(outPath, 'already exists. Skipping.')
        return
    os.system('wget -P ' + rawDir + ' ' + url)
    zipPath = os.path.join(rawDir, url.split('/')[-1])
    os.system('unzip ' + zipPath + ' -d ' + rawDir)
    os.remove(zipPath)
def export_from_checkpoint():
    """Restore variables from a given checkpoint.

    Builds the EffNetV2 model, restores EMA-averaged weights from
    `checkpoint_dir`, applies them, and re-saves to `export_dir`.
    NOTE(review): relies on module-level globals — `effnet_name`,
    `batch_size`, `checkpoint_dir`, `export_dir` — confirm they are set
    before calling.
    """
    with tf1.Session() as sess:
        model = effnetv2_model.EffNetV2Model(effnet_name)
        # NOTE(review): `224 or ...` always evaluates to 224, so the
        # configured eval size is never used — confirm intent.
        isize = 224 or model.cfg.eval.isize
        inputs = tf.ones((batch_size, isize, isize, 3), tf.float32)
        _ = model(inputs, training=False)
        sess.run(tf1.global_variables_initializer())
        if tf.io.gfile.isdir(checkpoint_dir):
            ckpt_path = tf1.train.latest_checkpoint(checkpoint_dir)
        else:
            ckpt_path = checkpoint_dir
        # decay=0 so the EMA shadow values are taken verbatim.
        ema = tf1.train.ExponentialMovingAverage(decay=0.0)
        ema_vars = utils.get_ema_vars()
        var_dict = ema.variables_to_restore(ema_vars)
        ema_assign_op = ema.apply(ema_vars)
        tf1.train.get_or_create_global_step()
        sess.run(tf1.global_variables_initializer())
        saver = tf1.train.Saver(var_dict, max_to_keep=1)
        # Restore all variables from ckpt.
        saver.restore(sess, ckpt_path)
        print('export model to {}'.format(export_dir))
        sess.run(ema_assign_op)
        saver = tf1.train.Saver(max_to_keep=1, save_relative_paths=True)
        saver.save(sess, export_dir)
def _has_letter(pw):
"""
Password must contain a lowercase letter
:param pw: password string
:return: boolean
"""
return any(character.isalpha() for character in pw) | 36,166 |
def closest_power_of_two(n):
    """Returns the closest power of two (linearly) to n.
    See: http://mccormick.cx/news/entries/nearest-power-of-two
    Args:
        n: Value to find the closest power of two of.
    Returns:
        Closest power of two to "n".
    """
    # Round log2(n) to the nearest integer exponent, then re-exponentiate.
    exponent = int(math.log(n, 2) + 0.5)
    return 2 ** exponent
def prepare_time_inhomogeneous_cv_object(cv: BaseTimeSeriesCrossValidator):
    """
    Creates a sample set consisting in 11 samples at 2h intervals, spanning 20h, as well as 10 samples at 59m intervals,
    with the first samples of each group occurring at the same time, and installs it on `cv` in place.
    pred_times and eval_times have the following values:
                 pred_times          eval_times
    0  2000-01-01 00:00:00 2000-01-01 01:00:00
    1  2000-01-01 00:00:00 2000-01-01 01:00:00
    2  2000-01-01 00:59:00 2000-01-01 01:59:00
    3  2000-01-01 01:58:00 2000-01-01 02:58:00
    4  2000-01-01 02:00:00 2000-01-01 03:00:00
    5  2000-01-01 02:57:00 2000-01-01 03:57:00
    6  2000-01-01 03:56:00 2000-01-01 04:56:00
    7  2000-01-01 04:00:00 2000-01-01 05:00:00
    8  2000-01-01 04:55:00 2000-01-01 05:55:00
    9  2000-01-01 05:54:00 2000-01-01 06:54:00
    10 2000-01-01 06:00:00 2000-01-01 07:00:00
    11 2000-01-01 06:53:00 2000-01-01 07:53:00
    12 2000-01-01 07:52:00 2000-01-01 08:52:00
    13 2000-01-01 08:00:00 2000-01-01 09:00:00
    14 2000-01-01 08:51:00 2000-01-01 09:51:00
    15 2000-01-01 10:00:00 2000-01-01 11:00:00
    16 2000-01-01 12:00:00 2000-01-01 13:00:00
    17 2000-01-01 14:00:00 2000-01-01 15:00:00
    18 2000-01-01 16:00:00 2000-01-01 17:00:00
    19 2000-01-01 18:00:00 2000-01-01 19:00:00
    20 2000-01-01 20:00:00 2000-01-01 21:00:00
    """
    # Two sample groups: 2-hourly and 59-minutely, sharing a start time.
    X1, pred_times1, eval_times1 = create_random_sample_set(n_samples=11, time_shift='1H', freq='2H')
    X2, pred_times2, eval_times2 = create_random_sample_set(n_samples=10, time_shift='1H', freq='59T')
    data1 = pd.concat([X1, pred_times1, eval_times1], axis=1)
    data2 = pd.concat([X2, pred_times2, eval_times2], axis=1)
    # Interleave the two groups in prediction-time order.
    data = pd.concat([data1, data2], axis=0, ignore_index=True)
    data = data.sort_values(by=data.columns[3])
    data = data.reset_index(drop=True)
    # Columns 0-2 are features; 3 is pred_times, 4 is eval_times.
    X = data.iloc[:, 0:3]
    pred_times = data.iloc[:, 3]
    eval_times = data.iloc[:, 4]
    cv.X = X
    cv.pred_times = pred_times
    cv.eval_times = eval_times
    cv.indices = np.arange(X.shape[0])
def get_product(barcode, locale='world'):
    """
    Return information of a given product.

    Builds the API URL for the "pet" entity from the barcode and locale,
    and fetches it via the shared utils helpers.
    """
    return utils.fetch(utils.build_url(geography=locale,
                                       service='api',
                                       resource_type='product',
                                       parameters=barcode,
                                       entity="pet"))
def score(y_true, y_score):
    """ Evaluation metric

    Weighted sum of the true-positive rates at the first points where the
    false-positive rate reaches 0.1%, 0.5% and 1%.
    """
    fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label = 1)
    weighted = 0.0
    for weight, fpr_cut in ((0.4, 0.001), (0.3, 0.005), (0.3, 0.01)):
        weighted += weight * tpr[np.where(fpr >= fpr_cut)[0][0]]
    return weighted
def group_umis(bam_file, groups_file, log_file, run_config):
    """
    Identify UMI groups using ``umi_tools group``.
    :param bam_file: BAM file (input)
    :type bam_file: str or unicode
    :param groups_file: UMI groups file (output)
    :type groups_file: str or unicode
    :param log_file: Log file (output)
    :type log_file: str or unicode
    :param run_config: Run-related configuration
    :type run_config: RunConfigTuple
    :raise FileNotFoundError: if ``umi_tools`` cannot be found
    :raise AssertionError: if ``umi_tools`` returns a non-zero exit \
    code
    """
    LOGGER.info("Idenfity UMI groups. Log: %s", log_file)
    # Shell out to umi_tools; the helper handles logging and dry runs.
    cmd = ["umi_tools", "group", "-I", bam_file,
           "--group-out", groups_file]
    process_utils.run_logged_command(cmd, log_file,
                                     run_config.cmd_file,
                                     run_config.is_dry_run)
def divisors(num):
    """
    Takes a number and returns all divisors of the number, ordered least to greatest
    :param num: int
    :return: list (int) — empty for num < 1

    Cleanup: the original shadowed the builtin ``list`` and kept a
    redundant manual counter; a comprehension over range(1, num + 1)
    visits the same candidates (behavior unchanged, including [] for
    zero or negative input).
    """
    return [candidate for candidate in range(1, num + 1) if num % candidate == 0]
def files_page():
    """Displays a table of the user's files.

    Redirects to the login endpoint when the auth-token cookie does not
    resolve to a user; otherwise renders the user's non-deleted files.
    NOTE(review): the trailing ``, 303`` makes Flask's status the 303
    already carried by ``redirect(code=303)`` — likely redundant.
    """
    user = utils.check_user(token=request.cookies.get("_auth_token"))
    if user is None:
        return redirect(location="/api/login",
                        code=303), 303
    return render_template(template_name_or_list="home/files.html",
                           user=user,
                           files=utils.all(iterable=cache.files,
                                           condition=lambda file: file.owner.id == user.id and not file.deleted))
def GetCodepage(language):
    """ Returns the codepage for the given |language|, formatted as a
    4-digit lowercase hex string (first element of the language map entry).
    """
    lang = _LANGUAGE_MAP[language]
    return "%04x" % lang[0]
def dochdir(thedir):
    """Switch to dir.

    Honors the module-level echo/dry-run flags: echoes the command when
    either is set, and skips the actual chdir on a dry run. A failed
    chdir is reported through u.error.
    """
    if flag_echo or flag_dryrun:
        sys.stderr.write("cd " + thedir + "\n")
    if flag_dryrun:
        return
    try:
        os.chdir(thedir)
    except OSError as err:
        u.error("chdir failed: %s" % err)
def NumVisTerms(doc):
    """Number of visible terms on the page.

    `doc` is a 2-tuple whose second element is the term collection.
    """
    return len(doc[1])
def download_all(path='data', verify=True, extract=True):
    """
    Downloads all the example datasets. If verify is True then compare the
    download signature with the hardcoded signature. If extract is True then
    extract the contents of the zipfile to the given path.
    """
    # DATASETS maps dataset name -> {'url': ..., 'signature': ...}.
    for name, meta in DATASETS.items():
        url = meta['url']
        # Passing signature=None to download_data skips verification.
        signature = meta['signature'] if verify else None
        download_data(url, path=path, signature=signature, extract=extract)
def test_fspl_noLD():
    """
    check if FSPL magnification is calculate properly
    """
    t_0 = 2456789.012345
    t_E = 23.4567
    u_0 = 1e-4
    rho = 1e-3
    # Pick epochs where the source-lens separation u equals rho, u_0 and
    # rho/2 respectively (solving u^2 = u_0^2 + tau^2 for tau).
    t_vec = np.array([-(rho**2-u_0**2)**0.5, 0., ((0.5*rho)**2-u_0**2)**0.5])
    t_vec = t_vec * t_E + t_0
    params = mm.ModelParameters(
        {'t_0': t_0, 'u_0': u_0, 't_E': t_E, 'rho': rho})
    mag_curve = mm.MagnificationCurve(times=t_vec, parameters=params)
    # Use the Gould (1994) uniform finite-source method within t_0 ± t_E.
    methods = [t_0-t_E, 'finite_source_uniform_Gould94', t_0+t_E]
    mag_curve.set_magnification_methods(methods, 'point_source')
    results = mag_curve.get_point_lens_magnification()
    # Expected values are the point-source magnification scaled by the
    # finite-source correction factors.
    u = np.array([rho, u_0, 0.5*rho])
    pspl = (u**2 + 2.) / np.sqrt(u**2 * (u**2 + 4.))
    expected = np.array([1.27323965, 0.19949906, 0.93421546])
    # These values were calculated by Andy Gould (file b0b1.dat).
    expected *= pspl
    np.testing.assert_almost_equal(expected, results, decimal=4)
def replace_inf_price_nb(prev_close: float, close: float, order: Order) -> Order:
    """Replace infinity price in an order.

    Positive prices are capped at `close`, non-positive ones floored at
    `prev_close`. NOTE(review): the test is on sign, not math.isinf —
    presumably callers only pass +inf/-inf sentinels here, since any
    finite price would also be overwritten; confirm upstream contract.
    """
    order_price = order.price
    if order_price > 0:
        order_price = close  # upper bound is close
    else:
        order_price = prev_close  # lower bound is prev close
    # Rebuild the order with the substituted price; all other fields pass
    # through unchanged.
    return order_nb(
        size=order.size,
        price=order_price,
        size_type=order.size_type,
        direction=order.direction,
        fees=order.fees,
        fixed_fees=order.fixed_fees,
        slippage=order.slippage,
        min_size=order.min_size,
        max_size=order.max_size,
        size_granularity=order.size_granularity,
        reject_prob=order.reject_prob,
        lock_cash=order.lock_cash,
        allow_partial=order.allow_partial,
        raise_reject=order.raise_reject,
        log=order.log
    )
def get_trend(d):
    """
    Compute the per-column linear trend (slope versus row position) of
    frame `d`, returned as a Series with entries renamed ``<col>_trend``.
    """
    frame = d.reset_index(drop=True)
    # Use the row position (labelled "minutes") as the regressor.
    frame["minutes"] = np.arange(len(frame), dtype=np.float64)
    cov = frame.cov()
    # slope(col) = cov(col, minutes) / var(minutes)
    slopes = (cov["minutes"] / cov.loc["minutes", "minutes"])[d.columns]
    return slopes.rename(lambda col: col + "_trend")
def getIpAddress():
    """Returns the IP address of the computer the client is running on,
    as it appears to the client.

    See also: system.net.getExternalIpAddress().

    Returns:
        str: The IP address of the local machine, as it sees it.
    """
    # Emulated environment: always report the loopback address.
    loopback_address = "127.0.0.1"
    return loopback_address
def line(x, y, weights=None, clip=0.25):
    """Fit a line with weighted least squares.

    Args:
        x (numpy.array): x-values
        y (numpy.array): y-values
        weights (numpy.array, optional): per-point variances; the WLS
            weights are their reciprocals. Defaults to uniform weights.
        clip (float, optional): Fit only the first part of the data.
            A value in (0, 1) is a fraction of the data; any other value
            is taken as an absolute number of points. Defaults to 0.25.

    Returns:
        pandas.Series: fit parameters
    """
    if 0 < clip < 1:
        # Fractional clip: use the first `clip` share of the points.
        clip_int = int(len(x) * clip) - 1
    else:
        # Absolute clip: interpret `clip` as a point count.
        # Bug fix: this previously read the undefined name `clip_int`
        # (NameError whenever clip >= 1 or clip <= 0).
        clip_int = int(clip)
    # clip data for fit to only use first part
    X = x[:clip_int]
    Y = y[:clip_int]
    # Bug fix: truth-testing a numpy array raises ValueError; compare
    # against None explicitly.
    if weights is not None:
        # `weights` holds variances, so WLS gets inverse-variance weights.
        W = 1 / weights[:clip_int]
    else:
        W = np.ones(len(X))
    # weighted LS
    X = sm.add_constant(X)
    wls_model = sm.WLS(Y, X, weights=W)
    fit_params = wls_model.fit().params
    # MSD = 2*d*D*tau with d = 2 dimensions => D = slope / 4.
    # NOTE(review): assumes `x` is a pandas Series named "tau" so the
    # slope parameter is labelled "tau" — confirm against callers.
    fit_params["diffusion_constant"] = fit_params["tau"] / 2 / 2
    return fit_params
def one_hot(data):
    """
    One-hot encode `data` using pandas.

    Builds a dummy table from the unique values and maps every element of
    `data` to its 0/1 indicator row.
    """
    lookup = pd.get_dummies(data.unique())
    encoded = data.apply(lambda value: lookup[value] == 1)
    return encoded.astype(int)
def update_set(j, n):
    """Computes the update set of the j-th orbital in n modes

    Args:
        j (int) : the orbital index
        n (int) : the total number of modes

    Returns:
        Array of mode indexes
    """
    empty = np.array([])
    # Odd mode counts terminate the recursion with an empty set.
    if n % 2 != 0:
        return empty
    half = n / 2
    if j < half:
        # Left half: the top mode plus the update set of the left subtree.
        return np.append(empty, np.append(n - 1, update_set(j, half)))
    # Right half: recurse into the right subtree, shifted up by `half`.
    return np.append(empty, update_set(j - half, half) + half)
def chunklist(inlist: list, chunksize: int) -> list:
    """Split a list into chunks of determined size.

    The last chunk may be shorter when ``len(inlist)`` is not a multiple
    of ``chunksize``.

    Args:
        inlist: list to chunk
        chunksize: number of elements in each chunk

    Raises:
        TypeError: if ``inlist`` is not a list.

    Returns:
        list: list of chunks.
    """
    if not isinstance(inlist, list):
        raise TypeError(
            "inlist must be a list, got %s" % type(inlist).__name__)
    # Slicing handles the (possibly shorter) trailing chunk automatically;
    # no intermediate generator function is needed.
    return [inlist[i:i + chunksize] for i in range(0, len(inlist), chunksize)]
def test_config_is_allowed_external_url():
    """Test is_allowed_external_url method."""
    config = ha.Config(None)
    config.allowlist_external_urls = [
        "http://x.com/",
        "https://y.com/bla/",
        "https://z.com/images/1.jpg/",
    ]
    allowed = (
        "http://x.com/1.jpg",
        "http://x.com",
        "https://y.com/bla/",
        "https://y.com/bla/2.png",
        "https://z.com/images/1.jpg",
    )
    blocked = (
        "https://a.co",
        "https://y.com/bla_wrong",
        "https://y.com/bla/../image.jpg",
        "https://z.com/images",
    )
    # URLs under an allow-listed prefix must pass...
    for url in allowed:
        assert config.is_allowed_external_url(url)
    # ...and anything outside (including path traversal) must be rejected.
    for url in blocked:
        assert not config.is_allowed_external_url(url)
def _ArchiveFlakesForClosedIssue(flake_issue):
  """Archives flakes with closed issue.

  Flakes with closed issue should be archived since they are fixed or not
  actionable.
  """
  flakes = Flake.query(Flake.flake_issue_key == flake_issue.key).fetch()
  for flake_entity in flakes:
    flake_entity.archived = True
  # Persist all updates in a single batch write.
  ndb.put_multi(flakes)
def create_nwb_group(nwb_path, group_name):
    """
    Create an NWB BehavioralTimeSeries for grouping data.

    Args:
        nwb_path (Path): path to the NWB file
        group_name (str): name of group to be created

    Returns:
    """
    with NWBHDF5IO(nwb_path, 'a') as io:
        nwbfile = io.read()
        face_rhythm = nwbfile.processing['Face Rhythm']
        # Nothing to do when the group already exists.
        if group_name in face_rhythm.data_interfaces:
            return
        face_rhythm.add(BehavioralTimeSeries(name=group_name))
        io.write(nwbfile)
def getLocation(seq, meifile, zones):
    """ Given a sequence of notes and the corresponding MEI Document, calculates and returns the json formatted list of
    locations (box coordinates) to be stored for an instance of a pitch sequence in our CouchDB.

    If the sequence is contained in a single system, only one location will be stored. If the sequence
    spans two systems, a list of two locations will be stored.
    """
    ulys = []
    lrys = []
    twosystems = 0
    endofsystem = len(seq)-1
    # Cache system-break (sb) lookups per note id -- lookBack is expensive.
    if seq[0].getId() not in systemcache:
        systemcache[seq[0].getId()] = meifile.lookBack(seq[0], "sb")
    if seq[endofsystem].getId() not in systemcache:
        systemcache[seq[endofsystem].getId()] = meifile.lookBack(seq[endofsystem], "sb")
    if systemcache[seq[0].getId()] != systemcache[seq[endofsystem].getId()]:
        # The sequence spans two systems and we must store two separate
        # locations to highlight.
        twosystems = 1
        for i in range(1, len(seq)):
            if seq[i-1].getId() not in systemcache:
                systemcache[seq[i-1].getId()] = meifile.lookBack(seq[i-1], "sb")
            # Bug fix: membership was previously tested with the note object
            # (seq[i]) while the cache is keyed by id, so the cache never hit
            # and lookBack was re-run for every note.
            if seq[i].getId() not in systemcache:
                systemcache[seq[i].getId()] = meifile.lookBack(seq[i], "sb")
            # Find the last note on the first system and the first note on
            # the second system.
            if systemcache[seq[i-1].getId()] != systemcache[seq[i].getId()]:
                endofsystem = i  # index of the first note on the second system
                ulx1 = int(findbyID(zones, seq[0].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
                lrx1 = int(findbyID(zones, seq[i-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
                ulx2 = int(findbyID(zones, seq[i].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
                lrx2 = int(findbyID(zones, seq[-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
    else:
        # The sequence is contained in one system; only one box is needed.
        ulx = int(findbyID(zones, seq[0].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
        lrx = int(findbyID(zones, seq[-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
    # Collect the vertical extent of every note's zone.
    for note in seq:
        ulys.append(int(findbyID(zones, note.parent.parent.getAttribute("facs").value, meifile).getAttribute("uly").value))
        lrys.append(int(findbyID(zones, note.parent.parent.getAttribute("facs").value, meifile).getAttribute("lry").value))
    if twosystems:
        uly1 = min(ulys[:endofsystem])
        uly2 = min(ulys[endofsystem:])
        lry1 = max(lrys[:endofsystem])
        lry2 = max(lrys[endofsystem:])
        return [{"ulx": int(ulx1), "uly": int(uly1), "height": abs(uly1 - lry1), "width": abs(ulx1 - lrx1)},
                {"ulx": int(ulx2), "uly": int(uly2), "height": abs(uly2 - lry2), "width": abs(ulx2 - lrx2)}]
    else:
        uly = min(ulys)
        lry = max(lrys)
        return [{"ulx": int(ulx), "uly": int(uly), "height": abs(uly - lry), "width": abs(ulx - lrx)}]
def pytest_runtest_logfinish(nodeid: str) -> None:
    """
    At the end of running the runtest protocol for a single item.

    https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_runtest_logfinish
    """
    global _syrupy
    # The plugin may not have been initialized for this session.
    if not _syrupy:
        return
    _syrupy.ran_item(nodeid)
def overlay_on_image(display_image:numpy.ndarray, object_info_list:list, fps:float):
    """Overlays the boxes and labels onto the display image.

    Mutates ``display_image`` in place; nothing is returned.

    :param display_image: the image on which to overlay the boxes/labels
    :param object_info_list: is a list of lists which have 6 values each
        these are the 6 values:
        [0] string that is network classification ie 'cat', or 'chair' etc
        [1] float value for box upper left X
        [2] float value for box upper left Y
        [3] float value for box lower right X
        [4] float value for box lower right Y
        [5] float value that is the probability 0.0 -1.0 for the network classification.
    :param fps: frames per second to display when the module-level
        ``show_fps`` flag is set.
    :return: None
    """
    # NOTE(review): width/height are computed but unused below — the box
    # coordinates already appear to be in pixels (see the commented-out
    # normalized-coordinate conversions); confirm before removing.
    source_image_width = display_image.shape[1]
    source_image_height = display_image.shape[0]
    for one_object in object_info_list:
        percentage = int(one_object[5] * 100)
        label_text = one_object[0] + " (" + str(percentage) + "%)"
        # Box coordinates, taken directly as pixel values.
        box_left = int(one_object[1])
        box_top = int(one_object[2])
        box_right = int(one_object[3])
        box_bottom = int(one_object[4])
        box_color = (255, 128, 0)  # box color (BGR)
        box_thickness = 2
        cv2.rectangle(display_image, (box_left, box_top), (box_right, box_bottom), box_color, box_thickness)
        # Scale the label background green channel with the confidence,
        # relative to the module-level min_score_percent threshold.
        scale_max = (100.0 - min_score_percent)
        scaled_prob = (percentage - min_score_percent)
        scale = scaled_prob / scale_max
        # draw the classification label string just above and to the left of the rectangle
        label_background_color = (0, int(scale * 175), 75)
        label_text_color = (255, 255, 255)  # white text
        label_size = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
        label_left = box_left
        label_top = box_top - label_size[1]
        # Clamp so the label never leaves the top of the image.
        if (label_top < 1):
            label_top = 1
        label_right = label_left + label_size[0]
        label_bottom = label_top + label_size[1]
        # Filled (-1 thickness) background rectangle behind the label text.
        cv2.rectangle(display_image, (label_left - 1, label_top - 1), (label_right + 1, label_bottom + 1),
                      label_background_color, -1)
        # label text above the box
        cv2.putText(display_image, label_text, (label_left, label_bottom), cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)
    if (show_fps):
        # Build a small FPS banner in the top-left corner and alpha-blend it
        # onto the display image.
        fps_text = "FPS: " + "{:.2f}".format(fps)
        fps_thickness = 2
        fps_multiplier = 1.5
        fps_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, fps_multiplier, fps_thickness)[0]
        text_pad = 10
        box_coord_left = 0
        box_coord_top = 0
        box_coord_right = box_coord_left + fps_size[0] + text_pad * 2
        box_coord_bottom = box_coord_top + fps_size[1] + text_pad * 2
        fps_left = box_coord_left + text_pad
        fps_right = box_coord_right - text_pad
        fps_top = box_coord_top + text_pad
        fps_bottom = box_coord_bottom - text_pad
        label_background_color = (200, 200, 200)
        label_text_color = (255, 0, 0)
        # Render the banner into its own buffer, then blend at 40% opacity.
        fps_image = numpy.full((box_coord_bottom - box_coord_top, box_coord_right - box_coord_left, 3), label_background_color, numpy.uint8)
        cv2.putText(fps_image, fps_text, (fps_left, fps_bottom), cv2.FONT_HERSHEY_SIMPLEX, fps_multiplier, label_text_color, fps_thickness)
        fps_transparency = 0.4
        cv2.addWeighted(display_image[box_coord_top:box_coord_bottom, box_coord_left:box_coord_right], 1.0 - fps_transparency,
                        fps_image, fps_transparency, 0.0, display_image[box_coord_top:box_coord_bottom, box_coord_left:box_coord_right])
def calculate_A0_moving_LE(psi_baseline, psi_goal_0, Au_baseline, Au_goal, deltaz,
                           c_baseline, l_LE, eps_LE):
    """Find the value for A_P0^c that has the same arc length for the first bay
    as for the parent.

    Solves for the first CST coefficient of the child airfoil (A0) such
    that the arc length of the first bay matches the parent's, scaled by
    the leading-edge strain factor (1 - eps_LE).

    NOTE(review): relies on module-level names ``c_P``, ``psi_spars`` and
    the helpers ``dxi_u`` / ``calculate_c_baseline`` — confirm they are
    defined before this is called.
    """
    # Arc-length integrand: c * sqrt(1 + (dxi/dpsi)^2).
    def integrand(psi_baseline, Al, deltaz, c ):
        return c*np.sqrt(1 + dxi_u(psi_baseline, Al, deltaz/c)**2)
    # Residual for fsolve: child first-bay arc length minus the strained
    # parent arc length. Mutates Au_goal[0] with the current A0 guess.
    def equation(A0, L_baseline, Au_goal, deltaz):
        Au_goal[0] = A0
        c = calculate_c_baseline(c_P, Au_goal, Au_baseline, deltaz/c_P, l_LE, eps_LE, psi_spars[0])
        y, err = quad(integrand, 0, psi_goal_0, args=(Au_goal, deltaz, c))
        # NOTE(review): leftover debug print — consider removing.
        print('y', y, y - (1-eps_LE)*L_baseline, A0, c)
        return y - (1-eps_LE)*(L_baseline - c*l_LE)
    # Parent arc length of the first bay, up to the first spar location.
    L_baseline, err = quad(integrand, 0, psi_baseline[0], args=(Au_baseline, deltaz,
                           c_baseline))
    # Suppress fsolve convergence warnings; the initial guess is Au_goal[0].
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        y = fsolve(equation, Au_goal[0], args=(L_baseline, Au_goal, deltaz))
    return y[0]
def create_train_val_set(x_train, one_hot_train_labels, val_size=1000):
    """Split training data into a validation set and a partial training set.

    Parameters
    ----------
    x_train : sequence or array
        Training samples (anything supporting slicing).
    one_hot_train_labels : sequence or array
        One-hot encoded labels aligned with ``x_train``.
    val_size : int, optional
        Number of leading samples reserved for validation. Defaults to
        1000, the previously hard-coded split size.

    Returns
    -------
    tuple
        ``(x_val, partial_x_train, y_val, partial_y_train)``
    """
    # The first `val_size` samples become the validation split; the rest
    # remain for training.
    x_val = x_train[:val_size]
    partial_x_train = x_train[val_size:]
    y_val = one_hot_train_labels[:val_size]
    partial_y_train = one_hot_train_labels[val_size:]
    return (x_val, partial_x_train, y_val, partial_y_train)
def _initialize_indices(model_class, name, bases, attrs):
    """
    Stores the list of indexed attributes.

    Metaclass helper (Python 2 — note the ``iteritems`` calls): walks the
    base classes and the class body, recording which attributes are
    indexed, and copies the index configuration from the class's dunder
    declarations onto private ``_indexed_*`` lists.
    """
    model_class._indexed_fields = []
    # Index configuration declared on the class body.
    model_class._indexed_meta_field = model_class.__index_meta__
    model_class._indexed_unique_fields = model_class.__unique_index__
    model_class._indexed_value_fields = []
    # Inherit indexed fields from model base classes only.
    for parent in bases:
        if not isinstance(parent, ModelBase):
            continue
        for k, v in parent._attributes.iteritems():
            if v.indexed:
                model_class._indexed_fields.append(k)
    # Then collect the fields declared directly on this class.
    for k, v in attrs.iteritems():
        if isinstance(v, (Attribute,)):
            if v.indexed:
                model_class._indexed_fields.append(k)
            elif v.index_value:
                # Value-indexed but not key-indexed fields go in a
                # separate list.
                model_class._indexed_value_fields.append(k)
    # Extra index fields may also be supplied through the Meta options.
    if model_class._meta['indexed_fields']:
        model_class._indexed_fields.extend(model_class._meta['indexed_fields'])
def run_server(host, port, debug):
    """Start the Flask development server."""
    # Name the thread so log lines are attributable to the web server.
    threading.current_thread().name = 'server'
    # Import lazily to avoid pulling in the web app at module import time.
    from libraryuir.web import create_app
    create_app().run(host, port, debug)
def build_master_and_get_version():
  """Checks out the latest master build and creates a new binary.

  Clones the clusterfuzz-tools repo on first run, syncs it to
  origin/master, builds the CI pex with pants, installs it at
  BINARY_LOCATION, and returns the short (7-char) SHA of the built
  revision.
  """
  # First run: clone the tool source next to HOME.
  if not os.path.exists(TOOL_SOURCE):
    process.call(
        'git clone https://github.com/google/clusterfuzz-tools.git', cwd=HOME)
  # Force-sync the working tree to the latest origin/master.
  process.call('git fetch', cwd=TOOL_SOURCE)
  process.call('git checkout origin/master -f', cwd=TOOL_SOURCE)
  process.call('./pants binary tool:clusterfuzz-ci', cwd=TOOL_SOURCE,
               env={'HOME': HOME})
  # Replace any previously installed binary with the fresh build.
  delete_if_exists(BINARY_LOCATION)
  shutil.copy(os.path.join(TOOL_SOURCE, 'dist', 'clusterfuzz-ci.pex'),
              BINARY_LOCATION)
  # The full SHA is too long and unpleasant to show in logs. So, we use the
  # first 7 characters of the SHA instead.
  return process.call(
      'git rev-parse HEAD', capture=True, cwd=TOOL_SOURCE)[1].strip()[:7]
def get_adj_date(time, today):
    """
    Translate a relative time string (e.g. "3 weeks ago" or
    "Posted 2 days ago") into the calendar date that many days/weeks/months
    before ``today``.

    Returns a tuple of (ISO date string, datetime).
    """
    # Map the time unit to a day multiplier; unknown units count as zero.
    if 'month' in time:
        days_per_unit = 28
    elif 'week' in time:
        days_per_unit = 7
    elif 'day' in time:
        days_per_unit = 1
    else:
        days_per_unit = 0
    # Hack! updated from 01.12.2020: some scrapes prefix the string with
    # "Posted ". TODO: please remove this!!!
    if time == '--missing--':
        units = 0
    elif 'Posted ' in time:
        units = int(time.split('Posted ')[1].split()[0])
    else:
        units = int(time.split(' ')[0])
    adjusted_date = today - timedelta(days=units * days_per_unit)
    return str(adjusted_date.date()), adjusted_date
def update_dataset_temporal_attrs(dataset: xr.Dataset,
                                  update_existing: bool = False,
                                  in_place: bool = False) -> xr.Dataset:
    """
    Update temporal CF/THREDDS attributes of given *dataset*.

    :param dataset: The dataset.
    :param update_existing: If ``True``, any existing attributes will be updated.
    :param in_place: If ``True``, *dataset* will be modified in place and returned.
    :return: A new dataset, if *in_place* is ``False`` (default), else the passed and modified *dataset*.
    """
    # Delegate to the generic attribute updater, restricted to the time
    # attribute definitions.
    attrs_data = [_TIME_ATTRS_DATA]
    return _update_dataset_attrs(dataset, attrs_data,
                                 update_existing=update_existing,
                                 in_place=in_place)
def pprint_bird_protocols(version):
    """
    Pretty print the output from the BIRD "show protocols". This parses the
    existing output and lays it out in pretty printed table.

    Runs the appropriate birdcl command inside the "calico-node" container
    via the module-level docker_client, parses the protocol table, and
    prints a PrettyTable of BGP peers. Falls back to printing the raw BIRD
    output if the table cannot be parsed. (Python 2: uses print statements.)

    :param version: The IP version (4 or 6).
    :return: None.
    """
    # Based on the IP version, run the appropriate BIRD command, and select
    # the appropriate separator char for an IP address.
    if version == 4:
        bird_cmd = docker_client.exec_create("calico-node",
                                             ["/bin/bash", "-c",
                                              "echo show protocols | "
                                              "birdcl -s /etc/service/bird/bird.ctl"])
        results = docker_client.exec_start(bird_cmd)
        ip_sep = "."
    else:
        bird6_cmd = docker_client.exec_create("calico-node",
                                              ["/bin/bash", "-c",
                                               "echo show protocols | "
                                               "birdcl -s "
                                               "/etc/service/bird6/bird6.ctl"])
        results = docker_client.exec_start(bird6_cmd)
        ip_sep = ":"
    # Parse the output from BIRD to extract the values in the protocol status
    # table. We'll further parse the name since that includes details about
    # the type of peer and the peer IP address.
    x = PrettyTable(["Peer address", "Peer type", "State",
                     "Since", "Info"])
    lines = results.split("\n")
    found_table = False
    for line in lines:
        # When BIRD displays its protocol table, it prints the bird> prompt and
        # then shifts the cursor to print back over the prompt. However, this
        # means that we get rogue prompts when parsing the output. For this
        # processing just remove the prompt if it is present.
        if line.startswith("bird>"):
            line = line[5:]
        # Skip blank lines.
        line = line.strip()
        if not line:
            continue
        # Split the line into columns based on whitespace separators. We split
        # a maximum of 5 times because the 6th "info" column may contain a
        # string that itself includes whitespace that should be maintained.
        columns = re.split("\s+", line.strip(), 5)
        # Loop until we find the table heading.
        if columns == ["name", "proto", "table", "state", "since", "info"]:
            found_table = True
            continue
        elif not found_table:
            continue
        # We expect either 5 or 6 columns depending on whether there was a
        # value in the info column. Anything else is not handled, so revert
        # to displaying the raw BIRD output.
        if not (5 <= len(columns) <= 6):
            found_table = False
            break
        # Parse the name, we name our BGP peers as "Mesh", "Node" or "Global"
        # followed by the IP address. Extract the info so we can pretty
        # print it. Underscores in the name stand in for the IP separator
        # ("." for IPv4, ":" for IPv6).
        combined = columns[0]
        if combined.startswith("Mesh_"):
            name = combined[5:].replace("_", ip_sep)
            ptype = "node-to-node mesh"
        elif combined.startswith("Node_"):
            name = combined[5:].replace("_", ip_sep)
            ptype = "node specific"
        elif combined.startswith("Global_"):
            name = combined[7:].replace("_", ip_sep)
            ptype = "global"
        else:
            # This is not a BGP Peer, so do not include in the output.
            continue
        x.add_row([name, ptype, columns[3], columns[4],
                   columns[5] if len(columns) == 6 else ""])
    # If we parsed the table then pretty print the table, otherwise just output
    # the BIRD output directly. The first line of the BIRD output provides an
    # overall BIRD status.
    if found_table:
        print str(x) + "\n"
    else:
        print results + "\n"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.