| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def getListProjectsInGroup(config, grp):
"""
Get list of projects in a group
"""
print("Retrieve project of group: %s " % grp.name)
data = None
__prjLst = gitlabProjectList(grp)
if (DUMMY_DATA):
testFile = getFullFilePath(ISSUES_GRP_TEST_FILE)
with open(testFile, 'rt') as f:
data = f.read()
else:
# retrieve data from server
url = getApiUrl(config, "groups/%s/projects" % grp.id)
logD("URL " + url)
hdrs = {"PRIVATE-TOKEN": config.getToken()}
__totalPage = 0
__page = 1
while True:
logD("Page %d" % (__page))
params = {'page': __page}
logD("header %s" % hdrs)
resp = requests.get(url, headers=hdrs, params=params)
logD("resp status_code %s" % resp.status_code)
if (resp.status_code == 200):
data = resp.content
logD (resp.headers)
next_page = resp.headers.get('X-Next-Page')
if next_page:
__page = int(next_page)
else:
__page = 0
logD("next page %d" % (__page))
else:
__page = 0
break
if (data is not None) and len(data) > 0:
logD("data %s" % data)
__prjLst.parseData(data)
__totalPage += 1
if (config.getMaxProject() is not None) and (__prjLst.getLen() >= config.getMaxProject()):
print("Reach max %s/%s" % (__prjLst.getLen(), config.getMaxProject()))
break
if (__page == 0): #ok, reach end, out
break
if (__totalPage > 500): # 500 pages? no way, something wrong, out
print("SOMETHING WRONG, total is to big, out")
break
print("Total pages %d" % (__totalPage))
return __prjLst
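# Usage sketch (not part of the original snippet): the same X-Next-Page pagination
# pattern shown standalone with plain `requests`, against a hypothetical GitLab
# base URL and token (both placeholders).
import requests

def fetch_all_group_projects(base_url, group_id, token):
    """Collect every page of /groups/<id>/projects by following X-Next-Page."""
    url = "%s/api/v4/groups/%s/projects" % (base_url, group_id)
    headers = {"PRIVATE-TOKEN": token}
    page, projects = 1, []
    while page:
        resp = requests.get(url, headers=headers, params={"page": page})
        resp.raise_for_status()
        projects.extend(resp.json())
        next_page = resp.headers.get("X-Next-Page")
        page = int(next_page) if next_page else 0
    return projects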
| 16,400
|
def group_naptan_datatypes(gdf, naptan_column='LocalityName'):
"""[summary] groups together naptan datasets into subsets that are grouped
by the given naptan column.
Args:
gdf ([type]): [description]
naptan_column (str, optional): [description]. Defaults to 'LocalityName'.
Returns:
[type]: [description]
"""
# collapse dataset to minimum, keeping possibly useable datasets
gdf2 = gdf[['LocalityName',
'NptgLocalityCode',
'AreaName',
'StopAreaCode',
'Latitude',
'Longitude']]
# calculates the centroid of each given naptan segment.
gdf3 = gdf2.groupby([naptan_column], as_index=False)[
['Latitude', 'Longitude']].apply(lambda x: np.mean(x, axis=0))
# convert the lat lon into centroid geometry points.
gdf4 = geo.calculate_naptan_geometry(gdf3)
# save output to csv.
gdf4.to_csv(f'{naptan_column}.csv',
encoding='utf-8',
sep=',')
return gdf4
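# Usage sketch of the core groupby-centroid step with a tiny made-up stops table
# (pandas only; the project-specific geo.calculate_naptan_geometry call and the
# CSV export are omitted here).
import pandas as pd

stops = pd.DataFrame({
    "LocalityName": ["Leeds", "Leeds", "York"],
    "Latitude": [53.80, 53.82, 53.96],
    "Longitude": [-1.55, -1.53, -1.08],
})
centroids = stops.groupby("LocalityName", as_index=False)[["Latitude", "Longitude"]].mean()
print(centroids)  # one centroid row per locality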
| 16,401
|
def test_dataview_delitem():
"""Test that table.data can be indexed like a numpy array."""
input = _TABLE_DATA["dict"]
table = Table(value=input)
row_keys = table.keys("row") # also demoing keys views
col_keys = table.keys("column") # also demoing keys views
assert list(row_keys) == ["r1", "r2"]
assert list(col_keys) == ["col_1", "col_2", "col_3"]
del table.data[1]
assert not table.to_dict("dict") == input
assert list(row_keys) == ["r1"]
assert list(col_keys) == ["col_1", "col_2", "col_3"]
del table.data[:, 2]
assert list(row_keys) == ["r1"]
assert list(col_keys) == ["col_1", "col_2"]
with pytest.raises(ValueError):
del table.data[0, 0]
| 16,402
|
def plot_paper(altitudes, S_hat, world_shape, fileName=""):
"""
Plots for NIPS paper
Parameters
----------
altitudes: np.array
True value of the altitudes of the map
S_hat: np.array
Safe and ergodic set
world_shape: tuple
Size of the grid world (rows, columns)
fileName: string
Name of the file to save the plot. If empty string the plot is not
saved
Returns
-------
"""
# Size of figures and colormap
tw = cw = 13.968
cmap = 'jet'
alpha = 1.
alpha_world = 0.25
size_wb = np.array([cw / 2.2, tw / 4.])
#size_wb = np.array([cw / 4.2, cw / 4.2])
# Shift altitudes
altitudes -= np.nanmin(altitudes)
vmin, vmax = (np.nanmin(altitudes), np.nanmax(altitudes))
origin = 'lower'
fig = paper_figure(size_wb)
# Copy altitudes for different alpha values
altitudes2 = altitudes.copy()
altitudes2[~S_hat[:, 0]] = np.nan
axis = fig.gca()
# Plot world
c = axis.imshow(np.reshape(altitudes, world_shape).T, origin=origin, vmin=vmin,
vmax=vmax, cmap=cmap, alpha=alpha_world)
cbar = plt.colorbar(c)
#cbar = None
# Plot explored area
plt.imshow(np.reshape(altitudes2, world_shape).T, origin=origin, vmin=vmin,
vmax=vmax, interpolation='nearest', cmap=cmap, alpha=alpha)
format_figure(axis, cbar)
# Save figure
if fileName:
plt.savefig(fileName, transparent=False, format="pdf")
plt.show()
| 16,403
|
def get_facts_by_name_and_value(api_url=None, fact_name=None, fact_value=None, verify=False, cert=list()):
"""
Returns facts by name and value
:param api_url: Base PuppetDB API url
:param fact_name: Name of fact
:param fact_value: Value of fact
"""
return utils._make_api_request(api_url, '/facts/{0}/{1}'.format(fact_name, fact_value), verify, cert)
| 16,404
|
def produce_phase(pipeline_run):
"""Produce result with Produce phase data."""
scores = pipeline_run['run']['results']['scores']
if len(scores) > 1:
raise ValueError('This run has more than one score!')
scores = scores[0]
return {
'metric': scores['metric']['metric'],
'context': pipeline_run['context'],
'normalized_score': scores['normalized']
}
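# Usage sketch with a minimal pipeline_run dict shaped like the fields the
# function reads (all values are made up for illustration).
run = {
    "context": "TESTING",
    "run": {"results": {"scores": [
        {"metric": {"metric": "F1_MACRO"}, "normalized": 0.87},
    ]}},
}
print(produce_phase(run))
# {'metric': 'F1_MACRO', 'context': 'TESTING', 'normalized_score': 0.87}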
| 16,405
|
async def init_integration(hass: HomeAssistant, use_nickname=True) -> MockConfigEntry:
"""Set up the Mazda Connected Services integration in Home Assistant."""
get_vehicles_fixture = json.loads(load_fixture("mazda/get_vehicles.json"))
if not use_nickname:
get_vehicles_fixture[0].pop("nickname")
get_vehicle_status_fixture = json.loads(
load_fixture("mazda/get_vehicle_status.json")
)
config_entry = MockConfigEntry(domain=DOMAIN, data=FIXTURE_USER_INPUT)
config_entry.add_to_hass(hass)
client_mock = MagicMock(
MazdaAPI(
FIXTURE_USER_INPUT[CONF_EMAIL],
FIXTURE_USER_INPUT[CONF_PASSWORD],
FIXTURE_USER_INPUT[CONF_REGION],
aiohttp_client.async_get_clientsession(hass),
)
)
client_mock.get_vehicles = AsyncMock(return_value=get_vehicles_fixture)
client_mock.get_vehicle_status = AsyncMock(return_value=get_vehicle_status_fixture)
client_mock.lock_doors = AsyncMock()
client_mock.unlock_doors = AsyncMock()
with patch(
"homeassistant.components.mazda.config_flow.MazdaAPI",
return_value=client_mock,
), patch("homeassistant.components.mazda.MazdaAPI", return_value=client_mock):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return client_mock
| 16,406
|
def get_app_domain():
"""
Returns the full URL to the domain. The output from this function gets
generally appended with a path string.
"""
url = settings.INCOMEPROPERTYEVALUATOR_APP_HTTP_PROTOCOL
url += settings.INCOMEPROPERTYEVALUATOR_APP_HTTP_DOMAIN
return url
| 16,407
|
def deprecated(removal_version, hint_message=None, subject=None, ensure_stderr=False):
"""Marks a function or method as deprecated.
A removal version must be supplied and it must be greater than the current 'pantsbuild.pants'
version.
When choosing a removal version there is a natural tension between the code-base, which benefits
from short deprecation cycles, and the user-base which may prefer to deal with deprecations less
frequently. As a rule of thumb, if the hint message can fully convey corrective action
succinctly and you judge the impact to be on the small side (affects custom tasks as opposed to
affecting BUILD files), lean towards the next release version as the removal version; otherwise,
consider initiating a discussion to win consensus on a reasonable removal version.
:param str removal_version: The pantsbuild.pants version which will remove the deprecated
function.
:param str hint_message: An optional hint pointing to alternatives to the deprecation.
:param str subject: The name of the subject that has been deprecated for logging clarity. Defaults
to the name of the decorated function/method.
:param bool ensure_stderr: Forwarded to `ensure_stderr` in warn_or_error().
:raises DeprecationApplicationError if the @deprecation is applied improperly.
"""
validate_deprecation_semver(removal_version, 'removal version')
def decorator(func):
if not inspect.isfunction(func):
raise BadDecoratorNestingError('The @deprecated decorator must be applied innermost of all '
'decorators.')
func_full_name = '{}.{}'.format(func.__module__, func.__name__)
@wraps(func)
def wrapper(*args, **kwargs):
warn_or_error(removal_version, subject or func_full_name, hint_message,
ensure_stderr=ensure_stderr)
return func(*args, **kwargs)
return wrapper
return decorator
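# Intended usage sketch (assumes the surrounding pants helpers such as
# warn_or_error and validate_deprecation_semver are importable; the version
# string and hint below are made up).
@deprecated('999.9.9', hint_message='Use new_api() instead.')
def old_api():
    return 42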
| 16,408
|
def prepare_comparator(comparator_path):
""" Processes the comparator path from the benchmark specification. Imports the object
dynamically.
Parameters
----------
comparator_path : str
Path to the python script file containing the comparator definition.
Returns
-------
ccobra.CCobraComparator
Comparator object.
"""
comp = None
with contextmanager.dir_context(comparator_path):
imp = modelimporter.ModelImporter(comparator_path, superclass=CCobraComparator)
comp = imp.instantiate()
if not comp:
raise ValueError('Failed to instantiate comparator class.')
return comp
| 16,409
|
def get_genes(path):
"""Returns a list of genes from a DE results table"""
with open(path) as gene_list:
gene_list = csv.reader(gene_list)
gene_list = [row[0] for row in gene_list if row[0].startswith('P')]
return gene_list
| 16,410
|
def process(path, ignore=[]):
"""calculate SET1 directory stats for given path, skipping
directories mentioned in ignore (e.g. '.hg', '.svn', ...)
"""
if not PY3K:
# unicode is critical for non-English local names on Windows
path = unicode(path)
s = copy.copy(SET1)
s['totalsize'] = 0
for root, dirs, files in os.walk(path):
# filter directories
for ig in ignore:
if ig in dirs:
dirs.remove(ig)
for f in files:
s['totalsize'] += os.path.getsize(os.path.join(root, f))
s['filesnum'] += len(files)
s['dirsnum'] += len(dirs)
return s
| 16,411
|
def getNoncaptureMovesForRegularPiece(theGame, pieceLocation):
""" This returns a GameNode for every legal move of a regular piece """
moveList = []
xBoard = pieceLocation.get_x_board()
yBoard = pieceLocation.get_y_board()
pieceDestinationLeft = None
pieceDestinationRight = None
if theGame.getState(pieceLocation) is types.PLAYER_A_REGULAR:
# Player A moves in positive Y increments
moveDelta = 1
elif theGame.getState(pieceLocation) is types.PLAYER_B_REGULAR:
# Player B moves in negative Y increments
moveDelta = -1
else:
# not a regular piece at this location, so there are no regular-piece moves to generate
return moveList
pieceDestinationLeft = getCoordinateHelper(xBoard - 1, yBoard + moveDelta)
pieceDestinationRight = getCoordinateHelper(xBoard + 1, yBoard + moveDelta)
if (pieceDestinationLeft and
destinationIsEmpty(theGame, pieceDestinationLeft)):
moveList.append(makePieceMove(theGame,
pieceDestinationLeft,
pieceLocation))
if (pieceDestinationRight and
destinationIsEmpty(theGame, pieceDestinationRight)):
moveList.append(makePieceMove(theGame,
pieceDestinationRight,
pieceLocation))
return moveList
| 16,412
|
def _top_level_package_filenames(tarball_paths):
"""Transform the iterable of npm tarball paths to the top-level files contained within the package."""
paths = []
for path in tarball_paths:
parts = pathlib.PurePath(path).parts
if parts[0] == "package" and len(parts) == 2:
paths.append(parts[1])
return frozenset(paths)
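# Usage sketch: npm tarballs nest files under a top-level "package/" directory;
# only its direct children are kept.
print(_top_level_package_filenames([
    "package/package.json",
    "package/README.md",
    "package/lib/index.js",  # nested one level deeper, so ignored
]))
# frozenset({'package.json', 'README.md'})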
| 16,413
|
def scale(X_train, X_test, type='MinMaxScaler', tuning_mode= True):
"""
This function applies Min-Max or Standard scaling to a set of features split into train and test data
Args:
X_train: a pandas dataframe with features of the training window
X_test: a pandas dataframe with features of the test window
type: the scaler to use, either 'MinMaxScaler' (default) or 'StandardScaler'
tuning_mode: a boolean flag used when tuning; when True the scaled shapes are printed. Defaults to True.
Return:
Two arrays coming from the original dataframes after applying StandardScaler() or MinMaxScaler(), where the scaler is fit on the X_train features only
"""
# Create an Scaler instance
scaler = MinMaxScaler()
if type=='StandardScaler':
scaler = StandardScaler()
# Apply the scaler model to fit the X_train data
X_scaler = scaler.fit(X_train)
# Transform the X_train and X_test DataFrames using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
if tuning_mode == True:
print(f"X_train_scaled shape: {X_train_scaled.shape}")
print(f"X_test_scaled shape: {X_test_scaled.shape}")
return X_train_scaled, X_test_scaled
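# Usage sketch on a small random train/test split (assumes scikit-learn's
# MinMaxScaler/StandardScaler are imported at module level, as scale() requires).
import numpy as np
import pandas as pd

X = pd.DataFrame(np.random.rand(10, 3), columns=["f1", "f2", "f3"])
X_train_scaled, X_test_scaled = scale(X.iloc[:7], X.iloc[7:],
                                       type="StandardScaler", tuning_mode=False)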
| 16,414
|
def calc_positions(pl, pr, region1, region3, w, xi, t, gamma, dustFrac=0.):
"""
:return: tuple of positions in the following order ->
Head of Rarefaction: xhd, Foot of Rarefaction: xft,
Contact Discontinuity: xcd, Shock: xsh
"""
p1, rho1 = region1[:2] # don't need velocity
p3, rho3, u3 = region3
c1 = sound_speed(gamma, p1, rho1, dustFrac)
c3 = sound_speed(gamma, p3, rho3, dustFrac)
if pl > pr:
xsh = xi + w * t
xcd = xi + u3 * t
xft = xi + (u3 - c3) * t
xhd = xi - c1 * t
else:
# pr > pl
xsh = xi - w * t
xcd = xi - u3 * t
xft = xi - (u3 - c3) * t
xhd = xi + c1 * t
return xhd, xft, xcd, xsh
| 16,415
|
def spectrum_correlation_fft(tlist, y):
"""
Calculate the power spectrum corresponding to a two-time correlation
function using FFT.
Parameters
----------
tlist : *list* / *array*
list/array of times :math:`t` which the correlation function is given.
y : *list* / *array*
list/array of correlations corresponding to time delays :math:`t`.
Returns
-------
w, S : *tuple*
Returns an array of angular frequencies 'w' and the corresponding
one-sided power spectrum 'S(w)'.
"""
if debug:
print(inspect.stack()[0][3])
N = len(tlist)
dt = tlist[1] - tlist[0]
F = scipy.fftpack.fft(y)
# calculate the frequencies for the components in F
f = scipy.fftpack.fftfreq(N, dt)
# select only indices for elements that corresponds
# to positive frequencies
indices = np.where(f > 0.0)
return 2 * pi * f[indices], 2 * dt * np.real(F[indices])
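# Usage sketch: spectrum of an exponentially decaying correlation function
# (assumes numpy/scipy and the module-level names used inside the function,
# e.g. the `debug` flag, are available).
import numpy as np

tlist = np.linspace(0, 20, 4000)
corr = np.exp(-0.5 * tlist)
w, S = spectrum_correlation_fft(tlist, corr)  # w: angular frequencies, S: one-sided spectrum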
| 16,416
|
def accuracy(targets, predictions, weights=None):
"""Computes the categorical accuracy.
Given a set of ground truth values and a set of predicted labels as tensors of
the same shape, it returns a tensor of the same shape with 1.0 in those position
where the ground truth value and the predicted one are equal, 0.0 otherwise.
So, if the ground truth is [[1, 2, 3], [0, 9, 23]] and the predicted labels
are [[1, 2, 4], [9, 0, 23]] the result will be: [[1, 1, 0], [0, 0, 1]].
Arguments:
targets: the ground truth values `Tensor`, with `tf.int32` as `dtype`. It has rank
`[d_0, d_1, ..., d_{r-1}]` and the last value is supposed to range between
`0` and `num_classes - 1`, where `num_classes` is the number of possible classes.
predictions: the predicted values `Tensor` with `tf.float32` as `dtype`. It can
have shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float32` and
represents the probability distribution across the output classes generated by
the model -- so that the predicted label is the one coming from argmax over the
last dimension. Alternatively it can be of the same shape, `dtype` and format of
`target`, and it will considered as the predicted labels.
weights: coefficients for the metric. This must be scalar or of same rank as `target`.
Returns:
values: a `Tensor` of `dtype=tf.float32` and of the same shape as `targets`
representing the accuracy, weighted according to the input argument `weights`.
weights: a `Tensor` of `dtype=tf.float32` and of the same shape of `values`
representing the weighted scheme for the streaming average on `values`, which
is the same tensor of the input `weights` argument.
"""
trank = targets.get_shape().ndims
prank = predictions.get_shape().ndims
if prank > trank:
diff = prank - trank
if diff > 1:
raise ValueError(
"""Rank of `predictions` must be equal to rank of `label` """
"""or greater of 1, found %d and %d instead.""" % (prank, trank))
predictions = tf.argmax(predictions, axis=-1) # tf.int64!!!
predictions = tf.cast(predictions, tf.int32)
is_equal = tf.equal(targets, predictions)
is_equal = tf.cast(is_equal, tf.float32)
if weights is not None:
is_equal = tf.multiply(is_equal, weights)
return is_equal, weights
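# Minimal sketch using the "same shape as targets" form of predictions
# (assumes `import tensorflow as tf`, as the function itself does).
import tensorflow as tf

targets = tf.constant([[1, 2, 3], [0, 9, 23]], dtype=tf.int32)
predictions = tf.constant([[1, 2, 4], [9, 0, 23]], dtype=tf.int32)
values, _ = accuracy(targets, predictions)  # [[1, 1, 0], [0, 0, 1]] as float32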
| 16,417
|
def restart_parent_process() -> None:
"""After enabling or disabling a plugin we must gracefully restart our server process."""
logger.info("Restarting requested from %s", os.getpid())
# Don't restart if we're running under the develop server
if sys.argv[1] != "runserver":
os.kill(os.getppid(), signal.SIGHUP)
| 16,418
|
def jp_author_name_normalized(name):
"""Construct the author name as P. Szekely."""
clean = name.replace('.',' ').replace(',',' ').replace(';', ' ')
clean = asciiChars(clean, '')
names = re.sub(r'\s+', ' ', clean.strip()).split(' ');
last_word = names[-1]
if len(last_word) == 1:
# The last word is an initial, so we accumulate all words before it that are not initials
# that will be our last name
i = 0;
index = -1 # index of last word that is not an initial
for n in names:
if len(n)>1:
index = i
else:
names[i] = n + '.'
i = i + 1;
if index == -1 or index == len(names) - 1:
return ' '.join(names).title();
last = names[index]
first = ' '.join(names[0:index]) + ' '.join(names[index + 1:])
return (first + ' ' + last).title()
else:
i = 0
for n in names:
if len(n) == 1:
names[i] = n + '.'
elif i < len(names) - 1:
names[i] = n[0] + '.'
i = i + 1
return ' '.join(names).title();
| 16,419
|
def test_randint():
"""
test randint
Returns:
None
"""
with fluid.dygraph.guard(fluid.CPUPlace()):
x = paddle.tensor.randint(low=1, high=5, shape=[3, 3], seed=33)
if platform.system() == "Darwin":
expect = [[3, 4, 1], [2, 4, 1], [1, 2, 3]]
elif platform.system() == "Linux":
expect = [[1, 4, 4], [2, 4, 2], [4, 1, 3]]
else:
expect = [[3, 2, 1], [2, 1, 2], [3, 3, 4]]
tools.compare(x.numpy(), expect)
| 16,420
|
def plot_training_results(train_loss_list, valid_loss_list, valid_accuracy_list, epoch_num):
"""
This function plots the results of training the network.
Inputs:
train_loss_list: list of loss value on the entire training dataset.
valid_loss_list: list of loss value on the entire validation dataset.
valid_accuracy_list: list of accuracy on the entire validation dataset.
Output: None
"""
# Plotting training and validation loss vs. epoch number
plt.figure()
plt.plot(range(len(train_loss_list)),
train_loss_list, label='Training Loss')
plt.plot(range(len(valid_loss_list)),
valid_loss_list, label='Validation Loss')
plt.title(
f'Training and Validation Loss Vs. Epoch Number ({epoch_num} Epochs)')
plt.xlabel('Epoch Number')
plt.ylabel('Loss')
plt.legend(loc="best")
plt.savefig(f"images/Losses ({epoch_num} Epochs).png")
plt.show()
# Plotting validation accuracy vs. epoch number
plt.figure()
plt.plot(range(len(valid_accuracy_list)),
valid_accuracy_list, label='Validation Accuracy')
plt.title(f'Validation Accuracy Vs. Epoch Number ({epoch_num} Epochs)')
plt.xlabel('Epoch Number')
plt.ylabel('Accuracy')
plt.xlim([0, len(train_loss_list)])
plt.ylim([0, 100])
plt.legend(loc="best")
plt.savefig(f"images/Accuracy ({epoch_num} Epochs).png")
plt.show()
| 16,421
|
def _write_vocabulary_to_file(model_prefix: str, voc_fname: str):
"""write processed vocabulary to file"""
snt_vocab = _read_sentencepiece_vocab("{}.vocab".format(model_prefix))
bert_vocab = list(map(_parse_sentencepiece_token, snt_vocab))
ctrl_symbols = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
bert_vocab = ctrl_symbols + bert_vocab
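# NOTE: voc_size is assumed to be a module-level constant giving the target vocabulary size.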
bert_vocab += ["[UNUSED_{}]".format(i) for i in range(voc_size - len(bert_vocab))]
# write vocabulary to file
with open(voc_fname + '.txt', "w") as fo:
for token in bert_vocab:
fo.write(token + "\n")
| 16,422
|
def run5():
"""传入客户号,客户类型,只生成交易"""
busi = ',2,2,20200222'
main4()
| 16,423
|
def palette(tensor, shape, name=None, time=0.0, speed=1.0):
"""
Another approach to image coloration
https://iquilezles.org/www/articles/palettes/palettes.htm
"""
if not name:
return tensor
channel_shape = [shape[0], shape[1], 3]
p = palettes[name]
offset = p["offset"] * tf.ones(channel_shape)
amp = p["amp"] * tf.ones(channel_shape)
freq = p["freq"] * tf.ones(channel_shape)
phase = p["phase"] * tf.ones(channel_shape) + time
# Multiply value_map's result x .875, in case the image is just black and white (0 == 1, we don't want a solid color image)
return offset + amp * tf.math.cos(math.tau * (freq * value.value_map(tensor, shape, keepdims=True, with_normalize=False) * .875 + phase))
| 16,424
|
def _generate_frame(drive_date, drive_number, frame, attack_type, verbose=Verbose.NORMAL):
"""
generates a log file corresponding to the attack frame and saves it in the raw
folder with the same structure as the original data, under the attack/log folder
Usage::
>>> generate_frame('2011_09_26', 1, 0)
:param drive_date: drive date (ex. '2011_09_26')
:param drive_number: drive number (ex. 1)
:param frame: frame within drive (ex. 0)
"""
image = paths.rgb.external_frame(drive_date, drive_number, frame)
image = cv2.imread(str(image))
# load stored mrcnn result pickle file
pickle_path = paths.mask.mrcnn_pickle(drive_date, drive_number, frame)
with open(pickle_path, 'rb') as handle:
result = pickle.load(handle)
# step 2: split masks to individual images
masks = _split_masks(result['masks'])
# step 3: log difficulty
csv_data = [['index', 'size', 'distance', 'attack_type']]
_calculate_difficulty(masks, csv_data, attack_type)
output_path = paths.attack.log_file(drive_date, drive_number, frame, attack_type)
output_path.parent.mkdir(exist_ok=True, parents=True) # ensure directory exists
with open(output_path, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data)
| 16,425
|
def binarize_categorical_columns(
input_train_df, input_test_df, categorical_columns):
"""Function to converting categorical features to one-hot encodings."""
# Binarize categorical columns.
binarized_train_df = pd.get_dummies(
input_train_df, columns=categorical_columns)
binarized_test_df = pd.get_dummies(
input_test_df, columns=categorical_columns)
# Make sure the train and test dataframes have the same binarized columns.
# Identify columns in train set not in test set and fill them in test set.
test_df_missing_cols = set(binarized_train_df.columns) - set(
binarized_test_df.columns)
for c in test_df_missing_cols:
binarized_test_df[c] = 0
# Identify columns in test set not in train set and fill them in train set.
train_df_missing_cols = set(binarized_test_df.columns) - set(
binarized_train_df.columns)
for c in train_df_missing_cols:
binarized_train_df[c] = 0
# Just to be sure that both train and test dataframes have the same columns.
binarized_train_df = binarized_train_df[binarized_test_df.columns]
return binarized_train_df, binarized_test_df
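# Usage sketch with a toy split where the test frame is missing one category
# (pandas only; the values are made up).
import pandas as pd

train = pd.DataFrame({"color": ["red", "blue", "green"], "x": [1, 2, 3]})
test = pd.DataFrame({"color": ["red", "blue"], "x": [4, 5]})
train_bin, test_bin = binarize_categorical_columns(train, test, ["color"])
assert list(train_bin.columns) == list(test_bin.columns)  # e.g. x, color_blue, color_green, color_red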
| 16,426
|
def testAgentBuildCLI_whenCommandIsValidAndImageAlreadyExists_ShowsMessageAndExists(image_cleanup):
"""Test ostorlab agent build CLI command : Case where the command is valid.
The agent container should be built.
"""
del image_cleanup
dummy_def_yaml_file_path = Path(__file__).parent / 'assets/dummydef.yaml'
runner = testing.CliRunner()
_ = runner.invoke(rootcli.rootcli, ['agent',
'build',
f'--file={dummy_def_yaml_file_path}',
'--organization=ostorlab'])
result = runner.invoke(rootcli.rootcli, ['agent',
'build',
f'--file={dummy_def_yaml_file_path}',
'--organization=ostorlab'])
assert 'already exist' in result.output
assert result.exit_code == 0
| 16,427
|
def smallest_sval(X, solver='lobpcg', **kws):
"""
Computes the smallest singular value of a matrix using
scipy.sparse.linalg.svds
Parameters
----------
X: array-like
solver: str
Which solver to use. Must be one of ['lobpcg', 'arpack']
**kws
Kws for svds
Output
------
smallest_sval: float
The smallest singular value of X
"""
# for 1d arrays return the frobenius norm
if min(X.shape) == 1:
return np.sqrt((X.reshape(-1) ** 2).sum())
return svds(X, k=1, which='SM', solver=solver, **kws)[1].item()
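# Usage sketch: single-column inputs take the Frobenius-norm shortcut; wider
# matrices go through scipy.sparse.linalg.svds with the chosen solver.
import numpy as np

print(smallest_sval(np.array([[3.0], [4.0]])))  # 5.0 via the shortcut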
| 16,428
|
def test_load_schemes() -> None:
"""Test that loading schemes works"""
for scheme_file in os.listdir(SCHEMES_PATH):
scheme_path = os.path.join(SCHEMES_PATH, scheme_file)
_ = xcodeproj.Scheme.from_file(scheme_path)
| 16,429
|
def mean_ndcg_score(u_scores, u_labels, wtype="max"):
"""Mean Normalize Discounted cumulative gain (NDCG) for all users.
Parameters
----------
u_scores : array of arrays, shape = [num_users]
Each array is the predicted scores, shape = [n_samples[u]]
u_labels : array of arrays, shape = [num_users]
Each array is the ground truth label, shape = [n_samples[u]]
wtype : 'log' or 'max'
type for discounts
Returns
-------
mean_ndcg : array, shape = [num_users]
mean ndcg for each user (averaged among all rank)
avg_ndcg : array, shape = [max(n_samples)], averaged ndcg at each
position (averaged among all users for given rank)
"""
num_users = len(u_scores)
n_samples = [len(scores) for scores in u_scores]
max_sample = max(n_samples)
count = np.zeros(max_sample)
mean_ndcg = np.zeros(num_users)
avg_ndcg = np.zeros(max_sample)
for u in range(num_users):
ndcg = ndcg_score(u_scores[u], u_labels[u], wtype)
avg_ndcg[: n_samples[u]] += ndcg
count[: n_samples[u]] += 1
mean_ndcg[u] = ndcg.mean()
return mean_ndcg, avg_ndcg / count
| 16,430
|
def load_genbank_features(
genbank_record: SeqRecord.SeqRecord,
terminus: Union[SeqFeature.FeatureLocation, None] = None
) -> List[GENBANK_FEATURE]:
"""
Parses a GenBank record and generates Bitome knowledgebase objects based on the features within the record
Currently set up to create the following feature types:
- Gene
- Protein
- TRNA
- MobileElement
- RepeatRegion
- Origin
:param SeqRecord.SeqRecord genbank_record: the Genbank record to parse
:param Union[SeqFeature.FeatureLocation] terminus: the location of the terminus region for this genome; used to
determine whether a GenBank feature is on the leading or lagging strand
:return List[GENBANK_FEATURE] genbank_features: the GenBank-based knowledgebase objects for genomic features
"""
# some functionality is limited to the E. coli K-12 MG1655 genome annotation currently; set a flag for that
is_k12 = genbank_record.id == 'NC_000913.3'
if is_k12:
locus_tag_cog_lookup = load_locus_tag_cogs_lookup()
locus_tag_yome_lookup = load_locus_tag_yome_lookup()
essential_locus_tags = load_essential_genes()
else:
locus_tag_cog_lookup = {}
locus_tag_yome_lookup = {}
essential_locus_tags = []
genome_seq = genbank_record.seq
# separate the gene SeqFeatures and non-gene SeqFeatures from the GenBank record
gene_seqfeatures = select_seqfeatures(genbank_record.features, 'gene')
non_gene_seqfeatures = list(set(genbank_record.features).difference(set(gene_seqfeatures)))
origin_seqfeatures = select_seqfeatures(non_gene_seqfeatures, 'rep_origin')
origins: list = []
for origin_seqfeature in origin_seqfeatures:
origins.append(Origin(
origin_seqfeature.location,
genome_seq,
name=get_seqfeature_qualifier(origin_seqfeature, 'note')
))
genes: list = []
proteins: list = []
trnas: list = []
for gene_seqfeature in gene_seqfeatures:
locus_tag = get_seqfeature_qualifier(gene_seqfeature, 'locus_tag')
gene_name = get_seqfeature_qualifier(gene_seqfeature, 'gene')
gene_location = gene_seqfeature.location
# pseudogenes have a 'pseudo' key (with empty value) in their qualifiers dictionary
is_pseudo = 'pseudo' in gene_seqfeature.qualifiers
# determine if feature is leading, lagging, or in terminus region (if we have that region provided)
# assumes the origin is at a "higher" position in the linear numbering of the chromosome than terminus
replication_strand = None
origin_distance = None
terminus_distance = None
if len(origins) == 1 and terminus is not None:
origin = origins[0]
gene_start = gene_location.start.position
gene_end = gene_location.end.position
gene_strand = gene_location.strand
# all below descriptions of conditional cases assume we're looking at the genome with origin at the top,
# and the absolute position numbering goes clockwise; so the origin is 12:00, terminus is 5:30 - 6:30
# the gene is in the 12:00 - 5:30 region; note, we're assuming that the wraparound point is here (i.e.
# the spot where base 4.6M and base 1 are adjacent; also assuming that terminus region is 180 degrees
# from origin, so that the clockwise direction will definitely be shorter
if gene_start > origin.location.end.position or gene_start < terminus.start.position:
if gene_strand == 1:
replication_strand = 'leading'
else:
replication_strand = 'lagging'
if gene_start > origin.location.end.position:
origin_distance = gene_start - origin.location.end.position
terminus_distance = terminus.start.position + (len(genome_seq) - gene_end)
else:
origin_distance = (len(genome_seq) - origin.location.end.position) + gene_start
terminus_distance = terminus.start.position - gene_end
# the gene is in the terminus region between 5:30 and 6:30; can't guarantee if it's leading or lagging
# also don't assume which direction to origin is closer; distance to terminus is 0 since it's in there
elif terminus.start.position < gene_start < terminus.end.position:
replication_strand = 'terminus'
origin_distance_1 = (len(genome_seq) - origin.location.end.position) + gene_start
origin_distance_2 = origin.location.start.position - gene_end
origin_distance = min(origin_distance_1, origin_distance_2)
terminus_distance = 0
# the gene is on the left of the clock (6:30 - 12:00)
elif terminus.end.position < gene_start < origin.location.start.position:
if gene_strand == 1:
replication_strand = 'lagging'
else:
replication_strand = 'leading'
origin_distance = origin.location.start.position - gene_end
terminus_distance = gene_start - terminus.end.position
# isolate the feature that this gene codes; GenBank record separates these; e.g. a gene and its 'CDS' (coding
# sequence) will be distinct SeqFeature objects when parsed from the GenBank record
# for coronavirus, we want to ignore mat_peptide for now
if genbank_record.id == 'NC_045512.2':
coded_seqfeature = find_locus_tag(locus_tag, non_gene_seqfeatures, ignore_types=['mat_peptide'])
else:
coded_seqfeature = find_locus_tag(locus_tag, non_gene_seqfeatures)
if is_pseudo:
gene_type = 'pseudo'
# note; this ignores ONE gene in NC_000913.3; ralA, antisense toxin; TODO don't ignore this
elif coded_seqfeature is None:
warn(f'No coded feature found for {locus_tag}; no Gene object created')
continue
elif coded_seqfeature.type == 'ncRNA':
ncrna_class = get_seqfeature_qualifier(coded_seqfeature, 'ncRNA_class')
if ncrna_class == 'antisense_RNA':
gene_type = 'antisense_RNA'
else:
gene_type = 'ncRNA'
elif coded_seqfeature.type == 'mat_peptide':
gene_type = 'CDS'
# TODO don't ignore variation and mRNA features for lambda phage genome
elif coded_seqfeature.type in ['variation', 'mRNA']:
continue
else:
gene_type = coded_seqfeature.type
# use the CDS location if the coded feature is a CDS; TODO this glosses over genes whose mRNA are altered to
# make the CDS (see lambdap57 for an example)
if gene_type == 'CDS':
gene_location = coded_seqfeature.location
gene = Gene(
locus_tag,
gene_type,
gene_location,
gene_name,
genome_seq,
# these lookups are non-empty only for GenBank record NC_000913.3 (E. coli K-12 MG1655)
cog=locus_tag_cog_lookup.get(locus_tag, None),
y_ome=locus_tag_yome_lookup.get(locus_tag, None),
essential=(locus_tag in essential_locus_tags),
replication_strand=replication_strand,
origin_distance=origin_distance,
terminus_distance=terminus_distance
)
genes.append(gene)
if gene_type == 'CDS':
protein_name = get_seqfeature_qualifier(coded_seqfeature, 'product')
protein = protein_from_gene(gene, include_gempro=is_k12, name=protein_name)
proteins.append(protein)
gene.link_protein(protein)
# if we have a gene coding for a tRNA, generate a TRNA object
if gene_type == 'tRNA':
trna_name = get_seqfeature_qualifier(coded_seqfeature, 'product')
trna_note = get_seqfeature_qualifier(coded_seqfeature, 'note')
if trna_note is None:
trna_anticodon = None
else:
# assumes an anticodon will be somewhere in the note
trna_anticodon = re.findall(r'[AUCGTaugct]{3}', trna_note)
trna = TRNA(
locus_tag,
gene_location,
gene.reading_frame,
genome_seq,
name=trna_name,
anticodon=trna_anticodon
)
trnas.append(trna)
gene.link_trna(trna)
# add CAI for protein-coding genes
cds_genes = [gene for gene in genes if gene.gene_type == 'CDS']
calculate_and_add_cai(cds_genes)
# load mobile element, repeat region, and origin of replication features
mobile_element_seqfeatures = select_seqfeatures(non_gene_seqfeatures, 'mobile_element')
mobile_elements: list = []
for mobile_element_seqfeature in mobile_element_seqfeatures:
mobile_elements.append(MobileElement(
mobile_element_seqfeature.location,
genome_seq,
name=get_seqfeature_qualifier(mobile_element_seqfeature, 'mobile_element_type')
))
repeat_region_seqfeatures = select_seqfeatures(non_gene_seqfeatures, 'repeat_region')
repeat_regions: list = []
for repeat_region_seqfeature in repeat_region_seqfeatures:
repeat_regions.append(RepeatRegion(
repeat_region_seqfeature.location,
genome_seq,
name=get_seqfeature_qualifier(repeat_region_seqfeature, 'note')
))
all_genbank_features = genes + proteins + trnas + mobile_elements + repeat_regions + origins
return all_genbank_features
| 16,431
|
def average_precision(predictions: List, targets: List,
iou_threshold: float = 0.5) -> torch.Tensor:
"""Calculates average precision for given inputs
Args:
predictions (List): [Ni,5 dimensional as xmin,ymin,xmax,ymax,conf]
targets (List): [Mi,4 dimensional as xmin,ymin,xmax,ymax]
iou_threshold (float, optional): iou threshold for ap score. Defaults to 0.5.
Raises:
AssertionError: [description]
Returns:
torch.Tensor: average precision score
"""
assert len(predictions) == len(targets), "prediction and ground truths must be equal in length"
assert len(predictions) > 0, "given input list length must be greater than 0"
device = predictions[0].device
sorted_table, M = _generate_prediction_table(predictions, targets, device=device)
N = sorted_table.size(0)
if N == 0:
# pylint: disable=not-callable
return torch.tensor([0], dtype=torch.float32, device=device)
accumulated_tp = torch.zeros(sorted_table.size(0), dtype=torch.float32, device=device)
accumulated_fp = torch.zeros(sorted_table.size(0), dtype=torch.float32, device=device)
sorted_table[sorted_table[:, 0] < iou_threshold, 1] = 0.
tp = 0
fp = 0
for i, row in enumerate(sorted_table):
# row : 3 as iou,tp,confidence
if row[1] == 1.:
tp += 1
else:
fp += 1
accumulated_tp[i] = tp
accumulated_fp[i] = fp
precision = accumulated_tp / torch.arange(1, N+1, dtype=torch.float32, device=device)
recall = accumulated_tp / (M + 1e-16)
unique_recalls = recall.unique_consecutive()
auc = torch.empty(unique_recalls.size(0), dtype=torch.float32, device=device)
# pylint: disable=not-callable
last_value = torch.tensor(0, dtype=torch.float32, device=device)
for i, recall_value in enumerate(unique_recalls):
mask = recall == recall_value # N,
p_mul = precision[mask].max() # get max p
auc[i] = p_mul * (recall_value-last_value)
last_value = recall_value
return auc.sum()
| 16,432
|
def new_post(update: Update, context: CallbackContext) -> int:
"""Start the conversation, display any stored data and ask user for input."""
# init empty list to store dicts w/ info about each uploaded photo
context.user_data['photos'] = []
reply_text = "Initiate conversation: new post "
# if context.user_data:
# reply_text += (
# f"Current data: {', '.join(context.user_data.keys())}."
# )
# else:
reply_text += (
"Enter title"
)
update.message.reply_text(reply_text, reply_markup=markup)
return ENTER_TITLE
| 16,433
|
def format_residual_axis(ax, xlim=None, ylim=None, num_ticks=5, line_kws=None):
"""Creates a clean axis for the residual plots
This method is primarily designed to make neatly formatted axes for
residual axes in a plot.
Parameters
----------
ax : Axis
The axis to be formatted
xlim, ylim : list, optional
The x and y limits for the residual axis. By default, the current x
limits will be used.
ylim : list, optional
The y limits for the residual axis. By default, these will be set as
symmetrical values at the limits.
num_ticks : int, optional
The number of ticks to use on the x and y axis.
"""
if xlim is None:
xlim = ax.get_xlim()
if ylim is None:
ylim = _get_symetrical(ax.get_ylim())
xticks = _set_ticks(xlim, num_ticks)
yticks = _set_ticks(ylim, num_ticks)
ax.set_xlim(xlim)
ax.set_xticks(xticks)
ax.set_xlabel('')
ax.set_ylim(ylim)
ax.set_yticks(yticks)
ax.set_yticklabels('')
ax.set_ylabel('')
ax.plot(ax.get_xticks(), [0] * len(ax.get_xticks()),
marker='|', color='k', markeredgewidth=1, linestyle='',
linewidth=0.75)
sn.despine(ax=ax, top=True, bottom=True, right=True)
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
| 16,434
|
def _get_geometry_type_from_list(
features: List, allowed_features: List[Union[Tuple, Sequence]]
) -> Tuple[str]:
"""
Gets the Geometry type from a List, otherwise it raises an exception.
:param features: input feature as a list
:return: tuple with extracted geometry types
"""
geometry_type = tuple()
n_dim = get_input_dimensions(features)
if n_dim == 1 and all(
isinstance(el, (dict, *allowed_features[0])) for el in features
):
return tuple(
map(
lambda geom: _get_geometry_type_from_feature(geom, allowed_features),
features,
)
)
elif all(isinstance(el, (list, tuple, int, float)) for el in features):
feature_type = [
k for k, v in dimensions.items() if v == n_dim and k in allowed_features[1]
]
if len(feature_type) == 1:
geometry_type += (feature_type[0],)
else:
raise InvalidInput(
error_code_messages["InvalidGeometry"](allowed_features[1])
)
else:
raise InvalidInput(error_code_messages["InvalidGeometry"](allowed_features[1]))
return geometry_type
| 16,435
|
def test_web_config():
"""Test whether filabel posts a label"""
filabel.web.load_config(config_path="../test/fixtures/labels.abc.cfg")
assert filabel.config.config['labels'] is not None
| 16,436
|
def _create_mock_event(datastore, event_id, quantity, time_diffs=None,
source_attrs=None):
"""
Loads in the datastore mock events that based on the given arguments.
Args:
datastore: An instance of MockDataStore.
event_id: Desired ID for the first Event (to then be incremented).
quantity: The number of Events to be generated.
time_diffs: A list of time differences between the generated
Events.
source_attrs: A list of attributes to be added to the source attribute
of the Events.
"""
if not time_diffs:
time_diffs = [0]
quantity = abs(quantity)
# If the list of time differences is too small to be compatible
# with the quantity of events, then extend the list with the last
# value for as many items as necessary.
if quantity - len(time_diffs) > 0:
time_diffs.extend([time_diffs[len(time_diffs) - 1]] *
(quantity - len(time_diffs)))
#similarly for source_attrs
if source_attrs is None:
source_attrs = [None] * quantity
else:
if quantity - len(source_attrs) > 0:
source_attrs.extend([source_attrs[-1]] *
(quantity - len(source_attrs)))
event_timestamp = 1410895419859714
for i in range(quantity):
_create_eventObj(datastore, event_id, event_timestamp, source_attrs[i])
event_timestamp += abs(time_diffs[i])
event_id += 1
| 16,437
|
def test_binarytree_instantiate_null():
""" Can we instantiate with no intial values
"""
b = BinaryTree()
assert isinstance(b, BinaryTree)
| 16,438
|
def save_network_to_path(interactions, path):
"""Save dataframe to a tab-separated file at path."""
return interactions.to_csv(path, sep='\t', index=False, na_rep=str(None))
| 16,439
|
def apply_sql(
query: str,
output_name: Optional[str],
found: Dict[str, beam.PCollection],
run: bool = True) -> Tuple[str, Union[PValue, SqlNode], SqlChain]:
"""Applies a SqlTransform with the given sql and queried PCollections.
Args:
query: The SQL query executed in the magic.
output_name: (optional) The output variable name in __main__ module.
found: The PCollections with variable names found to be used in the query.
run: Whether to prepare the SQL pipeline for a local run or not.
Returns:
A tuple of values. First str value is the output variable name in
__main__ module, auto-generated if not provided. Second value: if run,
it's a PValue; otherwise, a SqlNode tracks the SQL without applying it or
executing it. Third value: SqlChain is a chain of SqlNodes that have been
applied.
"""
output_name = _generate_output_name(output_name, query, found)
query, sql_source, chain = _build_query_components(
query, found, output_name, run)
if run:
try:
output = sql_source | SqlTransform(query)
# Declare a variable with the output_name and output value in the
# __main__ module so that the user can use the output smoothly.
output_name, output = create_var_in_main(output_name, output)
_LOGGER.info(
"The output PCollection variable is %s with element_type %s",
output_name,
pformat_namedtuple(output.element_type))
return output_name, output, chain
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
on_error('Error when applying the Beam SQL: %s', e)
else:
return output_name, chain.current, chain
| 16,440
|
def test_list_nmtoken_white_space_nistxml_sv_iv_list_nmtoken_white_space_1_5(mode, save_output, output_format):
"""
Type list/NMTOKEN is restricted by facet whiteSpace with value
collapse.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-whiteSpace-1.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-whiteSpace-1-5.xml",
class_name="NistschemaSvIvListNmtokenWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 16,441
|
def loadRepository(connection, sessionUrl, rxf_file_path):
""" Load test repository. In case of remote IxLoadGatware, copy the repository to the remote machine first.
todo: copy adjacent files to remote machine.
:param connection: connection object that manages the HTTP data transfers between the client and the REST API
:param sessionUrl: address of the session to load the rxf for
:param rxf_file_path: the local rxf path on the machine that holds the IxLoad instance
"""
load_test_url = '{}/ixload/test/operations/loadTest'.format(sessionUrl)
if connection.is_remote:
relative_file_path = upload_file(connection, rxf_file_path)
data = {'fullPath': '/mnt/ixload-share/{}'.format(relative_file_path)}
else:
data = {'fullPath': rxf_file_path}
try:
# try local / linux
performGenericOperation(connection, load_test_url, data)
except Exception as _:
# except it is windows
data = {'fullPath': 'C:/ProgramData/Ixia/IxLoadGateway/{}'.format(relative_file_path)}
performGenericOperation(connection, load_test_url, data)
| 16,442
|
def check_course_time_conflict(current_course: Course,
user: NaturalPerson) -> Tuple[bool, str]:
"""
Check whether the class times of the currently selected course conflict with those of already-selected courses
"""
selected_courses = Course.objects.activated().filter(
participant_set__person=user,
participant_set__status__in=[
CourseParticipant.Status.SELECT,
CourseParticipant.Status.SUCCESS,
]).prefetch_related("time_set")
def time_hash(time: datetime):
return time.weekday() * 1440 + time.hour * 60 + time.minute
# Since at most 6 courses can be selected, a brute-force check is used for now
for current_course_time in current_course.time_set.all():
# class times of the currently selected course
current_start_time = current_course_time.start
current_end_time = current_course_time.end
for course in selected_courses:
for course_time in course.time_set.all():
start_time = course_time.start
end_time = course_time.end
# not very efficient, could be improved
if not (time_hash(current_start_time) >= time_hash(end_time) or
time_hash(current_end_time) <= time_hash(start_time)):
# conflict found
return True, \
f"The class times of '{current_course.name}' and '{course.name}' conflict!"
# no conflict
return False, ""
'''
# An alternative version with fewer loops
from django.db.models import Q
conflict_course_names = set()
for current_course_time in current_course.time_set.all():
# conflicting times
conflict_times = CourseTime.objects.filter(
# courses already selected
Q(course__in=selected_courses),
# starts earlier than the current end time
(Q(start__week_day=current_course_time.end.weekday() + 1,
start__time__lte=current_course_time.end.time())
| Q(start__week_day__lt=current_course_time.end.weekday() + 1))
# ends later than the current start time
& (Q(end__week_day=current_course_time.start.weekday() + 1,
end__time__gte=current_course_time.start.time())
| Q(end__week_day__gt=current_course_time.start.weekday() + 1))
)
if conflict_times.exists():
# return True, f'《{conflict_times.first().course.name}》'
conflict_course_names.union(
conflict_times.values_list('course__name', flat=True))
conflict_count = len(conflict_course_names)
# conflicts exist
if conflict_count:
return conflict_count, f'《{"》《".join(conflict_course_names)}》'
# no conflicts
return conflict_count, ""
'''
| 16,443
|
def update_linear_nodes(graph:Graph,
qc: QuantizationConfig,
fw_info: FrameworkInfo,
first_op2d_node: BaseNode,
second_op2d_node: BaseNode,
scale_factor: np.ndarray):
"""
Scale the weights of two linear nodes with a scale factor. Each node is scaled in
the opposite scale factor such that the output of the second node is the same as it
is without the scaling.
Thresholds are recalculated as the weights were changed.
The scale factor contain a scale value per-channel.
Args:
graph: Graph to apply the scaling on its nodes.
qc: QuantizationConfig containing parameters of how the model should be quantized.
fw_info: Information needed for quantization about the specific framework (e.g., kernel channels indices,
groups of layers by how they should be quantized, etc.)
first_op2d_node: Node to multiply its kernel by the scale factor.
second_op2d_node: Node to divide its kernel by the scale factor.
scale_factor: Scaling factor to scale the nodes' weights.
"""
w2_fixed = second_op2d_node.get_weights_by_keys(KERNEL) / scale_reshaping(scale_factor,
second_op2d_node,
fw_info.kernel_channels_mapping)
w1_fixed = first_op2d_node.get_weights_by_keys(KERNEL) * scale_reshaping(scale_factor,
first_op2d_node,
fw_info.kernel_channels_mapping,
in_channels=False)
if first_op2d_node.get_weights_by_keys(BIAS) is not None:
b1_fixed = first_op2d_node.get_weights_by_keys(BIAS) * scale_factor
first_op2d_node.set_weights_by_keys(BIAS, b1_fixed)
first_op2d_node.quantization_attr[OUTPUT_SCALE] = scale_factor
first_op2d_node.set_weights_by_keys(KERNEL, w1_fixed)
second_op2d_node.set_weights_by_keys(KERNEL, w2_fixed)
for nqc in first_op2d_node.candidates_weights_quantization_cfg:
nqc.calculate_and_set_weights_params(w1_fixed)
for nqc in second_op2d_node.candidates_weights_quantization_cfg:
nqc.calculate_and_set_weights_params(w2_fixed)
| 16,444
|
def or_func(a, b):
"""Creates a new list out of the two supplied by applying the function to each
equally-positioned pair in the lists. The returned list is truncated to the
length of the shorter of the two input lists"""
return a or b
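# Intended use: as the pairwise combining function when zipping two lists
# (shown here with a plain comprehension; the project may use its own helper).
a, b = [True, False, False], [False, False, True, True]
print([or_func(x, y) for x, y in zip(a, b)])  # [True, False, True], truncated to the shorter list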
| 16,445
|
def json_loads(
value: typing.Union[bytes, bytearray, str]
) -> typing.Union[
typing.List[typing.Dict[str, typing.Any]], typing.Dict[str, typing.Any]
]:
"""Practical json dumps helper function, bytes, bytearray, and
str input are accepted. supports for ``orjson``, ``simplejson`.
In case of orjson, if the input exists as bytes (was read directly from a source),
it is recommended to pass bytes. This has lower memory usage and lower latency.
The input must be valid UTF-8."""
if json_mod.__name__ != "orjson" and isinstance(value, (bytes, bytearray)):
value = value.decode("utf8", "strict")
return json_mod.loads(value)
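# Usage sketch, assuming the module aliases its JSON backend as `json_mod`
# (for example `import json as json_mod` or `import orjson as json_mod`).
print(json_loads(b'{"a": 1}'))  # {'a': 1}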
| 16,446
|
def execute_create_payment(client, create_payment_request):
"""
Create a payment. Automatically creates an NR for use.
:param client:
:param create_payment_request:
:return:
"""
headers = get_test_headers()
draft_nr = setup_draft_nr(client)
nr_id = draft_nr.get('id')
payment_action = 'COMPLETE'
# POST /api/v1/payments/<int:nr_id>/<string:payment_action>
request_uri = API_BASE_URI + str(nr_id) + '/' + payment_action
path = request_uri
body = json.dumps(create_payment_request)
log_request_path(path)
response = client.post(path, data=body, headers=headers)
assert response.status_code == 201
payload = json.loads(response.data)
verify_payment_payload(payload)
assert payload.get('statusCode') == 'CREATED'
return payload
| 16,447
|
def collect_contrib_features(
project: 'ballet.project.Project'
) -> List[Feature]:
"""Collect contributed features for a project at project_root
For a project ``foo``, walks modules within the ``foo.features.contrib``
subpackage. A single object that is an instance of ``ballet.Feature`` is
imported if present in each module. The resulting ``Feature`` objects are
collected.
Args:
project: project object
Returns:
collected features
"""
contrib = project.resolve('features.contrib')
return _collect_contrib_features(contrib)
| 16,448
|
def callwebservice(omdb_api_key, dvd_title, year=""):
""" Queries OMDbapi.org for title information and parses if it's a movie
or a tv series """
logging.debug("***Calling webservice with Title: " + dvd_title + " and Year: " + year)
try:
strurl = "http://www.omdbapi.com/?t={1}&y={2}&plot=short&r=json&apikey={0}".format(omdb_api_key, dvd_title, year)
logging.debug("http://www.omdbapi.com/?t={1}&y={2}&plot=short&r=json&apikey={0}".format("key_hidden", dvd_title, year))
dvd_title_info_json = urllib.request.urlopen(strurl).read()
except Exception:
logging.debug("Webservice failed")
return "fail", None
else:
doc = json.loads(dvd_title_info_json.decode())
if doc['Response'] == "False":
logging.debug("Webservice failed with error: " + doc['Error'])
return "fail", None
else:
media_type = doc['Type']
year = re.sub(r'[^\x00-\x7f]',r'', doc['Year'])
logging.debug("Webservice successful. Document returned is: " + json.dumps(doc))
return (media_type, year)
| 16,449
|
def rect_bevel_2d(width, height, bevel) -> Verts2D:
"""get a rib, parameterized by the height and width of the opening"""
# TODO: there's a generic bevel somewhere in here
width_half = width * 0.5
height_half = height * 0.5
return np.array([
(width_half - bevel, -height_half),
(width_half, -height_half + bevel),
(width_half, height_half - bevel),
(width_half - bevel, height_half),
# TODO: function for this - flip sign and reverse order
(-width_half + bevel, height_half),
(-width_half, height_half - bevel),
(-width_half, -height_half + bevel),
(-width_half + bevel, -height_half)
])
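# Usage sketch: a 4 x 2 opening with 0.5 corner bevels yields an 8-vertex outline
# (numpy only).
verts = rect_bevel_2d(4.0, 2.0, 0.5)
print(verts.shape)  # (8, 2)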
| 16,450
|
def verify_token(token):
""" Basic auth method """
curr_user = User.check_token(token) if token else None
return curr_user is not None
| 16,451
|
def print_error(msg):
"""
Wrapper function that prints the msg and writes it to the error log.
"""
msg = 'SEUC:: [' + time.ctime() + ']' + str(msg) + '::SEUC'
print(msg)
log.error(msg)
| 16,452
|
def generate_test_cases(n_tests: int, min_len: int, max_len: int, min_dim: int, max_dim: int) \
-> Sequence[Sequence[int]]:
"""
:param n_tests: number of test to generate
:param min_len: minimum number of matrices for each test case
:param max_len: maximum number of matrices for each test case
:param min_dim: minimum dimension for each matrix (applies both for rows and columns)
:param max_dim: maximum dimension for each matrix (applies both for rows and columns)
:return: a list of [dims, solution] pairs, where solution is the optimal matrix-chain multiplication cost for the dimension tuple dims
"""
solutions = []
for n in range(n_tests):
test_len = random.randint(min_len, max_len)
dims = tuple([random.randint(min_dim, max_dim) for _ in range(test_len)])
solution = memoized_mcm(dims=dims)[0]
solutions.append([dims, solution])
return solutions
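# The memoized_mcm helper is not shown in this snippet; a standard memoized
# matrix-chain multiplication cost (the first element of its returned tuple)
# might look like this sketch, where consecutive entries of dims give the matrix shapes.
from functools import lru_cache

def mcm_cost(dims):
    @lru_cache(maxsize=None)
    def best(i, j):
        # minimum multiplication cost for matrices i+1..j (matrix k is dims[k-1] x dims[k])
        if j - i < 2:
            return 0
        return min(best(i, k) + best(k, j) + dims[i] * dims[k] * dims[j]
                   for k in range(i + 1, j))
    return best(0, len(dims) - 1)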
| 16,453
|
def calc_neighbours(
adata: AnnData,
distance: float = None,
index: bool = True,
verbose: bool = True,
) -> List:
"""Calculate the proportion of known ligand-receptor co-expression among the neighbouring spots or within spots
Parameters
----------
adata: AnnData The data object to scan
distance: float Distance to determine the neighbours (default: closest), distance=0 means within spot
index: bool Indicates whether to return neighbours as indices to other spots or names of other spots.
Returns
-------
neighbours: numba.typed.List List of np.array's indicating neighbours by indices for each spot.
"""
if verbose:
print("Calculating neighbours...")
# get neighbour spots for each spot according to the specified distance
coor = adata.obs[["imagerow", "imagecol"]]
point_tree = spatial.cKDTree(coor)
neighbours = []
for i, spot in enumerate(adata.obs_names):
if distance == 0:
neighbours.append(np.array([i if index else spot]))
else:
n_index = point_tree.query_ball_point(
np.array(
[adata.obs["imagerow"].loc[spot], adata.obs["imagecol"].loc[spot]]
),
distance,
)
if index:
n_index = np.array(n_index, dtype=np.int_)
neighbours.append(n_index[n_index != i])
else:
n_spots = adata.obs_names[n_index]
neighbours.append(n_spots[n_spots != spot])
typed_neighs = List()
[typed_neighs.append(neigh) for neigh in neighbours]
n_neighs = np.array([len(neigh) for neigh in neighbours])
if verbose:
print(
f"{len(np.where(n_neighs==0)[0])} spots with no neighbours, "
f"{int(np.median(n_neighs))} median spot neighbours."
)
if np.all(n_neighs == 0):
raise Exception(
"All spots have no neighbours at current distance,"
" set distance to higher value, or distance=0 for "
"within-spot mode."
)
return typed_neighs
| 16,454
|
def _build_tree_string(
root: Optional[Node],
curr_index: int,
include_index: bool = False,
delimiter: str = "-",
) -> Tuple[List[str], int, int, int]:
"""Recursively walk down the binary tree and build a pretty-print string.
In each recursive call, a "box" of characters visually representing the
current (sub)tree is constructed line by line. Each line is padded with
whitespaces to ensure all lines in the box have the same length. Then the
box, its width, and start-end positions of its root node value repr string
(required for drawing branches) are sent up to the parent call. The parent
call then combines its left and right sub-boxes to build a larger box etc.
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:param curr_index: Level-order_ index of the current node (root node is 0).
:type curr_index: int
:param include_index: If set to True, include the level-order_ node indexes using
the following format: ``{index}{delimiter}{value}`` (default: False).
:type include_index: bool
:param delimiter: Delimiter character between the node index and the node
value (default: '-').
:type delimiter:
:return: Box of characters visually representing the current subtree, width
of the box, and start-end positions of the repr string of the new root
node value.
:rtype: ([str], int, int, int)
.. _Level-order:
https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search
"""
if root is None:
return [], 0, 0, 0
line1 = []
line2 = []
if include_index:
node_repr = "{}{}{}".format(curr_index, delimiter, root.val)
else:
node_repr = str(root.val)
new_root_width = gap_size = len(node_repr)
# Get the left and right sub-boxes, their widths, and root repr positions
l_box, l_box_width, l_root_start, l_root_end = _build_tree_string(
root.left, 2 * curr_index + 1, include_index, delimiter
)
r_box, r_box_width, r_root_start, r_root_end = _build_tree_string(
root.right, 2 * curr_index + 2, include_index, delimiter
)
# Draw the branch connecting the current root node to the left sub-box
# Pad the line with whitespaces where necessary
if l_box_width > 0:
l_root = (l_root_start + l_root_end) // 2 + 1
line1.append(" " * (l_root + 1))
line1.append("_" * (l_box_width - l_root))
line2.append(" " * l_root + "/")
line2.append(" " * (l_box_width - l_root))
new_root_start = l_box_width + 1
gap_size += 1
else:
new_root_start = 0
# Draw the representation of the current root node
line1.append(node_repr)
line2.append(" " * new_root_width)
# Draw the branch connecting the current root node to the right sub-box
# Pad the line with whitespaces where necessary
if r_box_width > 0:
r_root = (r_root_start + r_root_end) // 2
line1.append("_" * r_root)
line1.append(" " * (r_box_width - r_root + 1))
line2.append(" " * r_root + "\\")
line2.append(" " * (r_box_width - r_root))
gap_size += 1
new_root_end = new_root_start + new_root_width - 1
# Combine the left and right sub-boxes with the branches drawn above
gap = " " * gap_size
new_box = ["".join(line1), "".join(line2)]
for i in range(max(len(l_box), len(r_box))):
l_line = l_box[i] if i < len(l_box) else " " * l_box_width
r_line = r_box[i] if i < len(r_box) else " " * r_box_width
new_box.append(l_line + gap + r_line)
# Return the new box, its width and its root repr positions
return new_box, len(new_box[0]), new_root_start, new_root_end
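# Usage sketch with a minimal stand-in node; any object exposing .val, .left and
# .right (e.g. binarytree.Node) works the same way.
class _Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

box, *_ = _build_tree_string(_Node(1, _Node(2, _Node(4)), _Node(3)), 0)
print("\n".join(box))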
| 16,455
|
def growth(xs, ys , x):
"""
growth function
pre:
xs, ys are arrays of known x and y values. x is a scalar or np.array
of values to calculate new y values for
post:
return new y values
"""
xs = np.array(xs)
ys = np.log(np.array(ys))
xy_bar = np.average(xs*ys)
x_bar = np.average(xs)
y_bar = np.average(ys)
x_sq_bar = np.average(xs**2)
beta = (xy_bar - x_bar*y_bar)/(x_sq_bar- x_bar**2)
alpha = y_bar - beta* x_bar
return np.exp(alpha + beta * x)
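# Usage sketch: recovering an exponential trend y = 2 * exp(0.3 * x) from samples
# (numpy only; the fit is exact here because the data is exactly exponential).
import numpy as np

xs = np.arange(10)
ys = 2.0 * np.exp(0.3 * xs)
print(growth(xs, ys, np.array([10.0, 11.0])))  # approx. [40.17, 54.23]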
| 16,456
|
def diarization_experiment(model_args, training_args, inference_args):
"""Experiment pipeline.
Load data --> train model --> test model --> output result
Args:
model_args: model configurations
training_args: training configurations
inference_args: inference configurations
"""
predicted_labels = []
test_record = []
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
train_data = np.load('/scratch/hh2263/VCTK/training_data_vctk_muda_updated.npz')
# restore np.load for future normal usage
np.load = np_load_old
train_sequence = train_data['train_sequence']
train_cluster_id = train_data['train_cluster_id']
train_sequence_list = [seq.astype(float)+0.00001 for seq in train_sequence]
train_cluster_id_list = [np.array(cid).astype(str) for cid in train_cluster_id]
model = uisrnn.UISRNN(model_args)
model.load(original_model)
# training
model.fit(train_sequence_list, train_cluster_id_list, training_args)
model.save(SAVED_MODEL_NAME)
'''
# testing
# we can also skip training by calling:
model.load(SAVED_MODEL_NAME)
for (test_sequence, test_cluster_id) in zip(test_sequences, test_cluster_ids):
predicted_label = model.predict(test_sequence, inference_args)
predicted_labels.append(predicted_label)
accuracy = uisrnn.compute_sequence_match_accuracy(
test_cluster_id, predicted_label)
test_record.append((accuracy, len(test_cluster_id)))
print('Ground truth labels:')
print(test_cluster_id)
print('Predicted labels:')
print(predicted_label)
print('-' * 80)
output_string = uisrnn.output_result(model_args, training_args, test_record)
print('Finished diarization experiment')
print(output_string)
'''
| 16,457
|
def remove_short_transition(transition_sites,thresh=11):
"""
    Removes transition sites that are too close (within thresh) to the next one.
"""
if len(transition_sites) < 4:
return transition_sites
for i in range(len(transition_sites) - 1):
forward_difference = transition_sites[i+1] - transition_sites[i]
if forward_difference <= thresh:
transition_sites[i] = transition_sites[-1]
transition_sites.append(0)
transition_sites = list(set(transition_sites))
transition_sites = sorted(transition_sites)
return transition_sites
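# A minimal usage sketch added for illustration; the sample site list is made
# up. Sites closer than `thresh` to the next one are collapsed by the function
# above, which may also introduce a 0 sentinel as part of its bookkeeping.
def _remove_short_transition_example():
    sites = [10, 15, 100, 200]                # 10 and 15 are within thresh=11
    return remove_short_transition(sites)     # e.g. [0, 15, 100, 200]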
| 16,458
|
def load_image(file_path):
"""
Load data from an image.
Parameters
----------
file_path : str
Path to the file.
Returns
-------
    ndarray
        2D array of floats.
"""
if "\\" in file_path:
raise ValueError(
"Please use a file path following the Unix convention")
mat = None
try:
mat = np.asarray(Image.open(file_path), dtype=np.float32)
except IOError:
print(("No such file or directory: {}").format(file_path))
raise
if len(mat.shape) > 2:
axis_m = np.argmin(mat.shape)
mat = np.mean(mat, axis=axis_m)
return mat
| 16,459
|
def _kubectl_port_forward(
service: str, namespace: str, target_port: int, local_port: Optional[int] = None
) -> Generator[int, None, None]:
"""Context manager which creates a kubectl port-forward process targeting a
K8s service.
Terminates the port-forwarding process upon exit.
Args:
service: Name of a K8s service.
namespace: Namespace to which the service belongs.
target_port: The port targeted by the service.
local_port: Forward from this port. Optional. By default, uses the port exposed
by the service.
Yields:
The local port. The service can then be accessed at 127.0.0.1:<local_port>.
"""
# First, figure out which port the service exposes for the given target port.
service_port = _get_service_port(service, namespace, target_port)
if not local_port:
local_port = service_port
process = subprocess.Popen(
[
"kubectl",
"-n",
namespace,
"port-forward",
f"service/{service}",
f"{local_port}:{service_port}",
]
)
def terminate_process():
process.terminate()
# Wait 10 seconds for the process to terminate.
# This cleans up the zombie entry from the process table.
# 10 seconds is a deliberately excessive amount of time to wait.
process.wait(timeout=10)
# Ensure clean-up in case of interrupt.
atexit.register(terminate_process)
# terminate_process is ok to execute multiple times.
try:
yield local_port
finally:
terminate_process()
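# A minimal usage sketch added for illustration. The generator above is
# presumably wrapped with contextlib.contextmanager elsewhere in the module;
# the service name, namespace and port below are hypothetical and require a
# reachable cluster plus kubectl on the PATH.
def _kubectl_port_forward_example():
    import urllib.request
    with _kubectl_port_forward("my-service", "default", target_port=8080) as port:
        # The service is reachable on 127.0.0.1 for the lifetime of the block.
        url = "http://127.0.0.1:{}/".format(port)
        return urllib.request.urlopen(url).status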
| 16,460
|
def get_city_data(message):
""" Send the data for a city to the requesting player. """
position = message["position"]
city = CITIES.get(Coordinate.load(position))
LOGGER.debug(f"{current_user} loading city {city}")
if city:
emit("update_city", city)
| 16,461
|
def absolute_dispersion(drifters,starttime,time):
"""
    Calculates absolute dispersion A^2 between the start time and the
    time at which it is evaluated.
Parameters
----------
drifters : GladDrifter instance, list, ndarray
A list or numpy array of GladDrifter instances.
starttime : datetime instance
Start time.
time : datetime instance
Time at which to compute absolute dispersion.
Returns
-------
A2 : float
Absolute dispersion in km^2.
"""
import numpy as np
from glad.util import argmin_datetime,haversine
if not isinstance(drifters,list):
drifters = [drifters]
dist_squared = []
for d in drifters:
if not (d.has_time(starttime) and d.has_time(time)):
continue
n1 = argmin_datetime(time,d.time)
n0 = argmin_datetime(starttime,d.time)
dist_squared.append(haversine(d.lon[n1],d.lat[n1],\
d.lon[n0],d.lat[n0])**2)
A2 = np.mean(dist_squared)
return A2
| 16,462
|
def add_xgis_url(df: gpd.geodataframe.GeoDataFrame) -> gpd.geodataframe.GeoDataFrame:
""" Adding x-gis URL which will let the user check the result
:param df: gdf to use
"""
    # Generating URL from string
df.reset_index(inplace=True) # resetting index
xy_tog = df[c.X].astype(str) + "," + df[c.Y].astype(str)
kinnistu_str = df[c.kinnistu_nr].astype(str)
# Final URL
x_gis_url = c.X_GIS_URL_yua + "?" + "punkt=" + xy_tog + "&moot=500" + "&tooltip=Kinnistu nr: " + kinnistu_str
# Adding new column
df[c.URL] = x_gis_url
logging.info("\tAdded URL to dataframe")
return df
| 16,463
|
def get_compute_capacity_reservation_instance_shapes(availability_domain: Optional[str] = None,
compartment_id: Optional[str] = None,
display_name: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetComputeCapacityReservationInstanceShapesFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeCapacityReservationInstanceShapesResult:
"""
This data source provides the list of Compute Capacity Reservation Instance Shapes in Oracle Cloud Infrastructure Core service.
Lists the shapes that can be reserved within the specified compartment.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_compute_capacity_reservation_instance_shapes = oci.core.get_compute_capacity_reservation_instance_shapes(compartment_id=var["compartment_id"],
availability_domain=var["compute_capacity_reservation_instance_shape_availability_domain"],
display_name=var["compute_capacity_reservation_instance_shape_display_name"])
```
:param str availability_domain: The name of the availability domain. Example: `Uocm:PHX-AD-1`
:param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
:param str display_name: A filter to return only resources that match the given display name exactly.
"""
__args__ = dict()
__args__['availabilityDomain'] = availability_domain
__args__['compartmentId'] = compartment_id
__args__['displayName'] = display_name
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:core/getComputeCapacityReservationInstanceShapes:getComputeCapacityReservationInstanceShapes', __args__, opts=opts, typ=GetComputeCapacityReservationInstanceShapesResult).value
return AwaitableGetComputeCapacityReservationInstanceShapesResult(
availability_domain=__ret__.availability_domain,
compartment_id=__ret__.compartment_id,
compute_capacity_reservation_instance_shapes=__ret__.compute_capacity_reservation_instance_shapes,
display_name=__ret__.display_name,
filters=__ret__.filters,
id=__ret__.id)
| 16,464
|
def is_branch_or_version(string):
"""Tries to figure out if passed argument is branch or version.
Returns 'branch', 'version', or False if deduction failed.
Branch is either 'master' or something like 3.12.x;
version is something like 3.12.5,
optionally followed by letter (3.12.5b) for aplha/beta/gamma...zeta,
optionally followed by release (3.12.5-2).
"""
if string == "master" or re.match("3\.\\d+\.x$", string):
return "branch"
if re.match("3\\.\\d+\\.\\d+[a-z]?(-\\d+)?$", string):
return "version"
return None
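# A few illustrative calls added for clarity; the strings are made up.
def _is_branch_or_version_example():
    assert is_branch_or_version("master") == "branch"
    assert is_branch_or_version("3.12.x") == "branch"
    assert is_branch_or_version("3.12.5b-2") == "version"
    assert is_branch_or_version("not-a-ref") is None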
| 16,465
|
def eval_regressors(regressor_factories, gen_one_data, batch_size=1, names=None):
"""Evaluates an iterable of regressors on some test data of size
:batch_size: generated from :gen_one_data:.
"""
X, y = dg.BatchData.batch(gen_one_data, batch_size)
return _eval_regressors(regressor_factories, X, y, names=names)
| 16,466
|
def beautify():
"""Set reasonable defaults matplotlib.
This method replaces matplotlib's default rgb/cmyk colors with the
colarized colors. It also does:
* re-orders the default color cycle
* sets the default linewidth
* replaces the defaault 'RdBu' cmap
* sets the default cmap to 'RdBu'
Examples
--------
You can safely call ``beautify`` right after you've imported the
``plot`` module.
>>> from wyrm import plot
>>> plot.beautify()
"""
def to_mpl_format(r, g, b):
"""Convert 0..255 t0 0..1."""
return r / 256, g / 256, b / 256
# The solarized color palette
base03 = to_mpl_format( 0, 43, 54)
base02 = to_mpl_format( 7, 54, 66)
base01 = to_mpl_format( 88, 110, 117)
base00 = to_mpl_format(101, 123, 131)
base0 = to_mpl_format(131, 148, 150)
base1 = to_mpl_format(147, 161, 161)
base2 = to_mpl_format(238, 232, 213)
base3 = to_mpl_format(253, 246, 227)
yellow = to_mpl_format(181, 137, 0)
orange = to_mpl_format(203, 75, 22)
red = to_mpl_format(220, 50, 47)
magenta = to_mpl_format(211, 54, 130)
violet = to_mpl_format(108, 113, 196)
blue = to_mpl_format( 38, 139, 210)
cyan = to_mpl_format( 42, 161, 152)
green = to_mpl_format(133, 153, 0)
white = (1, 1, 1)#base3
black = base03
    # Overwrite the default color values with our new ones. Those
# single-letter colors are used all over the place in matplotlib, so
# this setting has a huge effect.
mpl.colors.ColorConverter.colors = {
'b': blue,
'c': cyan,
'g': green,
'k': black,
'm': magenta,
'r': red,
'w': white,
'y': yellow
}
# Redefine the existing 'RdBu' (Red-Blue) colormap, with our new
# colors for red and blue
cdict = {
'red' : ((0., blue[0], blue[0]), (0.5, white[0], white[0]), (1., magenta[0], magenta[0])),
'green': ((0., blue[1], blue[1]), (0.5, white[1], white[1]), (1., magenta[1], magenta[1])),
'blue' : ((0., blue[2], blue[2]), (0.5, white[2], white[2]), (1., magenta[2], magenta[2]))
}
mpl.cm.register_cmap('RdBu', data=cdict)
# Reorder the default color cycle
mpl.rcParams['axes.color_cycle'] = ['b', 'm', 'g', 'r', 'c', 'y', 'k']
# Set linewidth in plots to 2
mpl.rcParams['lines.linewidth'] = 2
# Set default cmap
mpl.rcParams['image.cmap'] = 'RdBu'
| 16,467
|
def filter_xr_by_month(data: xr.DataArray, month: str) -> xr.DataArray:
"""
    Filter an xr.DataArray down to the months of the given season string.
    :param data: xr.DataArray with a 'time' coordinate
    :param month: season string such as 'JJA', 'DJF', or 'NDJF'
    :return: the filtered xr.DataArray
"""
if isinstance(data, xr.DataArray):
month = value_month_from_str(month)
mask = [True if x in month else False for x in data.time.dt.month]
lookup = xr.DataArray(mask, dims=data.dims[0])
data_to_return = data.where(lookup, drop=True)
if isinstance(data, xr.Dataset):
# TODO: to be updated:
print(f'function to update')
return data_to_return
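# A minimal usage sketch added for illustration. It assumes numpy/xarray are
# imported as np/xr at module level, as elsewhere in this file, and that the
# helper value_month_from_str() used above maps 'JJA' to the month numbers
# {6, 7, 8}; the synthetic daily series below is made up.
def _filter_xr_by_month_example():
    import pandas as pd
    times = pd.date_range("2000-01-01", "2000-12-31", freq="D")
    da = xr.DataArray(np.arange(len(times), dtype=float),
                      coords={"time": times}, dims="time")
    summer = filter_xr_by_month(da, "JJA")
    return summer.time.dt.month.values        # only months 6, 7 and 8 remain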
| 16,468
|
def main():
"""
NAME
vgpmap_magic.py
DESCRIPTION
makes a map of vgps and a95/dp,dm for site means in a pmag_results table
SYNTAX
vgpmap_magic.py [command line options]
OPTIONS
-h prints help and quits
-eye ELAT ELON [specify eyeball location], default is 90., 0.
-f FILE pmag_results format file, [default is pmag_results.txt]
        -res [c,l,i,h] specify resolution (crude, low, intermediate, high)
        -etp plot the etopo20 topography data (requires high resolution data set)
        -prj PROJ, specify one of the following:
             ortho = orthographic
             lcc = lambert conformal
             moll = mollweide
merc = mercator
-sym SYM SIZE: choose a symbol and size, examples:
ro 5 : small red circles
bs 10 : intermediate blue squares
g^ 20 : large green triangles
-ell plot dp/dm or a95 ellipses
-rev RSYM RSIZE : flip reverse poles to normal antipode
-S: plot antipodes of all poles
-age : plot the ages next to the poles
-crd [g,t] : choose coordinate system, default is to plot all site VGPs
-fmt [pdf, png, eps...] specify output format, default is pdf
-sav save and quit
DEFAULTS
FILE: pmag_results.txt
res: c
prj: ortho
ELAT,ELON = 0,0
SYM SIZE: ro 8
RSYM RSIZE: g^ 8
"""
dir_path = '.'
res, ages = 'c', 0
plot = 0
proj = 'ortho'
results_file = 'pmag_results.txt'
ell, flip = 0, 0
lat_0, lon_0 = 90., 0.
fmt = 'pdf'
sym, size = 'ro', 8
rsym, rsize = 'g^', 8
anti = 0
fancy = 0
coord = ""
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-S' in sys.argv:
anti = 1
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if '-sav' in sys.argv:
plot = 1
if '-res' in sys.argv:
ind = sys.argv.index('-res')
res = sys.argv[ind+1]
if '-etp' in sys.argv:
fancy = 1
if '-prj' in sys.argv:
ind = sys.argv.index('-prj')
proj = sys.argv[ind+1]
if '-rev' in sys.argv:
flip = 1
ind = sys.argv.index('-rev')
rsym = (sys.argv[ind+1])
rsize = int(sys.argv[ind+2])
if '-sym' in sys.argv:
ind = sys.argv.index('-sym')
sym = (sys.argv[ind+1])
size = int(sys.argv[ind+2])
if '-eye' in sys.argv:
ind = sys.argv.index('-eye')
lat_0 = float(sys.argv[ind+1])
lon_0 = float(sys.argv[ind+2])
if '-ell' in sys.argv:
ell = 1
if '-age' in sys.argv:
ages = 1
if '-f' in sys.argv:
ind = sys.argv.index('-f')
results_file = sys.argv[ind+1]
if '-crd' in sys.argv:
ind = sys.argv.index('-crd')
crd = sys.argv[ind+1]
if crd == 'g':
coord = '0'
if crd == 't':
coord = '100'
results_file = dir_path+'/'+results_file
data, file_type = pmag.magic_read(results_file)
if file_type != 'pmag_results':
print("bad results file")
sys.exit()
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
# read in er_sites file
lats, lons, dp, dm, a95 = [], [], [], [], []
Pars = []
dates, rlats, rlons = [], [], []
if 'data_type' in data[0].keys():
# get all site level data
Results = pmag.get_dictitem(data, 'data_type', 'i', 'T')
else:
Results = data
# get all non-blank latitudes
Results = pmag.get_dictitem(Results, 'vgp_lat', '', 'F')
# get all non-blank longitudes
Results = pmag.get_dictitem(Results, 'vgp_lon', '', 'F')
if coord != "":
# get specified coordinate system
Results = pmag.get_dictitem(Results, 'tilt_correction', coord, 'T')
location = ""
for rec in Results:
if rec['er_location_names'] not in location:
location = location+':'+rec['er_location_names']
if 'average_age' in rec.keys() and rec['average_age'] != "" and ages == 1:
dates.append(rec['average_age'])
lat = float(rec['vgp_lat'])
lon = float(rec['vgp_lon'])
if flip == 0:
lats.append(lat)
lons.append(lon)
elif flip == 1:
if lat < 0:
rlats.append(-lat)
lon = lon+180.
if lon > 360:
lon = lon-360.
rlons.append(lon)
else:
lats.append(lat)
lons.append(lon)
elif anti == 1:
lats.append(-lat)
lon = lon+180.
if lon > 360:
lon = lon-360.
lons.append(lon)
ppars = []
ppars.append(lon)
ppars.append(lat)
ell1, ell2 = "", ""
if 'vgp_dm' in rec.keys() and rec['vgp_dm'] != "":
ell1 = float(rec['vgp_dm'])
if 'vgp_dp' in rec.keys() and rec['vgp_dp'] != "":
ell2 = float(rec['vgp_dp'])
if 'vgp_alpha95' in rec.keys() and rec['vgp_alpha95'] != "":
ell1, ell2 = float(rec['vgp_alpha95']), float(rec['vgp_alpha95'])
if ell1 != "" and ell2 != "":
ppars = []
ppars.append(lons[-1])
ppars.append(lats[-1])
ppars.append(ell1)
ppars.append(lons[-1])
isign = abs(lats[-1])/lats[-1]
ppars.append(lats[-1]-isign*90.)
ppars.append(ell2)
ppars.append(lons[-1]+90.)
ppars.append(0.)
Pars.append(ppars)
location = location.strip(':')
Opts = {'latmin': -90, 'latmax': 90, 'lonmin': 0., 'lonmax': 360., 'lat_0': lat_0, 'lon_0': lon_0,
'proj': proj, 'sym': 'bs', 'symsize': 3, 'pltgrid': 0, 'res': res, 'boundinglat': 0.}
Opts['details'] = {'coasts': 1, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 1, 'fancy': fancy}
# make the base map with a blue triangle at the pole`
pmagplotlib.plot_map(FIG['map'], [90.], [0.], Opts)
Opts['pltgrid'] = -1
Opts['sym'] = sym
Opts['symsize'] = size
if len(dates) > 0:
Opts['names'] = dates
if len(lats) > 0:
# add the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], lats, lons, Opts)
Opts['names'] = []
if len(rlats) > 0:
Opts['sym'] = rsym
Opts['symsize'] = rsize
# add the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], rlats, rlons, Opts)
if plot == 0:
pmagplotlib.draw_figs(FIG)
if ell == 1: # add ellipses if desired.
Opts['details'] = {'coasts': 0, 'rivers': 0,
'states': 0, 'countries': 0, 'ocean': 0}
Opts['pltgrid'] = -1 # turn off meridian replotting
Opts['symsize'] = 2
Opts['sym'] = 'g-'
for ppars in Pars:
if ppars[2] != 0:
PTS = pmagplotlib.plot_ell(FIG['map'], ppars, 'g.', 0, 0)
elats, elons = [], []
for pt in PTS:
elons.append(pt[0])
elats.append(pt[1])
# make the base map with a blue triangle at the pole`
pmagplotlib.plot_map(FIG['map'], elats, elons, Opts)
if plot == 0:
pmagplotlib.draw_figs(FIG)
files = {}
for key in FIG.keys():
if pmagplotlib.isServer: # use server plot naming convention
files[key] = 'LO:_'+location+'_VGP_map.'+fmt
else: # use more readable plot naming convention
files[key] = '{}_VGP_map.{}'.format(
location.replace(' ', '_'), fmt)
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['eq'] = 'LO:_'+location+'_VGP_map'
FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
pmagplotlib.save_plots(FIG, files)
elif plot == 0:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
pmagplotlib.save_plots(FIG, files)
else:
print("Good bye")
sys.exit()
else:
pmagplotlib.save_plots(FIG, files)
| 16,469
|
def ordered_load(stream, loader=yaml.SafeLoader, object_pairs_hook=OrderedDict):
"""Load YAML, preserving the ordering of all data."""
class OrderedLoader(loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
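# A minimal usage sketch added for illustration; the YAML text is made up.
# Key order in the returned OrderedDict matches the document order.
def _ordered_load_example():
    doc = "first: 1\nsecond: 2\nthird: 3\n"
    data = ordered_load(doc)
    return list(data.keys())                  # ['first', 'second', 'third']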
| 16,470
|
def user_unions_clear(*args):
"""
user_unions_clear(map)
Clear user_unions_t.
@param map (C++: user_unions_t *)
"""
return _ida_hexrays.user_unions_clear(*args)
| 16,471
|
def heur_best_from_now(state):
"""
    This heuristic computes the cost by assigning all of the remaining weight to the launch with the lowest variable cost.
@param state: state to compute the cost.
@return: cost
"""
try:
return min([launch.compute_variable_cost(state.left_weight()) for launch in state.launches[state.launch_nr:]])
except ValueError:
return 0
| 16,472
|
def test_get_task_status_description(task_status_code, language):
"""Test task status description."""
result = utils.get_task_status_description(task_status_code, language)
if task_status_code == "0":
assert result == "Waiting"
elif task_status_code == "5":
assert result == "渲染中"
else:
assert result == "Abort"
| 16,473
|
def test_top_level_availability(make_napari_viewer):
"""Current viewer should be available at napari.current_viewer."""
viewer = make_napari_viewer()
assert viewer == napari.current_viewer()
| 16,474
|
def get_ngram(text, ns=[1]):
"""
    Extract character n-gram features from text.
    :param text: str
    :param ns: list of n-gram sizes, e.g. [1, 2]
    :return: list
    """
    if type(ns) != list:
        raise RuntimeError("ns of function get_ngram() must be a list!")
    for n in ns:
        if n < 1:
            raise RuntimeError("every element of ns must be >= 1!")
len_text = len(text)
ngrams = []
for n in ns:
ngram_n = []
for i in range(len_text):
if i + n <= len_text:
ngram_n.append(text[i:i + n])
else:
break
if not ngram_n:
ngram_n.append(text)
ngrams += ngram_n
return ngrams
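# A few illustrative calls added for clarity; the sample string is made up.
def _get_ngram_example():
    bigrams = get_ngram("abcd", ns=[2])       # ['ab', 'bc', 'cd']
    mixed = get_ngram("abcd", ns=[1, 3])      # ['a', 'b', 'c', 'd', 'abc', 'bcd']
    return bigrams, mixed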
| 16,475
|
def entropy(df):
"""Return Shannon Entropy for purchases of each user."""
from scipy.stats import entropy
mask = df.credit_debit.eq('debit')
df = df[mask]
num_cats = df.auto_tag.nunique()
def calc_entropy(user, num_cats):
total_purchases = len(user)
cat_purchases = user.groupby('auto_tag').size()
probs = (cat_purchases + 1) / (total_purchases + num_cats)
return entropy(probs, base=2)
g = df.groupby('user_id')
return g.apply(calc_entropy, num_cats).rename('entropy')
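# A minimal usage sketch added for illustration; the toy transactions are made
# up. A user whose debits are spread across categories scores a higher entropy
# than one who always buys from the same category.
def _entropy_example():
    import pandas as pd
    df = pd.DataFrame({
        "user_id":      [1, 1, 1, 1, 2, 2, 2, 2],
        "credit_debit": ["debit"] * 8,
        "auto_tag":     ["food", "rent", "travel", "fun",
                         "food", "food", "food", "food"],
    })
    return entropy(df)   # pd.Series named 'entropy', indexed by user_id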
| 16,476
|
def splitter(h):
""" Splits dictionary numbers by the decimal point."""
if type(h) is dict:
for k, i in h.items():
            h[k] = str(i).split('.')
if type(h) is list:
for n in range(0, len(h)):
h[n] = splitter(h[n])
return h
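# A few illustrative calls added for clarity; the sample values are made up.
def _splitter_example():
    by_key = splitter({"a": 1.5, "b": 2})       # {'a': ['1', '5'], 'b': ['2']}
    nested = splitter([{"x": 3.25}, {"y": 4}])  # [{'x': ['3', '25']}, {'y': ['4']}]
    return by_key, nested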
| 16,477
|
def apprise_notify(apprise_cfg, title, body):
"""APPRISE NOTIFICATIONS
:argument
apprise_cfg - The full path to the apprise.yaml file
title - the message title
body - the main body of the message
:returns
nothing
"""
yaml_file = apprise_cfg
with open(yaml_file, "r") as f:
cfg = yaml.safe_load(f)
# boxcar
# boxcar://{access_key}/{secret_key}
if cfg['BOXCAR_KEY'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# A sample pushbullet notification
apobj.add('boxcar://' + str(cfg['BOXCAR_KEY']) + "/" + str(cfg['BOXCAR_SECRET']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending boxcar apprise notification. Continuing processing...")
# discord
# discord://{WebhookID}/{WebhookToken}/
if cfg['DISCORD_WEBHOOK_ID'] != "":
# TODO: add userid to this and config
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# A sample pushbullet notification
apobj.add('discord://' + str(cfg['DISCORD_WEBHOOK_ID']) + "/" + str(cfg['DISCORD_TOKEN']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending discord apprise notification. Continuing processing...")
# Faast
# faast://{authorizationtoken}
if cfg['FAAST_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# A sample pushbullet notification
apobj.add('faast://' + str(cfg['FAAST_TOKEN']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending faast apprise notification. Continuing processing...")
# FLOCK
# flock://{token}/
if cfg['FLOCK_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# A sample pushbullet notification
apobj.add('flock://' + str(cfg['FLOCK_TOKEN']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending flock apprise notification. Continuing processing...")
# GITTER
# gitter: // {token} / {room} /
if cfg['GITTER_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# A sample pushbullet notification
apobj.add('gitter://' + str(cfg['GITTER_TOKEN']) + "/" + str(cfg['GITTER_ROOM']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending gitter apprise notification. Continuing processing...")
# Gotify
# gotify://{hostname}/{token}
if cfg['GOTIFY_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# A sample pushbullet notification
apobj.add('gotify://' + str(cfg['GOTIFY_HOST']) + "/" + str(cfg['GOTIFY_TOKEN']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending gitter apprise notification. Continuing processing...")
# Growl
# growl://{hostname} || growl://{password}@{hostname}
if cfg['GROWL_HOST'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# Check if we have a pass, use it if we do
if cfg['GROWL_PASS'] != "":
# A sample pushbullet notification
apobj.add('growl://' + str(cfg['GROWL_PASS']) + "@" + str(cfg['GROWL_HOST']))
else:
# A sample pushbullet notification
apobj.add('growl://' + str(cfg['GROWL_HOST']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending growl apprise notification. Continuing processing...")
# JOIN
# join://{apikey}/ || join://{apikey}/{device_id}
if cfg['JOIN_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# Check if we have a pass, use it if we do
if cfg['JOIN_DEVICE'] != "":
# A sample pushbullet notification
apobj.add('join://' + str(cfg['JOIN_API']) + "/" + str(cfg['JOIN_DEVICE']))
else:
# A sample pushbullet notification
apobj.add('join://' + str(cfg['JOIN_API']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending growl apprise notification. Continuing processing...")
# Kodi
# kodi://{hostname}:{port} || kodi: // {userid}: {password} @ {hostname}:{port}
if cfg['KODI_HOST'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# check if we have login details, if so use them
if cfg['KODI_USER'] != "":
apobj.add('kodi://' + str(cfg['KODI_USER']) + ":" + str(cfg['KODI_PASS']) + "@" + str(
cfg['KODI_HOST']) + ":" + str(cfg['KODI_PORT']))
else:
if cfg['KODI_PORT'] != "":
# we need to check if they are using secure or this will fail
if cfg['KODI_PORT'] == "443":
apobj.add('kodis://' + str(cfg['KODI_HOST']) + ":" + str(cfg['KODI_PORT']))
else:
apobj.add('kodi://' + str(cfg['KODI_HOST']) + ":" + str(cfg['KODI_PORT']))
else:
apobj.add('kodi://' + str(cfg['KODI_HOST']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending KODI apprise notification. Continuing processing...")
# KUMULOS
if cfg['KUMULOS_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# A sample pushbullet notification
apobj.add('kumulos://' + str(cfg['KUMULOS_API']) + "/" + str(cfg['KUMULOS_SERVERKEY']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending KUMULOS apprise notification. Continuing processing...")
# LEMETRIC
if cfg['LAMETRIC_MODE'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# find the correct mode
if cfg['LAMETRIC_MODE'] == "device":
apobj.add('lametric://' + str(cfg['LAMETRIC_API']) + "@" + str(cfg['LAMETRIC_HOST']))
elif cfg['LAMETRIC_MODE'] == "cloud":
apobj.add('lametric://' + str(cfg['LAMETRIC_APP_ID']) + "@" + str(cfg['LAMETRIC_TOKEN']))
else:
logging.error("LAMETRIC apprise LAMETRIC_MODE not set")
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending LAMETRIC apprise notification. Continuing processing...")
# MAILGUN
if cfg['MAILGUN_DOMAIN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# A sample pushbullet notification
apobj.add('mailgun://' + str(cfg['MAILGUN_USER']) + "@" + str(cfg['MAILGUN_DOMAIN']) + "/" + str(
cfg['MAILGUN_APIKEY']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending mailgun apprise notification. Continuing processing...")
# MATRIX
if cfg['MATRIX_HOST'] != "" or cfg['MATRIX_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
if cfg['MATRIX_HOST'] != "":
apobj.add('matrixs://' + str(cfg['MATRIX_USER']) + ":" + str(cfg['MATRIX_PASS']) + "@" + str(
cfg['MATRIX_HOST'])) # + "/#general/#apprise")
else:
apobj.add('matrix://' + str(cfg['MATRIX_TOKEN']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending Matrix apprise notification. Continuing processing...")
# Microsoft teams
if cfg['MSTEAMS_TOKENA'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# msteams://{tokenA}/{tokenB}/{tokenC}/
apobj.add('msteams://' + str(cfg['MSTEAMS_TOKENA']) + "/" + str(cfg['MSTEAMS_TOKENB']) + "/" + str(
cfg['MSTEAMS_TOKENC']) + "/")
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending Microsoft teams apprise notification. Continuing processing...")
# Nextcloud
if cfg['NEXTCLOUD_HOST'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add(
'nclouds://' + str(cfg['NEXTCLOUD_ADMINUSER']) + ":" + str(cfg['NEXTCLOUD_ADMINPASS']) + "@" + str(
cfg['NEXTCLOUD_HOST']) + "/" + str(cfg['NEXTCLOUD_NOTIFY_USER']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending nextcloud apprise notification. Continuing processing...")
# Notica
if cfg['NOTICA_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('notica://' + str(cfg['NOTICA_TOKEN']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending notica apprise notification. Continuing processing...")
# Notifico
if cfg['NOTIFICO_PROJECTID'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
            apobj.add('notifico://' + str(cfg['NOTIFICO_PROJECTID']) + "/" + str(cfg['NOTIFICO_MESSAGEHOOK']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending notifico apprise notification. continuing processing...")
# Office365
if cfg['OFFICE365_TENANTID'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# o365://{tenant_id}:{account_email}/{client_id}/{client_secret}/
# TODO: we might need to escape/encode the client_secret
# Replace ? with %3F and @ with %40
apobj.add('o365://' + str(cfg['OFFICE365_TENANTID']) + ":" + str(cfg['OFFICE365_ACCOUNTEMAIL']) + "/" + str(
cfg['OFFICE365_CLIENT_ID']) + "/" + str(cfg['OFFICE365_CLIENT_SECRET']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending Office365 apprise notification. continuing processing...")
# Popcorn
if cfg['POPCORN_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
if cfg['POPCORN_EMAIL'] != "":
apobj.add('popcorn://' + str(cfg['POPCORN_API']) + "/" + str(cfg['POPCORN_EMAIL']))
if cfg['POPCORN_PHONENO'] != "":
apobj.add('popcorn://' + str(cfg['POPCORN_API']) + "/" + str(cfg['POPCORN_PHONENO']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending popcorn apprise notification. Continuing processing...")
# PROWL
if cfg['PROWL_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
if cfg['PROWL_PROVIDERKEY'] != "":
apobj.add('prowl://' + str(cfg['PROWL_API']) + "/" + str(cfg['PROWL_PROVIDERKEY']))
else:
apobj.add('prowl://' + str(cfg['PROWL_API']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending notifico apprise notification. continuing processing...")
# Pushjet
# project is dead not worth coding fully
if cfg['PUSHJET_HOST'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('pjet://' + str(cfg['PUSHJET_HOST']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending pushjet apprise notification. continuing processing...")
# techulus push
if cfg['PUSH_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('push://' + str(cfg['PUSH_API']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending techulus push apprise notification. continuing processing...")
# PUSHED
if cfg['PUSHED_APP_KEY'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('pushed://' + str(cfg['PUSHED_APP_KEY']) + "/" + str(cfg['PUSHED_APP_SECRET']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending PUSHED apprise notification. continuing processing...")
# PUSHSAFER
if cfg['PUSHSAFER_KEY'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('psafers://' + str(cfg['PUSHSAFER_KEY']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending pushsafer apprise notification. continuing processing...")
# ROCKETCHAT
# rocket://{webhook}@{hostname}/{@user}
if cfg['ROCKETCHAT_HOST'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# TODO: Add checks for webhook or default modes
# for now only the webhook will work
apobj.add('rocket://' + str(cfg['ROCKETCHAT_WEBHOOK']) + "@" + str(cfg['ROCKETCHAT_HOST']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending rocketchat apprise notification. continuing processing...")
# ryver
# ryver://{organization}/{token}/
if cfg['RYVER_ORG'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# TODO: Add checks for webhook or default modes
# for now only the webhook will work
apobj.add('ryver://' + str(cfg['RYVER_ORG']) + "/" + str(cfg['RYVER_TOKEN']) + "/")
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending RYVER apprise notification. continuing processing...")
# Sendgrid
# sendgrid://{apikey}:{from_email}
if cfg['SENDGRID_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# TODO: Add tomail
apobj.add('sendgrid://' + str(cfg['SENDGRID_API']) + ":" + str(cfg['SENDGRID_FROMMAIL']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending sendgrid apprise notification. continuing processing...")
# simplepush
# spush://{apikey}/
if cfg['SIMPLEPUSH_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('spush://' + str(cfg['SIMPLEPUSH_API']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending simplepush apprise notification. continuing processing...")
# slacks
# slack://{tokenA}/{tokenB}/{tokenC}
if cfg['SLACK_TOKENA'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('slack://' + str(cfg['SLACK_TOKENA']) + "/" + str(cfg['SLACK_TOKENB']) + "/" + str(
cfg['SLACK_TOKENC']) + "/" + str(cfg['SLACK_CHANNEL']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending slacks apprise notification. continuing processing...")
# SPARKPOST
# sparkpost://{user}@{domain}/{apikey}/ || sparkpost://{user}@{domain}/{apikey}/{email}/
if cfg['SPARKPOST_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('sparkpost://' + str(cfg['SPARKPOST_USER']) + "@" + str(cfg['SPARKPOST_HOST']) + "/" + str(
cfg['SPARKPOST_API']) + "/" + str(cfg['SPARKPOST_EMAIL']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending SparkPost apprise notification. continuing processing...")
# spontit
# spontit://{user}@{apikey}
if cfg['SPONTIT_API'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('spontit://' + str(cfg['SPONTIT_USER_ID']) + "@" + str(cfg['SPONTIT_API']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending Spontit apprise notification. continuing processing...")
# Telegram
# tgram://{bot_token}/{chat_id}/ || tgram://{bot_token}/
if cfg['TELEGRAM_BOT_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('tgram://' + str(cfg['TELEGRAM_BOT_TOKEN']) + "/" + str(cfg['TELEGRAM_CHAT_ID']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending Telegram apprise notification. continuing processing...")
# Twist
# twist://{email}/{password} || twist://{password}:{email}
if cfg['TWIST_EMAIL'] != "":
try:
# Create an Apprise instance
# TODO: add channel var and check if its blank
apobj = apprise.Apprise()
apobj.add('twist://' + str(cfg['TWIST_EMAIL']) + "/" + str(cfg['TWIST_PASS']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending Twist apprise notification. continuing processing...")
# XBMC
# xbmc://{userid}:{password}@{hostname}:{port} || xbmc://{hostname}:{port}
if cfg['XBMC_HOST'] != "":
try:
# Create an Apprise instance
# TODO: add channel var and check if its blank
apobj = apprise.Apprise()
# if we get user we use the username and pass
if cfg['XBMC_USER'] != "":
apobj.add('xbmc://' + str(cfg['XBMC_USER']) + ":" + str(cfg['XBMC_PASS']) + "@" + str(
cfg['XBMC_HOST']) + ":" + str(cfg['XBMC_PORT']))
else:
apobj.add('xbmc://' + str(cfg['XBMC_HOST']) + ":" + str(cfg['XBMC_PORT']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending XBMC apprise notification. continuing processing...")
# XMPP
# xmpp://{password}@{hostname}:{port} || xmpps://{userid}:{password}@{hostname}
if cfg['XMPP_HOST'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
# Is the user var filled
if cfg['XMPP_USER'] != "":
# xmpps://{userid}:{password}@{hostname}
apobj.add(
'xmpps://' + str(cfg['XMPP_USER']) + ":" + str(cfg['XMPP_PASS']) + "@" + str(cfg['XMPP_HOST']))
else:
# xmpp: // {password} @ {hostname}: {port}
apobj.add('xmpp://' + str(cfg['XMPP_PASS']) + "@" + str(cfg['XMPP_HOST']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending XMPP apprise notification. continuing processing...")
# Webex teams
# wxteams://{token}/
if cfg['WEBEX_TEAMS_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('wxteams://' + str(cfg['WEBEX_TEAMS_TOKEN']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending Webex teams apprise notification. continuing processing...")
# Zulip
# zulip://{botname}@{organization}/{token}/
if cfg['ZILUP_CHAT_TOKEN'] != "":
try:
# Create an Apprise instance
apobj = apprise.Apprise()
apobj.add('zulip://' + str(cfg['ZILUP_CHAT_BOTNAME']) + "@" + str(cfg['ZILUP_CHAT_ORG']) + "/" + str(
cfg['ZILUP_CHAT_TOKEN']))
# Then notify these services any time you desire. The below would
# notify all of the services loaded into our Apprise object.
apobj.notify(
body,
title=title,
)
except Exception: # noqa: E722
logging.error("Failed sending Zulip apprise notification. continuing processing...")
| 16,478
|
def listall_comments():
"""Lists rule-based labels
Returns:
list: A list of FileTypeComments
"""
return listall('comment')
| 16,479
|
def batch_iterable(iterable, n: int):
"""Return an iterable of batches with size n"""
it = iter(iterable)
for first in it:
yield list(chain([first], islice(it, n-1)))
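# A minimal usage sketch added for illustration.
def _batch_iterable_example():
    return list(batch_iterable(range(7), 3))  # [[0, 1, 2], [3, 4, 5], [6]]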
| 16,480
|
def test_empty_query(app):
"""Test building an empty query."""
with app.app_context():
q = RecordsSearch()
assert q.to_dict()['query'] == {'match_all': {}}
q = RecordsSearch.faceted_search('')
assert q._s.to_dict()['query'] == {'match_all': {}}
q = RecordsSearch()[10]
assert q.to_dict()['from'] == 10
assert q.to_dict()['size'] == 1
q = q[10:20]
assert q.to_dict()['from'] == 10
assert q.to_dict()['size'] == 10
q = q.sort({'field1': {'order': 'asc'}})
assert q.to_dict()['sort'][0] == {'field1': {'order': 'asc'}}
q = q.sort()
assert 'sort' not in q.to_dict()
q = q.sort('-field1')
assert q.to_dict()['sort'][0] == {'field1': {'order': 'desc'}}
q = q.sort('field2', {'field3': {'order': 'asc'}})
assert q.to_dict()['sort'][0] == 'field2'
assert q.to_dict()['sort'][1] == {'field3': {'order': 'asc'}}
q.sort()
q = RecordsSearch()
q = q.highlight('field1', index_options='offsets')
assert len(q.to_dict()['highlight']['fields']) == 1
assert q.to_dict()['highlight']['fields']['field1'] == {
'index_options': 'offsets'
}
q = q.highlight('field2')
assert len(q.to_dict()['highlight']['fields']) == 2
assert q.to_dict()['highlight']['fields']['field1'] == {
'index_options': 'offsets'
}
assert q.to_dict()['highlight']['fields']['field2'] == {}
q = q.highlight()
        assert 'highlight' not in q.to_dict()
| 16,481
|
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
"%s%s%s" % (
random.getstate(),
time.time(),
UNSECURE_RANDOM_STRING)
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)])
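# A minimal usage sketch added for illustration. With the default alphabet the
# result is a 12-character alphanumeric token; a custom alphabet narrows it.
def _get_random_string_example():
    token = get_random_string()                                   # e.g. 'f3kZ9qLmP0aB'
    pin = get_random_string(length=6, allowed_chars="0123456789")
    return token, pin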
| 16,482
|
def make_html_doc(body, root, resource_dir=None, title=None, meta=None):
"""Generate HTML document
Parameters
----------
body : fmtxt-object
FMTXT object which should be formatted into an HTML document.
root : str
Path to the directory in which the HTML file is going to be located.
resource_dir : None | str
Name for the directory containing resources like images, relative to
root. If None, images are embedded.
title : None | FMText
Document title. The default is to try to infer the title from the body
or use "Untitled".
meta : dict
Meta-information for document head.
Returns
-------
html : str
HTML document.
"""
if title is None:
if hasattr(body, '_site_title') and body._site_title is not None:
title = html(body._site_title)
elif hasattr(body, '_heading'):
title = html(body._heading)
else:
title = "Untitled"
if meta:
meta = '<meta %s>\n' % ' '.join('%s=%r' % x for x in meta.items())
else:
meta = ''
style = '\n'.join(('', '<style>', STYLE, '</style>'))
env = {'root': root, 'resource_dir': resource_dir}
txt_body = html(body, env)
return _html_doc_template.format(meta=meta, title=title, style=style,
body=txt_body)
| 16,483
|
def blur(img):
"""
    Blur an image by averaging each pixel with its neighbours (a 3x3 window in
    the interior, smaller windows along the edges).
    :param img: SimpleImage to blur
    :return: a new SimpleImage containing the blurred result
"""
blank_img = SimpleImage.blank(img.width, img.height)
for x in range(1, img.width-1):
for y in range(1, img.height-1):
left1_pixel = img.get_pixel(x-1, y-1)
left2_pixel = img.get_pixel(x-1, y)
left3_pixel = img.get_pixel(x-1, y+1)
center1_pixel = img.get_pixel(x, y-1)
center2_pixel = img.get_pixel(x, y)
center3_pixel = img.get_pixel(x, y+1)
right1_pixel = img.get_pixel(x+1, y-1)
right2_pixel = img.get_pixel(x+1, y)
right3_pixel = img.get_pixel(x+1, y+1)
new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (left1_pixel.red + left2_pixel.red + left3_pixel.red + center1_pixel.red + center2_pixel.red
+ center3_pixel.red + right1_pixel.red + right2_pixel.red + right3_pixel.red) // 9
new_pixel.green = (left1_pixel.green + left2_pixel.green + left3_pixel.green + center1_pixel.green +
center2_pixel.green + center3_pixel.green + right1_pixel.green + right2_pixel.green +
right3_pixel.green) // 9
new_pixel.blue = (left1_pixel.blue + left2_pixel.blue + left3_pixel.blue + center1_pixel.blue +
center2_pixel.blue + center3_pixel.blue + right1_pixel.blue + right2_pixel.blue +
right3_pixel.blue) // 9
for x in range(1):
for y in range(1, img.height-1):
"""edge x=0"""
edge1_pixel = img.get_pixel(x, y-1)
edge2_pixel = img.get_pixel(x, y)
edge3_pixel = img.get_pixel(x, y+1)
edge4_pixel = img.get_pixel(x+1, y-1)
edge5_pixel = img.get_pixel(x+1, y)
edge6_pixel = img.get_pixel(x+1, y+1)
new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(img.width-1, img.width):
for y in range(1, img.height-1):
"""edge x=width-1"""
edge1_pixel = img.get_pixel(x-1, y-1)
edge2_pixel = img.get_pixel(x-1, y)
edge3_pixel = img.get_pixel(x-1, y+1)
edge4_pixel = img.get_pixel(x, y-1)
edge5_pixel = img.get_pixel(x, y)
edge6_pixel = img.get_pixel(x, y+1)
            new_pixel = blank_img.get_pixel(x, y)  # write into the output image, matching the other edge cases
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(1, img.width-1):
for y in range(1):
"""edge y=0"""
edge1_pixel = img.get_pixel(x-1, y)
edge2_pixel = img.get_pixel(x, y)
edge3_pixel = img.get_pixel(x+1, y)
edge4_pixel = img.get_pixel(x-1, y+1)
edge5_pixel = img.get_pixel(x, y+1)
edge6_pixel = img.get_pixel(x+1, y+1)
            new_pixel = blank_img.get_pixel(x, y)  # write into the output image, matching the other edge cases
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(1, img.width-1):
for y in range(img.height-1, img.height):
"""edge y=height-1"""
edge1_pixel = img.get_pixel(x-1, y-1)
edge2_pixel = img.get_pixel(x, y-1)
edge3_pixel = img.get_pixel(x+1, y-1)
edge4_pixel = img.get_pixel(x-1, y)
edge5_pixel = img.get_pixel(x, y)
edge6_pixel = img.get_pixel(x+1, y)
            new_pixel = blank_img.get_pixel(x, y)  # write into the output image, matching the other edge cases
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
return blank_img
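# A minimal usage sketch added for illustration. SimpleImage is the same helper
# class the function above relies on; the file path is hypothetical.
def _blur_example():
    img = SimpleImage("images/sample.png")    # hypothetical input file
    blurred = blur(img)
    blurred.show()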
| 16,484
|
def generalized_euler_solver(descr, coefs, rho0, v0, t, x, bc="periodic", num_integrator_steps=1, fix_vvx_term=True):
"""Solver for Euler hydro system.
Builds RHS of the Euler equation Dv_t = f(...) from symbolic description.
"""
t_pde = np.linspace(t[0], t[-1], len(t)*num_integrator_steps) # Create a refined t-grid
nt, nx = len(t), len(x)
rho_ev, v_ev = np.zeros((len(t), nx)), np.zeros((len(t), nx))
rho, v = rho0, v0
dt = t_pde[1] - t_pde[0]
dx = x[1] - x[0]
for it, t in enumerate(t_pde):
rhox = FiniteDiff(rho, dx, 1, bc)
vx = FiniteDiff(v, dx, 1, bc)
rho_t = -(rhox*v + vx*rho)
rho_next = rho + dt*rho_t
# Add RHS terms to Dt(v) = v_t+v*v_x = f (...)
f = np.sum([coefs[i]*get_euler_term_from_descr(descr_i, rho, v, x) \
for i, descr_i in enumerate(descr)], axis=0)
v_t = f
if fix_vvx_term:
v_t -= v*vx
v_next = v + dt*v_t # D_t(v) = f(rho, v, ...)
step = it // num_integrator_steps
if it % num_integrator_steps == 0:
rho_ev[step, :] = rho.copy()
v_ev[step, :] = v.copy()
rho = rho_next.copy()
v = v_next.copy()
if np.isnan(np.sum(rho)):
# Solution exploded, interrupt
return np.array([np.nan]), np.array([np.nan])
return rho_ev, v_ev
| 16,485
|
def generate_rnr_features(in_query_stream, outfile, collection_id, cluster_id, num_rows=30, config=load_config()):
"""
Iterates over a labelled query stream and generates a feature file with the columns:
<query_num>,<answer_id>,<fea_0>,<fea_1>,...,<fea_n>,<relevance_label>
:param rnr_debug_helpers.queries.LabelledQueryStream in_query_stream:
:param File outfile: where the feature file contents will be written to
:param str collection_id: the RnR solr collection to use for finding search results
:param str cluster_id: the RnR solr cluster id to use for finding search results
:param int or None num_rows: The number of search results that will be retrieved for each query. Defaults to 30
similar to RnR Web UI/Tooling
:param ConfigParser config: A config loaded with the credentials to use
"""
rnr_cluster = RetrieveAndRankProxy(solr_cluster_id=cluster_id, config=config)
writer = csv.writer(outfile)
# Iterate over queries and generate feature vectors
stats = defaultdict(int)
is_first_row = True
for qid, query in enumerate(in_query_stream):
labels_for_relevant_answer_ids = _parse_correct_answer_ids_from_query(query)
_collect_stats(stats, labels_for_relevant_answer_ids)
LOGGER.debug("Getting feature vectors for query:<<%s>>" % query.get_qid())
rnr_search_results = rnr_cluster.get_fcselect_features(query_text=query.get_qid(), collection_id=collection_id,
generate_header=is_first_row,
num_results_to_return=num_rows)
if len(rnr_search_results) == 0:
stats["num_queries_with_zero_rnr_results"] += 1
else:
if is_first_row:
writer.writerow([_QID_COLUMN_NAME] + rnr_search_results.pop(0) + [_GT_COLUMN_NAME])
is_first_row = False
stats["num_queries_with_atleast_one_search_result"] += 1
stats['num_search_results_retrieved'] += len(rnr_search_results)
num_possible_correct, num_correct_answers_in_search_results = \
_print_feature_vectors_and_check_for_correct_answers(writer, rnr_search_results, '%d' % (qid + 1),
labels_for_relevant_answer_ids)
if num_possible_correct != num_correct_answers_in_search_results:
stats['num_queries_where_at_least_correct_answer_didnt_appear_in_rnr'] += 1
stats["num_correct_in_search_result"] += num_correct_answers_in_search_results
if stats["num_queries"] % 100 == 0:
LOGGER.info("Processed %d queries from input file" % stats['num_queries'])
_average_stats_across_collection(stats)
LOGGER.info("Finished processing %d queries from input file" % stats['num_queries'])
return stats
| 16,486
|
def get_theme_settings(theme):
"""
    Return the stylesheet settings from which the docutils HTML writer loads its CSS files.
"""
stylesheet = {}
search_paths = [
os.path.abspath(os.path.dirname(os.path.dirname(html5_polyglot.__file__))),
]
docutils_theme_path = ''
for path in search_paths:
if os.path.exists(os.path.join(path, 'html5_polyglot', 'template.txt')):
docutils_theme_path = path
break
logger.debug('docutils theme path: %s' % docutils_theme_path)
stylesheet['stylesheet_dirs'] = [
os.path.join(docutils_theme_path, 'html4css1'),
os.path.join(docutils_theme_path, 'html5_polyglot'),
]
pygments_path = os.path.join(__home_data_path__, 'themes', 'reStructuredText', 'pygments.css')
if os.path.exists(pygments_path):
stylesheet['stylesheet_path'] = pygments_path
stylesheet['syntax_highlight'] = 'short'
# docutils default theme
if not theme or theme == 'default':
return stylesheet
# third part theme
themes = get_rst_themes()
styles = themes.get(theme)
# stylesheet_path : css file path
# syntax_highlight: short
# template: template file path
stylesheet['stylesheet_dirs'].extend(styles['stylesheet_dirs'])
if 'syntax_highlight' in styles:
stylesheet['syntax_highlight'] = styles['syntax_highlight']
if 'stylesheet_path' in styles:
css_paths = styles['stylesheet_path'].split(',')
if 'stylesheet_path' in stylesheet:
css_paths += stylesheet['stylesheet_path'].split(',')
stylesheet['stylesheet_path'] = ','.join(css_paths)
if 'template' in styles:
old_path = styles['template']
new_path = os.path.abspath(
os.path.join(__home_data_path__,
'themes', 'reStructuredText',
theme,
old_path))
stylesheet['template'] = new_path
return stylesheet
| 16,487
|
def pick_unassigned_variable(board, strategy, unassigned_heap):
"""
:returns: (row_index, col_index)
"""
if strategy == Strategies.FIRST_FOUND:
return __pick_unassigned_variable_first_found(board)
elif strategy == Strategies.MIN_ROW:
return __pick_unassigned_variable_min_row(board)
else:
(rowi, coli) = (-1, -1)
if strategy == Strategies.MIN_HEAP:
(rowi, coli) = __pick_unassigned_variable_heap(board, unassigned_heap)
else:
(rowi, coli) = __pick_unassigned_variable_heap_2(board, unassigned_heap)
# update the heap
unassigned_heap["row"][rowi] -= 1
unassigned_heap["col"][coli] -= 1
ssi = get_subsquare_index((rowi, coli))
unassigned_heap["subsquare"][ssi] -= 1
return (rowi, coli)
| 16,488
|
def CONTAINS_INTS_FILTER(arg_value):
"""Only keeps int sequences or int tensors."""
return arg_value.elem_type is int or arg_value.has_int_dtypes()
| 16,489
|
def pad_renderable(renderable, offset):
"""
Pad a renderable, subject to a particular truncation offset.
"""
if offset < 0:
raise Exception("invalid offset!")
if offset == 0:
return RenderGroup(_RULE, Padding(renderable, 1))
if offset == 1:
return Padding(renderable, 1)
else:
return Padding(renderable, (0, 1, 1, 1))
| 16,490
|
def hull_test(self):
""" Perform a test run of Ilustrado with dummy DFT,
on all cores of current machine.
"""
from matador.hull import QueryConvexHull
from ilustrado.ilustrado import ArtificialSelector
res_files = glob.glob(REAL_PATH + "/data/hull-KP-KSnP_pub/*.res")
cursor = [res2dict(_file, db=True)[0] for _file in res_files]
# prepare best structures from hull as gene pool
hull = QueryConvexHull(
cursor=cursor,
elements=["K", "P"],
subcmd="hull",
no_plot=True,
source=True,
summary=True,
hull_cutoff=0,
)
cursor = hull.hull_cursor[1:-1]
print("Running on {} cores on {}.".format(NUM_CORES, os.uname()[1]))
minsep_dict = {("K", "K"): 2.5}
ArtificialSelector(
gene_pool=cursor,
hull=hull,
debug=False,
fitness_metric="hull",
nprocs=NUM_CORES,
check_dupes=0,
check_dupes_hull=False,
sandbagging=True,
minsep_dict=minsep_dict,
ncores=1,
testing=True,
mutations=["nudge_positions", "permute_atoms", "random_strain", "vacancy"],
max_num_mutations=1,
max_num_atoms=50,
mutation_rate=0.5,
crossover_rate=0.5,
num_generations=3,
population=15,
num_survivors=10,
elitism=0.5,
loglevel="debug",
)
run_hash = glob.glob("*.json")[0].split("-")[0]
new_life = ArtificialSelector(
gene_pool=cursor,
hull=hull,
debug=False,
fitness_metric="hull",
recover_from=run_hash,
load_only=True,
check_dupes=0,
check_dupes_hull=False,
minsep_dict=minsep_dict,
mutations=["nudge_positions", "permute_atoms", "random_strain", "vacancy"],
sandbagging=True,
nprocs=NUM_CORES,
ncores=1,
testing=True,
max_num_mutations=1,
max_num_atoms=50,
mutation_rate=0.5,
crossover_rate=0.5,
num_generations=10,
population=15,
num_survivors=10,
elitism=0.5,
loglevel="debug",
)
self.assertTrue(len(new_life.generations[-1]) >= 15)
self.assertTrue(len(new_life.generations[-1].bourgeoisie) >= 10)
new_life.start()
self.assertTrue(os.path.isdir(new_life.run_hash + "-results"))
num_structures = len(glob.glob(new_life.run_hash + "-results/*.res"))
self.assertTrue(num_structures > 5)
| 16,491
|
def draw_annotation_names(frame_bgr, names):
""" Writes the trackers names on the screen. """
global colors
if len(names) > 0:
num_names = len(names)
cv2.rectangle(frame_bgr, (10, frame_bgr.shape[0] - 15 - 30 * num_names),
(150, frame_bgr.shape[0] - 10),
(255, 255, 255), -1)
cv2.rectangle(frame_bgr, (10, frame_bgr.shape[0] - 15 - 30 * num_names),
(150, frame_bgr.shape[0] - 10),
(0, 0, 0), 2)
for i, name in enumerate(names):
cv2.putText(frame_bgr, name, (15, frame_bgr.shape[0] - 15 - 30 * (num_names - 1 - i)),
cv2.FONT_HERSHEY_PLAIN, 2, colors[i % len(colors)], 2)
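# Hedged usage sketch (assumptions): cv2 (OpenCV) is imported elsewhere in the original
# module and `colors` is the module-level list of BGR tuples used by the function above;
# the blank frame and window handling below are illustrative.
import numpy as np

colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
frame = np.zeros((480, 640, 3), dtype=np.uint8)
draw_annotation_names(frame, ["person", "car"])
cv2.imshow("annotations", frame)
cv2.waitKey(0)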
| 16,492
|
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
:param module: Module name.
:param dt: DocType name.
:param dn: Document name.
    :param force: Reload even if `modified` timestamp matches.
    :param reset_permissions: Reset the document's permissions from the JSON definition.
"""
import frappe.modules
return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
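# Hedged usage sketch (assumption): must run inside an initialised Frappe site context
# (e.g. a bench console or a patch); the module/doctype names are illustrative.
reload_doc("core", "doctype", "user", force=True)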
| 16,493
|
def transform_box_coord_pseudo(H, W, box_vertices, dataset_name):
    """
    Transform box_vertices to match the coordinate system of the attributions.
    :param H: Desired height of the image
    :param W: Desired width of the image
    :param box_vertices: iterable of mutable [row, col] vertices in the dataset's metric frame
    :param dataset_name: one of 'CadcDataset', 'KittiDataset', 'WaymoDataset'
    :return: transformed box_vertices
    """
    if dataset_name == 'CadcDataset':
        y_range = 100.0
    elif dataset_name == 'KittiDataset':
        # Note: the range for KITTI is different now
        y_range = 79.36
    elif dataset_name == 'WaymoDataset':
        y_range = 168.96
    else:
        raise ValueError("Unknown dataset: {}".format(dataset_name))
    new_scale = H / y_range
    # TODO: verify the following for Waymo
    for vertex in box_vertices:
        vertex[0] = vertex[0] * new_scale
        vertex[0] = H - vertex[0]  # flip the vertical axis
        vertex[1] = vertex[1] * new_scale
    return box_vertices
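# Hedged usage sketch (assumptions): vertices are mutable [row, col] pairs in the
# dataset's metric frame; the image size and coordinates below are illustrative.
box = [[10.0, 20.0], [12.0, 24.0], [14.0, 20.0], [12.0, 16.0]]
box = transform_box_coord_pseudo(H=496, W=432, box_vertices=box,
                                 dataset_name='KittiDataset')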
| 16,494
|
def plot_a_bar(x, y,
plot_cmd=plt.loglog,
rec_width=0.1, # box ("rectangle") width, log scale
rec_taille_fac=0.3, # notch width parameter
styles={'color': 'b'},
linewidth=1,
fill_color=None, # None means no fill
fill_transparency=0.7 # 1 should be invisible
):
"""plot/draw a notched error bar, x is the x-position,
y[0,1,2] are lower, median and upper percentile respectively.
hold(True) to see everything.
TODO: with linewidth=0, inf is not visible
"""
if not np.isfinite(y[2]):
y[2] = y[1] + 100 * (y[1] - y[0])
if plot_cmd in (plt.loglog, plt.semilogy):
y[2] = (1 + y[1]) * (1 + y[1] / y[0])**10
if not np.isfinite(y[0]):
y[0] = y[1] - 100 * (y[2] - y[1])
if plot_cmd in (plt.loglog, plt.semilogy):
y[0] = y[1] / (1 + y[2] / y[1])**10
styles2 = {}
for s in styles:
styles2[s] = styles[s]
styles2['linewidth'] = linewidth
styles2['markeredgecolor'] = styles2['color']
    dim = 1  # constant scale factor used in the expressions below (kept at 1)
x0 = x
if plot_cmd in (plt.loglog, plt.semilogx):
r = np.exp(rec_width) # ** ((1. + i_target / 3.) / 4) # more difficult targets get a wider box
x = [x0 * dim / r, x0 * r * dim] # assumes log-scale of x-axis
xm = [x0 * dim / (r**rec_taille_fac), x0 * dim * (r**rec_taille_fac)]
else:
r = rec_width
x = [x0 * dim - r, x0 * dim + r]
xm = [x0 * dim - (r * rec_taille_fac), x0 * dim + (r * rec_taille_fac)]
y = np.array(y) / dim
if fill_color is not None:
plt.fill_between([x[0], xm[0], x[0], x[1], xm[1], x[1], x[0]],
[y[0], y[1], y[2], y[2], y[1], y[0], y[0]],
color=fill_color, alpha=1-fill_transparency)
plot_cmd([x[0], xm[0], x[0], x[1], xm[1], x[1], x[0]],
[y[0], y[1], y[2], y[2], y[1], y[0], y[0]],
markersize=0, **styles2)
styles2['linewidth'] = 0
plot_cmd([x[0], x[1], x[1], x[0], x[0]],
[y[0], y[0], y[2], y[2], y[0]],
**styles2)
styles2['linewidth'] = 2 # median
plot_cmd([x[0], x[1]], [y[1], y[1]],
markersize=0, **styles2)
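# Hedged usage sketch: draw a single notched bar on log-log axes; the values are
# illustrative and matplotlib.pyplot is assumed to be imported as plt, as above.
plt.figure()
plot_a_bar(10.0, [0.8, 1.0, 1.6], plot_cmd=plt.loglog,
           styles={'color': 'r'}, fill_color='0.85')
plt.show()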
| 16,495
|
def create_api_app(global_conf, **local_conf):
"""Creates MainAPI application"""
controllers = {}
api_version = global_conf.get('api_version')
if api_version == 'v2.0':
controllers.update({
'/log/single': v2_logs.Logs()
})
elif api_version == 'v3.0':
controllers.update({
'/logs': v3_logs.Logs()
})
wsgi_app = falcon.API(
request_type=request.Request
)
for route, ctrl in controllers.items():
wsgi_app.add_route(route, ctrl)
error_handlers.register_error_handlers(wsgi_app)
return wsgi_app
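# Hedged usage sketch (assumption): mirrors how a PasteDeploy app factory would call
# this function; the configuration values are illustrative.
wsgi_application = create_api_app({'api_version': 'v3.0'})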
| 16,496
|
def gauss_4deg(x, b, ampl, cent, sigm):
    """ Simple 4 parameter Gaussian (constant floor plus Gaussian peak)
    Args:
        x (float or ndarray): Position(s) at which to evaluate
        b (float): Floor
        ampl (float): Amplitude
        cent (float): Centroid
        sigm (float): sigma
    Returns:
        float or ndarray: Evaluated Gaussian
    """
    return b + ampl*np.exp(-1.*(cent-x)**2/2/sigm**2)
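# Hedged usage sketch: evaluate the profile on a grid and fit noisy samples with scipy
# (scipy is an assumption, not used elsewhere in this snippet).
import numpy as np
from scipy.optimize import curve_fit

xgrid = np.linspace(-5.0, 5.0, 201)
model = gauss_4deg(xgrid, 0.1, 2.0, 0.5, 1.2)
noisy = model + 0.05 * np.random.randn(xgrid.size)
popt, pcov = curve_fit(gauss_4deg, xgrid, noisy, p0=[0.0, 1.0, 0.0, 1.0])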
| 16,497
|
def get_spreads(pair, since):
    """Return recent spread data for the given pair, starting from `since`."""
    api_command = API_LINK + f'Spreads?pair={pair}&since={since}'
    resp = requests.get(api_command).json()
    if not resp['error']:  # no errors reported by the API
        return resp
    return resp['error']
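# Hedged usage sketch (assumptions): API_LINK points at a REST endpoint whose Spreads
# call accepts `pair` and `since`; the pair name and timestamp are illustrative.
spreads = get_spreads('XBTUSD', 1616663618)
print(spreads)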
| 16,498
|
def _ValidateMutexOnConfigIdAndOrganization(args):
"""Validates that only a full resource name or split arguments are provided."""
if "/" in args.notificationConfigId:
if args.organization is not None:
raise InvalidNotificationConfigError(
"Only provide a full resouce name "
"(organizations/123/notificationConfigs/test-config) or an --organization "
"flag, not both.")
else:
if args.organization is None:
raise InvalidNotificationConfigError(
"Organization must be provided if it is not included in notification id."
)
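# Hedged usage sketch (assumption): `args` is an argparse-style namespace, as produced
# by the surrounding CLI; both calls below should pass validation without raising.
from argparse import Namespace

_ValidateMutexOnConfigIdAndOrganization(Namespace(
    notificationConfigId='organizations/123/notificationConfigs/test-config',
    organization=None))
_ValidateMutexOnConfigIdAndOrganization(Namespace(
    notificationConfigId='test-config', organization='123'))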
| 16,499
|