content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def generate_new_admin_class():
    """Build a fresh ``Admin`` subclass bound to its own ``DashboardView``.

    Each ``setup_admin`` call needs an independent dashboard view, so new
    subclasses are created on every invocation instead of being shared.
    """
    dashboard_cls = type('MockDashboard', (DashboardView,), {})
    return type('MockAdmin', (Admin,), {'dashboard_class': dashboard_cls})
def visualize_warp(rgb, oflow):
    """Denormalize an RGB batch, warp with optical flow, and dump debug JPEGs.

    Prints summary statistics for the subsampled BGR frames and the flow,
    then writes ``bgr1.jpg``, ``bgr2.jpg`` and ``warp.jpg`` to a fixed path.
    """
    rgb_np = utils.to_numpy(rgb)
    flow_np = utils.to_numpy(oflow)
    # ImageNet normalization constants used to undo the preprocessing.
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    rgb_np = np.moveaxis(rgb_np, -3, -1)
    rgb_np = np.clip((rgb_np * imagenet_std + imagenet_mean) * 255, 0, 255)
    # Reverse channel order (RGB -> BGR for OpenCV) and subsample.
    bgr = rgb_np[..., ::-1].astype(np.uint8)[0, 0]
    print(bgr.shape, np.amin(bgr), np.amax(bgr), np.mean(bgr),
          np.mean(np.absolute(bgr)))
    flow_np = np.moveaxis(flow_np, -3, -1)[0, 0]  # subsample
    print(flow_np.shape, np.amin(flow_np), np.amax(flow_np), np.mean(flow_np),
          np.mean(np.absolute(flow_np)))
    warped = imgproc.warp(bgr[4], bgr[5], flow_np[4])
    root = '/home/luoa/research'
    cv2.imwrite(os.path.join(root, 'bgr1.jpg'), bgr[4])
    cv2.imwrite(os.path.join(root, 'bgr2.jpg'), bgr[5])
    cv2.imwrite(os.path.join(root, 'warp.jpg'), warped)
def test_ok_none_charge_efficiency(schema: Type[BaseModel], data: Dict[str, Any]):
    """Verify the schema accepts an explicit ``None`` charge efficiency.

    Construction must not raise when ``charge_efficiency`` is None.
    """
    schema(**data, charge_efficiency=None)
def for_default_graph(*args, **kwargs):
    """Create or fetch the bookkeeper attached to the default graph.

    Args:
      *args: Arguments to pass into Bookkeeper's constructor.
      **kwargs: Arguments to pass into Bookkeeper's constructor.
    Returns:
      A new Bookkeeper, or the one already registered on the graph.
    Raises:
      ValueError: If args or kwargs are provided and the Bookkeeper already
        exists.
    """
    graph = tf.get_default_graph()
    existing = graph.get_collection(_BOOKKEEPER)
    if not existing:
        # First call for this graph: build and register the bookkeeper.
        books = BOOKKEEPER_FACTORY(*args, g=graph, **kwargs)
        graph.add_to_collection(_BOOKKEEPER, books)
        return books
    # Constructor arguments make no sense once the bookkeeper exists.
    if args or kwargs:
        raise ValueError('Requesting construction of a BookKeeper that already '
                         'exists: %s %s' % (args, kwargs))
    return existing[0]
def check_paragraph(index: int, line: str, lines: list) -> bool:
    """Return True if the line at *index* starts a new paragraph."""
    if index == 0:
        # The very first line opens a paragraph unless it is empty.
        return line != ""
    # Any non-empty line directly preceded by a blank line starts a paragraph.
    return line != "" and lines[index - 1] == ""
def most_similar(train_path, test_path, images_path, results_path, cuda=False):
    """
    Nearest Neighbor Baseline: Img2Vec library (https://github.com/christiansafka/img2vec/) is used to obtain
    image embeddings, extracted from ResNet-18. For each test image the cosine similarity with all the training images
    is computed in order to retrieve similar training images.
    The caption of the most similar retrieved image is returned as the generated caption of the test image.
    :param train_path: The path to the train data tsv file with the form: "image \t caption"
    :param test_path: The path to the test data tsv file with the form: "image \t caption"
    :param images_path: The path to the images folder
    :param results_path: The folder in which to save the results file
    :param cuda: Boolean value of whether to use cuda for image embeddings extraction. Default: False
    If a GPU is available pass True
    :return: Dictionary with the results
    """
    img2vec = Img2Vec(cuda=cuda)
    # Load train data
    train_data = pd.read_csv(train_path, sep="\t", header=None)
    train_data.columns = ["id", "caption"]
    # image id -> caption lookup, used to emit the caption of the best match
    train_images = dict(zip(train_data.id, train_data.caption))
    # Get embeddings of train images
    print("Calculating visual embeddings from train images")
    train_images_vec = {}
    print("Extracting embeddings for all train images...")
    for train_image in tqdm(train_data.id):
        image = Image.open(os.path.join(images_path, train_image))
        image = image.convert('RGB')
        vec = img2vec.get_vec(image)
        train_images_vec[train_image] = vec
    print("Got embeddings for train images.")
    # Load test data
    test_data = pd.read_csv(test_path, sep="\t", header=None)
    test_data.columns = ["id", "caption"]
    # Save IDs and raw image vectors separately but aligned
    ids = [i for i in train_images_vec]
    raw = np.array([train_images_vec[i] for i in train_images_vec])
    # Normalize image vectors to avoid normalized cosine and use dot
    # NOTE(review): this divides by the *sum* of components, not the L2 norm,
    # so the dot product below is not a true cosine similarity — confirm intended.
    raw = raw / np.array([np.sum(raw,1)] * raw.shape[1]).transpose()
    sim_test_results = {}
    for test_image in tqdm(test_data.id):
        # Get test image embedding
        image = Image.open(os.path.join(images_path, test_image))
        image = image.convert('RGB')
        vec = img2vec.get_vec(image)
        # Compute cosine similarity with every train image
        vec = vec / np.sum(vec)
        # Clone to do efficient mat mul dot
        test_mat = np.array([vec] * raw.shape[0])
        sims = np.sum(test_mat * raw, 1)
        top1 = np.argmax(sims)
        # Assign the caption of the most similar train image
        sim_test_results[test_image] = train_images[ids[top1]]
    # Save test results to tsv file
    df = pd.DataFrame.from_dict(sim_test_results, orient="index")
    df.to_csv(os.path.join(results_path, "onenn_results.tsv"), sep="\t", header=False)
    return sim_test_results
def pull_backgrounds() -> None:
    """Download every shape-generation background archive.

    Iterates over ``BACKGROUNDS_URLS`` declared in
    ``hawk_eye.data_generation.generate_config.py`` and stores each archive
    in the configured assets directory.
    """
    for archive_url in config.BACKGROUNDS_URLS:
        download_file(archive_url, config.ASSETS_DIR)
def h2o_H2OFrame_tolower():
    """
    Python API test: h2o.frame.H2OFrame.tolower()

    Lower-cases the species column of the iris data and checks the
    setosa rows are found under their lower-cased label.
    """
    iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
    iris["C5"] = iris["C5"].tolower()
    setosa_count = (iris["C5"] == 'iris-setosa').sum()
    assert setosa_count == 50, "h2o.H2OFrame.tolower() command is not working."
def remove_objects_without_content(page_spans, objects):
    """
    Drop objects (rows, columns, supercells, etc.) whose bounding box
    contains no text. Mutates *objects* in place.
    """
    # Iterate over a snapshot so in-place removal does not skip elements.
    for candidate in list(objects):
        text, _ = extract_text_inside_bbox(page_spans, candidate['bbox'])
        if not text.strip():
            objects.remove(candidate)
def find_packages(where='.', exclude=()):
    """Return a list of all Python packages found within directory 'where'.

    'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
    will be converted to the appropriate local path syntax. 'exclude' is a
    sequence of package names to exclude; '*' can be used as a wildcard in the
    names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
    'foo' itself).
    """
    # Hoisted: the original re-executed this import on every exclude pattern.
    from fnmatch import fnmatchcase

    out = []
    # Breadth-first walk of (directory, dotted-name prefix) pairs.
    stack = [(convert_path(where), '')]
    while stack:
        where, prefix = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            # A package is a dotted-name-safe directory containing __init__.py.
            if (
                '.' not in name
                and os.path.isdir(fn)
                and os.path.isfile(os.path.join(fn, '__init__.py'))
            ):
                out.append(prefix + name)
                stack.append((fn, prefix + name + '.'))
    # 'ez_setup' is always excluded in addition to the caller's patterns.
    for pat in list(exclude) + ['ez_setup']:
        out = [item for item in out if not fnmatchcase(item, pat)]
    return out
def ensure_downloaded(data_dir: PathLike, *datasets: str) -> None:
    """
    Downloads the specified datasets (all available datasets if none specified)
    into the data directory if they are not already present. This is useful in
    situations where this package is used in an environment without Internet
    access or for establishing local shared caches.

    Parameters
    ----------
    data_dir : path-like
        Directory to save the dataset files in
    *datasets : str
        Names of datasets to download (if empty, all datasets will be downloaded)
    """
    data_dir = Path(data_dir)
    # No explicit selection means "everything we know about".
    datasets = metadata.keys() if len(datasets) == 0 else set(datasets)
    succeeded = []
    for name, dataset_metadata in metadata.items():
        if name in datasets:
            dest_filename = data_dir / f'{name}.npz'
            try:
                _download_datafile(dataset_metadata['data_location'], dest_filename)
                succeeded.append(name)
            # BUG FIX: the original used `except Union[HTTPError, RuntimeError]`,
            # which is not a valid exception specification — when a download
            # actually failed it raised TypeError instead of being handled.
            except (HTTPError, RuntimeError):
                print(f'Unable to download the `{name}` dataset')
    succeeded_list = f' ({", ".join(succeeded)})' if len(succeeded) > 0 else ''
    print(f'Successfully found {len(succeeded)}/{len(datasets)} datasets{succeeded_list}.')
def _splitSerieIfRequired(serie, series):
    """ _splitSerieIfRequired(serie, series)
    Split the serie in multiple series if this is required.
    The choice is based on examing the image position relative to
    the previous image. If it differs too much, it is assumed
    that there is a new dataset. This can happen for example in
    unspitted gated CT data.

    Mutates *series* in place: the original serie is replaced by the
    split pieces at its former position. Does nothing when the datasets
    carry no ImagePositionPatient tag or no split point is found.
    """
    # Sort the original list and get local name
    serie._sort()
    L = serie._datasets
    # Init previous slice
    ds1 = L[0]
    # Check whether we can do this
    if "ImagePositionPatient" not in ds1:
        return
    # Initialize a list of new lists
    L2 = [[ds1]]
    # Init slice distance estimate
    distance = 0
    for index in range(1, len(L)):
        # Get current slice
        ds2 = L[index]
        # Get positions (z-coordinate of the slice)
        pos1 = float(ds1.ImagePositionPatient[2])
        pos2 = float(ds2.ImagePositionPatient[2])
        # Get distances
        newDist = abs(pos1 - pos2)
        # deltaDist = abs(firstPos-pos2)
        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset.
        if distance and newDist > 2.1 * distance:
            L2.append([])
            distance = 0
        else:
            # Test missing file
            if distance and newDist > 1.5 * distance:
                print('Warning: missing file after "%s"' % ds1.filename)
            distance = newDist
        # Add to last list
        L2[-1].append(ds2)
        # Store previous
        ds1 = ds2
    # Split if we should
    if len(L2) > 1:
        # At what position are we now?
        i = series.index(serie)
        # Create new series
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, serie._showProgress)
            newSerie._datasets = Sequence(L)
            series2insert.append(newSerie)
        # Insert series and remove self
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
def mask(node2sequence, edge2overlap, masking: str = "none"):
    """If any of the soft mask or hard mask are activated, mask.

    Computes, per node, how many bases to mask on each end from the edge
    overlaps, then applies hard or soft masking to the sequences.

    :param node2sequence: DataFrame with columns `name` and `sequence`.
    :param edge2overlap: DataFrame with columns `u`, `v` and `overlap`
        (overlap between the two nodes of each edge).
    :param str masking: Type of masking to apply. Options: hard, soft, none
    (Default value = "none").
    :return: DataFrame with columns `name` and `sequence` (masked), or the
        input unchanged when masking is "none".
    """
    logging.info('Masking sequences')
    if masking == 'none':
        return node2sequence
    # Compose a dataframe of name, sequence, bases to trim to the left
    # and bases to trim to the right
    logging.info('Computing bases to trim to the right and to the left')
    # Outer merges keep nodes that appear on only one (or neither) side of
    # an edge; missing overlaps become 0 via fillna.
    complete = node2sequence.merge(
        edge2overlap[['u', 'overlap']]\
        .rename(columns={'u': 'name', 'overlap': 'mask_right'}),
        on=['name'],
        how='outer'
    ).merge(
        edge2overlap[['v', 'overlap']]\
        .rename(columns={'v': 'name', 'overlap': 'mask_left'}),
        on=['name'],
        how='outer'
    )\
    .fillna(0)\
    .astype({'mask_right': np.int64, 'mask_left':np.int64})
    logging.info('Removing negative masking')
    # Negative overlaps (gaps) must not unmask; clamp to zero.
    complete['mask_right'] = complete.mask_right\
        .map(lambda x: x if x > 0 else 0)
    complete['mask_left'] = complete.mask_left\
        .map(lambda x: x if x > 0 else 0)
    if masking == "hard":
        logging.info("Hard masking sequences")
        complete['sequence'] = complete.apply(
            lambda x: hard_mask(x.sequence, x.mask_left, x.mask_right),
            axis=1
        )
    elif masking == "soft":
        logging.info("Soft masking sequences")
        complete['sequence'] = complete.apply(
            lambda x: soft_mask(x.sequence, x.mask_left, x.mask_right),
            axis=1
        )
    logging.info('Tidying up')
    node2sequence_masked = complete\
        [['name', 'sequence']]\
        .reset_index(drop=True)
    logging.info('Done')
    return node2sequence_masked
def load_patch_for_test_one_subj(file_path, sub_i, patch_shape, over_lap=10,
                                 modalities=('MR_DWI', 'MR_Flair', 'MR_T1', 'MR_T2'),
                                 mask_sym='MR_MASK',
                                 suffix='.nii.gz',
                                 use_norm=True):
    """
    For test: split the full image into overlapping patches, similar to
    load_patch_for_epoch (may merge to one function).

    :param file_path: root directory containing one sub-directory per subject
    :param sub_i: subject identifier (used as the sub-directory name)
    :param patch_shape: shape of the patches to extract
    :param over_lap: overlap between neighbouring patches
    :param modalities: image modality names to load as channels
    :param mask_sym: filename stem of the mask image
    :param suffix: image filename suffix
    :param use_norm: load the pre-normalized image when it exists
    :return: (patches restricted to the modality channels, chosen patch indices)
    """
    # First load the mask (if present) and all modality images.
    mask_name = os.path.join(file_path, str(sub_i), mask_sym + suffix)
    mask_exists = os.path.exists(mask_name)  # checked once instead of twice
    if mask_exists:
        mask_img = read_img(mask_name)
    channels_one_sub = []
    for modality in modalities:
        img_name = os.path.join(file_path, str(sub_i), modality + suffix)
        img_name_norm = os.path.join(file_path, str(sub_i), modality + '_norm' + suffix)
        if not os.path.exists(img_name):
            raise Exception('cannot find the path %s!' % img_name)
        if not os.path.exists(img_name_norm):
            # NOTE(review): mask_img is undefined here when the mask file is
            # missing (original behavior preserved: "my data has mask").
            write_norm_img(img_name, img_name_norm, mask_img)
        elif use_norm:  # if normalized data exists and is wanted, load it instead
            img_name = img_name_norm
        channels_one_sub.append(read_img(img_name))
    if not mask_exists:
        # Without a mask everything is considered foreground.
        # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
        mask_img = np.ones(shape=channels_one_sub[0].shape, dtype=float)
    channels_one_sub.append(mask_img)
    channels_one_sub = np.asarray(channels_one_sub)
    # Second, sample the patches over a regular overlapping grid.
    indices = compute_patch_indices(channels_one_sub[0].shape, patch_shape, over_lap)
    patches, chosen = get_patches_according_to_indices(channels_one_sub, patch_shape, np.transpose(indices), True, True)
    indices = indices[chosen]
    return np.asarray(patches[:, :len(modalities)]), np.asarray(indices)
def extract_el_from_group(group, el):
    """Extract an element group from a group.

    :param group: list
    :param el: element to be extracted
    :return: [group without the extracted element, [el]]
    """
    remaining = [item for item in group if item != el]
    return [remaining, [el]]
def scale_y(scale, axes="current", lines="all"):
    """
    Scale lines vertically by *scale*.

    You can specify a line index, such as lines=0 or lines=[1,2,4];
    lines="all" (the default) scales every line on the axes.

    BUG FIX: the `lines` argument used to be immediately overwritten by
    `axes.get_lines()`, so index selection silently never worked.
    """
    if axes == "current":
        axes = _pylab.gca()
    # get the lines from the plot, then honor the requested selection
    all_lines = axes.get_lines()
    if lines == "all":
        selected = all_lines
    else:
        indices = [lines] if isinstance(lines, int) else lines
        selected = [all_lines[i] for i in indices]
    # loop over the selected lines and scale the data
    for line in selected:
        if isinstance(line, _mpl.lines.Line2D):
            line.set_ydata(_pylab.array(line.get_ydata()) * scale)
    # update the title
    title = axes.title.get_text()
    if not title == "":
        title += ", y_scale=" + str(scale)
    axes.title.set_text(title)
    # zoom to surround the data properly
    auto_zoom()
def atualizar_pergunta_func(pergunta, titulo, pergunta_questao, resposta_1, resposta_2, resposta_3, resposta_4, resposta_certa, questao_dificuldade):
    """Update a question object with values submitted from a form.

    The correct answer and difficulty are always overwritten; every other
    field is only replaced when the form supplied a truthy value.

    @param pergunta: question object loaded from the DB
    @param titulo: title from the form
    @param pergunta_questao: question text from the form
    @param resposta_1: answer 1 from the form
    @param resposta_2: answer 2 from the form
    @param resposta_3: answer 3 from the form
    @param resposta_4: answer 4 from the form
    @param resposta_certa: the correct answer from the form
    @param questao_dificuldade: question difficulty from the form
    """
    pergunta.resposta_certa = resposta_certa
    pergunta.questao_dificuldade = questao_dificuldade
    # Map each optional form value onto the attribute it updates.
    optional_updates = (
        ('pergunta_titulo', titulo),
        ('pergunta', pergunta_questao),
        ('resp_1', resposta_1),
        ('resp_2', resposta_2),
        ('resp_3', resposta_3),
        ('resp_4', resposta_4),
    )
    for attr, value in optional_updates:
        if value:
            setattr(pergunta, attr, value)
def verify(pin, serial, udp):
    """Verify key is valid Nitrokey 'Start' or 'FIDO2' key.

    Makes a credential on the device, then matches the attestation
    certificate's SHA-256 fingerprint against known Nitrokey fingerprints.
    Exits the process (status 1) on recognizable PIN/command errors.
    """
    # Any longer and this needs to go in a submodule
    print("Please press the button on your Nitrokey key")
    try:
        cert = pynitrokey.client.find(serial, udp=udp).make_credential(pin=pin)
    except ValueError as e:
        # python-fido2 library pre-emptively returns `ValueError('PIN required!')`
        # instead of trying, and returning `CTAP error: 0x36 - PIN_REQUIRED`
        if "PIN required" in str(e):
            print("Your key has a PIN set. Please pass it using `--pin <your PIN>`")
            sys.exit(1)
        raise
    except Fido2ClientError as e:
        cause = str(e.cause)
        # error 0x31
        if "PIN_INVALID" in cause:
            print("Your key has a different PIN. Please try to remember it :)")
            sys.exit(1)
        # error 0x34 (power cycle helps)
        if "PIN_AUTH_BLOCKED" in cause:
            print(
                "Your key's PIN authentication is blocked due to too many incorrect attempts."
            )
            print("Please plug it out and in again, then again!")
            print(
                "Please be careful, after too many incorrect attempts, the key will fully block."
            )
            sys.exit(1)
        # error 0x32 (only reset helps)
        if "PIN_BLOCKED" in cause:
            print(
                "Your key's PIN is blocked. To use it again, you need to fully reset it."
            )
            print("You can do this using: `nitropy fido2 reset`")
            sys.exit(1)
        # error 0x01
        if "INVALID_COMMAND" in cause:
            print("Error getting credential, is your key in bootloader mode?")
            print("Try: `nitropy fido2 util program aux leave-bootloader`")
            sys.exit(1)
        raise
    # Known attestation-certificate fingerprints (hex-encoded SHA-256 digests).
    hashdb = {
        b'd7a23679007fe799aeda4388890f33334aba4097bb33fee609c8998a1ba91bd3': "Nitrokey FIDO2 1.x",
        b'6d586c0b00b94148df5b54f4a866acd93728d584c6f47c845ac8dade956b12cb': "Nitrokey FIDO2 2.x",
        b'e1f40563be291c30bc3cc381a7ef46b89ef972bdb048b716b0a888043cf9072a': "Nitrokey FIDO2 Dev 2.x ",
    }
    dev_fingerprint = cert.fingerprint(hashes.SHA256())
    a_hex = binascii.b2a_hex(dev_fingerprint)
    if a_hex in hashdb:
        print('Found device: {}'.format(hashdb[a_hex]))
    else:
        print("Unknown fingerprint! ", a_hex)
def example_function_with_shape(a, b):
    """Example function for unit checks: return the product of *a* and *b*."""
    return a * b
def editAddressFile(path, address, property=""):
    """
    Edit the JSON address file at *path*.

    The top-level key is "testnet" or "mainnet" depending on the active
    network; when *property* is given, the address is stored one level
    deeper under that key.
    """
    with open(path, "r") as file:
        data = json.load(file)
    testnets = [
        'arbitrum-rinkeby',
        'rinkeby'
    ]
    net = "testnet" if network.show_active() in testnets else "mainnet"
    if property:
        data[net][property] = address
    else:
        data[net] = address
    with open(path, "w") as file:
        json.dump(data, file)
def write_comfeat(feat, lbl, fname):
    """Write combined TCN features to a MATLAB .mat file.

    Args:
        feat: combined feature, ndarray of shape (N_SAMPLES, DIM)
        lbl: groundtruth label, ndarray of shape (N_SAMPLES, 1)
        fname: path to the output filename
    """
    # Variables are stored under the names 'A' (features) and 'Y' (labels).
    scipy.io.savemat(fname, {'A': feat, 'Y': lbl})
def p_statement_vardecl(p: yacc.YaccProduction):
    """STATEMENT : VARDECL SEMICOLON"""
    # Propagate the code generated for the VARDECL production.
    vardecl = p[1]
    p[0] = {'code': vardecl['code']}
def fields(
    builder: DataclassBuilder, *, required: bool = True, optional: bool = True
) -> "Mapping[str, Field[Any]]":
    """Get a dictionary of the given :class:`DataclassBuilder`'s fields.

    .. note::
        This is deliberately a free function rather than a method of
        :class:`DataclassBuilder`, so it cannot collide with field names.
        It relies on special private methods of :class:`DataclassBuilder`
        which are excepted from field assignment.

    :param builder:
        The dataclass builder to get the fields for.
    :param required:
        Set to False to not report required fields.
    :param optional:
        Set to False to not report optional fields.
    :return:
        A mapping from field names to actual :class:`dataclasses.Field`'s
        in the same order as the `builder`'s underlying
        :func:`dataclasses.dataclass`.
    """
    # pylint: disable=protected-access
    field_mapping = builder._fields(required=required, optional=optional)
    return field_mapping
def read_csv(
    filepath_or_buffer: Literal["/tmp/tmp0cdg5he1."],
    names: List[Literal["amount", "name"]],
    skiprows: int,
):
    """
    usage.dask: 1
    """
    # Auto-generated API-usage stub (signature recorded from observed calls);
    # intentionally has no body.
    ...
def main():
    """Combine N-Puzzle macros for different starting positions into a single file"""
    results_dir = 'results/macros/npuzzle/'
    macros = OrderedDict()
    for result_file in glob.glob(results_dir + 'macro-n15-*-results.pickle'):
        # Filenames encode the blank position: ...-r<row>-c<col>-results.pickle
        name_parts = result_file.split('/')[-1].split('-')
        row = int(name_parts[-3][1:])
        col = int(name_parts[-2][1:])
        with open(result_file, 'rb') as file:
            search_results = pickle.load(file)
        # Keep only the action names from the final (best) search generation.
        best_n = [(score, [a[0] for a in macro])
                  for score, macro in search_results[-1]]
        clean_macros = [macro for _, macro in best_n if macro != []]
        n_macros = 100
        macros[(row, col)] = clean_macros[-n_macros:]
    #%% Save the results
    os.makedirs('results/macros/npuzzle/', exist_ok=True)
    with open('results/macros/npuzzle/clean_macros.pickle', 'wb') as file:
        pickle.dump(macros, file)
def test_export_format(sample_data) -> None:
    """
    Test to ensure that the graph is being effectively exported as a dictionary
    which is valid JSON.

    :param sample_data: the sample data
    :return: None
    """
    # create an AuthorRank object
    ar_graph = ar.Graph()
    # fit to the data
    ar_graph.fit(
        documents=sample_data["documents"]
    )
    # export them
    export = ar_graph.as_json()
    # IMPROVED: isinstance is the idiomatic type check (type(...) == dict
    # would also reject dict subclasses for no reason).
    assert isinstance(export, dict)
def condon():
    """Testing whether or not the Condon approximation is appropriate.

    Builds linear regressions of intensity against frequency (and scatter
    plots of intensity against bond-length sum) for each QM/MM combination,
    writing the fit statistics to a CSV file and optionally saving plots.
    Relies on module-level data dicts (frequencies_CO2_d, intensities_CO2_d,
    geometries_d, ...) and the module-level `args`.
    """
    fig, ax = plt.subplots()
    frequencies_all = []
    intensities_all = []
    csvfile = open('condon_analysis_linear_regression.csv', 'w')
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow([
        '# QM',
        '# MM',
        '# points',
        'slope',
        'intercept',
        'rsq',
    ])
    list_l12 = []
    geometries = geometries_d[0][0]
    # Atom indices within each CO2 geometry.
    C, O1, O2 = 0, 1, 2
    for geometry in geometries:
        d_C_O1 = distance(geometry[C], geometry[O1])
        d_C_O2 = distance(geometry[C], geometry[O2])
        d_O1_O2 = distance(geometry[O1], geometry[O2])
        bond_sum = d_C_O1 + d_C_O2
        # bond_difference = abs(d_C_O1 - d_C_O2)
        list_l12.append(bond_sum)
    list_l12 = np.array(list_l12)
    for n_qm in sorted(frequencies_CO2_d):
        print("Forming Condon approximation plot for {}".format(labels[n_qm]))
        frequencies_single_qm_all_mm = []
        intensities_single_qm_all_mm = []
        # This is only necessary to get this mess to work, so the list
        # lengths are correct. The CO2 geometry will always be the
        # same.
        geometries_single_qm_all_mm = []
        for n_mm in possible_keys:
            f = frequencies_CO2_d[n_qm][n_mm]
            i = intensities_CO2_d[n_qm][n_mm]
            s = snapnums_frequencies_d[n_qm][n_mm]
            # filter the geometry results based on the current
            # snapshots
            indices = [(snapnum - 1) for snapnum in s]
            g = list_l12[indices]
            assert len(f) == len(i) == len(g)
            frequencies_single_qm_all_mm.extend(f)
            intensities_single_qm_all_mm.extend(i)
            geometries_single_qm_all_mm.extend(g)
            frequencies_all.extend(f)
            intensities_all.extend(i)
            print('{} QM/{} MM'.format(n_qm, n_mm))
            # NOTE(review): bare except silently skips failed regressions
            # (e.g. too few points) — consider catching the specific error.
            try:
                slope, intercept, rsq = pprint_linregress(f, i)
                csvwriter.writerow([n_qm, n_mm, len(f), slope, intercept, rsq])
            except:
                pass
        assert len(frequencies_single_qm_all_mm) == len(intensities_single_qm_all_mm)
        # ax.scatter(frequencies_single_qm_all_mm,
        #            intensities_single_qm_all_mm,
        #            marker=markers[n_qm],
        #            label=labels[n_qm],
        #            color=colors[n_qm])
        ax.scatter(geometries_single_qm_all_mm,
                   intensities_single_qm_all_mm,
                   marker=markers[n_qm],
                   label=labels[n_qm],
                   color=colors[n_qm])
        print('{} QM/all MM'.format(n_qm))
        slope, intercept, rsq = pprint_linregress(frequencies_single_qm_all_mm,
                                                  intensities_single_qm_all_mm)
        csvwriter.writerow([n_qm, 'all', len(frequencies_single_qm_all_mm), slope, intercept, rsq])
    assert len(frequencies_all) == len(intensities_all)
    print('all QM/all MM')
    slope, intercept, rsq = pprint_linregress(frequencies_all, intensities_all)
    csvwriter.writerow(['all', 'all', len(frequencies_all), slope, intercept, rsq])
    ax.set_ylim((0.0, 1000.0))
    y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
    ax.yaxis.set_major_formatter(y_formatter)
    ax.tick_params(direction='out')
    ax.set_xlabel(r"$\nu_{3}$ frequency (cm$^{-1}$)")
    ax.set_ylabel(r"$\nu_{3}$ intensity (km/mol)")
    ax.legend(loc='lower right',
              fancybox=True,
              framealpha=0.50,
              numpoints=1,
              scatterpoints=1)
    if args.do_condon_plots:
        fig.savefig('condon_approximation.pdf', bbox_inches='tight')
    # now add the no CT data
    if args.include_noCT:
        for n_qm in sorted(frequencies_noCT_CO2_d):
            frequencies_single_qm_all_mm = []
            intensities_single_qm_all_mm = []
            for n_mm in possible_keys:
                f = frequencies_noCT_CO2_d[n_qm][n_mm]
                i = intensities_noCT_CO2_d[n_qm][n_mm]
                assert len(f) == len(i)
                frequencies_single_qm_all_mm.extend(f)
                intensities_single_qm_all_mm.extend(i)
                frequencies_all.extend(f)
                intensities_all.extend(i)
                print('{} QM/{} MM'.format(n_qm, n_mm))
            assert len(frequencies_single_qm_all_mm) == len(intensities_single_qm_all_mm)
            ax.scatter(frequencies_single_qm_all_mm,
                       intensities_single_qm_all_mm,
                       marker=markers_noCT[n_qm],
                       label=labels_noCT[n_qm],
                       color=colors_noCT[n_qm])
            print('{} QM/all MM'.format(n_qm))
        ax.legend(loc='lower right',
                  fancybox=True,
                  framealpha=0.50,
                  numpoints=1,
                  scatterpoints=1)
        if args.do_condon_plots:
            fig.savefig('condon_approximation_noCT.pdf', bbox_inches='tight')
    csvfile.close()
    plt.close(fig)
    return
def gemm(node: NodeWrapper,
         params: Dict[str, np.ndarray],
         xmap: Dict[str, XLayer]) -> List[XLayer]:
    """
    ONNX Gemm to XLayer Dense (+ Scale) (+ BiasAdd) conversion function
    Compute Y = alpha * A' * B' + beta * C
    See https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm

    Only alpha == beta == 1.0 is supported. Transposes are inserted as
    needed so the Dense op receives A in NC layout and B in OI layout.
    Returns the list of created XLayers (transposes, Dense, optional BiasAdd).
    """
    logger.info("ONNX Gemm-> XLayer Dense (+ Scale) (+ BiasAdd)")
    assert len(node.get_outputs()) == 1
    assert len(node.get_inputs()) in [2, 3]
    name = node.get_outputs()[0]
    bottoms = node.get_inputs()
    node_attrs = node.get_attributes()
    iX = xmap[bottoms[0]]  # NC or CN
    _, in_c = iX.shapes
    W_name = bottoms[1]
    wX = xmap[W_name]
    assert len(wX.shapes) == 2
    # Optional third input C is the bias.
    B_name = bottoms[2] if len(bottoms) == 3 else None
    bX = xmap[B_name] if len(bottoms) == 3 else None
    alpha = node_attrs['alpha'] if 'alpha' in node_attrs else 1.0
    beta = node_attrs['beta'] if 'beta' in node_attrs else 1.0
    trans_A = node_attrs['transA'] > 0 if 'transA' in node_attrs else False
    trans_B = node_attrs['transB'] > 0 if 'transB' in node_attrs else False
    if alpha != 1.0:
        raise NotImplementedError("Alpha != 1.0 not supported in ONNX Gemm to"
                                  " XLayer Dense conversion")
    if beta != 1.0:
        raise NotImplementedError("Beta != 1.0 not supported in ONNX Gemm to"
                                  " XLayer Dense conversion")
    # Quant_info (optional)
    vai_quant_in = node_attrs['vai_quant_in'] \
        if 'vai_quant_in' in node_attrs else []
    vai_quant_out = node_attrs['vai_quant_out'] \
        if 'vai_quant_out' in node_attrs else []
    vai_quant_weights = node_attrs['vai_quant_weights'] \
        if 'vai_quant_weights' in node_attrs else []
    vai_quant_biases = node_attrs['vai_quant_biases'] \
        if 'vai_quant_biases' in node_attrs else []
    vai_quant = node_attrs['vai_quant'] \
        if 'vai_quant' in node_attrs else []
    # Bias quantization info belongs to the BiasAdd layer, the rest to Dense.
    vai_quant_dense = [a for a in vai_quant if str(a) != 'vai_quant_biases']
    vai_quant_bias_add = [a for a in vai_quant if str(a) == 'vai_quant_biases']
    Xs = []
    if trans_A:
        # iX is in CN -> Transform to NC
        iX = xlf.get_xop_factory_func('Transpose')(
            op_name=iX.name + '_transpose',
            axes=[1, 0],
            input_layer=iX,
            onnx_id=name
        )
        Xs.append(iX)
    if not trans_B:
        # iX is in IO -> Transform to OI
        wX = xlf.get_xop_factory_func('Transpose')(
            op_name=W_name + '_transpose',
            axes=[1, 0],
            input_layer=wX,
            onnx_id=name
        )
        Xs.append(wX)
    units = wX.shapes[0]
    # If a bias follows, the BiasAdd layer takes the ONNX output name.
    dense_name = name if B_name is None else name + '_Dense'
    X = xlf.get_xop_factory_func('Dense')(
        op_name=px.stringify(dense_name),
        units=units,
        input_layer=iX,
        weights_layer=wX,
        vai_quant=vai_quant_dense,
        vai_quant_in=vai_quant_in,
        vai_quant_out=vai_quant_out,
        vai_quant_weights=vai_quant_weights,
        onnx_id=name
    )
    Xs.append(X)
    if B_name is not None:
        bias_add_X = xlf.get_xop_factory_func('BiasAdd')(
            op_name=px.stringify(name),
            axis=1,
            input_layer=X,
            bias_layer=bX,
            vai_quant=vai_quant_bias_add,
            vai_quant_biases=vai_quant_biases,
            onnx_id=name
        )
        Xs.append(bias_add_X)
    return Xs
def check_response(game_id, response):
    """Check for correct response.

    Returns True when the response is usable. On an UltSwitchServerException,
    either switches the stored game host (new-host case) or marks the game as
    ended and raises GameDoesNotExistError.
    """
    if response["result"]["@c"] == "ultshared.rpc.UltSwitchServerException":
        # NOTE(review): .first() may return None if the game is unknown;
        # the attribute accesses below would then raise — confirm callers
        # guarantee the game exists.
        game = Game.query.filter(Game.game_id == game_id).first()
        if "newHostName" in response["result"]:
            print("new host: " + response["result"]["newHostName"])
            game.game_host = "http://" + response["result"]["newHostName"]
            db.session.commit()
        else:
            print("Game does not exist")
            game.end_of_game = True
            game.end_at = datetime.now()
            db.session.commit()
            # Cancel the scheduled polling job for this game, if any.
            job = scheduler.get_job(str(game.game_id))
            if job is not None:
                job.remove()
            raise GameDoesNotExistError("Game %s is not found" % game_id + \
                "on the Supremacy 1914 server")
        # NOTE(review): this is reached only on the new-host branch; after the
        # raise above, the following `return False` is unreachable.
        return False
    return True
def extract_to_dst(src, dst):
    """extract addon src zip file to destination.

    :param src: path to the addon zip archive
    :param dst: directory the addon folders are copied into
    :return: list of items copied into *dst*
    :raises Exception: when the archive's top level mixes files with folders
        (not a standard addon package)
    """
    copied_items = []
    zip_file = path.basename(src)
    zip_name, _ = path.splitext(zip_file)
    # Extraction happens in a cache folder named after the archive.
    cache_path = path.join(root_path, 'cache', zip_name)
    with zipfile.ZipFile(src, 'r') as z:
        # create folder and extract to cache
        mkdir(cache_path)
        z.extractall(cache_path)
    trim_os_hidden_files(cache_path)
    top_levels = [path.join(cache_path, c) for c in listdir(cache_path)]
    if len(top_levels) > 1:
        # zip's top-level has multiple files or folder
        # if it contains only folders, we should copy everything to dst
        # otherwize, this is not a standard addon package, we should raise an exception
        if not only_dirs_or_not(cache_path):
            remove_src(cache_path)
            raise Exception('addon-zip contents contain file, this is not a standard addon.')
        results = copy_contents(cache_path, dst)
        copied_items.extend(results)
    elif len(top_levels) == 1:
        if not only_dirs_or_not(top_levels[0]):
            # extracted-folder which contains files and folders.
            # it means that we only should copy this folder to dst
            result = copy_src_to_dst(top_levels[0], dst)
            copied_items.append(result)
        else:
            # extracted-folder which contains only folders.
            # it means that we should copy every sub-folders to dst
            results = copy_contents(top_levels[0], dst)
            copied_items.extend(results)
    # delete cache folder before return
    remove_src(cache_path)
    return copied_items
def get_signal_handler():
    """Get the singleton signal handler, constructing it on first use."""
    if not _signal_handler_:
        construct_signal_handler()
    return _signal_handler_[-1]
def post_rule(team_id):
    """Add a new rule.
    .. :quickref: POST; Add a new rule.
    **Example request**:
    .. sourcecode:: http
    POST /v1/teams/66859c4a-3e0a-4968-a5a4-4c3b8662acb7/rules HTTP/1.1
    Host: example.com
    Accept: application/json
    {
    "name": "Servers",
    "description": "Compute the QOS of our servers"
    }
    **Example response**:
    .. sourcecode:: http
    HTTP/1.1 201 CREATED
    {
    "checks": [],
    "createdAt": "2018-05-17T12:01:09Z",
    "description": "Compute the QOS of our servers",
    "id": "ff130e9b-d226-4465-9612-a93e12799091",
    "name": "Servers",
    "updatedAt": "2018-11-09T15:33:06Z"
    }
    :resheader Content-Type: application/json
    :status 201: the created rule
    """
    # Only managers/editors of the team may create rules.
    if not TeamPermission.is_manager_or_editor(team_id):
        abort(403)
    payload = get_payload()
    payload["team_id"] = team_id
    return jsonify(format_rule(RuleController.create(payload))), 201
def intents(interface):
    """
    Return an intent-only provider for *interface*.

    :param interface: The interface for which to create a provider.
    :returns: A class with method names equal to the method names of the
        interface. Each method on this class will generate an Intent for use
        with the Effect library.
    """
    provider = interface._ziffect_intents
    return provider
def _write_init_py(package_name: str) -> None:
    """
    Dynamically write the package __init__.py using the chosen package.

    :param package_name: mystery package name.
    :type package_name: str
    :rtype: None
    """
    package_name = _fix_package_name(package_name)
    mystery_dir = pathlib.Path('mystery')
    mystery_dir.mkdir(exist_ok=True)
    init_py = mystery_dir / '__init__.py'
    init_py.write_text(
        f'''
# Here we're trying to import the mystery package (it's "{package_name}" this time).
# If it exists, overwrite 'mystery' in 'sys.modules'. Else, print there was an error.
import sys
try:
import {package_name}
except ImportError as error:
print('Internal error:', error)
print("The mystery package wasn't playing nice. Sorry!")
print('Hint: you can always try to reinstall mystery and get a different package!')
sorry = 'try reinstalling mystery and get a different package!'
else:
sys.modules['mystery'] = {package_name}
sys.modules['mystery'].__mystery_init_py__ = __file__
sys.modules['mystery'].__mystery_package_name__ = '{package_name}'
del sys  # We care about this only when mystery fails (and even that's inconsequential).
'''
    )
def model(X, Y, learning_rate=0.3, num_iterations=30000, print_cost=True, is_plot=True, lambd=0, keep_prob=1):
    """
    Implement a three-layer neural network:
    LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.

    Arguments:
        X - input data of shape (2, number of examples)
        Y - labels (0 = blue / 1 = red) of shape (1, number of examples)
        learning_rate - learning rate for gradient descent
        num_iterations - number of gradient-descent iterations
        print_cost - print the cost every 10000 iterations (a cost value is
            recorded every 1000 iterations regardless)
        is_plot - plot the learning curve at the end
        lambd - L2-regularization hyperparameter (0 disables it)
        keep_prob - probability of keeping a unit active under dropout
            (1 disables dropout)

    Returns:
        parameters - parameters learned by the model
    """
    grads = {}
    costs = []
    layers_dims = [X.shape[0], 20, 3, 1]
    # Initialize parameters.
    parameters = reg_utils.initialize_parameters(layers_dims)
    for i in range(0, num_iterations):
        # Forward propagation, with or without dropout.
        if keep_prob == 1:
            a3, cache = reg_utils.forward_propagation(X, parameters)
        elif keep_prob < 1:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
        else:
            # keep_prob > 1 is invalid.  Bug fix: the original referenced
            # `exit` without calling it, so execution fell through to use an
            # undefined `a3`; actually terminate here.
            print("keep_prob参数错误!程序退出。")
            exit()
        # Cost, with or without the L2 penalty.
        if lambd == 0:
            cost = reg_utils.compute_cost(a3, Y)
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)
        # Backward propagation.  L2 regularization and dropout could be
        # combined in general, but this experiment uses at most one of them.
        assert (lambd == 0 or keep_prob == 1)
        if lambd == 0 and keep_prob == 1:
            # Neither L2 regularization nor dropout.
            grads = reg_utils.backward_propagation(X, Y, cache)
        elif lambd != 0:
            # L2 regularization, no dropout.
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            # Dropout, no L2 regularization.
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)
        # Update parameters.
        parameters = reg_utils.update_parameters(parameters, grads, learning_rate)
        # Record (every 1000) and optionally print (every 10000) the cost.
        if i % 1000 == 0:
            costs.append(cost)
            if print_cost and i % 10000 == 0:
                print("第" + str(i) + "次迭代,成本值为:" + str(cost))
    # Optionally plot the learning curve.
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (x1,000)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
    # Return the learned parameters.
    return parameters
def load_phoenix_stars(logg_list=PHOENIX_LOGG, teff_list=PHOENIX_TEFF, zmet_list=PHOENIX_ZMET, add_carbon_star=True, file='bt-settl_t400-7000_g4.5.fits'):
    """
    Load Phoenix stellar templates.

    Parameters
    ----------
    logg_list, teff_list, zmet_list : list
        Only templates whose log(g), Teff and metallicity appear in these
        lists are returned.
    add_carbon_star : bool or int
        Append the carbon-star template; values > 1 are used as a Gaussian
        smoothing width (pixels) applied to its flux.
    file : str
        FITS table with the template grid; fetched from the grizli-config
        repository when no local copy is found.

    Returns
    -------
    tstars : OrderedDict
        Mapping of template label -> `SpectrumTemplate`.
    """
    from collections import OrderedDict
    # Python 2/3 compatibility for urlretrieve.
    try:
        from urllib.request import urlretrieve
    except ImportError:
        from urllib import urlretrieve
    # file='bt-settl_t400-5000_g4.5.fits'
    # file='bt-settl_t400-3500_z0.0.fits'
    try:
        hdu = pyfits.open(os.path.join(GRIZLI_PATH, 'templates/stars/', file))
    except Exception:
        # No usable local copy: fetch from the grizli-config repository.
        #url = 'https://s3.amazonaws.com/grizli/CONF'
        #url = 'https://erda.ku.dk/vgrid/Gabriel%20Brammer/CONF'
        url = ('https://raw.githubusercontent.com/gbrammer/' +
               'grizli-config/master')
        print('Fetch {0}/{1}'.format(url, file))
        #os.system('wget -O /tmp/{1} {0}/{1}'.format(url, file))
        res = urlretrieve('{0}/{1}'.format(url, file),
                          filename=os.path.join('/tmp', file))
        hdu = pyfits.open(os.path.join('/tmp/', file))
    tab = GTable.gread(hdu[1])
    tstars = OrderedDict()
    N = tab['flux'].shape[1]
    for i in range(N):
        teff = tab.meta['TEFF{0:03d}'.format(i)]
        logg = tab.meta['LOGG{0:03d}'.format(i)]
        try:
            met = tab.meta['ZMET{0:03d}'.format(i)]
        except KeyError:
            # Older grids carry no metallicity metadata; assume solar.
            met = 0.
        # Skip templates outside the requested grid.
        if (logg not in logg_list) or (teff not in teff_list) or (met not in zmet_list):
            continue
        label = 'bt-settl_t{0:05.0f}_g{1:3.1f}_m{2:.1f}'.format(teff, logg, met)
        tstars[label] = SpectrumTemplate(wave=tab['wave'],
                                         flux=tab['flux'][:, i], name=label)
    if add_carbon_star:
        cfile = os.path.join(GRIZLI_PATH, 'templates/stars/carbon_star.txt')
        sp = read_catalog(cfile)
        if add_carbon_star > 1:
            # Interpret the value as a smoothing width in pixels.
            import scipy.ndimage as nd
            cflux = nd.gaussian_filter(sp['flux'], add_carbon_star)
        else:
            cflux = sp['flux']
        tstars['bt-settl_t05000_g0.0_m0.0'] = SpectrumTemplate(wave=sp['wave'], flux=cflux, name='carbon-lancon2002')
    return tstars
def write_ini(locStr_ini_file_path, locStr_ini):
    """
    .. _write_ini :
    Write the given string into the given INI file path.
    Parameters
    ----------
    locStr_ini_file_path : str
        The file full path of the INI file. If the extension ".ini" is not included,
        it would be added to the path.
    locStr_ini : str
        The string to be written into the INI file.
    Returns
    -------
    bool
        Returns True if deemed successful (no exception). Returns False if deemed
        unsuccessful (on exception).
    Examples
    --------
    >>> write_ini('C:\\Temp\\testini', '[User configurations]\\nsome string')
    2017-11-21, 16:24:40:INI file save start
    2017-11-21, 16:24:40:INI file save complete
    Out[51]: True
    Content of the INI file would be:
    | '[User configurations]
    | some string'
    """
    print(date_time_now() + 'INI file save start')
    try:
        # Append the '.ini' extension if missing (case-insensitive check).
        if not locStr_ini_file_path.lower().endswith('.ini'):
            locStr_ini_file_path = locStr_ini_file_path + '.ini'
        # 'with' guarantees the handle is closed even if write() raises,
        # which the original open/write/close sequence did not.
        with open(locStr_ini_file_path, 'w') as locIni_file:
            locIni_file.write(locStr_ini)
        print(date_time_now() + 'INI file save complete')
        return True
    except Exception:
        # Broad by contract (return False on any failure), but no longer a
        # bare except that would also swallow KeyboardInterrupt/SystemExit.
        print(date_time_now() + 'INI file save failed')
        return False
def save_model(model, out_dir):
    """
    Save all model information in multiple files.
    This is meant to be temporary until SBML output is functional.
    :param model: Model to save
    :type model: Model
    :param out_dir: Directory to store files
    :type out_dir: str
    """
    import datetime
    dt = datetime.datetime.now()
    prefix = model.name
    # Create the output directory if needed (replaces the errno dance).
    os.makedirs(out_dir, exist_ok=True)
    # Begin by storing basic info
    fname = prefix + ".info"
    with open(os.path.join(out_dir, fname), "w") as f:
        f.write("id\t" + model.id + "\n")
        f.write("name\t" + model.name + "\n")
        f.write("organism_type\t" + model.organism_type + "\n")
        f.write("created_on\t" + dt.isoformat())
    # Store roles -> reaction ID lists
    fname = prefix + ".roles"
    with open(os.path.join(out_dir, fname), "w") as f:
        for rl, rIDs in model.roles.items():
            f.write(rl + "\t" + ";".join(rIDs) + "\n")
    # Store reaction IDs
    fname = prefix + ".reactions"
    with open(os.path.join(out_dir, fname), "w") as f:
        f.write("\n".join(model.reactions.keys()) + "\n")
    # Store compound IDs
    fname = prefix + ".compounds"
    with open(os.path.join(out_dir, fname), "w") as f:
        f.write("\n".join(model.compounds.keys()) + "\n")
    # Store gap-filled media (file is created empty when there are none)
    fname = prefix + ".gfmedia"
    with open(os.path.join(out_dir, fname), "w") as f:
        if len(model.gapfilled_media) > 0:
            f.write("\n".join(model.gapfilled_media) + "\n")
    # Store gap-filled reaction IDs (file is created empty when there are none)
    fname = prefix + ".gfreactions"
    with open(os.path.join(out_dir, fname), "w") as f:
        if len(model.gf_reactions) > 0:
            f.write("\n".join(model.gf_reactions) + "\n")
def create_form(data, form_idx=0):
    """Build a structural form of a PDB record set.

    form_idx = 0 -> apo (no ligands); 1 -> holo1 (one ligand);
    2 -> holo2 (both ligands, i.e. the input unchanged).
    Note: Only works for homodimers.
    """
    # Work on a deep copy so the caller's BioPandas object is untouched.
    form = deepcopy(data)
    if form_idx == 0:
        # Apo: drop every HETATM record but keep the column layout.
        form.df['HETATM'] = pd.DataFrame(columns=form.df['HETATM'].columns)
    elif form_idx == 1:
        # Holo1: keep only the first half of the HETATM rows (one ligand).
        n_rows = form.df['HETATM'].shape[0]
        form.df['HETATM'] = form.df['HETATM'][:int(n_rows / 2)]
    # form_idx == 2 (holo2) needs no changes.
    return form
def get_verbosity(parsed_arguments: Dict) -> int:
    """
    Get the logging verbosity level implied by parsed arguments.

    Assumes the parameter was parsed with an ``action="count"`` argument
    (each repeat of the flag lowers the resulting level by 10).

    :param parsed_arguments: parsed arguments in dictionary form
    :return: the verbosity level implied
    :raises ValueError: if the logging level would drop below DEBUG (10)
    """
    param_name = verbosity_parser_configuration[VERBOSE_PARAMETER_KEY]
    flag_count = int(parsed_arguments.get(param_name))
    level = verbosity_parser_configuration[DEFAULT_LOG_VERBOSITY_KEY] - flag_count * 10
    if level < 10:
        raise ValueError("Cannot provide any further logging - reduce log verbosity")
    assert level <= logging.CRITICAL
    return level
def test_main_named_args():
    """
    Ensure that named arguments are passed on properly to the flash() function.
    """
    with mock.patch('uflash.flash') as flash_mock:
        uflash.main(argv=['-r', 'baz.hex'])
    expected = dict(path_to_python=None,
                    paths_to_microbits=[],
                    path_to_runtime='baz.hex',
                    minify=False,
                    keepname=False)
    flash_mock.assert_called_once_with(**expected)
def finalize(c, push=False, branch='master'):
    """
    Step 7: Finalize your changes. Merge them with the base branch and optionally push upstream.
    :param push: git push your changes (Default: False)
    :param branch: the base branch for your changes. (Default: master)
    """
    init(c)
    # Refuse to proceed with uncommitted work: the ticket number parsed from
    # the latest commit must match the one parsed from the branch name.
    commit_num, branch_num = _get_ticket_numbers(c)
    if commit_num != branch_num:
        print('[ERROR] Please commit your local changes before finalizing.')
        sys.exit(1)
    feature_branch = c.run('git rev-parse --abbrev-ref HEAD', hide=True).stdout
    _git_refresh(c, branch)
    res = c.run(f'git rebase {branch}', warn=True)
    if res.return_code != 0:
        print(f'[ERROR] Did not rebase cleanly onto {branch}, please manually run: git rebase {branch}')
        # Restore the user's working branch before bailing out.
        c.run(f'git checkout {feature_branch}')
        sys.exit(1)
    c.run(f'git checkout {branch}')
    c.run(f'git merge {feature_branch}')
    push_cmd = 'git push'
    if not push:
        # Dry-run push (-n) when the user did not ask to publish.
        push_cmd += ' -n'
    res = c.run(push_cmd, warn=True)
    if res.return_code != 0:
        print('[ERROR] git push failed!')
        c.run(f'git checkout {feature_branch}')
        sys.exit(1)
    cache = _load_cache(c)
    project = cache.get(branch_num, {}).get('project', 'server')
    if push:
        # Only clean up local state after a real (non-dry-run) push.
        if branch_num in cache:
            del cache[branch_num]
            _store_cache(c, cache)
        c.run(f'git branch -d {feature_branch}')
        # TODO: Update Jira and close CR.
        # jirac = get_jira()
        # if jirac:
        #     ticket = get_jira().issue(f'SERVER-{branch_num}')
        #
        #     # Transition Ticket
        #     if ticket.fields.status.id == '10018':  # '10018' = In Code Review.
        #         print_bold(f'Transitioning SERVER-{branch_num} in Jira to "Closed"')
        #         jirac.transition_issue(ticket, '981')  # '981' = Close Issue
        #     else:
        #         print_bold(
        #             f'SERVER-{branch_num} in Jira is not in "In Code Review" status, not updating Jira')
        # else:
        #     print_bold(f'Please manually add a link of your codereview to: '
        #                f'https://jira.mongodb.org/browse/SERVER-{commit_num}')
        print_bold(
            'Please remember to close this issue and add a comment of your patch build link '
            'if you haven\'t already. The comment should have "Developer" visibility')
        print_bold(f'https://jira.mongodb.com/browse/{project}-{branch_num}')
    self_update(c)
def update_versions_in_library_versions_kt(group_id, artifact_id, old_version):
    """Updates the versions in the LibraryVersions.kt file.

    This will take the old_version and increment it to find the appropriate
    new version.

    Args:
        group_id: group_id of the existing library
        artifact_id: artifact_id of the existing library
        old_version: old version of the existing library
    Returns:
        True if the version was updated, false otherwise.
    """
    group_id_variable_name = group_id.replace("androidx.", "").replace(".", "_").upper()
    artifact_id_variable_name = artifact_id.replace("androidx.", "").replace("-", "_").upper()
    new_version = increment_version(old_version)
    # Special case Compose because it uses the same version variable.
    if group_id_variable_name.startswith("COMPOSE"):
        group_id_variable_name = "COMPOSE"
    # Open file for reading and get all lines
    with open(LIBRARY_VERSIONS_FP, 'r') as f:
        library_versions_lines = f.readlines()
    # First check artifact-specific version variables, then group-wide ones.
    updated_version = _update_version_variable(
        library_versions_lines, artifact_id_variable_name, new_version)
    if not updated_version:
        updated_version = _update_version_variable(
            library_versions_lines, group_id_variable_name, new_version)
    # Open file for writing and update all lines
    with open(LIBRARY_VERSIONS_FP, 'w') as f:
        f.writelines(library_versions_lines)
    return updated_version


def _update_version_variable(library_versions_lines, variable_name, new_version):
    """Rewrite the `val <variable_name> = Version(...)` line in place.

    Args:
        library_versions_lines: mutable list of lines from LibraryVersions.kt
        variable_name: version variable to look for
        new_version: version string to write
    Returns:
        True if a line was updated, False if the variable was not found or
        should_update_version_in_library_versions_kt declined the update.
    """
    for i, cur_line in enumerate(library_versions_lines):
        # Skip any line that doesn't declare a version
        if 'Version(' not in cur_line:
            continue
        version_variable_name = cur_line.split('val ')[1].split(' =')[0]
        if version_variable_name != variable_name:
            continue
        if not should_update_version_in_library_versions_kt(cur_line, new_version):
            return False
        # Found the correct variable to modify
        if version_variable_name == "COMPOSE":
            # COMPOSE versions can be overridden via an environment variable.
            new_version_line = (" val COMPOSE = Version("
                                "System.getenv(\"COMPOSE_CUSTOM_VERSION\") "
                                "?: \"" + new_version + "\")\n")
        else:
            new_version_line = " val " + version_variable_name + \
                               " = Version(\"" + new_version + "\")\n"
        library_versions_lines[i] = new_version_line
        return True
    return False
def GetIdpCertificateAuthorityDataFlag():
    """Anthos auth token idp-certificate-authority-data flag, specifies the PEM-encoded certificate authority certificate for OIDC provider."""
    help_text = 'PEM-encoded certificate authority certificate for OIDC provider.'
    flag = base.Argument(
        '--idp-certificate-authority-data',
        required=False,
        help=help_text)
    return flag
def MapBasinKeysToJunctions(DataDirectory,FilenamePrefix):
    """
    Build a dict mapping basin keys to basin junctions.

    Args:
        DataDirectory (str): the data directory
        FilenamePrefix (str): the name of the DEM

    Returns:
        A dictionary with the basin key as the key and the junction as the value

    Author: FJC
    """
    # Channel data supplies the basin keys, hillslope data the junction IDs.
    channel_data = ReadChannelData(DataDirectory, FilenamePrefix)
    hillslope_data = ReadHillslopeData(DataDirectory, FilenamePrefix)
    keys = channel_data.basin_key.unique()
    junctions = hillslope_data.BasinID.unique()
    mapping = {}
    # Indexed access (not zip) so a length mismatch still raises loudly.
    for idx, basin_key in enumerate(keys):
        junction = junctions[idx]
        print(junction, basin_key)
        mapping[basin_key] = junction
    print(mapping)
    return mapping
def _copy_file(src, dest):
""" Copies a file from src path to dest path
src: source path as string
dest: destination path as string
Uses ditto to perform copy; will silently overwrite dest if it exists
Raises exception if copy fails or either path is None """
if src is None or dest is None:
raise ValueError("src and dest must not be None", src, dest)
if not os.path.isfile(src):
raise ValueError("src file does not appear to exist", src)
# if error on copy, subprocess will raise CalledProcessError
try:
subprocess.run(
["/usr/bin/ditto", src, dest], check=True, stderr=subprocess.PIPE
)
except subprocess.CalledProcessError as e:
logging.critical(
f"ditto returned error: {e.returncode} {e.stderr.decode(sys.getfilesystemencoding()).rstrip()}"
)
raise e | 35,545 |
def password_renew(_name: str, old_password: str, new_password: str):
    """Change the password of the user identified by *_name*."""
    old_dat = old_password
    new_dat = new_password
    new_hs = sha256(new_dat.encode()).hexdigest()  # sha256 digest of the new password
    old_hs = sha256(old_dat.encode()).hexdigest()  # sha256 digest of the old password
    # NOTE(review): in peewee, `User.select().where(...)` builds a query whose
    # truthiness reflects whether ANY row matches.  `User.name != _name`
    # matches every *other* user, so this branch fires whenever another user
    # exists -- presumably the intent was to test for this user's
    # (non-)existence.  Verify against the callers before relying on it.
    if User.select().where(User.name != _name):
        raise HTTPException(status_code=401, detail='すでにユーザーは存在していません')
    # NOTE(review): same concern -- `User.password != old_hs` matches any user
    # whose password differs, not specifically a mismatch for `_name`.
    elif User.select().where(User.password != old_hs):
        raise HTTPException(
            status_code=401, detail='パスワードが間違っていますもう一度確認してください')
    else:
        # Store the new password hash for the named user.
        User.update(password=new_hs).where(User.name == _name).execute()
        return {'message': '新しいパスワードになりました'}
def fetch_abs(compare_res_fn: Callable[[res_arg_dict], List[BadResult]], paper_id: str) -> Tuple[Dict, List[BadResult]]:
    """Fetch the NG and legacy abs pages for *paper_id* and compare them.

    Returns the comparison configuration together with the list of problems
    reported by *compare_res_fn*.
    """
    ng_url = ng_abs_base_url + paper_id
    legacy_url = legacy_abs_base_url + paper_id
    # Live responses are fetched NG-first, matching the original call order.
    responses = {'ng_res': requests.get(ng_url),
                 'legacy_res': requests.get(legacy_url)}
    res_dict: res_arg_dict = {'ng_url': ng_url,
                              'legacy_url': legacy_url,
                              **responses,
                              'paper_id': paper_id,
                              'id': paper_id}
    compare_config = {'ng_url': ng_url,
                      'legacy_url': legacy_url,
                      'paper_id': paper_id,
                      'id': paper_id}
    return compare_config, list(compare_res_fn(res_dict))
def is_happy(number: int) -> bool:
    """Return whether *number* is a happy number.

    Repeatedly applies ``thing`` (the digit-squares step) until a value
    repeats; the number is happy iff the cycle is entered at 1.

    :param number: the number to test
    :returns: True if the iteration reaches 1, False otherwise
    """
    # Track previously seen values in a set: membership is O(1), whereas the
    # original list.count() scan made the whole loop quadratic.
    seen = set()
    result = thing(number)
    while result not in seen:
        seen.add(result)
        result = thing(result)
    # The first repeated value decides happiness.
    return result == 1
def self_play(n_iterations=10, ben_steps=1000, training_steps=int(1e4),
              n_eval_episodes=100, **kwargs):
    """
    Train an agent by repeated self-play, from random to optimal play.

    Starts against a random opponent and, after every benchmark round,
    replaces the opponent with a copy of the freshly trained agent.
    Returns the trained agent.
    """
    learner = RLAgent(**kwargs)
    opponent = RandomAgent()
    for _ in range(n_iterations):
        benchmark(learner, opponent, ben_steps, training_steps, n_eval_episodes)
        # The trained agent becomes the new opponent to exploit.
        opponent = opposite_agent(learner)
        opponent.eps = learner.original_eps
    return learner
def auth(event, context):
    """
    Return the plain text session key used to encrypt the CAN Data File.

    event dictionary input elements:
      - serial_number: CAN Conditioner serial number
      - encrypted_session_key: base64 session key, AES-encrypted under the
        ECDH shared secret between the device and server key pairs

    Prerequisites:
    The CAN Conditioner must be provisioned with a securely stored key tied to the
    serial number.
    """
    # Determine the identity of the requester.
    requester_data = event["requestContext"]
    if requester_data["authorizer"]["claims"]["email_verified"]:
        identity_data = event["requestContext"]["identity"]
        ip_address = identity_data["sourceIp"]
        email = requester_data["authorizer"]["claims"]["email"].lower()
    else:
        return response(400, "Email not verified.")
    # Load the event body into a dictionary.
    body = json.loads(event['body'])
    # Test to be sure the necessary elements are present.
    try:
        assert 'serial_number' in body
        assert 'encrypted_session_key' in body
    except AssertionError:
        return response(400, "Missing required parameters.")
    # Lookup the data needed from the unique CAN Conditioner by serial number.
    # Bug fix: this lookup must happen BEFORE the permission check below,
    # which reads `item` (the original referenced it before assignment).
    dbClient = boto3.resource('dynamodb', region_name=region)
    table = dbClient.Table("CANConditioners")
    try:
        item = table.get_item(
            Key={'id': body['serial_number']}
        ).get('Item')
    except Exception:
        return response(400, "Unable to retrieve table item.")
    if item is None:
        # Serial number not provisioned.
        return response(400, "Unable to retrieve table item.")
    # Check if email is the uploader or has share access.
    if not email in item['uploader'] and not email in item['access_list']:
        return response(400, "You do not have permission to decrypt.")
    # The session key arrives base64 encoded (bug fix: the original never
    # assigned `session_key` before using it).
    session_key = base64.b64decode(body['encrypted_session_key'])
    # Load the device's public key, stored as a base64 encoded binary.
    # Bug fix: keep it as bytes -- prepending the b'\x04' uncompressed-point
    # tag to a decoded str raised a TypeError.
    device_public_key_bytes = base64.b64decode(item['device_public_key'])
    device_bytes = b'\x04' + device_public_key_bytes
    device_public_key = ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(), device_bytes)
    # Decrypt the data key before using it.
    cipher_key = base64.b64decode(item['encrypted_data_key'])
    data_key_plaintext = decrypt_data_key(cipher_key)
    if data_key_plaintext is None:
        return response(400, "Data Key is Not Available")
    # Decrypt the private key for the device.
    f = Fernet(data_key_plaintext)
    decrypted_pem = f.decrypt(base64.b64decode(item['encrypted_server_pem_key']))
    # Load the serialized key into an object.
    server_key = serialization.load_pem_private_key(decrypted_pem,
                                                    password=None,
                                                    backend=default_backend())
    # Derive shared secret.
    shared_secret = server_key.exchange(ec.ECDH(), device_public_key)
    # Use the first 16 bytes (128 bits) of the shared secret to decrypt the
    # session key.
    cipher = Cipher(algorithms.AES(shared_secret[:16]),
                    modes.ECB(),
                    backend=default_backend())
    decryptor = cipher.decryptor()
    clear_key = decryptor.update(session_key) + decryptor.finalize()
    # Set attribution data.
    timestamp = get_timestamp(time.time())
    access_tuple = str((timestamp, email, ip_address))
    print("Access Tuple: {}".format(access_tuple))
    download_list = item["download_log"]
    download_list.append(access_tuple)
    # Update the download log with the user details; keep the last 100 access
    # tuples.  Bug fix: key by 'id' to match the get_item above (the original
    # keyed on an unvalidated 'digest' field from another handler).
    table.update_item(
        Key={'id': body['serial_number']},
        UpdateExpression='SET download_log= :var',
        ExpressionAttributeValues={':var': download_list[-100:]},
    )
    # Return the string base64 encoded AES key for that session.
    return response(200, base64.b64encode(clear_key).decode('ascii'))
def xpro_aws_settings(aws_settings):
    """Default xPRO test settings"""
    # Deliberately impossible bucket name so tests never touch a real bucket.
    bucket_name = "test-xpro-bucket"
    aws_settings.XPRO_LEARNING_COURSE_BUCKET_NAME = bucket_name
    return aws_settings
def frames2files(arFrames, sTargetDir):
    """
    Write an array of frames to numbered jpg files.

    Keyword arguments:
    arFrames -- np.array of shape: (number of frames, height, width, depth)
    sTargetDir -- directory to hold the jpg files (created if missing)

    returns None
    """
    os.makedirs(sTargetDir, exist_ok=True)
    # enumerate() yields each frame view, equivalent to arFrames[n, :, :, :].
    for nFrame, arFrame in enumerate(arFrames):
        cv2.imwrite(sTargetDir + "/frame%04d.jpg" % nFrame, arFrame)
    return
def function_check(arg, result):
    """arg ↝ result : return

    Decide the sort of a function type from the sorts of its argument and
    result, raising TypeError for combinations outside the rules.
    """
    if result == TypeBuiltin():
        return TypeBuiltin()
    elif arg == KindBuiltin() and result == KindBuiltin():
        return KindBuiltin()
    elif arg == SortBuiltin() and result in (KindBuiltin(), SortBuiltin()):
        return SortBuiltin()
    message = 'Function check failed for `{} ↝ {}`'.format(
        arg.to_dhall(), result.to_dhall())
    raise TypeError(message)
def test_parameter_1_1():
    """
    Feature: Check the names of parameters and the names of inputs of construct.
    Description: If the name of the input of construct is same as the parameters, add suffix to the name of the input.
    Expectation: No exception.
    """
    class ParamNet(Cell):
        def __init__(self):
            super(ParamNet, self).__init__()
            # Parameter names intentionally resemble the construct input name
            # below ("name_a"); the framework must keep them distinct.
            self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
            self.param_b = Parameter(Tensor([2], ms.float32), name="name_b")

        def construct(self, name_a):
            # The input `name_a` must not clash with Parameter "name_a".
            return self.param_a + self.param_b - name_a

    net = ParamNet()
    res = net(Tensor([3], ms.float32))
    # 1 + 2 - 3 == 0
    assert res == 0
def while_D():
    """ *'s printed in the Shape of Capital D """
    # 9 rows x 6 columns; a cell is a star on the left edge (col 0), along
    # the top/bottom rows (row % 8 == 0, except the last column), or down
    # the right edge (col 5) on the middle rows.
    for row in range(9):
        for col in range(6):
            if col == 0 or row % 8 == 0 and col != 5 or col == 5 and row % 8 != 0:
                print('*', end=' ')
            else:
                print(' ', end=' ')
        print()
def frule_edit(request, frule_id):
    """FM module: edit an application-package download rule (Python 2 / Django view)."""
    try:
        frule = FRule.objects.filter(id=frule_id).first()
        if not frule:
            response = '<script>alert("Rule id not exist!");'
            response += 'location.href=document.referrer;</script>'
            return HttpResponse(response)
        # Pull the edited fields from the POST body.
        name = request.POST['name'].strip()
        desc = request.POST['description'].strip()
        source_url = request.POST['source_url'].strip()
        regex = request.POST['regex'].strip()
        regex_content = request.POST['regex_content'].strip()
        vendor_id = request.POST['vendor_id']
        if name != frule.name:
            # Renames need extra validation and a rename of any dependent
            # resources before the rule row itself is updated.
            if check_filter(name):
                response = '<script>alert("New rule name contain filter chars!");'
                response += 'location.href=document.referrer;</script>'
                return HttpResponse(response)
            try:
                process_rule_rename(frule.id, name)
            except Exception, ex:
                response = '<script>alert("Cant rename rule!");alert("%s");' % str(ex)
                response += 'location.href=document.referrer;</script>'
                return HttpResponse(response)
        # Persist all edited fields.
        frule.name = name
        frule.desc = desc
        frule.source_url = source_url
        frule.regex = regex
        frule.regex_content = regex_content
        frule.vendor_id = vendor_id
        frule.save()
        response = '<script>alert("Success!");location.href=document.referrer;</script>'
        return HttpResponse(response)
    except Exception, ex:
        response = '<script>alert("Error!");alert("%s");' % str(ex)
        response += 'location.href=document.referrer;</script>'
        return HttpResponse(response)
def zip(args):
    """Combine 2 files with interleaved pages.

    NOTE(review): this function shadows the builtin ``zip``; it appears to be
    dispatched by command name, so renaming it would change the CLI surface —
    flagged only.
    """
    # args = input file/range specs followed by the output filename.
    filesandranges = iohelper.parse_ranges(args[:-1])
    outputfilename = args[-1]
    verbose = staplelib.OPTIONS.verbose
    if not filesandranges or not outputfilename:
        raise CommandError('Both input and output filenames are required.')
    # Make [[file1_p1, file1_p2], [file2_p1, file2_p2], ...].
    filestozip = []
    for input in filesandranges:
        pdf = input['pdf']
        if verbose:
            print input['name']
        # Empty range means "include all pages".
        pagerange = input['pages'] or [
            (p, iohelper.ROTATION_NONE) for p in
            range(1, pdf.getNumPages() + 1)]
        pagestozip = []
        for pageno, rotate in pagerange:
            # Page numbers are 1-based in the spec, 0-based in pyPdf.
            if 1 <= pageno <= pdf.getNumPages():
                if verbose:
                    print "Using page: {} (rotation: {} deg.)".format(
                        pageno, rotate)
                pagestozip.append(
                    pdf.getPage(pageno - 1).rotateClockwise(rotate))
            else:
                raise CommandError("Page {} not found in {}.".format(
                    pageno, input['name']))
        filestozip.append(pagestozip)
    # Interweave pages.
    output = PdfFileWriter()
    for page in more_itertools.roundrobin(*filestozip):
        output.addPage(page)
    # Relative output paths are resolved against the configured destdir.
    if os.path.isabs(outputfilename):
        iohelper.write_pdf(output, outputfilename)
    else:
        iohelper.write_pdf(output, staplelib.OPTIONS.destdir +
                           os.sep + outputfilename)
def register_help(module: str, topic: str, contents: Union[str, callback_type]) -> None:
    """
    Register a help article under *module* with the given *topic* and *contents*.

    If *contents* is a callback, it will be called and awaited to obtain the
    help string.
    """
    HelpHandler(topic, module, contents).register(master)
def load_preprocess_data(days_for_validation: int,
                         lag_variables: list,
                         random_validation: bool = False,
                         seed: int = None,
                         lag: int = 8,
                         reload: bool = True,
                         save_csv: bool = True) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame,
                                                         pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Loading and data preprocessing for the Stream water temperature case study
    Parameters
    ----------
    days_for_validation : int
        Number of days used for validation
    lag_variables : list[str]
        List with variable names that should be lagged
    random_validation : bool
        Draw the validation samples at random (shuffled) instead of taking a
        contiguous block from the start of the series
    seed : int
        Random seed. Only relevant if random_validation=True
    lag : int
        number of lagged time steps that are computed for all lag_variables.
    reload : bool
        Should a previously computed processed data set be loaded? True/False
    save_csv : bool
        Should the preprocessed data be saved as a csv? Necessary if reload=True will be used.
    Returns
    -------
    Tuple of pd.DataFrames:
        data : Full preprocessed data set
        x_train : Training features
        y_train : Training labels
        x_test : Test features
        y_test : Test labels
        x : All features
        y : All labels
    """
    if isfile('data/processed/data.csv') and reload:
        # NOTE(review): the message says "data/preprocessed" but the files
        # actually live under "data/processed".
        print('Load previously computed data set from "data/preprocessed/data.csv"')
        data = pd.read_csv('data/processed/data.csv')
        x_train = pd.read_csv("data/processed/x_train.csv")
        y_train = pd.read_csv("data/processed/y_train.csv")
        x_test = pd.read_csv("data/processed/x_test.csv")
        y_test = pd.read_csv("data/processed/y_test.csv")
        x = pd.read_csv("data/processed/x.csv")
        y = pd.read_csv("data/processed/y.csv")
    else:
        append_data = []
        # One Excel workbook (plus an output CSV) per measurement campaign.
        for index in ['C', 'V', 'V3']:
            # Meteorological Data
            met_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="met_data")
            precip = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="precip")
            dis_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="dis_data", skiprows=1, header=None)
            discharge_805 = pd.DataFrame({'Discharge (m3/s)': dis_data.iloc[4, 1:].transpose()})
            # observed wt
            wt_observed = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp", header=None).transpose()
            measurement_points = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp_t0_data")
            wt_observed.columns = ["wt_observed_point_" + str(i) for i in measurement_points["Distance (m)"]]
            # observed wt at boundary
            x0_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp_x0_data")
            x0_data = x0_data.drop(labels='Time (min)', axis=1)
            x0_data.columns = ['x0 Temperature (deg C)']
            # predicted wt
            wt_predicted = pd.read_csv('data/raw/Output' + index + '.csv',
                                       header=None)  # rows: m of stream, columns: timesteps in min
            # get only relevant points and every 15th time steps
            wt_predicted = wt_predicted.iloc[measurement_points["Distance (m)"]]
            wt_predicted = wt_predicted.iloc[:, ::15].transpose()
            wt_predicted.columns = ["wt_predicted_point_" + str(i) for i in measurement_points["Distance (m)"]]
            # get shading predictions
            measurement_points = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp_t0_data")
            # fix index columns
            x0_data.index = wt_observed.index
            wt_predicted.index = wt_observed.index
            discharge_805.index = wt_observed.index
            # concat data
            data_sub = pd.concat([met_data, precip.iloc[:, 1], discharge_805,
                                  wt_observed, wt_predicted, x0_data], axis=1)
            append_data.append(data_sub)
        # Concatenate full data set
        data = pd.concat(append_data)
        data_time_index = pd.DataFrame({'year': data.Year.tolist(),
                                        'month': data.Month.tolist(),
                                        'hour': data.Hour.tolist(),
                                        'minute': data.Minute.tolist(),
                                        'day': data.Day.tolist()})
        data.index = pd.to_datetime(data_time_index)
        data = data.sort_index()
        # Define training/validation column (15-minute data: 4 * 24 per day)
        validation_timesteps = 4 * 24 * days_for_validation
        cal_ts = len(data.index) - validation_timesteps
        if random_validation:
            cal_val = ["calibration" for i in range(cal_ts)] + ["validation" for i in range(validation_timesteps)]
            shuffled_index = np.random.RandomState(seed=seed).permutation(len(cal_val)).tolist()
            cal_val = [cal_val[i] for i in shuffled_index]
        else:
            # cal_val = ["calibration" for x in range(cal_ts)] + ["validation" for x in range(validation_timesteps)]
            cal_val = ["validation" for x in range(validation_timesteps)] + ["calibration" for x in range(cal_ts)]
        data['calibration_validation'] = pd.Series(cal_val, index=data.index)
        # Compute residual columns (model prediction minus observation)
        for point in measurement_points["Distance (m)"]:
            data['residuals_point_' + str(point)] = data['wt_predicted_point_' + str(point)] - \
                                                    data['wt_observed_point_' + str(point)]
        # Cyclic encoding of the hour of day
        data['sin_hour'] = np.sin(2 * np.pi * data.Hour / 24)
        data['cos_hour'] = np.cos(2 * np.pi * data.Hour / 24)
        # remove duplicated rows if any exist
        data = data[~data.index.duplicated(keep='first')]
        # create lagged features
        data = create_lags(data, lag_variables, lag)
        # Data for ML models
        lagged_variable_names = [[x + "_lag" + str(y + 1) for y in range(lag)] for x in lag_variables]
        model_variables = ['sin_hour', 'cos_hour'] + lag_variables + sum(lagged_variable_names, [])
        # training data
        training_data = data[data["calibration_validation"] != "validation"]
        x_train = training_data[model_variables]
        y_train = training_data['residuals_point_640']
        # Validation data
        validation_data = data[data["calibration_validation"] == "validation"]
        x_test = validation_data[model_variables]
        y_test = validation_data['residuals_point_640']
        # full dataset x, y
        x = data[model_variables]
        y = data['residuals_point_640']
        # Save as csv
        if save_csv:
            data.to_csv("data/processed/data.csv", index_label=False)
            x_train.to_csv("data/processed/x_train.csv", index_label=False)
            y_train.to_csv("data/processed/y_train.csv", index_label=False)
            x_test.to_csv("data/processed/x_test.csv", index_label=False)
            y_test.to_csv("data/processed/y_test.csv", index_label=False)
            x.to_csv("data/processed/x.csv", index_label=False)
            y.to_csv("data/processed/y.csv", index_label=False)
        print('Finished preprocessing. Final data sets are stored in "data/preprocessed/"')
    if not random_validation:
        # For block-wise splits, report the covered time ranges.
        print("Time periods")
        training_data = data[data["calibration_validation"] != "validation"]
        validation_data = data[data["calibration_validation"] == "validation"]
        print(f"Training: {training_data.index[0]} - {training_data.index[-1]}")
        print(f"Validation: {validation_data.index[0]} - {validation_data.index[-1]}")
    return data, x_train, y_train, x_test, y_test, x, y
def extract_sha256_hash(hash):
    """Return the hex digest from a ``sha256:<digest>`` string, or None.

    :param hash: digest string such as ``"sha256:abc123"``; may be None/empty.
    :returns: the digest with the ``sha256:`` prefix removed, or None when
        *hash* is falsy or does not start with the prefix.
    """
    prefix = 'sha256:'
    if hash and hash.startswith(prefix):
        # Slice instead of str.replace(): replace() would also rewrite any
        # later occurrence of the prefix inside the digest text.
        return hash[len(prefix):]
    return None
def logout():
    """User logout: reset the module-level bandwidth/QoS caches and
    redirect the user back to the login page."""
    global bandwidth_object, qos_object
    # Drop cached state so the next login starts from a clean slate.
    bandwidth_object = {}
    qos_object = {}
    # NOTE(review): this rebinds a *local* name only and has no effect;
    # presumably a global or session variable was intended — TODO confirm.
    success_login_form = None
    return redirect(url_for('base_blueprint.login'))
def draft_intro():
    """
    Controller for presenting draft versions of document introductions.

    Returns a dict for the view with:
        doc_exists (bool): whether a draft row exists for the requested file.
        editing_permission (bool): whether the current user may edit it.
        plus title/body/editor metadata when the user has permission.
    """
    # Editor assets needed by the draft-editing view.
    response.files.append(URL('static/js/codemirror/lib', 'codemirror.js'))
    response.files.append(URL('static/js/codemirror/lib', 'codemirror.css'))
    response.files.append(URL('static/js/codemirror/theme', 'solarized.css'))
    response.files.append(URL('static/js/codemirror/mode/xml', 'xml.js'))
    response.files.append(URL('static/js/summernote', 'summernote.min.js'))
    response.files.append(URL('static/js/summernote', 'summernote.css'))
    # First URL arg is the requested document name; normalized via get_truename.
    session.filename = get_truename(request.args[0])
    filename = session.filename
    docrow = db(db.draftdocs.filename == filename).select().first()
    if not docrow:
        # draft document does not exist in the database, so can't be edited
        return {'doc_exists': False,
                'editing_permission': False,
                'filename': filename}
    else:
        # draft document does exist in database and can be edited
        # Users in any of the editorial roles for this document may edit it.
        editor_ids = [docrow['editor'], docrow['editor2'], docrow['editor3'],
                      docrow['editor4'], docrow['assistant_editor'],
                      docrow['assistant_editor2'], docrow['assistant_editor3'],
                      docrow['proofreader'], docrow['proofreader2'],
                      docrow['proofreader3']
                      ]
        if auth.has_membership('administrators') \
                or (auth.has_membership('editors') and auth.user_id in editor_ids):
            # current user has permission to edit this page
            # NOTE(review): .iteritems() is Python 2 only — this module
            # appears to target Python 2 (web2py); confirm before porting.
            body_fields = OrderedDict([(v, docrow[k]) for k, v in DISPLAY_FIELDS.iteritems()
                                       if docrow[k]])
            # Build "id -> display name" maps for each editorial role group.
            editor_names = OrderedDict([])
            for ed in ['editor', 'editor2', 'editor3', 'editor4']:
                if docrow[ed]:
                    editor_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
                                                                    docrow[ed]['last_name'])
            asst_editor_names = OrderedDict([])
            for ed in ['assistant_editor', 'assistant_editor2', 'assistant_editor3']:
                if docrow[ed]:
                    asst_editor_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
                                                                         docrow[ed]['last_name'])
            proofreader_names = OrderedDict([])
            for ed in ['proofreader', 'proofreader2', 'proofreader3']:
                if docrow[ed]:
                    proofreader_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
                                                                         docrow[ed]['last_name'])
            return {'doc_exists': True,
                    'editing_permission': True,
                    'title': docrow['name'],
                    'body_fields': body_fields,
                    'citation_format': docrow['citation_format'],
                    'editors': editor_names,
                    'assistant_editors': asst_editor_names,
                    'proofreaders': proofreader_names,
                    'filename': filename,
                    'version': docrow['version']}
        else:
            # current user does not have permission
            return {'doc_exists': True,
                    'editing_permission': False,
                    'filename': filename,
                    'title': docrow['name']}
def call_repeatedly(loop, interval, function, *args, **kwargs):
    """
    Invoke *function* immediately and schedule it to run again on *loop*
    every *interval* seconds.

    Args:
        loop: an event loop exposing ``call_later``.
        interval: seconds between invocations.
        function: the callable to run.
        *args, **kwargs: forwarded to *function* on every invocation.
    """
    # Re-arm the timer *before* calling, so the repeating schedule is
    # already queued even if function() raises.
    reschedule = (interval, call_repeatedly, loop, interval, function)
    loop.call_later(*reschedule, *args, **kwargs)
    function(*args, **kwargs)
def collect_shape_data(gtfs_dir):
    """Calculate the number of times a shape (line on a map) is travelled.

    Appends some additional information about the route that the shape
    belongs to.

    Args:
        gtfs_dir: the directory where the GTFS file is extracted

    Returns:
        pandas.DataFrame: one row per shape_id with times_taken, route_id,
        route_short_name, route_type and route_color columns.

    Raises:
        ValueError: if any shape_id maps to more than one route_id.
    """
    gtfs_dir = pathlib.Path(gtfs_dir)
    # Per-service-id active-day counts, computed by a sibling helper.
    service_days = calculate_service_days(gtfs_dir)
    trips = pd.read_csv(gtfs_dir / 'trips.txt', index_col=2)
    routes = pd.read_csv(gtfs_dir / 'routes.txt', index_col=0)
    # Sanity check: every shape must belong to exactly one route
    # (min == max of route_id within each shape group).
    route_id_diffs = trips \
        .groupby('shape_id') \
        .aggregate({'route_id': [min, max]})
    if any(route_id_diffs[('route_id', 'min')] != route_id_diffs[('route_id', 'max')]):
        raise ValueError("Shape ids must uniquely identify route_ids")
    # Sum service days per shape ("times_taken") and attach route metadata.
    route_info = trips \
        .join(service_days, on="service_id", how="left") \
        .groupby(["shape_id"]) \
        .aggregate({'days': sum, 'route_id': 'first'}) \
        .rename(columns={'days': 'times_taken'}) \
        .join(
            routes[['route_short_name', 'route_type', 'route_color']],
            on="route_id", how="left"
        ) \
        .reset_index()
    return route_info
def lick():
    """
    Joke command: reply with a playful action string when a user says
    'lick'.

    :return: the canned response string
    """
    reply = "*licks ice cream cone*"
    return reply
async def on_webhook_shutdown(dp):
    """Callback run when the webhook-driven bot shuts down.

    Deletes the Telegram webhook, closes the database connection and logs
    the shutdown.

    Args:
        dp: the dispatcher instance (unused here, but required by the
            shutdown-callback signature).
    """
    # Imported lazily to avoid circular imports at module load time.
    from . import db
    from .bot import bot
    from .logger import logger
    await bot.delete_webhook()
    db.close_connection()
    logger.info("Работа бота завершена")
def from_dict(obj, node_name='root'):
    """Converts a simple dictionary into an XML document.

    Example:
    .. code-block:: python
        data = {
            'test': {
                'nodes': {
                    'node': [
                        'Testing',
                        'Another node'
                    ]
                },
            }
        }
        xml = from_dict(data)  # <test><nodes><node>Testing</node><node>Another node</node></nodes></test>

    Args:
        obj (dict): the dictionary to convert.
        node_name (string): the initial node name in case there are multiple
            top level elements.
    """
    # Delegates to the module-private converter.
    return __dict_to_xml(obj, node_name)
def scores_plot(values, start=0, ncomps=3,
                classes=None, class_name=None,
                dist_kws=None, scatter_kws=None):
    """Pairwise scores plot with distributions on the diagonal and scatter
    plots on the upper triangle (lower triangle is hidden).

    Args:
        values: 2-D array-like or DataFrame of scores (samples x components).
        start: index of the first component column to plot.
        ncomps: number of consecutive components to plot (capped at the
            number of available columns).
        classes: optional per-sample class labels used to colour points.
        class_name: legend title for *classes*; taken from ``classes.name``
            when available.
        dist_kws: extra keyword args forwarded to the diagonal plots.
        scatter_kws: extra keyword args forwarded to the scatter plots.
    """
    # Avoid the mutable-default-argument pitfall: fresh dicts per call.
    dist_kws = {} if dist_kws is None else dist_kws
    scatter_kws = {} if scatter_kws is None else scatter_kws
    ncomps = min(ncomps, values.shape[1])
    # isinstance (not type(...) ==) so DataFrame subclasses are handled too.
    if isinstance(values, pd.DataFrame):
        values_ = values.iloc[:, start:(start + ncomps)].copy()
    else:
        values_ = pd.DataFrame(np.array(values)[:, start:(start + ncomps)])
    if classes is not None:
        assert len(classes) == values_.shape[0]
        if hasattr(classes, 'name'):
            class_name = classes.name
        elif class_name is None:
            class_name = 'classes'
        values_[class_name] = np.array(classes)
        values_[class_name] = values_[class_name].astype(str)
    else:
        class_name = None
    g = sns.PairGrid(values_, hue=class_name,
                     vars=list(values_.columns.difference([class_name])))  # Hack
    g = g.map_upper(plt.scatter, **scatter_kws)
    g = g.map_diag(sns.distplot, rug=True, **dist_kws)
    # kill lower diagonal plots
    for i, j in zip(*np.triu_indices_from(g.axes, 1)):
        g.axes[j, i].set_visible(False)
    if classes is not None:
        g.add_legend()
    # label axes properly: copy the bottom-row x labels and first-column
    # y labels onto the (visible) diagonal axes.
    xlabels, ylabels = [], []
    for ax in g.axes[-1, :]:
        xlabel = ax.xaxis.get_label_text()
        xlabels.append(xlabel)
    for ax in g.axes[:, 0]:
        ylabel = ax.yaxis.get_label_text()
        ylabels.append(ylabel)
    for i in range(len(xlabels)):
        for j in range(len(ylabels)):
            if i == j:
                g.axes[j, i].xaxis.set_label_text(xlabels[i])
                g.axes[j, i].yaxis.set_label_text(ylabels[j])
def parse_ascii(state: str, size: int) -> str:
    """
    Args:
        state: an ascii picture of a cube
        size: the size of the cube

    Returns:
        a string of the cube state in ULFRBD order
    """
    # Normalize the picture: strip whitespace and drop empty lines.
    rows = [cleaned
            for cleaned in (line.strip().replace(" ", "")
                            for line in state.splitlines())
            if cleaned]
    # Top band is U, bottom band is D; the middle band holds L|F|R|B
    # side by side, one `size`-wide slice each.
    up = "".join(rows[:size])
    middle = rows[size:size * 2]
    left = "".join(row[:size] for row in middle)
    front = "".join(row[size:size * 2] for row in middle)
    right = "".join(row[size * 2:size * 3] for row in middle)
    back = "".join(row[size * 3:size * 4] for row in middle)
    down = "".join(rows[size * 2:size * 4])
    return up + left + front + right + back + down
def _project_observation_params(access_token: str, project_id: int, observation_id: int):
    """Build request parameters for adding an observation to a project.

    NOTE(review): stub — the function currently has no body and implicitly
    returns None; confirm whether an implementation is still pending.

    Args:
        access_token: An access token required for user authentication, as returned by :py:func:`.get_access_token()`
        project_id: ID of project to add onto
        observation_id: ID of observation to add
    """
def _get_build_failure_reasons(build):
    # type: (Build) -> List[str]
    """Return the names of all the FailureReasons associated with a build.

    Args:
        build (Build): The build to return reasons for.
    Returns:
        list: A sorted list of the distinct FailureReason.reason values associated with
        the build.
    """
    # Only steps with no replacement_id are considered — presumably this
    # excludes superseded (retried/replaced) job steps; confirm against the
    # JobStep model.
    failure_reasons = [r for r, in db.session.query(
        distinct(FailureReason.reason)
    ).join(
        JobStep, JobStep.id == FailureReason.step_id,
    ).filter(
        FailureReason.build_id == build.id,
        JobStep.replacement_id.is_(None),
    ).all()]
    # The order isn't particularly meaningful; the sorting is primarily
    # to make the same set of reasons reliably result in the same JSON.
    return sorted(failure_reasons)
def parse_line(line: str):
    """
    Parse a single record from a log.

    If the trailing request-time field is missing or empty, the line is
    considered broken and the function returns None. If the URL cannot be
    parsed while request_time is present, the URL is recorded as
    'bad_request' to allow further statistical checking.

    :param line: UTF-8 encoded string of a log record.
    :return: dict with 'request_time' and 'request' keys, or None.
    """
    request_time_pat = r' \d*[.]?\d*$'
    request_pat = r'"(GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH)\s(?P<url>.+?)\sHTTP/.+"\s'
    time_match = re.search(request_time_pat, line)
    if time_match is None:
        # No trailing request-time field at all: broken record.
        # (Indexing a failed search would raise TypeError instead of
        # returning None as documented.)
        return None
    log_contents = {'request_time': time_match[0].strip()}
    request = re.findall(request_pat, line)
    log_contents['request'] = request[0][1] if request else 'bad_request'
    # A pattern match that captured only whitespace also counts as broken.
    return log_contents if log_contents['request_time'] else None
def for_all_arglocs(*args):
    """
    for_all_arglocs(vv, vloc, size, off=0) -> int
    Compress larger argloc types and initiate the aloc visitor.

    Thin pass-through to the native ``_ida_typeinf`` binding; all argument
    validation and semantics live on the C++ side.

    @param vv (C++: aloc_visitor_t &)
    @param vloc (C++: argloc_t &)
    @param size (C++: int)
    @param off (C++: int)
    """
    return _ida_typeinf.for_all_arglocs(*args)
def set_settings_module():
    """
    Point DJANGO_SETTINGS_MODULE at a settings module for this project.

    Prefers the local settings file when it exists on disk, otherwise
    falls back to the stock "estudio.settings" module. A value already
    present in the environment is left untouched (setdefault).
    """
    local_settings = Path(f"estudio/{LOCAL_SETTINGS}")
    if local_settings.exists():
        module = f"estudio.{local_settings.stem}"
    else:
        module = "estudio.settings"
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", module)
def fetch_project_check_perm(id, user, perm):
    """Fetches a project by id and check the permission.

    Fetches a project by id and check whether the user has certain permission.

    Args:
        id:
            The id of the project.
        user:
            A User instance.
        perm:
            Permission to check. Example: "nlpviewer_backend.read_project"

    Returns:
        The Project instance; raises Http404 when the id does not exist,
        and check_perm_project handles the forbidden case.
    """
    project = get_object_or_404(Project, pk=id)
    check_perm_project(project, user, perm)
    return project
def toRoman(n):
    """Convert an integer in the range 1..4999 to a Roman numeral string.

    Raises OutOfRangeError for values outside 1..4999 and NotIntegerError
    for non-integral values.
    """
    if not 0 < n < 5000:
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:
        raise NotIntegerError("decimals can not be converted")
    pieces = []
    remaining = n
    # Greedily consume the largest numerals first (map is ordered).
    for numeral, integer in romanNumeralMap:
        while remaining >= integer:
            pieces.append(numeral)
            remaining -= integer
    return str("".join(pieces))
def imsave(addr,im):
    """
    Save the image array *im* to the file path *addr*.

    Thin wrapper around matplotlib.pyplot.imsave.

    :param addr: destination file path string.
    :param im: image array to write.
    :return: the result of ``plt.imsave`` (None).
    """
    # Imported lazily so matplotlib is only required when saving.
    import matplotlib.pyplot as plt
    return plt.imsave(addr,im)
def optimizer(args: 'Namespace'):
    """
    Start an optimization from a YAML file

    :param args: arguments coming from the CLI.
    """
    # Imported lazily so that importing this CLI module does not pull in
    # the optimizer machinery unless the command is actually run.
    from jina.optimizers import run_optimizer_cli
    run_optimizer_cli(args)
def test_imdb_test():
    """
    Feature: Test IMDB Dataset.
    Description: read data from test file.
    Expectation: the data is processed successfully.
    """
    logger.info("Test Case test")
    # define parameters
    repeat_count = 1
    usage = "test"
    # apply dataset operations
    # shuffle=False so rows arrive in a deterministic order we can assert on.
    data1 = ds.IMDBDataset(DATA_DIR, usage=usage, shuffle=False)
    data1 = data1.repeat(repeat_count)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 4
    # Expected row order: positive samples first, then negative (label 1 / 0).
    content = ["test_pos_0.txt", "test_pos_1.txt", "test_neg_0.txt", "test_neg_1.txt"]
    label = [1, 1, 0, 0]
    num_iter = 0
    for index, item in enumerate(data1.create_dict_iterator(num_epochs=1, output_numpy=True)):
        # each data is a dictionary
        # in this example, each dictionary has keys "text" and "label"
        strs = item["text"].item().decode("utf8")
        logger.info("text is {}".format(strs))
        logger.info("label is {}".format(item["label"]))
        assert strs == content[index]
        assert label[index] == int(item["label"])
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    assert num_iter == 4
def simulate_one(ticket: Ticket, strategy: Strategy, trials: int) -> float:
    """Estimate the expected outcome of *strategy* applied to *ticket*.

    Runs *trials* independent simulations of ``playone`` across a process
    pool and returns the mean result.

    :param ticket: the ticket to simulate.
    :param strategy: the strategy to evaluate.
    :param trials: number of Monte-Carlo repetitions (must be > 0).
    :return: the average result over all trials.
    """
    workers = multiprocessing.cpu_count()
    things = [(strategy, ticket) for _ in range(trials)]
    # chunksize must be at least 1: plain int(trials / workers) is 0 when
    # trials < workers, which Pool.map does not handle correctly.
    chunksize = max(1, len(things) // workers)
    with multiprocessing.Pool(processes=workers) as pool:
        results = pool.map(playone, things, chunksize)
    return sum(results) / trials
def copy_attr(f1, f2):
    """Copies the special packaging file attributes from f1 to f2.

    Only tags whose names start with ``PACKAGING_`` are copied, and only
    when *f2* does not already expose an attribute of that name.
    """
    if not f1._tags:
        return
    for tag in f1._tags:
        if tag.startswith('PACKAGING_') and not hasattr(f2, tag):
            f2.Tag(tag, f1.GetTag(tag))
def download_prostate():
    """Download prostate dataset.

    Returns:
        The result of the sibling helper ``_download_and_read`` for
        'prostate.img' — presumably the loaded image data; confirm there.
    """
    return _download_and_read('prostate.img')
def create_json_instance(name, agts, vars, doms, cons, fileout=''):
    """Serialize a constraint network into the JSON instance format.

    It assumes constraint tables are complete.

    Args:
        name: instance name (currently unused in the output).
        agts: mapping of agent id (str) -> agent data.
        vars: mapping of variable id (str) -> {'dom': domain id, 'agt': agent id}.
        doms: mapping of domain id -> list of domain values.
        cons: mapping of constraint id (str) -> {'scope': [variable ids],
            'values': [{'cost': ...}, ...]}.
        fileout: when non-empty, write the JSON to this path; otherwise
            print it to stdout.
    """
    jvars = {}
    for vid in vars:
        v = vars[vid]
        jvars['v' + vid] = {
            'value': None,
            'domain': doms[v['dom']],
            'agent': 'a' + str(v['agt']),
            'type': 1,
            'id': int(vid),
            'cons': []
        }
    jagts = {}
    for aid in agts:
        jagts['a' + aid] = {
            'vars': ['v' + vid for vid in vars if vars[vid]['agt'] == aid],
            # Bug fix: the id belongs on the per-agent record; previously it
            # was written to the top-level dict and overwritten every loop.
            'id': int(aid),
        }
    jcons = {}
    for cid in cons:
        c = cons[cid]
        jcons['c' + cid] = {
            'scope': ['v' + vid for vid in c['scope']],
            'vals': [x['cost'] for x in c['values']]
        }
        # Back-link each constraint onto the variables in its scope.
        for vid in c['scope']:
            jvars['v' + str(vid)]['cons'].append('c' + cid)
    instance = {'variables': jvars, 'agents': jagts, 'constraints': jcons}
    if fileout:
        with open(fileout, 'w') as outfile:
            json.dump(instance, outfile, indent=2)
    else:
        print(json.dumps(instance, indent=2))
def decBIPKey(encrypted_privK, passphrase, currency):
    """
    Decrypt an encrypted Private key
    Show the corresponding public address

    :param encrypted_privK: BIP38-encrypted private key string.
    :param passphrase: passphrase used for decryption.
    :param currency: currency name looked up in currencies.json.
    :return: (WIF private key, public address) on success, or
        (False, False) when the decrypted key fails the address-hash check.
    """
    #using the currencies.json file, get the currency data
    with open('currencies.json', 'r') as dataFile:
        currencies = json.load(dataFile)
    # NOTE(review): if *currency* is not present, `cur` is silently left as
    # the last entry — confirm whether a missing currency should error.
    for cur in currencies:
        if cur['currency'] == currency:
            break
    #randomly choose a prefix if multiples exist
    prefixes = cur['prefix'].split('|')
    prefix = prefixes[random.randint(0, (len(prefixes)-1))]
    #decrypt the BIP key
    PrivK, Addresshash = bip38.decrypt(str(encrypted_privK), str(passphrase), 8)
    #calculate the address from the key
    PrivK = enc.decode(PrivK, 256)
    publicAddress = address.publicKey2Address(address.privateKey2PublicKey(PrivK), int(cur['version']), prefix, int(cur['length']))
    #check our generated address against the address hash from BIP
    # Double-SHA256 checksum comparison, per the BIP38 verification scheme.
    if hashlib.sha256(hashlib.sha256(publicAddress).digest()).digest()[0:4] != Addresshash:
        return False, False
    else:
        return address.privateKey2Wif(PrivK, cur['version'], prefix, cur['length']), publicAddress
def any_download(url: str, **kwargs):
    """
    Download a single html url.

    Dispatches to a site-specific module when one matches the url,
    otherwise the universal downloader module handles it.
    """
    # NOTE(review): **kwargs is accepted but never forwarded to the
    # downloader — confirm whether that is intended.
    m, url = url_to_module(url)
    m.prefer_download(url)
def plot_data(coordinate, box=[], plt_inst=None, **kwargs):
    """
    Plot the coordinate with the "std box" around the curve

    Args:
        coordinate (float[]): 2-D array of (x, y) points to plot
        box (float[]): 2-D array of (lower, upper) band values; only drawn
            when its length exactly matches ``coordinate``
        plt_inst (pyplot): pyplot instance (defaults to matplotlib.pyplot)
        **kwargs: forwarded to ``plot``

    Returns:
        (plt_inst)
    """
    if plt_inst is None:
        plt_inst = plt
    # Shade the band first (lower zorder) so the curve draws on top of it.
    if len(box) == len(coordinate):
        plt_inst.fill_between(np.arange(len(box)), box[:, 0:1].squeeze(), box[:, 1:].squeeze(), zorder=1, alpha=0.2)
    plt_inst.plot(coordinate[:, 0:1].squeeze(), coordinate[:, 1:].squeeze(), **kwargs)
    return plt_inst
def test_parameter_wrapping_parameters():
    """Tests that ensure wrapping works with other parameters"""
    var = 5
    var = Parameter(var)
    # Wrapping something that is already a Parameter should yield a Parameter.
    res = Parameter._wrap(var)
    assert isinstance(res, Parameter)
    # NOTE(review): comparing res.x against the Parameter `var` relies on
    # Parameter equality semantics — confirm against the implementation.
    assert res.x == var
def read_config_file(config_file):
    """Read a YAML or JSON config file and return its parsed contents.

    :param config_file: path to a ``.yml``/``.yaml`` or ``.json`` file.
    :return: the parsed configuration (typically a dict).

    Exits the process with status 1 when the file is missing, unreadable,
    or has an unsupported extension.
    """
    if not os.path.isfile(config_file):
        print("Cannot find the file", config_file)
        exit(1)
    extension = os.path.splitext(config_file)[1]
    try:
        with open(config_file) as data_file:
            if extension in ['.yml', '.yaml']:
                config_json = yaml.safe_load(data_file.read())
            elif extension in ['.json']:
                # Bug fix: the json module has no safe_load(); use loads().
                config_json = json.loads(data_file.read())
            else:
                # Previously an unknown extension crashed with NameError on
                # the unbound config_json; fail explicitly instead.
                print("Unsupported config file extension", extension)
                exit(1)
    except IOError:
        print("Unable to read the file", config_file)
        exit(1)
    return config_json
def param_curve(t, R, r, d):
    """Return the (x, y, z) coordinates of a 3-D hypotrochoid at parameter t.

    R is the fixed circle's radius, r the rolling circle's radius and d
    the distance of the tracing point from the rolling circle's centre.
    """
    rolling = R - r
    phase = rolling / r * t
    x = rolling * cos(t) + d * cos(phase)
    y = rolling * sin(t) - d * sin(phase)
    z = 3 * sin(t)
    return x, y, z
def archive_link(link: Link, overwrite: bool=False, methods: Optional[Iterable[str]]=None, out_dir: Optional[Path]=None) -> Link:
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp

    Args:
        link: the Link to archive.
        overwrite: when True, re-run archive methods that already produced output.
        methods: optional whitelist of archive-method names to run.
        out_dir: target directory; defaults to the link's own directory.

    Returns:
        The (possibly updated) Link, with per-method history appended.
    """
    # TODO: Remove when the input is changed to be a snapshot. Suboptimal approach.
    from core.models import Snapshot, ArchiveResult
    try:
        snapshot = Snapshot.objects.get(url=link.url)  # TODO: This will be unnecessary once everything is a snapshot
    except Snapshot.DoesNotExist:
        snapshot = write_link_to_sql_index(link)
    ARCHIVE_METHODS = get_default_archive_methods()
    # Restrict to the requested subset of methods, if any.
    if methods:
        ARCHIVE_METHODS = [
            method for method in ARCHIVE_METHODS
            if method[0] in methods
        ]
    out_dir = out_dir or Path(link.link_dir)
    try:
        is_new = not Path(out_dir).exists()
        if is_new:
            os.makedirs(out_dir)
        link = load_link_details(link, out_dir=out_dir)
        write_link_details(link, out_dir=out_dir, skip_sql_index=False)
        log_link_archiving_started(link, out_dir, is_new)
        link = link.overwrite(updated=datetime.now(timezone.utc))
        stats = {'skipped': 0, 'succeeded': 0, 'failed': 0}
        start_ts = datetime.now(timezone.utc)
        for method_name, should_run, method_function in ARCHIVE_METHODS:
            try:
                if method_name not in link.history:
                    link.history[method_name] = []
                if should_run(link, out_dir, overwrite):
                    log_archive_method_started(method_name)
                    result = method_function(link=link, out_dir=out_dir)
                    link.history[method_name].append(result)
                    stats[result.status] += 1
                    log_archive_method_finished(result)
                    write_search_index(link=link, texts=result.index_texts)
                    ArchiveResult.objects.create(snapshot=snapshot, extractor=method_name, cmd=result.cmd, cmd_version=result.cmd_version,
                                                 output=result.output, pwd=result.pwd, start_ts=result.start_ts, end_ts=result.end_ts, status=result.status)
                    # bump the updated time on the main Snapshot here, this is critical
                    # to be able to cache summaries of the ArchiveResults for a given
                    # snapshot without having to load all the results from the DB each time.
                    # (we use {Snapshot.id}-{Snapshot.updated} as the cache key and assume
                    # ArchiveResults are unchanged as long as the updated timestamp is unchanged)
                    snapshot.save()
                else:
                    # print('{black}    X {}{reset}'.format(method_name, **ANSI))
                    stats['skipped'] += 1
            except Exception as e:
                raise Exception('Exception in archive_methods.save_{}(Link(url={}))'.format(
                    method_name,
                    link.url,
                )) from e
        # print('    ', stats)
        # Adopt the newest extracted title when it is at least as long as the
        # current one (longer titles are assumed to be more complete).
        try:
            latest_title = link.history['title'][-1].output.strip()
            if latest_title and len(latest_title) >= len(link.title or ''):
                link = link.overwrite(title=latest_title)
        except Exception:
            pass
        write_link_details(link, out_dir=out_dir, skip_sql_index=False)
        log_link_archiving_finished(link, link.link_dir, is_new, stats, start_ts)
    except KeyboardInterrupt:
        # Best-effort: persist what we have before propagating the interrupt.
        try:
            write_link_details(link, out_dir=link.link_dir)
        except:
            pass
        raise
    except Exception as err:
        print('    ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))
        raise
    return link
def get_tests_dir(append_path=None):
    """
    Args:
        append_path: optional path to append to the tests dir path

    Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere.
        Optionally `append_path` is joined after the `tests` dir the former is provided.
    """
    # Resolve the directory of whichever file *called* us — frame [1] on
    # the stack — not the directory of this helper's own module.
    caller_file = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller_file))
    if not append_path:
        return tests_dir
    return os.path.join(tests_dir, append_path)
def city():
    """Print a welcome message for the hard-coded city ("Aurangabad") and
    delegate further details to ``maharashtra_info.mh``.

    Returns:
        None
    """
    # Note: this local deliberately shadows the function name.
    city = "Aurangabad"
    print("Welcome to the {0} city.".format(city))
    maharashtra_info.mh(city)
def say_my_name(first_name, last_name=""):
    """
    Print ``My name is <first name> <last name>``.

    Args:
        first_name (string): the first name
        last_name (string): the last name (defaults to empty)

    Raises:
        TypeError: if either argument is not a string.
    """
    # isinstance is the idiomatic type check (also accepts str subclasses).
    if not isinstance(first_name, str):
        raise TypeError("first_name must be a string")
    if not isinstance(last_name, str):
        raise TypeError("last_name must be a string")
    print("My name is {} {}".format(first_name, last_name))
def monthly_sales_splash() -> None:
    """Display monthly sales splash, then show the available menu options."""
    print('Monthly Sales Program\n')
    # display_options() is defined elsewhere in this module.
    display_options()
def create_collection(metadata_url: str = METADATA_URL,
                      thumbnail_url: str = THUMBNAIL_URL) -> pystac.Collection:
    """Create a STAC Collection using AAFC Land Use metadata

    Args:
        metadata_url (str, optional): Metadata json provided by AAFC
        thumbnail_url (str, optional): URL of the collection thumbnail image

    Returns:
        pystac.Collection: pystac collection object
    """
    metadata = get_metadata(metadata_url)
    provider = Provider(
        name=metadata.provider,
        roles=[
            ProviderRole.HOST,
            ProviderRole.LICENSOR,
            ProviderRole.PROCESSOR,
            ProviderRole.PRODUCER,
        ],
        url=PROVIDER_URL,
    )
    # Spatial/temporal extent comes straight from the parsed metadata.
    extent = pystac.Extent(
        pystac.SpatialExtent([metadata.bbox_polygon]),
        pystac.TemporalExtent(
            [[metadata.datetime_start, metadata.datetime_end]]),
    )
    collection = pystac.Collection(
        id=LANDUSE_ID,
        title=metadata.title,
        description=metadata.description,
        providers=[provider],
        license=metadata.license_id,
        extent=extent,
        catalog_type=pystac.CatalogType.RELATIVE_PUBLISHED,
        keywords=KEYWORDS,
    )
    collection.add_link(
        Link(rel="license",
             target=metadata.license_url,
             title=metadata.license_title))
    # Add the metadata url and thumbnail url as assets
    collection.add_asset(
        "metadata",
        pystac.Asset(
            href=metadata_url,
            media_type=pystac.MediaType.JSON,
            roles=["metadata"],
            title="AAFC Land Use collection metadata",
        ),
    )
    collection.add_asset(
        "thumbnail",
        pystac.Asset(
            href=thumbnail_url,
            media_type=pystac.MediaType.PNG,
            roles=["thumbnail"],
            title="AAFC Land Use collection thumbnail",
        ),
    )
    # Summaries for the label extension: raster classification labels.
    collection_label = LabelExtension.summaries(collection,
                                                add_if_missing=True)
    collection_label.label_type = [LabelType.RASTER]
    collection_label.label_tasks = [LabelTask.CLASSIFICATION]
    collection_label.label_properties = None
    collection_label.label_classes = [
        # TODO: The STAC Label extension JSON Schema is incorrect.
        # https://github.com/stac-extensions/label/pull/8
        # https://github.com/stac-utils/pystac/issues/611
        # When it is fixed, this should be None, not the empty string.
        LabelClasses.create(list(CLASSIFICATION_VALUES.values()), "")
    ]
    collection_proj = ProjectionExtension.summaries(collection,
                                                    add_if_missing=True)
    collection_proj.epsg = [metadata.epsg]
    # Declare the per-item asset layout so item creation stays consistent.
    collection_item_asset = ItemAssetsExtension.ext(collection,
                                                    add_if_missing=True)
    collection_item_asset.item_assets = {
        "metadata":
        AssetDefinition(
            dict(
                type=pystac.MediaType.JSON,
                roles=["metadata"],
                title="AAFC Land Use metadata",
            )),
        "landuse":
        AssetDefinition({
            "type":
            pystac.MediaType.COG,
            "roles": [
                "data",
                "labels",
                "labels-raster",
            ],
            "title":
            "AAFC Land Use COG",
            "raster:bands": [
                RasterBand.create(
                    nodata=0,
                    sampling=Sampling.AREA,
                    data_type=DataType.UINT8,
                    spatial_resolution=30,
                ).to_dict()
            ],
            "file:values": [{
                "values": [value],
                "summary": summary
            } for value, summary in CLASSIFICATION_VALUES.items()],
            "label:type":
            collection_label.label_type[0],
            "label:tasks":
            collection_label.label_tasks,
            "label:properties":
            None,
            "label:classes": [collection_label.label_classes[0].to_dict()],
            "proj:epsg":
            metadata.epsg,
        }),
    }
    return collection
def close_socket_cleanly(sock: socket.socket) -> None:
    """
    Ensures that the connection to a client is closed cleanly without errors and with no data loss.
    Use this instead of the .close() method.
    """
    # The approach is based on this blog post:
    # https://blog.netherlabs.nl/articles/2009/01/18/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable
    def graceful_shutdown() -> None:
        # Signal EOF to the peer, then give it up to 10s to close its end.
        sock.shutdown(socket.SHUT_WR)
        sock.settimeout(10)
        sock.recv(1)  # Wait for client to close the connection
    try:
        graceful_shutdown()
    except OSError:
        # Possible causes:
        #  - Client kept its connection open for more than 10sec.
        #  - Client was already disconnected.
        #  - Probably something else too...
        pass
    sock.close()
def deploy(account, timeout=TIMEOUT_DEPLOY):
    """
    deploys contract, waits for receipt, returns address

    Args:
        account: dict with 'private_key' and a 'nonce' object exposing
            ``increment()``.
        timeout: seconds to wait for the transaction receipt.

    NOTE(review): despite the summary above, on success the function saves
    the address and returns None; on failure it calls exit() — confirm the
    intended contract.
    """
    before = time.time()
    _, abi, contract_bin = load_contract(file_abi=FILE_CONTRACT_ABI, file_bin=FILE_CONTRACT_BIN)
    storage_contract = w3.eth.contract(abi=abi, bytecode=contract_bin)
    contract_tx = storage_contract.constructor().buildTransaction({
        'gas': GAS_DEPLOY,
        'gasPrice': GAS_PRICE,
        'nonce': account["nonce"].increment(),
        'chainId': CHAIN_ID
    })
    signed = w3.eth.account.signTransaction(contract_tx, account["private_key"])
    tx_hash = w3.toHex(w3.eth.sendRawTransaction(signed.rawTransaction))
    print("tx_hash = ", tx_hash,
          "--> waiting for receipt (timeout=%d) ..." % timeout)
    sys.stdout.flush()
    # Wait for the transaction to be mined, and get the transaction receipt
    receipt = w3.eth.waitForTransactionReceipt(tx_hash, timeout=timeout)
    print("Receipt arrived. Took %.1f seconds." % (time.time()-before))
    # Receipt status 1 means the transaction succeeded on-chain.
    if receipt.status == 1:
        line = "Deployed. Gas Used: {gasUsed}. Contract Address: {contractAddress}"
        print(line.format(**receipt))
        save_address(receipt.contractAddress)
        return
    else:
        line = "Deployed failed. Receipt Status: {status}"
        print(line.format(**receipt))
        exit()
def test_root_route(client):
    """Test root route."""
    # `client` is the test-client fixture; GET / should greet the world.
    response = client.get('/')
    assert b'Hello, World!' in response.data
def calculate_average_crossing_per_month_and_measure(num_of_months, list_with_agg_values):
    """Calculates the average crossings per month and per measure.

    Args:
        num_of_months: the number of months based on the
                        frequency of each measure, saved as
                        a dict (per-measure frequency) or an int/list.
        list_with_agg_values: the list with Border, Date, Measure,
                                and aggregated values, sorted with the
                                most recent date first.

    Returns:
        list_with_avg (list): the list with the average crossing values
                                per month and per measure (each input row
                                extended with a trailing running-average
                                column).
    """
    list_with_avg = []
    # Going through the list of aggregated valves backwards
    # the list was sorted with the most recent date up first, so hence we are adding from the
    # the bottom up and not top down direction
    # NOTE(review): range(len-1, 0, -1) never visits index 0, so the first
    # (most recent) row is excluded from the output — confirm intended.
    # NOTE(review): if the first visited row does not hit the reset branch,
    # `accumulation`/`counter` are referenced before assignment — confirm
    # the input ordering guarantees the reset happens first.
    for i in range(len(list_with_agg_values) - 1, 0, -1):
        each_row = list_with_agg_values[i]
        # Now check whether the number of the months per measure is the same or not:
        # If it's not, we going to calculate the average for each measure's frequency
        if isinstance(num_of_months, dict):
            for key, value in num_of_months.items():
                if each_row[2] == key:
                    if i % value == 0:
                        accumulation, counter = 0, 0
                        each_row = each_row + [0]
                    else:
                        # Add up each of the previous months' values
                        each_row_before = list_with_agg_values[i + 1]
                        accumulation += each_row_before[3]
                        # Similarly add for each month to the counter
                        counter += 1
                        # For each row, get the average value of crossing based for each measure and border
                        each_row = each_row + [my_round(accumulation / counter)]
                    # And keep track in the list
                    list_with_avg.append(each_row)
        else:
            # Otherwise, if the frequency is the same for all of the measures
            if i % (num_of_months - 1) == 0:
                accumulation, counter = 0, 0
                each_row = each_row + [0]
            else:
                # Add up each of the previous months' values
                each_row_before = list_with_agg_values[i + 1]
                accumulation += each_row_before[3]
                # Similarly add for each month to the counter
                counter += 1
                # For each row, get the average value of crossing based for each measure and border
                each_row = each_row + [my_round(accumulation / counter)]
            # And keep track in the list
            list_with_avg.append(each_row)
    return list_with_avg
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.