content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def schema_to_entity_names(class_string):
"""
    Mapping from a class path to an entity name (used by the SQLA import/export).
    This could have been written much more simply if it were only for SQLA, but
    there is an attempt to reuse the SQLA import/export code for Django too.
"""
if class_string is None or len(class_string) == 0:
return
if(class_string == "aiida.backends.djsite.db.models.DbNode" or
class_string == "aiida.backends.sqlalchemy.models.node.DbNode"):
return NODE_ENTITY_NAME
if(class_string == "aiida.backends.djsite.db.models.DbLink" or
class_string == "aiida.backends.sqlalchemy.models.node.DbLink"):
return LINK_ENTITY_NAME
if(class_string == "aiida.backends.djsite.db.models.DbGroup" or
class_string ==
"aiida.backends.sqlalchemy.models.group.DbGroup"):
return GROUP_ENTITY_NAME
if(class_string == "aiida.backends.djsite.db.models.DbComputer" or
class_string ==
"aiida.backends.sqlalchemy.models.computer.DbComputer"):
return COMPUTER_ENTITY_NAME
if (class_string == "aiida.backends.djsite.db.models.DbUser" or
class_string == "aiida.backends.sqlalchemy.models.user.DbUser"):
return USER_ENTITY_NAME | c7a6aabde74e3639f39b8e56ad5f82c6684dd2ba | 31,900 |
def csm_data(csm):
"""
Return the data field of the sparse variable.
"""
return csm_properties(csm)[0] | c0f6993f9fb005f5659539deb89b459039801ea1 | 31,901 |
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
def plot_hypnogram(resampled_hypno, cycles=None, label="", fig=None, ax=None):
"""
Plot an aesthetically pleasing hypnogram with optional cycle markers.
Parameters
----------
resampled_hypno : pd.DataFrame
Hypnogram resampled to epoch time units.
    cycles : pd.DataFrame, optional
        Tabulated estimates for onsets, offsets and durations of detected cycles.
        The default is None.
    label : str, optional
        Custom label to add in plot title, usually a file identifier. The default is "".
    fig : matplotlib.figure.Figure, optional
        Existing figure to plot on; a new one is created when both fig and ax are None.
    ax : matplotlib.axes.Axes, optional
        Existing axes to plot on; new axes are created when both fig and ax are None.
Returns
-------
fig : Figure object
Main figure on which hypnogram has been plotted.
ax : Axis object
Axis on figure used by hypnogram.
"""
if (fig is None) and (ax is None):
# Initialize figure and axes
fig, ax = plt.subplots(1, 1, figsize=(16, 4.5), tight_layout=True, sharex=True)
# Ordering of stages and corresponding colors
stages = ["N3", "N2", "N1", "W", "R"]
colors = ["#002D72", "#005EB8", "#307FE2", "#30B700", "#BE3A34"]
# Step 1: Plot colored markers for each stage
# Iterate over each stage-color pair
for stage, color in zip(stages, colors):
# Filter subset of data
dat = resampled_hypno.loc[resampled_hypno["Stage"] == stage]
# Plot markers for each stage
ax.plot(
dat["Epoch_number"],
dat["Stage"],
linestyle="",
marker="s",
color=color,
markersize=2.5,
alpha=0.5,
# mec='w',
zorder=10,
)
# Step 2: Plot stage changes - classic hypnogram style
# Plot all stages across all epochs as a line
ax.plot(
resampled_hypno["Epoch_number"],
resampled_hypno["Stage"],
color="k",
alpha=0.4,
zorder=1,
linestyle="-",
linewidth=0.75,
)
# If cycle information provided
if cycles is not None:
# Iterate over cycles
for k, row in cycles.iterrows():
# Plot background tint
ax.axvspan(
xmin=2 * row["Onset"], # Convert minutes to epochs
xmax=2 * row["Offset"], # Convert minutes to epochs
ymin=0.025,
ymax=0.975,
color="#98B6E4",
alpha=0.16,
)
# Plot cycle onset and offset, and add text label for cycle number
ax.axvline(2 * row["Onset"], color="#98A4AE", alpha=0.25, linestyle="-")
ax.axvline(2 * row["Offset"], color="#98A4AE", alpha=0.25, linestyle="-")
ax.text(2 * row["Onset"] + 5, "R", f"C$_{k+1}$", va="top", alpha=0.75)
# Prepare secondary title with durations
cst = cycles["Duration"].sum()
tst = resampled_hypno["Epoch_number"].iloc[-1] / 2
N = len(cycles)
covg = 100 * cst / tst
title = f"{N} Sleep Cycles - Coverage: {covg:.1f}% of TST ({cst} of {tst} min)"
ax.set_title(title, loc="right")
# Adjust x-axis limits for cleaner fit, and add grid markers, ticks every 100 epochs
ax.set_xlim(-5, resampled_hypno["Epoch_number"].iloc[-1] + 5)
ax.xaxis.set_major_locator(MultipleLocator(100))
# Sane axis labels and primary title
ax.set_ylabel("Sleep Stage")
ax.set_xlabel("Epoch Number")
ax.set_title(f"Hypnogram for PSG {label}", loc="left", fontweight="bold")
return fig, ax | 5c9be12d2aa9553d4a0084fd6734e5847eabcb57 | 31,902 |
from typing import Callable
def _scroll_screen(direction: int) -> Callable:
"""
Scroll to the next/prev group of the subset allocated to a specific screen.
This will rotate between e.g. 1->2->3->1 when the first screen is focussed.
"""
def _inner(qtile):
if len(qtile.screens) == 1:
current = qtile.groups.index(qtile.current_group)
destination = (current + direction) % 6
qtile.groups[destination].cmd_toscreen()
return
current = qtile.groups.index(qtile.current_group)
if current < 3:
destination = (current + direction) % 3
else:
destination = ((current - 3 + direction) % 3) + 3
qtile.groups[destination].cmd_toscreen()
return _inner | e778b6ef8a07fe8609a5f3332fa7c44d1b34c17a | 31,903 |
def _gate_altitude_data_factory(radar):
""" Return a function which returns the gate altitudes. """
def _gate_altitude_data():
""" The function which returns the gate altitudes. """
try:
return radar.altitude['data'] + radar.gate_z['data']
except ValueError:
return np.mean(radar.altitude['data']) + radar.gate_z['data']
return _gate_altitude_data | 2a13cd44a6c50e6cbe7272e5628926aa4e76c71b | 31,904 |
from typing import Callable
import pathlib
import pytest
def test_posix_message_queue_ee(
monkeypatch: pytest.MonkeyPatch,
is_dir: bool,
ee_support: bool,
engine: str,
generate_config: Callable,
):
"""Confirm error messages related to missing ``/dev/mqueue/`` and ``podman``.
Test using all possible combinations of container_engine, ee_support, and ``is_dir``.
:param monkeypatch: Fixture for patching
:param is_dir: The return value to set for ``pathlib.Path.is_dir``
:param ee_support: The value to set for ``--ee``
:param engine: The value to set for ``--ce``
:param generate_config: The configuration generator fixture
"""
message_queue_msg = (
"Execution environment support while using podman requires a '/dev/mqueue/' directory."
)
unpatched_is_dir = pathlib.Path.is_dir
def mock_is_dir(path):
"""Override the result for ``Path('/dev/mqueue/')`` to ``is_dir``.
:param path: The provided path to check
:returns: ``is_dir`` if the path is ``/dev/mqueue/``, else the real result
"""
if path == pathlib.Path("/dev/mqueue/"):
return is_dir
return unpatched_is_dir(path)
monkeypatch.setattr("pathlib.Path.is_dir", mock_is_dir)
response = generate_config(params=["--ce", engine, "--ee", str(ee_support)])
should_error = ee_support and engine == "podman" and not is_dir
message_queue_msg_exists = any(
exit_msg.message == message_queue_msg for exit_msg in response.exit_messages
)
assert should_error == message_queue_msg_exists | 7dddd24953261324b982da0ed7adbf632cf47779 | 31,905 |
import random
def RandHexColor(length=6):
"""Generates a random color using hexadecimal digits.
Args:
length: The number of hex digits in the color. Defaults to 6.
"""
result = [random.choice(HEX_DIGITS) for _ in range(length)]
return '#' + ''.join(result) | 7e196fa2b0666dc9aee67bcac83d0186812d9335 | 31,906 |
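A minimal usage sketch for RandHexColor above; HEX_DIGITS is referenced but not defined in that row, so the definition below is an assumption.

import random

HEX_DIGITS = '0123456789abcdef'  # assumed definition; not part of the original row

def RandHexColor(length=6):
    result = [random.choice(HEX_DIGITS) for _ in range(length)]
    return '#' + ''.join(result)

print(RandHexColor())   # e.g. '#3fa29c'
print(RandHexColor(3))  # e.g. '#b07' (CSS shorthand form)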
import numpy as np
def _backtest2(prediction, price, acct_num,):
    """
    Calculate daily returns in % form.
:param prediction:
:param price:
:param acct_num:
:return:
"""
# starting net val for trading account
mat = np.ones((acct_num, len(price)))
# liquidate or build position time
_idx = np.arange(len(price))
# price change
_chg = price.pct_change()
for i in range(acct_num):
adjust_time = _idx[i::acct_num]
for j, k in zip(adjust_time, np.hstack((adjust_time[1:], [-1]))):
sign = np.sign(prediction[j])
if k != -1:
mat[i][j+1:k+1] = 1+sign * _chg[j+1: k+1]
else:
mat[i][j+1:] = 1+ sign * _chg[j+1: ]
mat = mat.cumprod(1).sum(0)
mat /= mat[0]
# daily return in % form.
return 100 * np.diff(mat)/mat[:-1] | 09e230945daf2f745b39523e5fba676e88629e09 | 31,907 |
def check_players(instance):
""" Checks to see if any of the starting players have left.
Args:
instance: The GameInstance model for this operation.
    If a player has left the game, they are re-invited and a warning
    message is returned.
    Returns:
        A warning message string if a player has left the game, otherwise False.
"""
if len(instance.players) < len(instance.starting_players):
for starting_player in instance.starting_players:
if starting_player not in instance.players:
instance.invited.append(starting_player)
return ('%s left during your game. They have ' %
starting_player +
'been invited and must rejoin before continuing.')
return False | d3a31f17cf5d3dee2e3fd075cea2e31d8a806952 | 31,908 |
def list_descendant(input_list):
"""Function that orders a list on descendant order using insertion sort
Args:
input_list ([type]): List with the values that we want to order
Returns:
list: The ordered list
"""
for i in range(1, len(input_list)):
j = i-1
next_element = input_list[i]
# Compare the current element with next one
        while (j >= 0) and (input_list[j] < next_element):
input_list[j+1] = input_list[j]
j = j-1
input_list[j+1] = next_element
return input_list | 8d2c452a513ccf67c4b179bf989fac5cc0108d09 | 31,909 |
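A short usage sketch, assuming list_descendant from the row above is in scope; the second call exercises the boundary case where the largest element travels to index 0, which is why j >= 0 must be checked before indexing input_list[j].

values = [5, 1, 9, 3, 9, 2]
print(list_descendant(values))     # [9, 9, 5, 3, 2, 1]
print(list_descendant([1, 2, 3]))  # [3, 2, 1]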
from io import StringIO
import csv
from aiohttp.web import Request, Response  # assumed: this is an aiohttp.web handler
async def chargify_invoices_csv(request: Request):
"""
Output Chargify invoices data.
"""
output = StringIO()
writer = csv.writer(output)
if not request.app['chargify_invoices_data']:
raise Exception("data not loaded yet")
writer.writerows(request.app['chargify_invoices_data'])
return Response(text=output.getvalue(), content_type="text/csv") | 5558b1b885f52edccc45a3819cab0353f84578fa | 31,910 |
import numpy as np
def generate_base_reference(header,waveform="cosine",period=24,phase=0,width=12):
    """
    Generate a reference waveform with the given phase and period based on the header.
    """
ZTs = header
tpoints = np.zeros(len(ZTs))
coef = 2.0 * np.pi / float(period)
w = float(width) * coef
for i,ZT in enumerate(ZTs):
z = float(ZT[2:].split("_")[0])
tpoints[i] = (z-phase) * coef
if waveform == "cosine":
def cosine(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = np.cos(x/(w/np.pi))
elif x > w:
y = np.cos( (x+2.*(np.pi-w))*np.pi/ (2*np.pi - w) )
return y
#fcos = lambda tp : cosine(tp,w)
reference= [cosine(tpoint,w) for tpoint in tpoints]
elif waveform == "trough":
def trough(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = 1 + -x/w
elif x > w:
y = (x-w)/(2*np.pi - w)
return y
#ftro = lambda tp : trough(tp,w)
reference= [trough(tpoint,w) for tpoint in tpoints]
return reference | b1178e07d9d628654902b6198acac1f791ba2064 | 31,911 |
def from_string(dlstr):
"""Factory method taking the string as appearing in FIELD input"""
input_key = dlstr.split()[0].lower()
for subcls in Interaction.__subclasses__():
if subcls.key == input_key:
return subcls.from_string(dlstr)
raise ValueError("No interaction available for {!r}".format(str)) | 875eb21e3773f47fdc77d3177a5f5253d0656546 | 31,912 |
def assign_cat(plugin):
"""Assigns `fonts` module keywords to the `Warp` plugin."""
meta = [
[KEYWORD_MATHCAL, "Script (or calligraphy): ๐โฌ๐๐ถ๐ท๐ธ๐๐๐ถ๐ท๐ธ"],
[KEYWORD_MATHBB, "Double-struck: ๐ธ๐นโ๐๐๐๐๐๐๐ธ๐น๐๐๐๐๐๐"],
[KEYWORD_MATHFRAK, "Fraktur: ๐๐
โญ๐๐๐ ๐๐
๐๐๐ "],
[KEYWORD_MATHSF, "Sans-serif: ๐ ๐ก๐ข๐บ๐ป๐ผ๐ฃ๐ค๐ฅ๐ ๐ก๐ข๐บ๐ป๐ผ๐ฃ๐ค๐ฅ"],
[KEYWORD_TEXTSF, "Sans-serif: ๐ ๐ก๐ข๐บ๐ป๐ผ๐ฃ๐ค๐ฅ๐ ๐ก๐ข๐บ๐ป๐ผ๐ฃ๐ค๐ฅ"],
[KEYWORD_MATHBF, "Serif Bold: ๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐"],
[KEYWORD_TEXTBF, "Serif Bold: ๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐"],
[KEYWORD_MATHBI, "Serif Bold italic: ๐จ๐ฉ๐ช๐๐๐๐๐๐๐จ๐ฉ๐ช๐๐๐๐๐๐"],
[KEYWORD_TEXTIT, "Serif Italic: ๐ด๐ต๐ถ๐๐๐123๐ด๐ต๐ถ๐๐๐"],
[KEYWORD_TEXTTT, "Mono-space: ๐ฐ๐ฑ๐ฒ๐๐๐๐ท๐ธ๐น๐ฐ๐ฑ๐ฒ๐๐๐๐ท๐ธ๐น"]]
items = [plugin.create_item(
category=plugin.CATEGORY_FONTS,
label=el[0],
short_desc=el[1],
target=el[0],
args_hint=kp.ItemArgsHint.REQUIRED,
hit_hint=kp.ItemHitHint.IGNORE) for el in meta]
return items | a1168624fa330ce19a3c8e78346bb7a893bb74e3 | 31,913 |
def config_func(config):
"""Configure the maintenance degrade notifier plugin"""
collectd.debug('%s config function' % PLUGIN)
for node in config.children:
key = node.key.lower()
val = node.values[0]
if key == 'port':
obj.port = int(val)
collectd.info("%s configured mtce port: %d" %
(PLUGIN, obj.port))
return 0
obj.port = MTCE_CMD_RX_PORT
collectd.error("%s no mtce port provided ; defaulting to %d" %
(PLUGIN, obj.port)) | 18d97c46e5a0a72cd4d4c45c2d43c8867f6f6d49 | 31,914 |
import holidays
import pandas as pd
def get_holidays(startYear = 2018, endYear = 2025, countryCode = 'ZA'):
    """
    Takes in a start year, an end year and a country code.
    Produces a dataframe with one row per public holiday and columns:
        day - date of the holiday
        holidayName - name of the holiday
    Returns a dataframe
    """
holidayDict = {}
for i in range(startYear, endYear):
for date, name in sorted(holidays.CountryHoliday(countryCode,years=[i]).items()):
holidayDict[date] = name
holiday_df = pd.DataFrame(list(holidayDict.items()),columns = ['day','holidayName'])
holiday_df['day'] = pd.to_datetime(holiday_df['day']).dt.date
return holiday_df | 6d7d83389bee2a83c6cc2d565f7c04b8d131555a | 31,915 |
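A usage sketch, assuming get_holidays from the row above is in scope and the holidays package (with the older CountryHoliday API) is installed.

za_holidays = get_holidays(startYear=2021, endYear=2023, countryCode='ZA')
print(za_holidays.head())                       # first few holidays of 2021
print(len(za_holidays), "holidays in 2021-2022")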
import struct
def _testHeadCheckSum(header, tableDirectory):
"""
>>> header = dict(sfntVersion="OTTO")
>>> tableDirectory = [
... dict(tag="head", offset=100, length=100, checkSum=123, data="00000000"+struct.pack(">L", 925903070)),
... dict(tag="aaab", offset=200, length=100, checkSum=456),
... dict(tag="aaac", offset=300, length=100, checkSum=789),
... ]
>>> bool(_testHeadCheckSum(header, tableDirectory))
"""
flavor = header["sfntVersion"]
tables = {}
for entry in tableDirectory:
tables[entry["tag"]] = entry
data = tables["head"]["data"][8:12]
checkSumAdjustment = struct.unpack(">L", data)[0]
shouldBe = calcHeadCheckSumAdjustment(flavor, tables)
if checkSumAdjustment != shouldBe:
return ["The head checkSumAdjustment value is incorrect."]
return [] | 4391c4d6dd6c3fedf99315a2d6995962251ca10f | 31,916 |
from model import MyLearner
import os
import logging
import time
from sys import path
import gin
import numpy as np
import scipy.stats
import tensorflow as tf
def scoring(argv):
"""
For each task, load and fit the Learner with the support set and evaluate
the submission performance with the query set.
A directory 'scoring_output' is created and contains a txt file that
contains the submission score and duration. Note that the former is the
time elapsed during the ingestion program and hence the meta-fit()
duration.
The metric considered here is the Sparse Categorical Accuracy for a
5 classes image classification problem.
"""
del argv
saved_model_dir = FLAGS.saved_model_dir
meta_test_dir = FLAGS.meta_test_dir
eval_type = FLAGS.evaltype
ingestion_time_budget = FLAGS.ingestion_time_budget
query_size_per_class = FLAGS.query_size_per_class
# Making eval type compatible with DataGenerator specs
if eval_type == 'train' or eval_type == 'val':
data_generator_eval_type = 'train'
elif eval_type == 'test':
data_generator_eval_type = 'test'
# Use CodaLab's path `run/input/ref` in parallel with `run/input/res`
if not os.path.isdir(meta_test_dir):
meta_test_dir = os.path.join(saved_model_dir, os.pardir, 'ref')
# Evaluation type scenario: if meta_test is specified -> act as normal
# scoring on meta_test data
if (eval_type == 'train' or eval_type == 'val') and 'meta_test' in meta_test_dir:
raise ValueError('Cannot perform train/val evaluation on meta-test data!')
#if 'meta_test' not in meta_test_dir:
# if eval_type == 'test':
# meta_test_dir = os.path.join(meta_test_dir, 'meta_test')
# else:
# meta_test_dir = os.path.join(meta_test_dir, 'meta_train')
code_dir = os.path.join(saved_model_dir, 'code_dir')
score_dir = FLAGS.score_dir
logging.debug("Using meta_test_dir={}".format(meta_test_dir))
logging.debug("Using code_dir={}".format(code_dir))
logging.debug("Using saved_model_dir={}".format(saved_model_dir))
logging.debug("Using score_dir={}".format(score_dir))
list_files(os.path.join(score_dir, os.pardir, os.pardir))
# Initialize detailed results page
initialize_detailed_results_page(score_dir)
########################################
# IMPORTANT:
# Wait until code_dir is created
########################################
t = 0
while (not os.path.isdir(code_dir)) and t < ingestion_time_budget:
time.sleep(1)
t += 1
path.append(code_dir)
if(os.path.exists(os.path.join(code_dir, 'model.gin'))):
gin.parse_config_file(os.path.join(code_dir, 'model.gin'))
logging.info('Ingestion done! Starting scoring process ... ')
logging.info('Creating the meta-test episode generator ... \n ')
generator = DataGenerator(path_to_records=meta_test_dir,
batch_config=None,
episode_config=[128, 5, 5, query_size_per_class],
pool= data_generator_eval_type,
mode='episode')
if eval_type == 'test':
meta_test_dataset = generator.meta_test_pipeline
elif eval_type == 'train':
meta_test_dataset = generator.meta_train_pipeline
elif eval_type == 'val':
meta_test_dataset = generator.meta_valid_pipeline
else:
raise ValueError('Wrong eval_type : {}'.format(eval_type))
logging.info('Evaluating performance on episodes ... ')
meta_test_dataset = meta_test_dataset.batch(1)
meta_test_dataset = meta_test_dataset.prefetch(5)
learner = MyLearner()
if (not os.path.isdir(score_dir)):
os.mkdir(score_dir)
score_file = os.path.join(score_dir, 'scores.txt')
results = []
dev = set_device()
if dev is not None:
with tf.device(f'/device:{dev}'):
metric = tf.metrics.SparseCategoricalAccuracy(name="test_sparse_categorical_accuracy")
else:
metric = tf.metrics.SparseCategoricalAccuracy(name="test_sparse_categorical_accuracy")
nbr_episodes = 600
for k , task in enumerate(meta_test_dataset):
support_set, query_set, ground_truth = process_task(task, query_size_per_class)
learner.load(saved_model_dir)
predictor = learner.fit(support_set)
predictions = predictor.predict(query_set)
score = NwayKshot_accuracy(predictions, ground_truth, metric)
results.append(score)
logging.debug('Score on {} : {}'.format(k, score))
logging.debug('Results : {}'.format(results[:20]))
if(k == nbr_episodes - 1):
break
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, h
m, conf_int = mean_confidence_interval(results)
with open(score_file, 'w') as f :
write_score(m,
conf_int,
f,
extract_elapsed_time(saved_model_dir))
# Update detailed results page
task_name = None
plot_detailed_results_figure(results, score_dir, task_name=task_name)
write_scores_html(score_dir)
logging.info(('Scoring done! The average score over {} '
+ 'episodes is : {:.3%}').format(nbr_episodes,
sum(results)/len(results))
) | b9baccd716a4e2d01961b3eade801368b1683983 | 31,917 |
def svn_ra_do_status(*args):
"""
svn_ra_do_status(svn_ra_session_t session, svn_ra_reporter2_t reporter,
void report_baton, char status_target, svn_revnum_t revision,
svn_boolean_t recurse, svn_delta_editor_t status_editor,
void status_baton,
apr_pool_t pool) -> svn_error_t
"""
    return _ra.svn_ra_do_status(*args)
def clear_spaces(comp):
"""
'A + D' -> 'A+D'
"""
r = ''
for c in comp:
if c != ' ':
r += c
return r | cf8f28cf3633eb31d0c934dd9fc3035b3c1539f4 | 31,919 |
def _load_metabolite_linkouts(session, cobra_metabolite, metabolite_database_id):
"""Load new linkouts even ones that are pointing to previously created universal
metabolites.
The only scenario where we don't load a linkout is if the external id and
metabolite is exactly the same as a previous linkout.
"""
# parse the notes
def parse_linkout_str(id):
if id is None:
return None
id_string = str(id)
        for s in ['{', '}', '[', ']', '"', "'",]:
id_string = id_string.replace(s, '')
return id_string.strip()
data_source_fix = {'KEGG_ID' : 'KEGGID', 'CHEBI_ID': 'CHEBI'}
db_xref_data_source_id = { data_source.name: data_source.id for data_source
in session.query(base.DataSource).all() }
    for external_source, v in cobra_metabolite.notes.items():
# ignore formulas
if external_source.lower() in ['formula', 'formula1', 'none']:
continue
# check if linkout matches the list
external_source = external_source.upper()
v = v[0]
if external_source in data_source_fix:
            external_source = data_source_fix[external_source]
if '&apos' in v:
ids = [parse_linkout_str(x) for x in v.split(',')]
else:
ids = [parse_linkout_str(v)]
for external_id in ids:
if external_id.lower() in ['na', 'none']:
continue
exists = (session
.query(base.Synonym)
.filter(base.Synonym.synonym == external_id)
.filter(base.Synonym.type == 'component')
.filter(base.Synonym.ome_id == metabolite_database_id)
.count() > 0)
if not exists:
ome_linkout = {'type': 'component'}
ome_linkout['ome_id'] = metabolite_database_id
ome_linkout['synonym'] = external_id
try:
data_source_id = db_xref_data_source_id[external_source]
except KeyError:
data_source_id = create_data_source(session, external_source)
db_xref_data_source_id[external_source] = data_source_id
check_and_update_url(session, data_source_id)
ome_linkout['synonym_data_source_id'] = data_source_id
synonym = base.Synonym(**ome_linkout)
session.add(synonym) | cda0a2fef212b091d39b45d2c8603eedc0f3b99a | 31,920 |
import random
def gen_ascii_captcha(symbols, length=6, max_h=10, noise_level=0, noise_char="."):
"""
Return a string of the specified length made by random symbols.
Print the ascii-art representation of it.
Example:
symbols = gen_ascii_symbols(input_file='ascii_symbols.txt',
chars = string.ascii_lowercase+string.ascii_uppercase+'0123456789')
while True:
captcha = gen_ascii_captcha(symbols, noise_level=0.2)
x = input('captcha: ')
if x == captcha:
print('\ncorrect')
break
print('\ninvalid captcha, please retry')
"""
assert noise_level <= 1
# max_h = 10
# noise_level = 0
captcha = "".join(random.sample(chars, length))
# print(code)
pool = [symbols[c].split("\n") for c in captcha]
for n in range(max_h, 0, -1):
line = ""
for item in pool:
try:
next_line = item[-n]
except IndexError:
next_line = "".join(
[" " for i in range(max([len(_item) for _item in item]))]
)
if noise_level:
# if random.random() < noise_level:
# next_line = next_line.replace(' ', noise_char)
next_line = "".join(
[
c
if random.random() > noise_level
else random.choice(noise_char)
for c in next_line
]
)
line += next_line
print(line)
return captcha | b6f6f02bbbe3cbdfce007ea13e26836c21e20ef2 | 31,921 |
def get_app_name_cache_file(file_path: str) -> str:
"""Returns path to app name cache file"""
return f'{file_path}.{APP_NAME_FILE_EXTENSION}' | 16bdfe57e28eb5ea97e317fea98285af494ff0a9 | 31,922 |
import numpy as np
def make_matrix_A(x_r, simulations, ξ, Δ_t=1, λ=1):
    """
    Equation (9) from paper
    :param x_r: reference output
    :param simulations: simulation outputs
    :param Δ_t: the intervals between each observation (series or constant)
    :param ξ: squash control parameter
    :return: the position/trend matrix
    """
    A = []
    for x_s in simulations:
        a_i1 = position_metric(x_r, x_s, ξ)
        a_i2 = trend_metric(x_r, x_s, ξ, Δ_t, λ)
        A.append([a_i1, a_i2])
    return np.array(A)
import markovify
def generate_markov_content(content):
""" Generate Markov Content
Parameters
----------
content : str
Corpus to generate markov sequences from
"""
# Build the model.
text_model = markovify.Text(content, state_size=3)
    # return a randomly-generated sentence of no more than 200 characters
return text_model.make_short_sentence(200) | c0503dca553712f8563c53676d3216f8206808ab | 31,924 |
def _quadratic_system_matrix(x):
"""
Create system matrix of a model with linear, quadratic and 1st-order 2-way interaction terms
Parameters
----------
x : array_like, shape (n, m)
Explanatory variable of n data points and m variables
Returns
-------
A : ndarray, shape (n, 1 + 1.5 m + 0.5 m ** 2)
System matrix
"""
n, m = x.shape
nc = int(1. + 1.5 * m + .5 * m ** 2.) # number of coefficients
if n < nc:
raise ValueError('Insufficient data points to fit full quadratic model.')
A = _allocate_system_matrix((n, nc))
_assign_linear_terms(A, x)
_assign_quadratic_terms(A, x)
_assign_interaction_terms(A, x, 2 * m + 1)
return A | 6d958cccaf5127fdc9f3fc6bcc472ceafd2947d6 | 31,925 |
import curio.meta
import pytest
def pytest_pycollect_makeitem(collector, name, obj):
"""A pytest hook to collect curio coroutines."""
if collector.funcnamefilter(name) and curio.meta.iscoroutinefunction(obj):
item = pytest.Function.from_parent(collector, name=name)
if "curio" in item.keywords:
return list(collector._genfunctions(name, obj)) | 95d18bc4575fc31c938dae6d487cf4cd058d89c4 | 31,926 |
from math import ceil
def eta_hms(seconds, always_show_hours=False, always_show_minutes=False, hours_leading_zero=False):
"""Converts seconds remaining into a human readable timestamp (e.g. hh:mm:ss, h:mm:ss, mm:ss, or ss).
Positional arguments:
seconds -- integer/float indicating seconds remaining.
Keyword arguments:
always_show_hours -- don't hide the 0 hours.
always_show_minutes -- don't hide the 0 minutes.
hours_leading_zero -- show 01:00:00 instead of 1:00:00.
Returns:
Human readable string.
"""
# Convert seconds to other units.
final_hours, final_minutes, final_seconds = 0, 0, seconds
if final_seconds >= 3600:
final_hours = int(final_seconds / 3600.0)
final_seconds -= final_hours * 3600
if final_seconds >= 60:
final_minutes = int(final_seconds / 60.0)
final_seconds -= final_minutes * 60
final_seconds = int(ceil(final_seconds))
# Determine which string template to use.
if final_hours or always_show_hours:
if hours_leading_zero:
template = '{hour:02.0f}:{minute:02.0f}:{second:02.0f}'
else:
template = '{hour}:{minute:02.0f}:{second:02.0f}'
elif final_minutes or always_show_minutes:
template = '{minute:02.0f}:{second:02.0f}'
else:
template = '{second:02.0f}'
return template.format(hour=final_hours, minute=final_minutes, second=final_seconds) | db85099a0a1c19391ed8abc1ea2bcf4b867a93af | 31,927 |
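A few illustrative calls, assuming eta_hms from the row above is in scope.

print(eta_hms(42))                             # '42'
print(eta_hms(90))                             # '01:30'
print(eta_hms(3661))                           # '1:01:01'
print(eta_hms(3661, hours_leading_zero=True))  # '01:01:01'
print(eta_hms(59, always_show_minutes=True))   # '00:59'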
def apply_Hc(C, A_L, A_R, Hlist):
"""
Compute C' via eq 16 of vumps paper (132 of tangent space methods).
"""
H, LH, RH = Hlist
A_Lstar = np.conj(A_L)
A_C = ct.rightmult(A_L, C)
to_contract = [A_C, A_Lstar, A_R, np.conj(A_R), H]
idxs = [(4, 1, 3),
(6, 1, -1),
(5, 3, 2),
(7, -2, 2),
(6, 7, 4, 5)]
term1 = tn.ncon(to_contract, idxs)
term2 = np.dot(LH, C)
term3 = np.dot(C, RH.T)
C_prime = term1 + term2 + term3
return C_prime | ae43b1fe1484ee70b2245474f03a3e9676b44929 | 31,928 |
import ipywidgets as ipyw
def make_basic_gui(container):
"""Create a basic GUI layout.
Parameters
----------
container : RenderContainer
Returns
-------
ipywidgets.GridspecLayout
"""
element_controls = [
ipyw.HTML(value="<b>Elements</b>", layout=ipyw.Layout(align_self="center"))
]
for key, descript in [
("group_atoms", "Atoms"),
("cell_lines", "Unit Cell"),
("group_labels", "Labels"),
("bond_lines", "Bonds"),
("group_millers", "Planes"),
("group_ghosts", "Ghosts"),
]:
if key not in container:
continue
toggle = ipyw.ToggleButton(
description=descript,
icon="eye",
button_style="primary",
value=False if key == "group_labels" else container[key].visible,
layout=ipyw.Layout(width="auto"),
)
ipyw.jslink((toggle, "value"), (container[key], "visible"))
element_controls.append(toggle)
control_box_elements = ipyw.Box(
element_controls, layout=ipyw.Layout(flex_flow="column")
)
container["control_box_elements"] = control_box_elements
background_controls = [
ipyw.HTML(value="<b>Background</b>", layout=ipyw.Layout(align_self="center"))
]
background_color = ipyw.ColorPicker(
concise=True,
description="Color",
description_tooltip="Background Color",
value=container.element_renderer.clearColor,
layout=ipyw.Layout(align_items="center"),
)
background_color.style.description_width = "40px"
ipyw.jslink((background_color, "value"), (container.element_renderer, "clearColor"))
background_controls.append(background_color)
background_opacity = ipyw.FloatSlider(
value=container.element_renderer.clearOpacity,
min=0,
max=1,
step=0.1,
orientation="horizontal",
readout=False,
description_tooltip="Background Opacity",
)
background_opacity.layout.max_width = "100px"
ipyw.jslink(
(background_opacity, "value"), (container.element_renderer, "clearOpacity")
)
background_controls.append(background_opacity)
# other_controls.append(ipyw.Label(value="Opacity", layout=ipyw.Layout(align_self="center")))
control_box_background = ipyw.Box(
background_controls, layout=ipyw.Layout(flex_flow="column")
)
container["control_box_background"] = control_box_background
axes = [container.axes_renderer] if "axes_renderer" in container else []
info_box = ipyw.HTML(
value="", # "Double-click atom for info (requires active kernel).",
color="grey",
layout=ipyw.Layout(
max_height="10px", margin="0px 0px 0px 0px", align_self="flex-start"
),
)
def on_click(change):
obj = change["new"]
if obj is None:
container.atom_pointer.visible = False
info_box.value = ""
else:
info_box.value = obj.name
# container.atom_pointer.position = container.atom_picker.point
container.atom_pointer.position = obj.position
container.atom_pointer.visible = True
container.atom_picker.observe(on_click, names=["object"])
if axes and container.element_renderer.height > 200:
grid = ipyw.GridspecLayout(
2,
2,
width=f"{container.element_renderer.width + 100}px",
height=f"{container.element_renderer.height + 35}px",
)
grid[0, 0] = container.element_renderer
grid[1, 0] = info_box
grid[:, 1] = ipyw.Box(
axes + [control_box_elements, control_box_background],
layout=ipyw.Layout(align_self="flex-start", flex_flow="column"),
)
else:
grid = ipyw.GridspecLayout(
2,
3,
width=f"{container.element_renderer.width + 200}px",
height=f"{container.element_renderer.height + 35}px",
)
grid[:, 0] = ipyw.Box(
axes, layout=ipyw.Layout(align_self="flex-end", flex_flow="column")
)
grid[0, 1] = container.element_renderer
grid[1, 1] = info_box
grid[:, 2] = ipyw.Box(
[control_box_elements, control_box_background],
layout=ipyw.Layout(align_self="flex-start", flex_flow="column"),
)
return grid | 5a01c70304910b1b20193c507a348ec90dd6a514 | 31,929 |
import traceback, functools
def multiprocessing_traceback(func):
"""A decorator for formatting exception traceback into a string to aid in
debugging when using the ``multiprocessing`` module."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
msg = "{}\n\nOriginal {}".format(e, traceback.format_exc())
raise type(e)(msg)
return wrapper | 078a086574c003f62e332063ef6c3475d9846e20 | 31,930 |
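A usage sketch, assuming multiprocessing_traceback from the row above is in scope; the re-raised exception keeps its original type but carries the formatted traceback in its message, which survives being pickled across process boundaries.

@multiprocessing_traceback
def worker(x):
    return 1 / x

try:
    worker(0)
except ZeroDivisionError as exc:
    print(exc)  # original message followed by the full formatted traceback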
try:
    import sysconfig
except ImportError:  # Python 2 without sysconfig
    sysconfig = None
def shared_libraries_are_available():
"""
check if python was built with --enable-shared or if the system python (with
dynamically linked libs) is in use
default to guessing that the shared libs are not available (be conservative)
"""
# if detection isn't possible because sysconfig isn't available (py2) then fail
if not sysconfig:
return False
enable_shared = sysconfig.get_config_var("Py_ENABLE_SHARED")
return enable_shared == 1 | 65306cc5bda77f07cc6dc118637d3fec7cae47c0 | 31,931 |
def parse_middleware_mkdir_response(mkdir_resp):
"""
Parse a response from RpcMiddlewareMkdir.
Returns (mtime in nanoseconds, inode number, number of writes)
"""
return (_ctime_or_mtime(mkdir_resp),
mkdir_resp["InodeNumber"],
mkdir_resp["NumWrites"]) | 17336084895bbad579785e7e918674e4367f05c6 | 31,932 |
import torch
def decorate_batch(batch, device='cpu'):
"""Decorate the input batch with a proper device
Parameters
----------
batch : {[torch.Tensor | list | dict]}
The input batch, where the list or dict can contain non-tensor objects
device: str, optional
'cpu' or 'cuda'
Raises:
----------
Exception: Unsupported data type
Return
----------
torch.Tensor | list | dict
Maintain the same structure as the input batch, but with tensors moved to a proper device.
"""
if isinstance(batch, torch.Tensor):
batch = batch.to(device)
return batch
elif isinstance(batch, dict):
for key, value in batch.items():
if isinstance(value, torch.Tensor):
batch[key] = value.to(device)
elif isinstance(value, dict) or isinstance(value, list):
batch[key] = decorate_batch(value, device)
# retain other value types in the batch dict
return batch
elif isinstance(batch, list):
new_batch = []
for value in batch:
if isinstance(value, torch.Tensor):
new_batch.append(value.to(device))
elif isinstance(value, dict) or isinstance(value, list):
new_batch.append(decorate_batch(value, device))
else:
# retain other value types in the batch list
new_batch.append(value)
return new_batch
else:
raise Exception('Unsupported batch type {}'.format(type(batch))) | a0bd4a5dff0b5cf6e304aede678c5d56cb93d1dd | 31,933 |
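A usage sketch, assuming decorate_batch from the row above is in scope; tensors are moved to the target device while strings and other values are kept untouched.

import torch

batch = {
    "input_ids": torch.zeros(2, 4, dtype=torch.long),
    "meta": {"lengths": torch.tensor([4, 3]), "ids": ["a", "b"]},
    "extra": [torch.ones(2), "keep-me"],
}
moved = decorate_batch(batch, device="cpu")  # pass "cuda" when a GPU is available
print(moved["meta"]["ids"], moved["extra"][1])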
import yaml
config = None
def get_config(filename: str = CONFIG_FILENAME) -> dict:
"""
Get config as a dictionary
Parameters
----------
filename: str
The filename with all the configuration
Returns
-------
dict
A dictionary containing all the entries from the config YAML
"""
global config
if config is None:
config = yaml.load(open(filename), Loader=yaml.FullLoader)
return config | 79572679204c53f6ecdf91a120bb6a08ced9afd1 | 31,934 |
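A usage sketch, assuming get_config from the row above is in scope; the file name and keys are illustrative only.

with open("config.example.yml", "w") as fh:
    fh.write("database:\n  host: localhost\n  port: 5432\n")

cfg = get_config("config.example.yml")
print(cfg["database"]["port"])                  # 5432
print(get_config("config.example.yml") is cfg)  # True: the parsed dict is cached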
def partition_trace(vtrace):
"""partition a trace based on its types """
partition = {}
for v in vtrace:
vty = get_vt_type(v)
if vty not in partition:
partition[vty] = []
partition[vty].append(v)
return partition | 6fa30f6180a122cc6cb74bf7dcdfbbfa2b4a8627 | 31,935 |
def test_text_justify_bottom_right_and_top_left(region, projection):
"""
Print text justified at bottom right and top left.
"""
fig = Figure()
fig.text(
region=region,
projection=projection,
x=1.2,
y=0.2,
text="text justified bottom right",
justify="BR",
)
fig.text(
region=region,
projection=projection,
x=1.2,
y=0.2,
text="text justified top left",
justify="TL",
)
return fig | a8b1735e002f2c310226142bfcbde9bd1fea9f95 | 31,936 |
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
def get_bench(
lx: tuple[int, int, int], data_dir: Path, tests: list[str], comp_lvls: list[int]
) -> dict[str, pd.DataFrame]:
"""get writing benchmark HDF5 files to pandas dataframe"""
data = {
"write_dr": pd.DataFrame(index=comp_lvls, columns=tests),
"read_dr": pd.DataFrame(index=comp_lvls, columns=tests),
"write_t": pd.DataFrame(index=comp_lvls, columns=tests),
"read_t": pd.DataFrame(index=comp_lvls, columns=tests),
}
hdf5_vers = None
mpi_api = None
for t in tests:
for c in comp_lvls:
tail = f"{lx[0]}_{lx[1]}_{lx[2]}_comp{c}"
h5fn = data_dir / f"{t}_{tail}.h5.write_stat.h5"
try:
with h5py.File(h5fn, "r") as f:
ca = f["/comp_lvl"][()]
data["write_dr"][t][ca] = f["/median_MBsec"][()]
data["write_t"][t][ca] = np.median(f["/t_ms"][:])
except FileNotFoundError:
print(f"ERROR: {t}: write benchmark {h5fn}")
h5fn = data_dir / f"{t}_{tail}.h5.read_stat.h5"
try:
with h5py.File(h5fn, "r") as f:
# read benchmarks refer to write compression level
data["read_dr"][t][ca] = f["/median_MBsec"][()]
data["read_t"][t][ca] = np.median(f["/t_ms"][:])
if "Ncpu" not in data and "mpi" in t:
data["Ncpu"] = f["/Ncpu"][()]
data["compiler"] = f["/compiler"].asstr()[()]
data["os"] = f["/os"].asstr()[()]
hdf5_vers = f["/hdf5version"][:]
mpi_api = f["/mpi_api_version"][:]
data["mpi_lib_version"] = f["/mpi_lib_version"].asstr()[()][:16]
# limit length for title
except FileNotFoundError:
print(f"ERROR: {t}: read benchmark {h5fn}")
if hdf5_vers is None:
raise FileNotFoundError(f"No data files were found in {data_dir}")
data["hdf5version"] = f"{hdf5_vers[0]}.{hdf5_vers[1]}.{hdf5_vers[2]}"
data["mpi_api_version"] = f"{mpi_api[0]}.{mpi_api[1]}"
return data | d135f58a3793cf161f338118060b16106bd6c96a | 31,937 |
def custom_token(deploy_tester_contract, custom_token_params):
"""Deploy CustomToken contract"""
return deploy_tester_contract(
CONTRACT_CUSTOM_TOKEN,
[],
custom_token_params
) | 175f8b49545e422ae691631b361f7bb62e9f62f5 | 31,938 |
def parse_values(reports, criteria1, criteria2, steps, crit1_name, crit2_name, first=False, cpus=1):
"""
Description: Parse the 'reports' and create a sorted array
of size n_structs following the criteria chosen by the user.
"""
info_reports = [ retrieve_report_data(report) for report in reports]
data = pd.concat(info_reports)
data.drop_duplicates(subset=[crit1_name, crit2_name], inplace=True)
print("Simulation data {}".format(data.shape))
return data | 9d22756ecdfe50f7e5563a4fa855ddb5a3a4cd21 | 31,939 |
import io
def read_bytes(n: int, reader: io.IOBase) -> bytes:
"""
Reads the specified number of bytes from the reader. It raises an
`EOFError` if the specified number of bytes is not available.
Parameters:
- `n`: The number of bytes to read;
- `reader`: The reader;
Returns the bytes read.
"""
buff = reader.read(n)
if not isinstance(buff, bytes):
raise ValueError('The reader is expected to return bytes.')
if len(buff) != n:
raise EOFError(f'Unable to read {n} bytes from the stream.')
return buff | bb3d00fc7667839864f4104a94a26e682f058fdc | 31,940 |
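A usage sketch, assuming read_bytes from the row above is in scope.

import io

stream = io.BytesIO(b"\x01\x02\x03\x04")
print(read_bytes(2, stream))   # b'\x01\x02'
try:
    read_bytes(5, stream)      # only two bytes remain
except EOFError as exc:
    print(exc)                 # 'Unable to read 5 bytes from the stream.'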
import json
def _format_full_payload(_json_field_name, _json_payload, _files_payload):
"""This function formats the full payload for a ``multipart/form-data`` API request including attachments.
.. versionadded:: 2.8.0
:param _json_field_name: The name of the highest-level JSON field used in the JSON payload
:type _json_field_name: str
:param _json_payload: The JSON payload data as a dictionary
:type _json_payload: dict
:param _files_payload: The payload for the attachments containing the IO stream for the file(s)
:type _files_payload: dict
:returns: The full payload as a dictionary
:raises: :py:exc:`TypeError`
"""
_full_payload = {
_json_field_name: (None, json.dumps(_json_payload, default=str), 'application/json')
}
_full_payload.update(_files_payload)
return _full_payload | feacd27be3e6fcbd33f77fa755be513a93e3cdeb | 31,941 |
def convert_rows (rows):
"""Read a two-element tuple from a string.
rows should be a string containing two integers separated by a
comma, blank, or colon. The numbers may be enclosed in parentheses
or brackets, but this is not necessary. Note: the row numbers
are one indexed and inclusive, e.g. rows = "480, 544" means process
rows 479 through 543 (zero indexed), which is equivalent to the
slice 479:544.
"""
if rows.strip() == "":
rows = None
else:
        bad = True
        rows = rows.strip().lstrip("([").rstrip(")]")
        if rows.find(",") >= 0:
            rownum = rows.split(",")
        elif rows.find(":") >= 0:
            rownum = rows.split(":")
        else:
            rownum = rows.split(" ")
if len (rownum) == 2:
bad = False
try:
row0 = int (rownum[0]) - 1
row1 = int (rownum[1])
except:
bad = True
if bad:
raise ValueError("can't interpret rows = %s" % (rows,))
rows = (row0, row1)
return rows | e2d5a8e68459d6cb7a2fa044baa6d25dab46511c | 31,942 |
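A usage sketch, assuming convert_rows from the row above is in scope (including the colon handling shown there); the returned tuple is zero-indexed and half-open, matching the slice 479:544.

print(convert_rows("480, 544"))   # (479, 544)
print(convert_rows("480:544"))    # (479, 544)
print(convert_rows("   "))        # None, meaning all rows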
import numpy
def prepare_data(hsi_img=None, gnd_img=None, window_size=7, n_principle=3,
batch_size=50, merge=False, ratio=[6, 2, 2]):
"""
Process the data from file path to splited train-valid-test sets; Binded in
dataset_spectral and dataset_spatial respectively.
Parameters
----------
hsi_img=None: 3-D numpy.ndarray, dtype=float, storing initial
hyperspectral image data.
gnd_img=None: 2-D numpy.ndarray, dtype=int, containing tags for pixeles.
The size is the same to the hsi_img size, but with only
1 band.
window_size: Size of spatial window. Pass an integer 1 if no spatial
infomation needed.
n_principle: This many principles you want to incorporate while
extracting spatial info.
merge: If merge==True, the returned dataset_spectral has
dataset_spatial stacked in the tail of it; else if
merge==False, the returned dataset_spectral and
dataset_spatial will have spectral and spatial information
only, respectively.
Return
------
dataset_spectral:
dataset_spatial:
extracted_pixel_ind:
split_mask:
"""
data_spectral, data_spatial, gndtruth, extracted_pixel_ind = \
T_pca_constructor(hsi_img=hsi_img, gnd_img=gnd_img, n_principle=n_principle,
window_size=window_size, flag='supervised')
################ separate train, valid and test spatial data ###############
[train_spatial_x, train_y], [valid_spatial_x, valid_y], [test_spatial_x, test_y], split_mask = \
train_valid_test(data=[data_spatial, gndtruth], ratio=ratio,
batch_size=batch_size, random_state=123)
# convert them to theano.shared values
train_set_x = theano.shared(value=train_spatial_x, name='train_set_x', borrow=True)
valid_set_x = theano.shared(value=valid_spatial_x, name='valid_set_x', borrow=True)
test_set_x = theano.shared(value=test_spatial_x, name='test_set_x', borrow=True)
train_set_y = theano.shared(value=train_y, name='train_set_y', borrow=True)
valid_set_y = theano.shared(value=valid_y, name='valid_set_y', borrow=True)
test_set_y = theano.shared(value=test_y, name='test_set_y', borrow=True)
dataset_spatial = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
############### separate train, valid and test spectral data ###############
[train_spectral_x, train_y], [valid_spectral_x, valid_y], [test_spectral_x, test_y], split_mask = \
train_valid_test(data=[data_spectral, gndtruth], ratio=ratio,
batch_size=batch_size, random_state=123)
# if we want to merge data, merge it
if merge:
train_spectral_x = numpy.hstack((train_spectral_x, train_spatial_x))
valid_spectral_x = numpy.hstack((valid_spectral_x, valid_spatial_x))
test_spectral_x = numpy.hstack((test_spectral_x, test_spatial_x))
# convert them to theano.shared values
train_set_x = theano.shared(value=train_spectral_x, name='train_set_x', borrow=True)
valid_set_x = theano.shared(value=valid_spectral_x, name='valid_set_x', borrow=True)
test_set_x = theano.shared(value=test_spectral_x, name='test_set_x', borrow=True)
train_set_y = theano.shared(value=train_y, name='train_set_y', borrow=True)
valid_set_y = theano.shared(value=valid_y, name='valid_set_y', borrow=True)
test_set_y = theano.shared(value=test_y, name='test_set_y', borrow=True)
dataset_spectral = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return dataset_spectral, dataset_spatial, extracted_pixel_ind, split_mask | b54d174cdfc99bc1ba74e7b8cc09661d6bf18f9c | 31,943 |
import os
def make_output_dirs(model_name, dat, let):
"""
Generate output directories of the run corresponding to
- model_name
- dat
- let
0 - output_dir
1 - samples_output_dir
2 - enkf_output_dir
"""
output_dir = (os.environ['HOME'] + "/shematOutputDir/" + model_name +
"_output/" + dat + "/" + dat + "_" + let)
samples_output_dir = (os.environ['HOME'] + "/shematOutputDir/" +
model_name + "_output/" + dat + "/" + dat + "_" +
let + "/samples_output")
enkf_output_dir = (os.environ['HOME'] + "/shematOutputDir/" + model_name +
"_output/" + dat + "/" + dat + "_" + let +
"/enkf_output")
return output_dir, \
samples_output_dir, \
enkf_output_dir | 2804bff3d1da0aae85e133e985bb526859116388 | 31,944 |
def mvresnet152(**kwargs):
"""Constructs a MVResNet-101 model.
"""
model = MVResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model | 86b579e5f3a80b87677d0aa0c075d4e3eeb81d46 | 31,945 |
from torch import Tensor
def _wer_compute(errors: Tensor, total: Tensor) -> Tensor:
"""Compute the word error rate.
Args:
errors: Number of edit operations to get from the reference to the prediction, summed over all samples
total: Number of words overall references
Returns:
Word error rate score
"""
return errors / total | c7a2ea912e27d1867f771b135cb0c8bd9fd7729e | 31,946 |
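A small worked example, assuming _wer_compute from the row above is in scope: comparing the reference "the cat sat" with the prediction "the cat sit down" takes 2 edit operations over 3 reference words, giving a WER of 2/3.

import torch

errors = torch.tensor(2.0)
total = torch.tensor(3.0)
print(_wer_compute(errors, total))  # tensor(0.6667)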
def get_coordinator():
"""Creates a coordinator and returns it."""
workflow_queue = Queue.Queue()
complete_queue = Queue.Queue()
coordinator = WorkflowThread(workflow_queue, complete_queue)
coordinator.register(WorkflowItem, workflow_queue)
return coordinator | 24fa3b52803f1cebae246be8b3988b9568965e6d | 31,947 |
from typing import Any
from typing import Union
import torch
def tocuda(vars: Any) -> Union[str, torch.Tensor]:
"""Convert tensor to tensor on GPU"""
if isinstance(vars, torch.Tensor):
return vars.cuda()
elif isinstance(vars, str):
return vars
else:
raise NotImplementedError("invalid input type {} for tocuda".format(type(vars))) | b7be275fe7e909fa54fc62ed9e5fbe61d3ff4863 | 31,948 |
import logging
def get_logger():
"""
Returns:
logging.Logger
"""
logger = logging.getLogger('ds-toolkit')
if not len(logger.handlers):
logger.setLevel(logging.DEBUG)
logger.addHandler(_get_console_handler())
return logger | 68afb0f31c24e72edf7539d51efc0985423115a6 | 31,949 |
from typing import Union
from typing import NewType
from typing import Any
from typing import Tuple
from typing import Optional
def _maybe_node_for_newtype(
typ: Union[NewType, Any],
overrides: OverridesT,
memo: MemoType,
forward_refs: ForwardRefs
) -> Tuple[Optional[schema.nodes.SchemaNode], MemoType, ForwardRefs]:
""" newtypes do not change the underlying runtime data type that is used in
calls like isinstance(), therefore it's just enough for us to find
a schema node of the underlying type
"""
rv = None
if insp.is_new_type(typ):
return decide_node_type(typ.__supertype__, overrides, memo, forward_refs)
return rv, memo, forward_refs | 5cce0197a44ebc2517c509211746a996d6e0235c | 31,950 |
def read_slug(filename):
"""
Returns the test slug found in specified filename.
"""
with open(filename, "r") as f:
slug = f.read()
return slug | e1882d856e70efa8555dab9e422a1348594ffcaf | 31,951 |
def boolean_value(value):
"""Given a Value, returns whether the object is statically known to be truthy.
Returns None if its truth value cannot be determined.
"""
if isinstance(value, KnownValue):
try:
return bool(value.val)
except Exception:
# Its __bool__ threw an exception. Just give up.
return None
return None | c4f971b474943f44c2d5b85def14d91901244e72 | 31,952 |
import sys
def _Import(name):
"""Import a module or package if it is not already imported."""
module = sys.modules.get(name, None)
if module is not None:
return module
return import_module(name, None) | 86f65c5e523eb745fe6ffbf122d2e7da34fd223b | 31,953 |
import os
import tqdm
import math
import numpy as np
from pyteomics import mgf
def bin_sparse(X, file, scan_names, bins, max_parent_mass = 850, verbose=False, window_filter=True, filter_window_size=50, filter_window_retain=3):
""" Parses and bins a single MGF file into a matrix that holds the charge intensities of all the spectra
Args:
X: Scipy sparse matrix in the format of bins on the rows and spectra on the columns
file: MGF file to read spectra in from
scan_names: List of spectra names to append to
bins: Numpy array of a list of bins for holding spectra charges
max_parent_mass: Threshold value for max mass of the spectra for filtering
window_filter: Boolean for whether to use a window filter to remove small intensity peaks
filter_window_size: Size of each window for the window filter
filter_window_retain: Number of peaks to keep for the window filter
Returns:
A tuple containing a Scipy sparse matrix that has all the charges' intensities binned and related
to their corresponding spectra, along with a list of scan names that indicate the mgf file and spectra
number corresponding to each column of the intensity matrix
"""
# Get min and max bins
min_bin = min(bins)
max_bin = max(bins)
# Determine bin size from bins array
bin_size = (max_bin - min_bin) / len(bins)
# Parse MGF file
reader = mgf.MGF(file)
# File's name without extension is used for creating scan names
base = os.path.basename(file)
print("Binning " + file) if verbose else None
length = X.shape[1]
if verbose:
pbar = tqdm.tqdm(total=length, unit='spectra', smoothing=0.1, dynamic_ncols=True)
half = length/2
curr = 1
# Go through all the spectra from the MGF file
for spectrum_index, spectrum in enumerate(reader):
if verbose:
pbar.update()
if curr <= half:
# Create the scan name based on the MGF file and the current spectra number
scan_names.append(os.path.splitext(base)[0] + "_filtered" + "_" + spectrum['params']['scans'])
else:
scan_names.append(os.path.splitext(base)[0] + "_" + spectrum['params']['scans'])
curr +=1
# Do a basic filter based on the mass of the spectra
if spectrum['params']['pepmass'][0] > max_parent_mass:
continue
# Some spectra might not have any charges so skip those
if len(spectrum['m/z array']) == 0:
continue
# First do the window filter to remove any noise from low intensity peaks if specified
if window_filter:
spectrum = filter_window(spectrum, filter_window_size, filter_window_retain)
# Loop through all the charges in the spectra and get the corresponding intensities
for mz, intensity in zip(spectrum['m/z array'], spectrum['intensity array']):
# If the charge is outside of the max bin specified or if it's too large
# relative to the spectra itself, skip it
if mz > max_bin or mz > spectrum['params']['pepmass'][0]:
continue
# Figure out what the index of the bin should be
target_bin = math.floor((mz - min_bin)/bin_size)
# Add the intensity to the right spot in the matrix. Uses target_bin-1 because
# indices start at 0. Does += (adds) so that if any charges from the same spectra
# fall into the same bin because of larger bin sizes, it "stacks" on top of each other
X[target_bin-1, spectrum_index] += intensity
if verbose:
pbar.close()
# Normalize the matrix, making each bin relative to its max value
for idx in range(0, X.shape[0]):
X[idx] = X[idx]/X[idx].toarray().max()
print("Finished Binning " + file) if verbose else None
return (X,scan_names) | 6fda1ea76fcbdcc032d5a9b6fca05c1a82e0181a | 31,954 |
import os
def ls_img_sequence(path):
"""Listing all available coherent image sequence from path
Arguments:
path (str): A nuke's node object
Returns:
data (dict): with nuke formated path and frameranges
"""
file = os.path.basename(path)
dirpath = os.path.dirname(path)
base, ext = os.path.splitext(file)
name, padding = os.path.splitext(base)
# populate list of files
files = [
f for f in os.listdir(dirpath)
if name in f
if ext in f
]
# create collection from list of files
collections, reminder = clique.assemble(files)
if len(collections) > 0:
head = collections[0].format("{head}")
padding = collections[0].format("{padding}") % 1
padding = "#" * len(padding)
tail = collections[0].format("{tail}")
file = head + padding + tail
return {
"path": os.path.join(dirpath, file).replace("\\", "/"),
"frames": collections[0].format("[{ranges}]")
}
return False | 5b98496390d94ee96f18769922526d21e136da03 | 31,955 |
def _convert_to_dict(tracked_dict):
"""
Recursively convert a Pony ORM TrackedDict to a normal Python dict
"""
if not isinstance(tracked_dict, orm.ormtypes.TrackedDict):
return tracked_dict
return {k: _convert_to_dict(v) for k, v in tracked_dict.items()} | 40a31ebc96f3010618c3b091e2618d2d6809b82d | 31,956 |
def sky_coords(cluster):
"""Get the sky coordinates of every star in the cluster
Parameters
----------
cluster : class
StarCluster
Returns
-------
ra,dec,d0,pmra,pmdec,vr0 : float
on-sky positions and velocities of cluster stars
History
-------
2018 - Written - Webb (UofT)
"""
cluster.save_cluster()
if origin0 != "galaxy":
cluster.to_galaxy(starsort=False)
x0, y0, z0 = coords.galcenrect_to_XYZ(
cluster.x, cluster.y, cluster.z, Xsun=8.0, Zsun=0.025
).T
vx0, vy0, vz0 = coords.galcenrect_to_vxvyvz(
cluster.vx,
cluster.vy,
cluster.vz,
Xsun=8.0,
Zsun=0.025,
vsun=[-11.1, 244.0, 7.25],
).T
l0, b0, d0 = coords.XYZ_to_lbd(x0, y0, z0, degree=True).T
ra, dec = coords.lb_to_radec(l0, b0, degree=True).T
vr0, pmll0, pmbb0 = coords.vxvyvz_to_vrpmllpmbb(
vx0, vy0, vz0, l0, b0, d0, degree=True
).T
pmra, pmdec = coords.pmllpmbb_to_pmrapmdec(pmll0, pmbb0, l0, b0, degree=True).T
cluster.return_cluster()
return ra, dec, d0, pmra, pmdec, vr0 | 26a17879a6c2cc84dbf250d9250014f8554fbb9e | 31,957 |
import time
import requests
import json
import pandas as pd
def macro_cons_silver_volume():
"""
    Holdings report for the world's largest silver ETF, iShares Silver Trust; data available from 2006-04-29 to the present.
:return: pandas.Series
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["็ฝ้ถ"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["ๆปๅบๅญ(ๅจ)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 1]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_volume"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["ๆปๅบๅญ"]
temp_append_df.name = "silver_volume"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df | 1a806095e935ea5e6065dec4dc412775ebaa2b22 | 31,958 |
import os
def scan_files(prefix):
""" Gets all words from all files ending in the specified prefix and returns
them as a list.
"""
files = ['/'.join([data.DATA_DIR, f])
for f in os.listdir(data.DATA_DIR) if f.startswith(prefix)]
words = []
for f in files:
with open(f, 'r') as wf:
for w in wf.readlines():
w = w.strip()
if w:
words.append(w)
return list(set(words)) | 9861de09940cf437991a66161813f9402b3325c5 | 31,959 |
import numpy as np
from .model_store import get_model_file
import os
def get_jasper(version,
use_dw=False,
use_dr=False,
bn_epsilon=1e-3,
vocabulary=None,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Jasper/DR/QuartzNet model with specific parameters.
Parameters:
----------
version : tuple of str
Model type and configuration.
use_dw : bool, default False
Whether to use depthwise block.
use_dr : bool, default False
Whether to use dense residual scheme.
bn_epsilon : float, default 1e-3
Small float added to variance in Batch norm.
vocabulary : list of str or None, default None
Vocabulary of the dataset.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
blocks, repeat = tuple(map(int, version[1].split("x")))
main_stage_repeat = blocks // 5
model_type = version[0]
if model_type == "jasper":
channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024]
kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1]
dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4]
elif model_type == "quartznet":
channels_per_stage = [256, 256, 256, 512, 512, 512, 512, 1024]
kernel_sizes_per_stage = [33, 33, 39, 51, 63, 75, 87, 1]
dropout_rates_per_stage = [0.0] * 8
else:
raise ValueError("Unsupported Jasper family model type: {}".format(model_type))
stage_repeat = np.full((8,), 1)
stage_repeat[1:-2] *= main_stage_repeat
channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], [])
kernel_sizes = sum([[a] * r for (a, r) in zip(kernel_sizes_per_stage, stage_repeat)], [])
dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], [])
net = Jasper(
channels=channels,
kernel_sizes=kernel_sizes,
bn_epsilon=bn_epsilon,
dropout_rates=dropout_rates,
repeat=repeat,
use_dw=use_dw,
use_dr=use_dr,
vocabulary=vocabulary,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net | c393894b9c4b0d02289eeeee8f1c027a122ceec3 | 31,960 |
def preprocess_img(img):
"""Preprocessing function for images."""
return img/255 | 11651a809288d5c3aa776b318099b7eb750d28ec | 31,961 |
from typing import OrderedDict
import os
def test_pdbqt():
"""RDKit PDBQT writer and reader"""
mol = next(oddt.toolkit.readfile('sdf', xiap_actives))
mol2 = oddt.toolkit.readstring('pdbqt', mol.write('pdbqt'))
assert mol.title == mol2.title
# test loop breaks in DFS algorithm
mol = oddt.toolkit.readstring('smi', 'CCc1cc(C)c(C)cc1-c1ccc(-c2cccc(C)c2)cc1')
mol.make3D()
# roundtrip molecule with template
mol2 = oddt.toolkit.readstring('pdbqt', mol.write('pdbqt'))
mol.removeh()
assert len(mol.atoms) == len(mol2.atoms)
def nodes_size(block):
out = OrderedDict()
current_key = None
for line in block.split('\n'):
if line[:4] == 'ROOT' or line[:6] == 'BRANCH':
current_key = line.strip()
out[current_key] = 0
elif line[:4] == 'ATOM':
out[current_key] += 1
return list(out.values())
# check the branch order and size
if oddt.toolkit.backend == 'ob':
assert_array_equal(nodes_size(mol.write('pdbqt')),
[6, 8, 2, 7])
else:
assert_array_equal(nodes_size(mol.write('pdbqt')),
[8, 6, 7, 2])
ligand_file = os.path.join(test_data_dir, 'data', 'dude', 'xiap',
'crystal_ligand.sdf')
mol = next(oddt.toolkit.readfile('sdf', ligand_file))
assert_array_equal(nodes_size(mol.write('pdbqt')),
[8, 3, 6, 6, 1, 6, 3, 2, 2])
# roundtrip a disconnected fragments
mol = oddt.toolkit.readstring('smi', 'c1ccccc1.c1ccccc1C')
if oddt.toolkit.backend == 'ob':
kwargs = {'opt': {'r': None}}
else:
kwargs = {'flexible': False}
mol2 = oddt.toolkit.readstring('pdbqt', mol.write('pdbqt', **kwargs))
assert len(mol.atoms) == len(mol2.atoms)
mol2 = oddt.toolkit.readstring('pdbqt', mol.write('pdbqt'))
assert len(mol.atoms) == len(mol2.atoms) | 6b01a554b7421b5ac2d7b744eb5371aa5268418b | 31,962 |
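The nodes_size helper in the test above only depends on plain string parsing, so its behaviour can be checked on a hand-written fragment. A minimal sketch (the PDBQT block below is made up, not real toolkit output):
from collections import OrderedDict
block = "\n".join([
    "ROOT",
    "ATOM      1  C   LIG A   1",
    "ATOM      2  C   LIG A   1",
    "ENDROOT",
    "BRANCH   2   3",
    "ATOM      3  O   LIG A   1",
    "ENDBRANCH   2   3",
])
out = OrderedDict()
current_key = None
for line in block.split("\n"):
    if line[:4] == "ROOT" or line[:6] == "BRANCH":
        current_key = line.strip()
        out[current_key] = 0
    elif line[:4] == "ATOM":
        out[current_key] += 1
print(list(out.values()))  # [2, 1]: two atoms in the rigid ROOT, one in the BRANCH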
def get_apitools_metadata_from_url(cloud_url):
"""Takes storage_url.CloudUrl and returns appropriate Apitools message."""
messages = apis.GetMessagesModule('storage', 'v1')
if cloud_url.is_bucket():
return messages.Bucket(name=cloud_url.bucket_name)
elif cloud_url.is_object():
generation = int(cloud_url.generation) if cloud_url.generation else None
return messages.Object(
name=cloud_url.object_name,
bucket=cloud_url.bucket_name,
generation=generation) | c8ec4dd6c6019467129c03d367c6dd58963d334f | 31,963 |
def branch(ref=None):
"""Return the name of the current git branch."""
ref = ref or "HEAD"
return local("git symbolic-ref %s 2>/dev/null | awk -F/ {'print $NF'}"
% ref, capture=True) | 8597a9b38f2a6372aa9dba911163bdb44d4db1f2 | 31,964 |
def fit_anis(celldmsx, Ex, ibrav=4, out=False, type="quadratic", ylabel="Etot"):
"""
An auxiliary function for handling fitting in the anisotropic case
"""
    if out:
        print(type + " fit")
        if type == "quadratic":
            a, chi = fit_quadratic(celldmsx, Ex, ibrav, out, ylabel)
        elif type == "quartic":
            a, chi = fit_quartic(celldmsx, Ex, ibrav, out, ylabel)
        else:
            print("Fitting type not implemented")
            return None, None
        if chi is not None:
            print_polynomial(a)
            print("Chi squared: ", chi, "\n")
            return a, chi
        return a, None
    else:
        if type == "quadratic":
            a, chi = fit_quadratic(celldmsx, Ex, ibrav, False, ylabel)
        elif type == "quartic":
            a, chi = fit_quartic(celldmsx, Ex, ibrav, False, ylabel)
        else:
            return None, None
        if chi is not None:
            return a, chi
        return a, None | 25f88d62876696d1d9e95e1c11c5b687a59cea7b | 31,965 |
def solicitacao_incluir(discente, sugestao_turma):
"""
Inclui uma Solicitaรงรฃo de interesse do Discente na Sugestรฃo de Turma.
:param discente: Um objeto da classe @Discente
:param sugestao_turma: Um objeto da classe @SugestaoTurma
:return: Um objeto da classe @SolicitacaoTurma e um booleano informando se a Solicitaรงรฃo foi criada.
"""
usuario = discente.usuario
solicitacao, created = SolicitacaoTurma.objects.get_or_create(
usuario=usuario, solicitador=discente, turma=sugestao_turma)
return solicitacao, created | f346058717a62d6007ea99347043512f4770950d | 31,966 |
def _evolve_trotter_gates(psi,
layers,
step_size,
num_steps,
euclidean=False,
callback=None):
"""Evolve an initial wavefunction psi via gates specified in `layers`.
If the evolution is euclidean, the wavefunction will be normalized after
each step.
"""
t = 0.0
for i in range(num_steps):
psi = apply_circuit(psi, layers)
if euclidean:
psi = tf.divide(psi, tf.norm(psi))
t += step_size
if callback is not None:
callback(psi, t, i)
return psi, t | e1319a01434b0de0c4d90db3881a5a1e3b22d491 | 31,967 |
import os
def load_worksheet(
filename,
sheet=None,
worksheet=0,
skip_rows=0,
csv_delimiter=DEFAULT_CSV_DELIMITER,
ext=None,
):
"""
Load a worksheet from a supported file format
:param filename: File to be loaded
:type filename: str
:param sheet: If not None, load the data into the sheet
:type sheet: Worksheet
:param worksheet: Worksheet title or number (zero-based)
:type worksheet: str or int
:param skip_rows: Number of input lines to skip
:type skip_rows: int
:param csv_delimiter: CSV delimiter
:type csv_delimiter: str
:param ext: Force file format (autodetect by default)
:type ext: str
"""
if ext is None:
ext = os.path.splitext(filename)[1]
ext = ext.lower().lstrip('.')
loader = loaders.get(ext)
if loader is None:
raise KeyError('unsupported file format {}'.format(ext))
return loader(
filename=filename,
sheet=sheet,
skip_rows=skip_rows,
worksheet=worksheet,
csv_delimiter=csv_delimiter,
) | ab2e27729dfdb3ee2b0dfda2c837f4f7a62b9304 | 31,968 |
import os
def _get_next_traj_id(root_data_dir='data'):
""" Resolve what is the next trajectory number """
if not os.path.exists(os.path.join(root_data_dir, 'screens')):
return 0
return 1 + max([
int(x) for x in os.listdir(os.path.join(root_data_dir, 'screens'))
]) | d321af4c90d9de78942e2526c0720b3019fff479 | 31,969 |
def get_user(email):
""" Sometimes user account is setup using @gmail.com domain,
but android phone says @googlemail.com and vice versa
"""
try:
if 'googlemail' in email or 'gmail' in email:
uname = email.partition('@')[0]
users = User.objects.filter(Q(email=uname+'@googlemail.com') |
Q(email=uname+'@gmail.com'))
else:
users = User.objects.filter(email=email)
u = users.order_by('-seen')[0]
return u
except (IndexError, User.DoesNotExist):
if settings.DEBUG:
print('no such address: ' + email) # don't notify the sender
return False | 11ce9dddcb2896010cd07c5f95859680e95e7ec4 | 31,970 |
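The gmail/googlemail handling above reduces to matching on the local part whenever the domain is one of Google's two mail domains. A self-contained sketch of that normalization step, without the Django ORM (the function name is illustrative):
def gmail_aliases(email):
    """Return the set of addresses treated as equivalent by get_user()."""
    local, _, domain = email.partition('@')
    if domain in ('gmail.com', 'googlemail.com'):
        return {local + '@gmail.com', local + '@googlemail.com'}
    return {email}
print(gmail_aliases('alice@googlemail.com'))  # {'alice@gmail.com', 'alice@googlemail.com'} (order may vary)
print(gmail_aliases('bob@example.org'))       # {'bob@example.org'}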
from tvm.tir.analysis import _ffi_api as _analysis_ffi_api
import logging
def test_tuning_gpu_inherits_pass_context(target, dev):
"""Autotvm tuner inherits PassContexts but also adds a gpu verification pass by default.
Test that using PassContext inherits passes properly but also runs gpu verification pass.
"""
@pass_instrument
class PassInstrumentChecker:
"""Pass Instrument that simply sees if it's been run."""
def __init__(self):
self.has_been_run = False
def run_after_pass(self, mod, info):
self.has_been_run = True
class GPUVerifyPassMocked:
"""Context manager that mocks tir.analysis.verify_gpu_code meant
to verify the pass has been run. This is done by patching the ffi func handles."""
FFI_FUNC_HANDLE = "tir.analysis.verify_gpu_code"
FUNC_NAME = "verify_gpu_code"
def __init__(self) -> None:
self.old_impl = tvm._ffi.get_global_func(self.FFI_FUNC_HANDLE)
self.has_been_run = False
def gpu_verify_pass_mocked(self):
"""Get the replacement for the gpu verification pass."""
def _gpu_verify_pass_mocked(*args, **kwargs):
self.has_been_run = True
return self.old_impl(*args, **kwargs)
return _gpu_verify_pass_mocked
def __enter__(self):
tvm._ffi.register_func(
self.FFI_FUNC_HANDLE, self.gpu_verify_pass_mocked(), override=True
)
# Also overwrite the python bindings
setattr(
_analysis_ffi_api, self.FUNC_NAME, tvm._ffi.get_global_func(self.FFI_FUNC_HANDLE)
)
def __exit__(self, *args, **kwargs):
# Restore FFI status back to normal
tvm._ffi.register_func(self.FFI_FUNC_HANDLE, self.old_impl, override=True)
setattr(_analysis_ffi_api, self.FUNC_NAME, self.old_impl)
class OverwrittenBuildFunc(measure_methods._WrappedBuildFunc):
"""BuildFunc that mocks and patches as necessary to test proper passes are run."""
def __call__(self, measure_input, tmp_dir, **kwargs):
instrument = PassInstrumentChecker()
mocked_pass_checker = GPUVerifyPassMocked()
with mocked_pass_checker:
with PassContext(instruments=[instrument]):
regular_result = super().__call__(measure_input, tmp_dir, **kwargs)
# Check instrument has been run, meaning context was inherited by builder
assert instrument.has_been_run
# But also check the gpu verification pass has been run
# (which was not in the inherited ctx)
assert mocked_pass_checker.has_been_run
return regular_result
class MockedLocalBuilder(measure_methods.LocalBuilder):
"""As measure_methods.LocalBuilder but overwrites the PassContext for testing."""
def __init__(
self,
timeout=10,
n_parallel=None,
build_kwargs=None,
build_func="default",
do_fork=False,
runtime=None,
):
super().__init__(timeout, n_parallel, build_kwargs, build_func, do_fork, runtime)
self.build_func = OverwrittenBuildFunc(tar.tar, runtime)
def runner(target, dev):
task, target = get_sample_task(target, None)
logging.info("task config space: %s", task.config_space)
# Note: we use the MockedLocalBuilder here instead of autotvm.LocalBuilder()
measure_option = autotvm.measure_option(MockedLocalBuilder(), autotvm.LocalRunner())
results = []
tuner = RandomTuner(task)
tuner.tune(
n_trial=1,
measure_option=measure_option,
callbacks=(lambda _tuner, _inputs, rs: results.extend(rs),),
)
assert len(results) == 1
run_test_with_all_multiprocessing(runner, target, dev) | 8c62f18293b24601a8ee5296f2e4984bed7c20e4 | 31,971 |
def make_scae(config):
"""Builds the SCAE."""
    # Returns the complete model to be trained.
    # canvas_size and template_size define the (square) image canvas and the part templates.
    img_size = [config.canvas_size] * 2
    template_size = [config.template_size] * 2
    # CNN backbone used inside the part encoder: four conv layers (Sonnet handles the padding).
'''
Sequential(
(0): Conv2d(1, 128, kernel_size=(3, 3), stride=(2, 2))
(1): ReLU()
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2))
(3): ReLU()
(4): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))
(5): ReLU()
(6): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))
(7): ReLU() )
// input: [1,40,40]
// output: [128,5,5]
'''
cnn_encoder = snt.nets.ConvNet2D(
output_channels=[128] * 4,
kernel_shapes=[3],
strides=[2, 2, 1, 1],
        paddings=[snt.VALID],  # VALID padding: no zero padding, spatial size shrinks
activate_final=True)
    # Part (primary capsule) encoder built on top of the CNN backbone.
part_encoder = primary.CapsuleImageEncoder(
cnn_encoder,
config.n_part_caps,
config.n_part_caps_dims,
n_features=config.n_part_special_features,
similarity_transform=False,
        encoder_type='conv_att')
    # Template-based part decoder: renders the learned templates back onto the image canvas.
part_decoder = primary.TemplateBasedImageDecoder(
output_size=img_size,
template_size=template_size,
n_channels=config.n_channels,
learn_output_scale=False,
colorize_templates=config.colorize_templates,
use_alpha_channel=config.use_alpha_channel,
template_nonlin=config.template_nonlin,
color_nonlin=config.color_nonlin,
)
    # Object encoder: a Set Transformer over the part capsules.
obj_encoder = SetTransformer(
n_layers=3,
n_heads=1,
n_dims=16,
n_output_dims=256,
n_outputs=config.n_obj_caps,
layer_norm=True,
dropout_rate=0.)
    # Object (image) capsule decoder.
obj_decoder = ImageCapsule(
config.n_obj_caps,
2,
config.n_part_caps,
n_caps_params=config.n_obj_caps_params,
n_hiddens=128,
learn_vote_scale=True,
deformations=True,
noise_type='uniform',
noise_scale=4.,
        similarity_transform=False)
    # Assemble the full autoencoder from the part and object encoders/decoders.
model = ImageAutoencoder(
primary_encoder=part_encoder,
primary_decoder=part_decoder,
encoder=obj_encoder,
decoder=obj_decoder,
input_key='image',
label_key='label',
n_classes=10,
dynamic_l2_weight=10,
caps_ll_weight=1.,
vote_type='enc',
pres_type='enc',
stop_grad_caps_inpt=True,
stop_grad_caps_target=True,
prior_sparsity_loss_type='l2',
prior_within_example_sparsity_weight=config.prior_within_example_sparsity_weight, # pylint:disable=line-too-long
prior_between_example_sparsity_weight=config.prior_between_example_sparsity_weight, # pylint:disable=line-too-long
posterior_sparsity_loss_type='entropy',
posterior_within_example_sparsity_weight=config.posterior_within_example_sparsity_weight, # pylint:disable=line-too-long
posterior_between_example_sparsity_weight=config.posterior_between_example_sparsity_weight, # pylint:disable=line-too-long
)
return model | 415ab9d12806f250fa786872b0a3a2d6ecd82f72 | 31,972 |
import requests
def get_materialization_versions(dataset_name, materialization_endpoint=None):
""" Gets materialization versions with timestamps """
if materialization_endpoint is None:
materialization_endpoint = analysisdatalink.materialization_endpoint
url = '{}/api/dataset/{}'.format(materialization_endpoint, dataset_name)
r = requests.get(url)
assert r.status_code == 200
versions = {d['version']:d['time_stamp'] for d in r.json() if d['valid']}
return versions | 32d6976e864a9926f6b2e51ec7a65c33e2b86832 | 31,973 |
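The response handling in get_materialization_versions is a dict comprehension over the endpoint's JSON. With a sample payload it behaves as follows (the payload is illustrative, not real API output):
sample_response = [
    {"version": 1, "time_stamp": "2019-01-01T00:00:00", "valid": True},
    {"version": 2, "time_stamp": "2019-02-01T00:00:00", "valid": False},
    {"version": 3, "time_stamp": "2019-03-01T00:00:00", "valid": True},
]
versions = {d["version"]: d["time_stamp"] for d in sample_response if d["valid"]}
print(versions)  # {1: '2019-01-01T00:00:00', 3: '2019-03-01T00:00:00'}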
import scipy.stats
import pandas as pd
def WilcoxonRankSum(tpms):
"""May be very slow for large datasets."""
wrs = tpms[['SMTSD', 'ENSG']].copy().drop_duplicates().sort_values(by=['SMTSD', 'ENSG'])
wrs.reset_index(drop=True, inplace=True)
wrs['stat'] = pd.Series(dtype=float)
wrs['pval'] = pd.Series(dtype=float)
for smtsd in wrs.SMTSD.unique():
tpms_this = tpms[tpms.SMTSD==smtsd]
for ensg in tpms_this.ENSG.unique():
vals_f = tpms_this.TPM[(tpms_this.ENSG==ensg) & (tpms_this.SEX=="F")]
vals_m = tpms_this.TPM[(tpms_this.ENSG==ensg) &(tpms_this.SEX=="M")]
stat, pval = scipy.stats.ranksums(x=vals_f, y=vals_m)
#stat, pval = scipy.stats.mannwhitneyu(x=vals_f.rank(), y=vals_m.rank(), use_continuity=True, alternative="two-sided")
            wrs.loc[(wrs.SMTSD==smtsd) & (wrs.ENSG==ensg), 'stat'] = stat
            wrs.loc[(wrs.SMTSD==smtsd) & (wrs.ENSG==ensg), 'pval'] = pval
return wrs | 89733a49bc8e0e0a1d492b21ad6310746e4d23fe | 31,974 |
def filter_image(image, filter_DFT):
"""
Just takes the DFT of a filter and applies the filter to an image
This may optionally pad the image so as to match the number of samples in the
filter DFT. We should make sure this is greater than or equal to the size of
the image.
"""
assert image.dtype == 'float32'
assert filter_DFT.shape[0] >= image.shape[0], "don't undersample DFT"
assert filter_DFT.shape[1] >= image.shape[1], "don't undersample DFT"
filtered_with_padding = np.real(
np.fft.ifft2(np.fft.ifftshift(
filter_DFT * np.fft.fftshift(np.fft.fft2(image, filter_DFT.shape))),
filter_DFT.shape)).astype('float32')
return filtered_with_padding[0:image.shape[0], 0:image.shape[1]] | 0c6d2e46640fbfd7757231eb398c90fea0ca0a65 | 31,975 |
import traceback
import sys
def create_database(server, db_name, encoding=None):
"""This function used to create database and returns the database id"""
db_id = ''
try:
connection = get_db_connection(
server['db'],
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode']
)
old_isolation_level = connection.isolation_level
connection.set_isolation_level(0)
pg_cursor = connection.cursor()
if encoding is None:
pg_cursor.execute(
'''CREATE DATABASE "%s" TEMPLATE template0''' % db_name)
else:
pg_cursor.execute(
'''CREATE DATABASE "%s" TEMPLATE template0
ENCODING='%s' LC_COLLATE='%s' LC_CTYPE='%s' ''' %
(db_name, encoding[0], encoding[1], encoding[1]))
connection.set_isolation_level(old_isolation_level)
connection.commit()
# Get 'oid' from newly created database
pg_cursor.execute("SELECT db.oid from pg_database db WHERE"
" db.datname='%s'" % db_name)
oid = pg_cursor.fetchone()
if oid:
db_id = oid[0]
connection.close()
return db_id
except Exception:
traceback.print_exc(file=sys.stderr)
return db_id | c4825f4bd0d898cdf4889ab09f14d98b58e909cc | 31,976 |
def _create_diff_matrix(n, order=1):
"""Creates n x n matrix subtracting adjacent vector elements
Example:
>>> print(_create_diff_matrix(4, order=1))
[[ 1 -1 0 0]
[ 0 1 -1 0]
[ 0 0 1 -1]]
>>> print(_create_diff_matrix(4, order=2))
[[ 1 -1 0 0]
[-1 2 -1 0]
[ 0 -1 2 -1]
[ 0 0 -1 1]]
"""
if order == 1:
diff_matrix = -1 * np.diag(np.ones(n - 1), k=1).astype("int")
np.fill_diagonal(diff_matrix, 1)
diff_matrix = diff_matrix[:-1, :]
elif order == 2:
diff_matrix = -1 * np.diag(np.ones(n - 1), k=1).astype("int")
diff_matrix = diff_matrix + -1 * np.diag(np.ones(n - 1), k=-1).astype("int")
np.fill_diagonal(diff_matrix, 2)
diff_matrix[-1, -1] = 1
diff_matrix[0, 0] = 1
return diff_matrix | 0d241d37075e37d342e2601fc307277dd92b180f | 31,977 |
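Applied to a vector, the order-1 matrix gives adjacent differences and the order-2 matrix the boundary-adjusted second differences (D2 equals D1.T @ D1). A quick check using the function above, assuming numpy is imported as np:
import numpy as np
v = np.array([1.0, 3.0, 6.0, 10.0])
D1 = _create_diff_matrix(4, order=1)
print(D1 @ v)   # [-2. -3. -4.]  i.e. v[i] - v[i+1]
D2 = _create_diff_matrix(4, order=2)
print(D2 @ v)   # [-2. -1. -1.  4.]
print(np.array_equal(D2, D1.T @ D1))  # True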
def fourier_frequencies_from_times(times):
"""
Calculates the Fourier frequencies from a set of times. These frequencies are in 1/units, where
`units` is the units of time in `times`. Note that if the times are not exactly equally spaced,
then the Fourier frequencies are ill-defined, and this returns the frequencies based on assuming
that the time-step is the mean time-step. This is reasonable for small deviations from equally
spaced times, but not otherwise.
Parameters
----------
times : list
The times from which to calculate the frequencies
Returns
-------
array
The frequencies associated with Fourier analysis on data with these timestamps.
"""
timestep = _np.mean(_np.diff(times)) # The average time step.
numtimes = len(times) # The number of times steps
return frequencies_from_timestep(timestep, numtimes) | 4976483355af4652eafe28c955b2ca847bc140af | 31,978 |
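frequencies_from_timestep is not shown here, but for N equally spaced samples with step dt the standard DFT frequency grid is k / (N * dt) for k = 0 .. N//2. A sketch of that relation using numpy (assuming the helper follows the usual convention; this is not its actual implementation):
import numpy as np
times = np.arange(0.0, 10.0, 0.5)      # 20 samples, dt = 0.5 time units
timestep = np.mean(np.diff(times))     # 0.5
numtimes = len(times)                  # 20
freqs = np.fft.rfftfreq(numtimes, d=timestep)
print(freqs[:4])   # [0.  0.1 0.2 0.3] in 1/(time units), up to the Nyquist frequency 1.0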
import copy
def fission(candidate_seed, pop, n, max_seed_area):
"""
In fusion, we use the convention of putting one seed on
the left and the other seed on the right, before we fuse
the two seeds. In fission, we assume that fission will
split the left part from the right part. Find the most
sparse column in the candidate seed and split the seed along
this column. If both parts are at least the minimum allowed
seed size, randomly choose one of them. If only one part
is at least the minimum allowed seed size, choose that
one part. If neither part is at least the minimum allowed
seed size, then default to sexual reproduction.
"""
# The most fit member of the tournament.
s0 = candidate_seed
# Minimum xspan. Only xspan is relevant, since we are splitting
# left and right parts.
min_s_xspan = mparam.min_s_xspan
# See whether the seed is big enough to split. If it is too
# small, then default to sexual reproduction.
if (s0.xspan <= min_s_xspan):
return sexual(candidate_seed, pop, n, max_seed_area)
# In the seed matrix, x = row index, y = column index.
# In Golly, g.setcell(g_x, g_y, s_state) refers to the cell
# in horizontal position g_x and vertical position g_y, where
# g_x increases from left to right and g_y increases from top
# to bottom. Unfortunately, x in the seed matrix ranges
# vertically over matrix rows and y in the seed matrix ranges
# horizontally over matrix columns, whereas x in Golly ranges
# horizontally and y in Golly ranges vertically.
#
# Speaking in Golly terms, we want to split the seed along
# any purple border (cells in state 5) such that the border
# spans the entire seed in a straight line. Due to the design
# of fusion(), the border will be a vertical purple stripe in
# Golly.
#
# There may be several vertical purple strips (that is, borders,
# buffer zones, lines of cells in state 5) in the seed.
# We will take the first one that we find.
border_line = -1 # no border found yet
border_colour = 5 # purple, state 5
for x in range(s0.xspan):
for y in range(s0.yspan):
if (s0.cells[x][y] != border_colour):
break # not a border -- try the next x
# if we make it here, then we have found a border
border_line = x
break # stop looking
# If no border was found, then use sexual reproduction
if (border_line == -1):
return sexual(candidate_seed, pop, n, max_seed_area)
# Left and right parts.
left_cells = s0.cells[0:border_line, :]
right_cells = s0.cells[(border_line + 1):, :]
# Initialize a seed for the left or right part.
s1 = copy.deepcopy(s0)
# If both parts are big enough, randomly choose one of them.
if ((left_cells.shape[0] >= min_s_xspan) \
and (right_cells.shape[0] >= min_s_xspan)):
if (rand.uniform(0, 1) < 0.5):
s1.cells = left_cells
else:
s1.cells = right_cells
# If only the left part is big enough, use the left part.
elif (left_cells.shape[0] >= min_s_xspan):
s1.cells = left_cells
# If only the right part is big enough, use the right part.
elif (right_cells.shape[0] >= min_s_xspan):
s1.cells = right_cells
# If neither part is big enough, use sexual reproduction
else:
return sexual(candidate_seed, pop, n, max_seed_area)
# Set the correct dimensions for the new seed
s1.xspan = s1.cells.shape[0]
s1.yspan = s1.cells.shape[1]
# Mutate s1
prob_grow = mparam.prob_grow
prob_flip = mparam.prob_flip
prob_shrink = mparam.prob_shrink
seed_density = mparam.seed_density
mutation_rate = mparam.mutation_rate
s1 = s1.mutate(prob_grow, prob_flip, prob_shrink, seed_density, mutation_rate)
# Update count of living cells
s1.num_living = s1.count_ones()
# Find the least fit old seed in the population. It's not a problem
# if there are ties.
s2 = find_worst_seed(pop)
# Now we have:
#
# s0 = seed 0
# s1 = left or right side of seed 0
# s2 = the least fit old seed, which will be replaced by s1
#
# Replace the least fit old seed in the population (s2) with the
# chosen part (s1).
i = s2.address # find the position of the old seed (s2)
s1.address = i # copy the old position of the old seed into s1
pop[i] = s1 # replace s2 (old seed) in population (pop) with s1
# Build a history for the new seed, by matching it against all seeds
# in the population.
width_factor = mparam.width_factor
height_factor = mparam.height_factor
time_factor = mparam.time_factor
num_trials = mparam.num_trials
pop_size = len(pop)
for j in range(pop_size):
update_history(g, pop, i, j, width_factor, height_factor, \
time_factor, num_trials)
update_similarity(pop, i, j)
# Report on the new history of the new seed
message = "Run: {}".format(n) + \
" Whole fitness (s0): {:.3f}".format(s0.fitness()) + \
" Fragment fitness (s1): {:.3f}".format(s1.fitness()) + \
" Replaced seed fitness (s2): {:.3f}\n".format(s2.fitness())
# Return with the updated population and a message.
return [pop, message] | f056c90f68f91ba1b3f81f76c3599f8f7a3aee51 | 31,979 |
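The border search in fission simply looks for the first row of the cell matrix that is entirely in the buffer state, then slices the seed on either side of it. A self-contained numpy sketch of that step (the 5x4 seed below is made up):
import numpy as np
border_colour = 5
cells = np.array([[1, 0, 1, 0],
                  [0, 1, 0, 1],
                  [5, 5, 5, 5],   # the buffer line
                  [1, 1, 0, 0],
                  [0, 0, 1, 1]])
border_line = -1
for x in range(cells.shape[0]):
    if np.all(cells[x, :] == border_colour):
        border_line = x
        break
left, right = cells[:border_line, :], cells[border_line + 1:, :]
print(border_line, left.shape, right.shape)   # 2 (2, 4) (2, 4)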
def iou(box, clusters):
"""
Calculates the Intersection over Union (IoU) between a box and k clusters.
param:
box: tuple or array, shifted to the origin (i. e. width and height)
clusters: numpy array of shape (k, 2) where k is the number of clusters
return:
numpy array of shape (k, 0) where k is the number of clusters
"""
x = np.minimum(clusters[:, 0], box[0])
y = np.minimum(clusters[:, 1], box[1])
if np.count_nonzero(x == 0) > 10 or np.count_nonzero(y == 0) > 10:
raise ValueError("Box has no area")
intersection = x * y
box_area = box[0] * box[1]
cluster_area = clusters[:, 0] * clusters[:, 1]
iou_ = np.true_divide(intersection, box_area + cluster_area - intersection + 1e-10)
# iou_ = intersection / (box_area + cluster_area - intersection + 1e-10)
return iou_ | d181cf7234602f4b3f7d5b6c0a25cd9e15c2b5ed | 31,980 |
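A worked example: one 2x4 box against two anchor clusters. Because every box is shifted to the origin, the intersection is just the elementwise minimum of widths times the minimum of heights:
import numpy as np
box = np.array([2.0, 4.0])                     # width, height
clusters = np.array([[2.0, 2.0], [4.0, 4.0]])  # two anchors
x = np.minimum(clusters[:, 0], box[0])   # [2. 2.]
y = np.minimum(clusters[:, 1], box[1])   # [2. 4.]
intersection = x * y                     # [4. 8.]
union = box.prod() + clusters.prod(axis=1) - intersection  # [8. 16.]
print(intersection / union)              # [0.5 0.5]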
def munge_av_status(av_statuses):
"""Truncate and lowercase availability_status"""
return [a[20:].lower() for a in av_statuses] | 52a00fc6733015c3618a2a394371ea9387d92fc0 | 31,981 |
def cdf(vals, reverse=False):
"""Computes the CDF of a list of values"""
vals = sorted(vals, reverse=reverse)
tot = float(len(vals))
x = []
y = []
for i, x2 in enumerate(vals):
x.append(x2)
y.append((i+1) / tot)
return x, y | 3cc64dcb8876f7620f02da873e29569e77477823 | 31,982 |
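Usage example for the cdf helper above; y steps from 1/n to 1 over the sorted inputs:
x, y = cdf([3, 1, 2, 2])
print(x)  # [1, 2, 2, 3]
print(y)  # [0.25, 0.5, 0.75, 1.0]
xr, yr = cdf([3, 1, 2, 2], reverse=True)  # complementary view, largest values first
print(xr)  # [3, 2, 2, 1]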
def user_document_verification(request, document_id):
"""Display user's document information request.
Args:
request: URL request
document_request_id: document ID in firebase
Returns:
Render user document verification view.
"""
# Document Data
document_ref = db.collection("document_request")
user_document_data = document_ref.document(document_id).get().to_dict()
if user_document_data["user_verified"]:
return HttpResponseRedirect(
reverse(
"appointment:user_issuing_list",
kwargs={"document_id": user_document_data["document_id"]},
)
)
else:
# User Collection
user_ref = db.collection("users")
user_data = user_ref.document(user_document_data["user_id"]).get().to_dict()
document_userdata_list = []
for document in user_document_data["document"]:
document_userdata_list.append(document["document_name"])
user_document_data["document_list"] = document_list
return render(
request,
"appointment/user_document_request.html",
{"document_data": user_document_data, "user_data": user_data},
) | 6610d21e1a0e35aaf3203814c20687a5b48e5d96 | 31,983 |
import os
def get_not_repeated_file_name(path_with_file):
"""
Returns file_name if file_name does not exist. If it exists, it appends an underscore until
this new file name does not exist, returning it. For example if "/home/mine/file.txt" exists,
it will return "/home/mine/_file.txt".
@param path_with_file: complete path with the name of the file.
"""
directory, file_name = os.path.split(path_with_file)
file_rename = file_name
while os.path.exists(directory+"/"+file_rename):
file_rename = "_"+file_rename
return directory+"/"+file_rename | 7da491a3cd0261d99142905e4e7d2690c3be0d06 | 31,984 |
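A small usage sketch for get_not_repeated_file_name with a temporary directory (the file names are arbitrary):
import os
import tempfile
tmp = tempfile.mkdtemp()
open(os.path.join(tmp, "file.txt"), "w").close()    # existing file
open(os.path.join(tmp, "_file.txt"), "w").close()   # first fallback is also taken
print(get_not_repeated_file_name(tmp + "/file.txt"))
# <tmp>/__file.txt: two underscores, the first name that does not exist yet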
def attr(name, thing=word, subtype=None):
"""Generate an Attribute with that name, referencing the thing.
Instance variables:
Class Attribute class generated by namedtuple()
"""
# if __debug__:
# if isinstance(thing, (tuple, list)):
# warnings.warn(type(thing).__name__
# + " not recommended as grammar of attribute "
# + repr(name), SyntaxWarning)
return attr.Class(name, thing, subtype) | 488f1aef2b350ae702652dff9e2c5decc21dd271 | 31,985 |
def bulk_records(
name: str,
bulk: TextClassificationBulkData,
common_params: CommonTaskQueryParams = Depends(),
service: TextClassificationService = Depends(
TextClassificationService.get_instance
),
datasets: DatasetsService = Depends(DatasetsService.get_instance),
current_user: User = Security(auth.get_user, scopes=[]),
) -> BulkResponse:
"""
Includes a chunk of record data with provided dataset bulk information
Parameters
----------
name:
The dataset name
bulk:
The bulk data
common_params:
Common query params
service:
the Service
datasets:
The dataset service
current_user:
Current request user
Returns
-------
Bulk response data
"""
task = TASK_TYPE
owner = current_user.check_workspace(common_params.workspace)
try:
dataset = datasets.find_by_name(
current_user, name=name, task=task, workspace=owner
)
datasets.update(
user=current_user,
dataset=dataset,
tags=bulk.tags,
metadata=bulk.metadata,
)
except EntityNotFoundError:
dataset_class = TaskFactory.get_task_dataset(task)
dataset = dataset_class.parse_obj({**bulk.dict(), "name": name})
dataset.owner = owner
datasets.create_dataset(user=current_user, dataset=dataset)
result = service.add_records(
dataset=dataset,
records=bulk.records,
)
return BulkResponse(
dataset=name,
processed=result.processed,
failed=result.failed,
) | aa82e779b5bd9286bd1b456f151f3b0679b3582b | 31,986 |
def environment_list(p_engine, p_username, format, envname):
"""
Print list of environments
    param1: p_engine: engine name from configuration
    param2: p_username: user name used for the engine connection
    param3: format: output format
    param4: envname: environment name to list, all if None
return 0 if environment found
"""
ret = 0
enginelist = get_list_of_engines(p_engine, p_username)
if enginelist is None:
return 1
data = DataFormatter()
data_header = [
("Engine name", 30),
("Environment name", 30),
("Application name", 30)
]
data.create_header(data_header)
data.format_type = format
for engine_tuple in enginelist:
engine_obj = DxMaskingEngine(engine_tuple)
if engine_obj.get_session():
continue
envlist = DxEnvironmentList()
# load all objects
# envlist.LoadEnvironments()
if envname is None:
environments = envlist.get_allref()
else:
environment = envlist.get_environmentId_by_name(envname)
if environment is None:
ret = ret + 1
continue
environments = [environment]
for envref in environments:
envobj = envlist.get_by_ref(envref)
data.data_insert(
engine_tuple[0],
envobj.environment_name,
envobj.application_name
)
print("")
print (data.data_output(False))
print("")
return ret | bf1ca059f0fd919445df4ee931450c7a07619707 | 31,987 |
def loss(logits, labels, weight_decay_factor, class_weights = None):
"""
Total loss:
----------
Args:
logits: Tensor, predicted [batch_size * height * width, num_classes]
labels: Tensor, ground truth [batch_size, height, width, 1]
weight_decay_factor: float, factor with which weights are decayed
class_weights: Tensor, weighting of class for loss [num_classes, 1] or None
Returns:
total_loss: Segmentation + Classification losses + WeightDecayFactor * L2 loss
"""
segment_loss = segmentation_loss(logits, labels,class_weights)
total_loss = segment_loss + weight_decay_factor * l2_loss()
tf.summary.scalar("loss/total", total_loss)
return total_loss | 91fae015aeb5bed4c73cf2aa4d6e62a5c1d4580c | 31,988 |
def negative(num):
"""assumes num is a numeric
returns a boolean, True if num is negative, else False"""
return num < 0 | dc8b789b6dbd4d158482de6d4af26f48f9e8cc5b | 31,989 |
def readSudokus(filename):
"""
Returns the n first sudokus of the file with given name
"""
f = open(filename)
res = None
txt = f.readline().strip()
if txt != "":
res = [[int(txt[i + j * 9]) for i in range(9)] for j in range(9)]
f.close()
return np.array(res) | 564da1dd1fc035ec692986eaa8955985acd5b1ce | 31,990 |
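readSudokus expects each puzzle as a single 81-character line of digits (0 for blanks). The reshaping logic can be checked on an in-memory string (an example puzzle, row-major):
import numpy as np
txt = ("003020600900305001001806400"
       "008102900700000008006708200"
       "002609500800203009005010300")
grid = np.array([[int(txt[i + j * 9]) for i in range(9)] for j in range(9)])
print(grid.shape)   # (9, 9)
print(grid[0])      # [0 0 3 0 2 0 6 0 0]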
from flask.cli import ScriptInfo
def script_info(base_app):
"""Get ScriptInfo object for testing a CLI command.
Scope: module
.. code-block:: python
def test_cmd(script_info):
runner = CliRunner()
result = runner.invoke(mycmd, obj=script_info)
assert result.exit_code == 0
"""
return ScriptInfo(create_app=lambda info: base_app) | 5c5298fcd816538e4890a1d46d51aea6d73702e6 | 31,991 |
def measured_points(idf, return_periods, interim_results=None, max_duration=None):
"""
get the calculation results of the rainfall with u and w without the estimation of the formulation
Args:
idf (IntensityDurationFrequencyAnalyse): idf class
return_periods (float | np.array | list | pd.Series): return period in [a]
interim_results (pd.DataFrame): data with duration as index and u & w as data
max_duration (float): max duration in [min]
Returns:
pd.Series: series with duration as index and the height of the rainfall as data
"""
if interim_results is None:
interim_results = get_interim_results_from_parameters(idf.parameters)
if max_duration is not None:
interim_results = interim_results.loc[:max_duration].copy()
return pd.Series(index=interim_results.index,
data=interim_results['u'] + interim_results['w'] * np.log(return_periods)) | ba0e1cbddf6fd7e23abbfba4954683ada69cea2f | 31,992 |
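The rainfall depth follows h(D, T) = u(D) + w(D) * ln(T). With a hand-made interim_results table the call reduces to a single vectorised expression (the u/w values below are illustrative only):
import numpy as np
import pandas as pd
interim_results = pd.DataFrame(
    {"u": [5.0, 8.0, 12.0], "w": [2.0, 3.0, 4.0]},
    index=pd.Index([5, 10, 20], name="duration"),  # durations in minutes
)
return_period = 10  # years
heights = interim_results["u"] + interim_results["w"] * np.log(return_period)
print(heights.round(2))   # approximately 9.61, 14.91, 21.21 for durations 5, 10, 20 min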
def get_charpixel():
""" Render a single charpixel """
if options.table == 'input':
c = getch()
if c in ['\n','\t']:
print(c)
else:
c = choice( CHARTABLES[ options.table ] )
return c.encode('utf-8') | deb0475dac66c10c1d9edea45cce1cedffbafd1d | 31,993 |
def test_custom_fixer():
""" Test custom ParseFixer
Verify that read_csv uses custom ParseFixer
"""
class fix_pi(ParseFixer):
def __init__(self):
super().__init__()
# augment existing method, simple fix float
def fix_illegal_cell_value(self, vtype, value):
if vtype == "float":
return 22.0 / 7.0
else:
fix_value = ParseFixer.fix_illegal_cell_value(self, vtype, value)
return fix_value
fix = fix_pi()
fix.stop_on_errors = False
fix._called_from_test = True
with open(input_dir() / "types3.csv", "r") as fh:
g = read_csv(fh, to="jsondata", fixer=fix)
for tp, tt in g:
if tp == BlockType.TABLE:
assert tt["columns"]["num"]["values"][2] == 22.0 / 7.0
assert tt["columns"]["flt"]["values"][0] == 22.0 / 7.0
assert tt["columns"]["flt"]["values"][0] == 22.0 / 7.0
assert tt["columns"]["flt2"]["values"][2] == 22.0 / 7.0
with pytest.raises(InputError):
# test read_csv w. class (not instance) of fixer
# class has default stop_on_errors = True
with open(input_dir() / "types3.csv", "r") as fh:
g = read_csv(fh, to="jsondata", fixer=fix_pi)
for tp, tt in g:
pass | c137e190dffef686b04b2f515c6c65ecd2d50879 | 31,994 |
def context_factory(policy, name):
"""Factory function for creating context objects."""
if not isinstance(name, qpol.qpol_context_t):
raise TypeError("Contexts cannot be looked-up.")
return Context(policy, name) | a528978876bdbaa0d2e249f70a056d49fc894349 | 31,995 |
import email
from email.header import decode_header
def decode_mail_header(value, default_charset='us-ascii'):
"""Decode a header value into a unicode string."""
try:
headers = decode_header(value)
except email.errors.HeaderParseError:
return value.encode(default_charset, 'replace').decode(default_charset)
else:
for index, (text, charset) in enumerate(headers):
if isinstance(text, bytes):
try:
headers[index] = text.decode(
charset or default_charset, 'replace')
except LookupError:
# if the charset is unknown, force default
headers[index] = text.decode(default_charset, 'replace')
else:
headers[index] = text
return "".join(headers) | 657a45da883bd35d99642af7cfa1ff5ed9200fbe | 31,996 |
def xyz2xyzr(xyz: np.ndarray, *,
axis: int=None,
illuminant: Illuminant=get_default_illuminant(),
observer: Observer=get_default_observer()) -> np.ndarray:
"""
Convert XYZ to normalized XYZ reflectance
:param xyz: the raw xyz values
:param axis: the axis that the XYZ values lie along
:param illuminant: the illuminant
:param observer: the observer
:return: the xyz normalized Reflectance
"""
if axis is None:
axis = get_matching_axis(xyz.shape, 3)
new_shape = [1] * len(xyz.shape)
new_shape[axis] = -1
white_point = illuminant.get_white_point(observer).reshape(new_shape)
return xyz / white_point | 76e0b3c095ac48a44b31349efe259b70a427cfc0 | 31,997 |
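A minimal numpy sketch of the normalization step, assuming the D65 white point for the CIE 1931 2-degree observer (approximately XYZ = [95.047, 100.0, 108.883]); in the function above the white point comes from the Illuminant/Observer objects instead:
import numpy as np
white_point = np.array([95.047, 100.0, 108.883])
xyz = np.array([[41.24, 21.26, 1.93],        # roughly the sRGB red primary
                [95.047, 100.0, 108.883]])   # the white point itself
xyz_r = xyz / white_point.reshape(1, 3)      # broadcast along the colour axis
print(xyz_r.round(3))
# [[0.434 0.213 0.018]
#  [1.    1.    1.   ]]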
import time
import os.path as op
def last_updated(a):
"""
Check the time since file was last updated.
"""
return time.time() - op.getmtime(a) | 653eb5b68e00c57165b413d1de1ed0d8afee41f0 | 31,998 |
import functools
def lstm_acd_decomposition(inp, model):
"""
inp: tf.Tensor(dtype=np.int32, shape=(1, -1)) tokenized input
model: tf.keras.Model or equivalent
"""
l = inp.numpy().size
e, k, rk, b, dw, db = model.weights
embed_inp = tf.nn.embedding_lookup(params=e, ids=inp)
return acd_1d_decomposition(
functools.partial(lstm_score,
embed_inp=embed_inp,
k=k,
rk=rk,
b=b,
dw=dw,
db=db), l) | 51d55fe155290b3ec1bc7f3b67b562e4b6ae4c23 | 31,999 |