def get_supported():
"""
Returns a list of hints supported by the window manager.
:return: A list of atoms in the _NET_SUPPORTED property.
:rtype: util.PropertyCookie (ATOM[]/32)
"""
return util.PropertyCookie(util.get_property(root, '_NET_SUPPORTED'))
def get_exportables():
"""Get all exportables models except snapshot"""
exportables = set(converters.get_exportables().values())
exportables.discard(all_models.Snapshot)
return exportables
def add_control_and_colors(name, cs, userDefined, arrayRanges, disableValues):
"""add parameters that change the settings and color of a filter"""
source = paraview.simple.FindSource(name)
# make up list of color options
fields = {'depth': 'depth'}
if not disableValues:
fields['luminance'] = 'luminance'
defaultName = None
view_proxy = paraview.simple.GetActiveView()
rep = paraview.servermanager.GetRepresentation(source, view_proxy)
# select value arrays
if rep.Representation != 'Outline':
defaultName = add_customized_array_selection(
name, source, fields, userDefined, arrayRanges, disableValues)
if defaultName is None:
fields['white'] = 'rgb'
defaultName = 'white'
cparam = store.make_field(
"color"+name, fields, default=defaultName, valueRanges=arrayRanges)
cs.add_field("color"+name, cparam, 'vis', [name])
def playback(driver, settings, record, output, mode=None): # pylint: disable=W0621,R0912
"""
Playback a given test.
"""
if settings.desc:
output("%s ... " % settings.desc, flush=True)
else:
output("Playing back %s ... " % settings.name, flush=True)
_begin_browsing(driver, settings)
wait_until_loaded(driver)
state = states.OK
err = None
mode = mode or modes.PLAYBACK
try:
for step in record.steps:
step.delayer(driver)
            # Poll up to 40 times (~10s) for the page to stop changing before
            # executing the step; raise only if it never settles.
            for _ in range(40):
                if not driver.execute_script(js.isPageChanging(250)):  # milliseconds
                    step.execute(driver, settings, mode)
                    break
                time.sleep(0.25)
            else:
                raise exc.PlaybackTimeout(
                    '%s timed out while waiting for the page to be static.'
                    % settings.name
                )
except Exception as exception: # pylint: disable=W0703
if isinstance(exception, exc.ScreenshotsDiffer):
state = states.FAIL
err = exception
else:
state = states.ERROR
if hasattr(exception, 'msg') and (exception.msg.startswith('element not visible') or
exception.msg.startswith('Element is not currently visible')):
err = exc.ElementNotVisible(
"Element was not visible when expected during playback. If "
"your playback depended on a significant rerender having been "
"done, then make sure you've waited until nothing is changing "
"before taking a screenshot."
)
else:
err = exception
output('%s' % str(state))
if err:
output(': %s' % str(err))
return (state, err)
def weighted_moments(values, weights):
"""Return weighted mean and weighted standard deviation of a sequence"""
w_mean = np.average(values, weights=weights)
sq_err = (values - w_mean)**2
w_var = np.average(sq_err, weights=weights)
w_std = np.sqrt(w_var)
return w_mean, w_std
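# A minimal usage sketch with made-up numbers (assumes the function above is in
# scope and numpy is imported as np):
vals = np.array([1.0, 2.0, 4.0])
wts = np.array([1.0, 1.0, 2.0])
w_mean, w_std = weighted_moments(vals, wts)  # w_mean == 2.75, w_std == sqrt(1.6875) ~= 1.299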
def tallennus(lista, tiedosto):
"""
    Saves the game's stats to a file.
    `lista` contains the results to be saved.
"""
tuloksia = []
try:
with open(tiedosto, "r+") as lahde:
data = json.load(lahde)
data.append(lista)
lahde.seek(0)
json.dump(data, lahde)
            # Adds the just-played game's stats to the list of earlier games
except IOError:
with open(tiedosto, "w") as lahde:
tuloksia.append(lista)
json.dump(tuloksia, lahde)
            # Creates a new results file and stores the first game's results in it
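# Usage sketch (hypothetical file name and stats; assumes `json` is imported):
# the first call creates the file, later calls append to the stored list.
tallennus({"score": 42, "player": "p1"}, "stats.json")
tallennus({"score": 17, "player": "p2"}, "stats.json")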
def forestplot(data, kind='forestplot', model_names=None, var_names=None, combined=False,
credible_interval=0.95, quartiles=True, r_hat=True, n_eff=True, colors='cycle',
textsize=None, linewidth=None, markersize=None, joyplot_alpha=None,
joyplot_overlap=2, figsize=None):
"""
Forest plot
Generates a forest plot of 100*(credible_interval)% credible intervals from
a trace or list of traces.
Parameters
----------
data : xarray.Dataset or list of compatible
Samples from a model posterior
kind : str
Choose kind of plot for main axis. Supports "forestplot" or "joyplot"
model_names : list[str], optional
List with names for the models in the list of data. Useful when
        plotting more than one dataset
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all
variables plotted)
combined : bool
Flag for combining multiple chains into a single chain. If False (default),
chains will be plotted separately.
credible_interval : float, optional
Credible interval to plot. Defaults to 0.95.
quartiles : bool, optional
Flag for plotting the interquartile range, in addition to the credible_interval intervals.
Defaults to True
r_hat : bool, optional
Flag for plotting Gelman-Rubin statistics. Requires 2 or more chains. Defaults to True
n_eff : bool, optional
Flag for plotting the effective sample size. Requires 2 or more chains. Defaults to True
    colors : list or string, optional
        list with valid matplotlib colors, one color per model. Alternatively, a string can be
        passed. If the string is `cycle`, it will automatically choose a color per model from
        matplotlib's cycle. If a single color is passed, e.g. 'k', 'C2', 'red', this color will
        be used for all models. Defaults to 'cycle'.
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
linewidth : int
Line width throughout. If None it will be autoscaled based on figsize.
markersize : int
Markersize throughout. If None it will be autoscaled based on figsize.
joyplot_alpha : float
Transparency for joyplot fill. If 0, border is colored by model, otherwise
a black outline is used.
joyplot_overlap : float
Overlap height for joyplots.
figsize : tuple, optional
Figure size. Defaults to None
    Returns
    -------
    fig : matplotlib Figure
    axes : ndarray of matplotlib Axes
    """
ncols, width_ratios = 1, [3]
if n_eff:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
plot_handler = PlotHandler(data, var_names=var_names, model_names=model_names,
combined=combined, colors=colors)
if figsize is None:
figsize = (min(12, sum(width_ratios) * 2), plot_handler.fig_height())
textsize, auto_linewidth, auto_markersize = _scale_text(figsize, textsize=textsize)
if linewidth is None:
linewidth = auto_linewidth
if markersize is None:
markersize = auto_markersize
fig, axes = plt.subplots(nrows=1,
ncols=ncols,
figsize=figsize,
gridspec_kw={'width_ratios': width_ratios},
sharey=True
)
axes = np.atleast_1d(axes)
if kind == 'forestplot':
plot_handler.forestplot(credible_interval, quartiles, textsize,
linewidth, markersize, axes[0])
elif kind == 'joyplot':
plot_handler.joyplot(joyplot_overlap, textsize, linewidth, joyplot_alpha, axes[0])
else:
raise TypeError(f"Argument 'kind' must be one of 'forestplot' or "
f"'joyplot' (you provided {kind})")
idx = 1
if r_hat:
plot_handler.plot_rhat(axes[idx], textsize, markersize)
idx += 1
if n_eff:
plot_handler.plot_neff(axes[idx], textsize, markersize)
idx += 1
for ax in axes:
ax.grid(False)
# Remove ticklines on y-axes
for ticks in ax.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in ax.spines.items():
if loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
if len(plot_handler.data) > 1:
plot_handler.make_bands(ax)
labels, ticks = plot_handler.labels_and_ticks()
axes[0].set_yticks(ticks)
axes[0].set_yticklabels(labels)
all_plotters = list(plot_handler.plotters.values())
y_max = plot_handler.y_max() - all_plotters[-1].group_offset
if kind == 'joyplot': # space at the top
y_max += joyplot_overlap
axes[0].set_ylim(-all_plotters[0].group_offset, y_max)
return fig, axes
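# A minimal usage sketch (hypothetical dataset and variable names): `posterior`
# is assumed to be an xarray.Dataset of posterior samples.
fig, axes = forestplot(posterior, kind='forestplot', var_names=['mu', 'tau'],
                       credible_interval=0.9, combined=True)
fig.savefig('forest.png')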
def write_file(filename, data, plain=False): # pylint: disable=too-many-branches
"""
    Write a file, using the suffix to determine the type and compression.
    - types: '.json', '.yaml'
    - compression: None, '.gz'
    write_file('variable.json.gz', data)
"""
if '.json' in filename:
content = ujson.dumps(data, indent=1, escape_forward_slashes=False)
elif '.yaml' in filename:
content = yaml.dump(data, indent=1)
elif filename == 'STDOUT':
sys.stdout.write(ujson.dumps(data, indent=1, escape_forward_slashes=False)+'\n')
return True
    elif filename == 'STDERR':
sys.stderr.write(ujson.dumps(data, indent=1, escape_forward_slashes=False)+'\n')
return True
elif plain:
content = '\n'.join(data)
elif '.csv' in filename or '.tsv' in filename:
output = io.StringIO()
if '.csv' in filename:
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
else:
writer = csv.writer(output, delimiter='\t')
for row in data:
writer.writerow(row)
content = output.getvalue()
else:
content = data
if '.gz' in filename:
try:
with gzip.open(filename, 'wt') as fh:
fh.write(content)
except OSError:
return False
else:
try:
with open(filename, 'wt') as fh:
fh.write(content)
except IOError:
return False
return True
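# Usage sketch (hypothetical file names and payloads; assumes the imports used
# above): the suffix drives both serialization format and gzip compression.
write_file('results.json.gz', {'a': 1})                        # gzipped JSON
write_file('rows.csv', [['x', 'y'], [1, 2]])                   # quoted CSV
write_file('notes.txt', ['line one', 'line two'], plain=True)  # newline-joined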
def by_major_predictor(profiles, subject_ids, subject_id_dict, course_data=None):
"""Generates recommendations for people with the same major."""
# Build a list of majors/minors
all_courses_of_study = set()
for profile in profiles:
all_courses_of_study |= profile.courses_of_study
# Find common courses
for course in all_courses_of_study:
if course in excluded_courses: continue
applicable_users = [p for p in profiles if course in p.courses_of_study]
if len(applicable_users) < BY_MAJOR_USER_CUTOFF: continue
print("Generating recommendations for {}...".format(course))
course_distributions = {} # Keys are subject IDs, values are dictionaries {semester: count}
for prof in applicable_users:
for subj, semesters in prof.roads[0].items():
if subj not in course_distributions:
course_distributions[subj] = {}
for sem in semesters:
if sem not in course_distributions[subj]:
course_distributions[subj][sem] = 0
course_distributions[subj][sem] += 1
        avg_ratings = {
            subj: float(sum(prof.regression_predictions[subject_id_dict[subj]]
                            for prof in applicable_users)) / float(len(applicable_users))
            for subj in course_distributions if subj in subject_id_dict}
# For each applicable user, generate a rank list by degree of
# commonness and proximity with the user's current semester
for prof in applicable_users:
recs = RankList(BY_MAJOR_REC_COUNT)
for subj in course_distributions:
if sum(course_distributions[subj].values()) < BY_MAJOR_FREQ_CUTOFF:
continue
if (subj in prof.ratings and prof.ratings[subj] < 1.0) or subject_in_list(subj, prof.subjects_taken(), course_data):
continue
if update_by_equivalent_subjects(subj, recs, prof, course_data):
continue
                relevance = sum(
                    (1.0 - abs(sem - prof.semester) * SEMESTER_DISTANCE_COEFFICIENT) * freq
                    for sem, freq in course_distributions[subj].items()
                ) * avg_ratings.get(subj, -99999)
recs.add(subj, relevance)
subject_items = {subj: float("{:.2f}".format(rating)) for subj, rating in recs.items()}
if len(subject_items) < REC_MIN_COUNT:
continue
yield Recommendation(user=User.objects.get(username=prof.username), rec_type="course:" + course, subjects=json.dumps(subject_items))
def SetupPythonPackages(system, wheel, base_dir):
"""Installs python package(s) from CIPD and sets up the build environment.
Args:
system (System): A System object.
wheel (Wheel): The Wheel object to install a build environment for.
base_dir (str): The top-level build directory for the wheel.
Returns: A tuple (path to the python interpreter to run,
dict of environment variables to be set).
"""
host_platform = HostCipdPlatform()
# Building Windows x86 on Windows x64 is a special case. In this situation,
# we want to directly install and run the windows-x86 python package. This
# is because some wheels use properties of the Python interpreter (e.g.
# sys.maxsize) to detect whether to build for 32-bit or 64-bit CPUs.
if (host_platform == 'windows-amd64' and
wheel.plat.cipd_platform == 'windows-386'):
host_platform = 'windows-386'
_, interpreter = _InstallCipdPythonPackage(system, host_platform, wheel,
base_dir)
env = wheel.plat.env.copy()
# If we are cross-compiling, also install the target-platform python and set
# PYTHONHOME to point to it. This will ensure that we use the correct
# compiler and linker command lines which are generated at build time in the
# sysconfigdata module.
if not wheel.spec.universal and host_platform != wheel.plat.cipd_platform:
pkg_dir, _ = _InstallCipdPythonPackage(system, wheel.plat.cipd_platform,
wheel, base_dir)
env['PYTHONHOME'] = pkg_dir
# For python 3, we need to also set _PYTHON_SYSCONFIGDATA_NAME to point to
# the target-architecture sysconfig module.
if wheel.pyversion[0] == '3':
sysconfigdata_modules = glob.glob('%s/lib/python%s/_sysconfigdata_*.py' %
(pkg_dir, '.'.join(wheel.pyversion)))
if len(sysconfigdata_modules) != 1:
        raise Exception(
            'Expected 1 sysconfigdata module in python package '
            'for %s, got: [%s]' %
            (wheel.plat.cipd_platform, ','.join(sysconfigdata_modules)))
env['_PYTHON_SYSCONFIGDATA_NAME'] = (os.path.basename(
sysconfigdata_modules[0])[:-3]) # remove .py
# Make sure not to pick up any extra host python modules.
env['PYTHONPATH'] = ''
return interpreter, env
def tf_dtype(dtype):
"""Translates dtype specifications in configurations to tensorflow data types.
Args:
dtype: String describing a numerical type (e.g. 'float'), numpy data type,
or numerical type primitive.
Returns: TensorFlow data type
"""
if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:
return tf.float32
elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:
return tf.int32
elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:
return tf.bool
else:
raise TensorforceError("Error: Type conversion from type {} not supported.".format(str(dtype)))
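# Usage sketch: string names, Python primitives, and numpy dtypes all map to
# the same TensorFlow types (assumes tf and np are imported as above).
assert tf_dtype('float') is tf.float32
assert tf_dtype(int) is tf.int32
assert tf_dtype(np.bool_) is tf.bool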
def test_getitem(tree):
"""Nodes can be accessed via getitem."""
for node_id in tree:
assert tree[node_id]
# assert 'Node access should be possible via getitem.' in str(exc)
with pytest.raises(NodeNotFound) as exc:
assert tree['root']
assert "Node 'root' is not in the tree" in str(exc)
def winedll_override(dll, dtype):
""" Add WINE dll override
"""
log.info('Overriding ' + dll + '.dll = ' + dtype)
protonmain.dlloverrides[dll] = dtype
def decode_frame(raw_frame: bytes, frame_width: int, frame_height: int) -> Tuple[str, np.ndarray]:
"""
    Decode the image bytes into a string compatible with OpenCV.
    :param raw_frame: frame data in bytes
    :param frame_width: width of the frame, obtained from the Kinesis payload
    :param frame_height: height of the frame, obtained from the Kinesis payload
"""
start_time = timeit.default_timer()
# frameBuffer = Image.frombytes('RGB', (frame_width, frame_height), raw_frame)
# frameBuffer.save("./h264decoded.png", "png")
# frame = np.array(frameBuffer)
# img_str = cv2.imencode('.jpg', frame)[1].tostring()
img = imageio.get_reader(raw_frame, ".png")
frame: np.ndarray = img.get_data(0)
    img_str = cv2.imencode('.png', frame)[1].tobytes()
logger.info(f'Decoded frame after: {timeit.default_timer() - start_time}')
return img_str, frame
def preprocess(picPath):
"""preprocess"""
    # read image
    bgr_img = cv.imread(picPath)
    # get image shape
    orig_shape = bgr_img.shape[:2]
    # resize image
    img = cv.resize(bgr_img, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.int8)
    # ensure the array is laid out contiguously in memory (C_CONTIGUOUS)
if not img.flags['C_CONTIGUOUS']:
img = np.ascontiguousarray(img)
return orig_shape, img
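# Usage sketch (hypothetical image path; assumes MODEL_WIDTH and MODEL_HEIGHT
# are defined in the surrounding module):
orig_shape, img = preprocess('./data/test.jpg')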
def live_fractal_or_skip():
"""
Ensure Fractal live connection can be made
First looks for a local staging server, then tries QCArchive.
"""
try:
return FractalClient("localhost:7777", verify=False)
except (requests.exceptions.ConnectionError, ConnectionRefusedError):
print("Failed to connect to localhost, trying MolSSI QCArchive.")
try:
requests.get("https://api.qcarchive.molssi.org:443", json={}, timeout=5)
return FractalClient()
except (requests.exceptions.ConnectionError, ConnectionRefusedError):
return pytest.skip("Could not make a connection to central Fractal server")
def get_legendre(theta, keys):
"""
Calculate Schmidt semi-normalized associated Legendre functions
Calculations based on recursive algorithm found in "Spacecraft Attitude Determination and Control" by James Richard Wertz
Parameters
----------
theta : array
Array of colatitudes in degrees
keys: iterable
        list of spherical harmonic degree and order, tuple (n, m) for each
term in the expansion
Returns
-------
P : array
Array of Legendre functions, with shape (theta.size, len(keys)).
dP : array
Array of dP/dtheta, with shape (theta.size, len(keys))
"""
# get maximum N and maximum M:
n, m = np.array([k for k in keys]).T
nmax, mmax = np.max(n), np.max(m)
theta = theta.flatten()[:, np.newaxis]
P = {}
dP = {}
sinth = np.sin(d2r*theta)
costh = np.cos(d2r*theta)
# Initialize Schmidt normalization
S = {}
S[0, 0] = 1.
# initialize the functions:
for n in range(nmax +1):
for m in range(nmax + 1):
P[n, m] = np.zeros_like(theta, dtype = np.float64)
dP[n, m] = np.zeros_like(theta, dtype = np.float64)
P[0, 0] = np.ones_like(theta, dtype = np.float64)
for n in range(1, nmax +1):
for m in range(0, min([n + 1, mmax + 1])):
# do the legendre polynomials and derivatives
if n == m:
P[n, n] = sinth * P[n - 1, m - 1]
dP[n, n] = sinth * dP[n - 1, m - 1] + costh * P[n - 1, n - 1]
else:
if n == 1:
Knm = 0.
P[n, m] = costh * P[n -1, m]
dP[n, m] = costh * dP[n - 1, m] - sinth * P[n - 1, m]
elif n > 1:
Knm = ((n - 1)**2 - m**2) / ((2*n - 1)*(2*n - 3))
P[n, m] = costh * P[n -1, m] - Knm*P[n - 2, m]
dP[n, m] = costh * dP[n - 1, m] - sinth * P[n - 1, m] - Knm * dP[n - 2, m]
# compute Schmidt normalization
if m == 0:
S[n, 0] = S[n - 1, 0] * (2.*n - 1)/n
else:
S[n, m] = S[n, m - 1] * np.sqrt((n - m + 1)*(int(m == 1) + 1.)/(n + m))
# now apply Schmidt normalization
for n in range(1, nmax + 1):
for m in range(0, min([n + 1, mmax + 1])):
P[n, m] *= S[n, m]
dP[n, m] *= S[n, m]
Pmat = np.hstack(tuple(P[key] for key in keys))
dPmat = np.hstack(tuple(dP[key] for key in keys))
return Pmat, dPmat
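# Usage sketch (hypothetical expansion terms; assumes d2r = np.pi / 180 is
# defined in the surrounding module):
keys = [(1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]
theta = np.array([10., 45., 90.])
Pmat, dPmat = get_legendre(theta, keys)  # both have shape (3, 5)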
def train_and_evaluate(
model,
num_epochs,
steps_per_epoch,
train_data,
validation_steps,
eval_data,
output_dir,
n_steps_history,
FLAGS,
decay_type,
learning_rate=3e-5,
s=1,
n_batch_decay=1,
metric_accuracy='metric',
):
"""
Compiles keras model and loads data into it for training.
"""
logging.info('training the model ...')
model_callbacks = []
activate_tf_summary_hp = True # False
if FLAGS.is_hyperparameter_tuning:
# get trial ID
suffix = mu.get_trial_id()
if suffix == '':
logging.error('No trial ID for hyper parameter job!')
FLAGS.is_hyperparameter_tuning = False
else:
# callback for hp
logging.info('Creating a callback to store the metric!')
if activate_tf_summary_hp:
                logging.info('Hp parameter\'s name {}'.format(metric_accuracy))
hp_metric = mu.HP_metric(metric_accuracy)
model_callbacks.append(hp_metric)
    # check the model's callbacks
logging.info('model\'s callback:\n {}'.format(str(model_callbacks)))
# train the model
# time the function
start_time = time.time()
logging.info('starting model.fit')
# verbose = 0 (silent)
# verbose = 1 (progress bar)
# verbose = 2 (one line per epoch)
verbose = 1
model.fit(train_data,
epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
validation_data=eval_data,
validation_steps=validation_steps,
verbose=verbose,
callbacks=model_callbacks)
# print execution time
elapsed_time_secs = time.time() - start_time
logging.info('\nexecution time: {}'.format(timedelta(seconds=round(elapsed_time_secs))))
# for hp parameter tuning in TensorBoard
if FLAGS.is_hyperparameter_tuning:
logging.info('setup hyperparameter tuning!')
logging.info('standard hyperparameter tuning!')
def table_4_28(x_t, c_):
"""
    Returns the correction factor accounting for the influence of airfoil thickness.
    arguments: relative position of the laminar-to-turbulent boundary-layer transition point (x_t),
               relative airfoil thickness
    return: value of the correction factor"""
nu_t_00 = [1.00, 1.03, 1.05, 1.08, 1.11, 1.13, 1.16, 1.19, 1.22, 1.25, 1.29, 1.33, 1.37]
nu_t_02 = [1.000, 1.020, 1.040, 1.060, 1.080, 1.104, 1.127, 1.155, 1.180, 1.205, 1.235, 1.260, 1.295]
nu_t_04 = [1.00, 1.01, 1.03, 1.04, 1.05, 1.07, 1.09, 1.10, 1.12, 1.14, 1.16, 1.17, 1.20]
c_mas = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12]
k = int(c_ // 0.01 + 1)
if x_t == 0:
nu_t = interpol(nu_t_00[k], nu_t_00[k - 1], procent(c_, c_mas[k - 1], c_mas[k]))
elif (x_t >= 0) and (x_t <= 0.2):
nu_t = interpol(interpol(nu_t_02[k], nu_t_02[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
interpol(nu_t_00[k], nu_t_00[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
procent(x_t, 0, 0.2))
elif (x_t >= 0.2) and (x_t <= 0.4):
nu_t = interpol(interpol(nu_t_04[k], nu_t_04[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
interpol(nu_t_02[k], nu_t_02[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
procent(x_t, 0.2, 0.4))
else:
nu_t = interpol(nu_t_04[k], nu_t_04[k - 1], procent(c_, c_mas[k - 1], c_mas[k]))
return nu_t
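# Usage sketch (hypothetical inputs; assumes the interpol/procent helpers used
# above): transition point at 10% of the chord, relative thickness 5%.
nu_t = table_4_28(0.1, 0.05)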
def random_spectra(path_length, coeffs, min_wavelength, max_wavelength, complexity):
"""
"""
solution = random_solution(coeffs, complexity)
return beers_law(solution, path_length, coeffs, min_wavelength, max_wavelength)
async def channel_audition_info(channel, type_of_audition_color, type_of_audition_line) -> None:
"""
    Function for announcing the deletion/creation of a channel on the server.
    :param channel: the channel being reported
    :param type_of_audition_color: color for the embed
    :param type_of_audition_line: separator lines for the embed
    :return: None (sends the channel information to the logs channel)
"""
emb = discord.Embed(color = type_of_audition_color, timestamp = datetime.now())
    if isinstance(channel, discord.TextChannel):
        emb.add_field(name = f"{channels_type_dict[f'{channel.type}']} Channel:", value = f"{channel.name}")
        emb.add_field(name = "Channel ID:", value = f"{channel.id}", inline = False)
        emb.add_field(name = f"{INVISIBLE_SYMBOL}", value = f"{type_of_audition_line}", inline = False)
        if channel.category:
            emb.add_field(name = "Category:", value = f"{channel.category}")
        emb.add_field(name = "Details:", value = f"Position: {channel.position}", inline = False)
        logs_channel = channel.guild.get_channel(LOGS_CHANNEL_ID)
        await logs_channel.send(embed = emb)
    elif isinstance(channel, discord.VoiceChannel):
        emb.add_field(name = f"{channels_type_dict[f'{channel.type}']} Channel:", value = f"{channel.name}")
        emb.add_field(name = "Channel ID:", value = f"{channel.id}", inline = False)
        emb.add_field(name = f"{INVISIBLE_SYMBOL}", value = f"{type_of_audition_line}", inline = False)
        if channel.category:
            emb.add_field(name = "Category:", value = f"{channel.category}")
        emb.add_field(name = "Details:", value = f"""Position: {channel.position}
                                                     Bitrate: {channel.bitrate}
                                                     User limit: {channel.user_limit}""", inline = False)
        logs_channel = channel.guild.get_channel(LOGS_CHANNEL_ID)
        await logs_channel.send(embed = emb)
def data_prep(data,unit_identifier,time_identifier,matching_period,treat_unit,control_units,outcome_variable,
predictor_variables, normalize=False):
"""
Prepares the data by normalizing X for section 3.3. in order to replicate Becker and Klößner (2017)
"""
X = data.loc[data[time_identifier].isin(matching_period)]
X.index = X.loc[:,unit_identifier]
X0 = X.loc[(X.index.isin(control_units)),(predictor_variables)]
X0 = X0.groupby(X0.index).mean().values.T #control predictors
X1 = X.loc[(X.index == treat_unit),(predictor_variables)]
X1 = X1.groupby(X1.index).mean().values.T #treated predictors
# outcome variable realizations in matching period - Z0: control, Z1: treated
Z0 = np.array(X.loc[(X.index.isin(control_units)),(outcome_variable)]).reshape(len(control_units),len(matching_period)).T #control outcome
Z1 = np.array(X.loc[(X.index == treat_unit),(outcome_variable)]).reshape(len(matching_period),1) #treated outcome
    if normalize:
# Scaling
nvarsV = X0.shape[0]
big_dataframe = pd.concat([pd.DataFrame(X0), pd.DataFrame(X1)], axis=1)
divisor = np.sqrt(big_dataframe.apply(np.var, axis=1))
V = np.zeros(shape=(len(predictor_variables), len(predictor_variables)))
np.fill_diagonal(V, np.diag(np.repeat(big_dataframe.shape[0],1)))
scaled_matrix = ((big_dataframe.T) @ (np.array(1/(divisor)).reshape(len(predictor_variables),1) * V)).T
X0 = np.array(scaled_matrix.iloc[:,0:len(control_units)])
X1 = np.array(scaled_matrix.iloc[:,len(control_units):(len(control_units)+1)])
Z0 = Z0.astype('float64')
Z1 = Z1.astype('float64')
return X0, X1, Z0, Z1
async def ping(ctx):
"""pongs a ping"""
await ctx.send("pong")
def calculate_auroc_statistics(y_true, y_pred, confint_alpha=0.05):
"""
    calculate AUROC, its p-value, and its confidence interval
"""
#TODO: small sample test
#TODO: check when it crashes
#TODO: confidence intervals
predictions_group0 = y_pred[y_true==0, 1]
predictions_group1 = y_pred[y_true==1, 1]
try:
pval_auc = mannwhitneyu(predictions_group0,
predictions_group1,
alternative='less')[1]
    except ValueError:  # raised by mannwhitneyu when e.g. all values are identical
pval_auc = 1
auroc = roc_auc_score(y_true, y_pred[:,1])
auroc_ci = calculate_auroc_confint(auroc, len(predictions_group0),
len(predictions_group1), confint_alpha)
    return [auroc, pval_auc, auroc_ci[0], auroc_ci[1]]
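# Usage sketch with toy data (assumes numpy as np and the helpers imported
# above; y_pred holds per-class probabilities, positive class in column 1):
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([[0.9, 0.1], [0.6, 0.4], [0.35, 0.65], [0.2, 0.8]])
auroc, pval, ci_low, ci_high = calculate_auroc_statistics(y_true, y_pred)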
def get_list_of_users(total_users):
    """Get a list of GitHub users."""
    users = []
    try:
        r = requests.get('https://api.github.com/users', headers={'Accept': 'application/vnd.github.v3+json'})
        users = r.json()
        # login, id, avatar_url, type, html_url
        while 'next' in r.links.keys() and len(users) < total_users:
            # follow pagination until we have enough users
            r = requests.get(r.links['next']['url'], headers={'Accept': 'application/vnd.github.v3+json'})
            users.extend(r.json())
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)
    return users[:total_users]
def set_base_initial_condition(model, monomer, value):
"""Set an initial condition for a monomer in its 'default' state."""
# Build up monomer pattern dict
sites_dict = {}
for site in monomer.sites:
if site in monomer.site_states:
if site == 'loc' and 'cytoplasm' in monomer.site_states['loc']:
sites_dict['loc'] = 'cytoplasm'
else:
sites_dict[site] = monomer.site_states[site][0]
else:
sites_dict[site] = None
mp = monomer(**sites_dict)
pname = monomer.name + '_0'
try:
p = model.parameters[pname]
p.value = value
except KeyError:
p = Parameter(pname, value)
model.add_component(p)
model.initial(mp, p)
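# Usage sketch (assumes a PySB model with a monomer named 'EGFR' defined
# elsewhere; the copy number below is hypothetical):
set_base_initial_condition(model, model.monomers['EGFR'], 1.0e5)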
def kabsch_superpose(P, Q): # P,Q: vstack'ed matrix
"""
Usage:
P = numpy.vstack([a2, b2, c2])
Q = numpy.vstack([a1, b1, c1])
m = kabsch_superpose(P, Q)
newP = numpy.dot(m, P)
"""
A = numpy.dot(numpy.transpose(P), Q)
U, s, V = numpy.linalg.svd(A)
tmp = numpy.identity(3)
tmp[2,2] = numpy.sign(numpy.linalg.det(A))
R = numpy.dot(numpy.dot(numpy.transpose(V), tmp), numpy.transpose(U))
return R
def test_list_unsigned_short_max_length_3_nistxml_sv_iv_list_unsigned_short_max_length_4_3(mode, save_output, output_format):
"""
Type list/unsignedShort is restricted by facet maxLength with value 8.
"""
assert_bindings(
schema="nistData/list/unsignedShort/Schema+Instance/NISTSchema-SV-IV-list-unsignedShort-maxLength-4.xsd",
instance="nistData/list/unsignedShort/Schema+Instance/NISTXML-SV-IV-list-unsignedShort-maxLength-4-3.xml",
class_name="NistschemaSvIvListUnsignedShortMaxLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def __cancelButtonEvent(event):
"""Handle pressing Esc by clicking the Cancel button."""
global boxRoot, __widgetTexts, __replyButtonText
__replyButtonText = CANCEL_TEXT
boxRoot.quit()
def add(a_t, b_t):
"""
add operator a+b
"""
return add_op(a_t, b_t)
def test_bad_doc():
"""run the cli_twine function on input file
containing un-compileable Python code.
The bad code block just gets skipped.
"""
mydoc = """\
```python .important foo=bar
import
print
```
bar
```python
print(2)
```
"""
with TemporaryDirectory() as tmpdirname:
infile_path = f"{tmpdirname}/tmp.pmd"
outfile_path = f"{tmpdirname}/tmp.md"
_dump(mydoc, infile_path)
res = _run_twine(infile_path, outfile_path)
actual_output = _slurp(outfile_path)
    expected_output = 'bar\n2\n'
assert res == TwineExitStatus.BLOCK_COMPILATION_ERROR
assert actual_output == expected_output
def download_pkg():
"""第二步下载相关环境需要的第三方库
:return: bool
"""
print("正在下载安装必要的第三方库文件...")
try:
# 如果需要使用IT之家爬虫还需要下载selenium、BeautifulSoup4、requests。可添加到后面
os.system('pip install flask flask_cors flask_wtf flask_mail pymysql redis apscheduler xlwt psutil ')
print("安装成功...")
flag = True
except Exception as e:
print("下载安装失败...原因是:%s" % e)
flag = False
return flag
def test_get_jobs(limit, expected_number, auth_client):
"""Tests get method for api/jobs with various query strings"""
auth_client = auth_client[0]
query_string = "api/jobs"
if limit is not None:
query_string += f"?limit={limit}"
response = auth_client.get(query_string)
jobs_received = response.json
assert len(jobs_received) == expected_number, jobs_received
def construct_annotated_corpora(extraction_path, id_variant_path, corpus_name, target_dir):
""" Compiles ID-variant corpora annotated with evaluation-relevant information, i.e. normalized surprisal,
normalized UID, and sentence length, by extracting low-ID and high-ID entries from the annotated 90k Europarl
corpus. """
# Read in main ID-annotated file
df_annotated = pd.read_table(extraction_path, header=None,
names=['Sentence', 'Total_surprisal', 'Per_word_surprisal', 'Normalized_surprisal',
'Total_UID_divergence', 'Per_word_UID_divergence', 'Normalized_UID_divergence'],
skip_blank_lines=True)
if id_variant_path is not None:
# Extract ID-specific sentences from the reference corpus
df_variant = pd.read_table(id_variant_path, header=None, names=['Sentence'], skip_blank_lines=True)
target_list = df_variant.iloc[:, 0].tolist()
target_list = [sent.strip() for sent in target_list]
else:
# No extraction, entire reference corpus is considered for further steps
target_list = df_annotated.iloc[:, 0].tolist()
target_list = [sent.strip() for sent in target_list]
# Isolate evaluation-relevant features
df_features = df_annotated.loc[:, ['Sentence', 'Normalized_surprisal', 'Normalized_UID_divergence']]
surprisals = list()
uid_divs = list()
# Write the normalized surprisal and UID divergence distributions to file
features_log_path = os.path.join(target_dir, '{:s}_ID_features.txt'.format(corpus_name))
print('Writing to {:s} ...'.format(features_log_path))
with open(features_log_path, 'w') as id_file:
for line_id in range(len(df_features)):
sent = df_features.iloc[line_id][0]
sent_ns = df_features.iloc[line_id][1]
sent_nud = df_features.iloc[line_id][2]
if sent in target_list:
id_file.write('{:f}\t{:f}\n'.format(sent_ns, sent_nud))
surprisals += [float(sent_ns)]
uid_divs += [float(sent_nud)]
# Calculate corpus statistics
id_file.write('=' * 10 + '\n')
id_file.write('Surprisal max: {:.4f}\n'.format(np.max(surprisals)))
id_file.write('Surprisal min: {:.4f}\n'.format(np.min(surprisals)))
id_file.write('Surprisal mean: {:.4f}\n'.format(np.mean(surprisals)))
id_file.write('Surprisal standard deviation: {:.4f}\n'.format(np.std(surprisals)))
id_file.write('=' * 10 + '\n')
id_file.write('UID divergence max: {:.4f}\n'.format(np.max(uid_divs)))
id_file.write('UID divergence min: {:.4f}\n'.format(np.min(uid_divs)))
id_file.write('UID divergence mean: {:.4f}\n'.format(np.mean(uid_divs)))
id_file.write('UID divergence standard deviation: {:.4f}\n'.format(np.std(uid_divs)))
print('Done.')
def GetExtensionDescriptor(full_extension_name):
"""Searches for extension descriptor given a full field name."""
return _pool.FindExtensionByName(full_extension_name)
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = os.path.basename(file_name)
# Upload the file
try:
        s3.upload_file(file_name, bucket, object_name)  # returns None on success
except ClientError as e:
logging.error(e)
return False
return True
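# Usage sketch (hypothetical bucket and key; assumes `s3 = boto3.client('s3')`
# was created in the surrounding module and `logging` is imported):
if upload_file('report.pdf', 'my-bucket', 'reports/report.pdf'):
    logging.info('upload succeeded')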
def parse_args():
"""
    Parses command-line arguments and returns the username, the title of the
    specified repository, and its branch.
Returns: tuple (username, repo_name, branch).
Used only once in `main` method.
"""
DESC = 'Automatic license detection of a Github repository.'
parser = ArgumentParser(description=DESC)
    # Specify arguments
parser.add_argument('--branch',
default='master',
required=False,
help='A branch of a repository from which license file should be obtained. Default: `master`.')
parser.add_argument('--repository_name',
required=False,
help='A name of a repository, whose license needs to be detected. Required.')
parser.add_argument('--username',
required=False,
help='A name of a user who owns a repository. Required.')
parser.add_argument('--url',
required=False,
help='An URL to Github repository.')
# Start parsing sys.argv
arg_dict = parser.parse_args().__dict__
branch = arg_dict['branch'] # `master` by default
user = arg_dict['username']
repo = arg_dict['repository_name']
url = arg_dict['url']
if (user is None) or (repo is None):
if (url is None):
# No repository information was typed, exiting...
            print('Usage: --username <USERNAME> --repository_name <REPOSITORY '
                  'NAME> --branch <BRANCH NAME> (optional) or')
            print('--url <LINK TO REPOSITORY>')
exit(-1)
        # Cut the scheme (`http://` or `https://`) off the URL
        chopped_url = sub(r'https?://', '', url)
        # Extract user and repository names from URL
        user, repo = findall(r'/([^/]+)', chopped_url)
return user, repo, branch
def get_parameters():
"""
Parse script arguments
"""
parser = argparse.ArgumentParser(prog='compile.py')
# config.h template parameters
parser.add_argument('os', type=str, default="LINUX", choices=available_os)
parser.add_argument('arch', type=str, default="X86", choices=available_archs)
parser.add_argument('--log_lvl', type=str, default="LOG_LVL_INFO", choices=available_log_lvl)
parser.add_argument('--name', type=str, default="SUCHAI-DEV")
parser.add_argument('--id', type=str, default="0")
parser.add_argument('--version', type=str, default=configure.call_git_describe())
parser.add_argument('--con', type=str, default="1")
parser.add_argument('--comm', type=str, default="1")
parser.add_argument('--fp', type=str, default="1")
parser.add_argument('--hk', type=str, default="1")
parser.add_argument('--sen', type=str, default="0")
parser.add_argument('--adcs', type=str, default="0")
parser.add_argument('--test', type=str, default="0")
parser.add_argument('--node', type=str, default="1")
parser.add_argument('--zmq_in', type=str, default="tcp://127.0.0.1:8001")
parser.add_argument('--zmq_out', type=str, default="tcp://127.0.0.1:8002")
parser.add_argument('--st_mode', type=str, default="1")
parser.add_argument('--st_triple_wr', type=str, default="1")
parser.add_argument('--buffers_csp', type=str, default="10")
parser.add_argument('--socket_len', type=str, default="100")
# Build parameters
parser.add_argument('--drivers', action="store_true", help="Install platform drivers")
parser.add_argument('--ssh', action="store_true", help="Use ssh for git clone")
parser.add_argument('--test_type', type=str, default='', choices=available_tests)
# Force clean
parser.add_argument('--clean', action="store_true", help="Clean before build")
# Program
parser.add_argument('--program', action="store_true", help="Compile and program")
parser.add_argument('--console', type=int, default=4, help="Console to use. 2=Nanomind-USB-SERIAL, 4=FFP-USB")
# Skip config
parser.add_argument('--no-config', action="store_true", help="Skip configure, do not generate a new config.h")
return parser.parse_args()
def collect_inventory_values(dataset, inventory_list, parameter_map):
"""
Collect inventories from a dataset.
"""
# Collect raw/unicode/clts for all relevant inventories
to_collect = []
for catalog in inventory_list.keys():
to_collect += list(
itertools.chain.from_iterable(inventory_list[catalog].values())
)
values = defaultdict(list)
for row in dataset["ValueTable"]:
if row["Contribution_ID"] in to_collect:
values[row["Contribution_ID"]].append(
{
"raw": row["Value"],
"unicode": parameter_map[row["Parameter_ID"]]["unicode"],
"bipa": parameter_map[row["Parameter_ID"]]["bipa"],
}
)
return values
def offset_resources(api, res_id, resource_list, position="top_left", align="horizontal", horizontal_offset=300,
vertical_offset=150, x_axis=100, y_axis=50):
"""
    Add resources to the sandbox in different quadrants, with a chosen stacking order.
    :param CloudShellAPISession api:
    :param str res_id: reservation id
    :param list resource_list: full names of the resources to position
    :param align: how to stack, "horizontal" or "vertical"
    :param position: "top_left", "middle_center", "bottom_right", etc.
:return:
"""
# set alternate x
if "center" in position:
x_axis = 700
elif "right" in position:
x_axis = 1300
# set alternate y
if "middle" in position:
y_axis = 200
elif "bottom" in position:
y_axis = 450
for resource in resource_list:
api.SetReservationResourcePosition(reservationId=res_id, resourceFullName=resource, x=x_axis, y=y_axis)
if align == "vertical":
if "bottom" in position:
y_axis -= vertical_offset
else:
y_axis += vertical_offset
else:
if "right" in position:
x_axis -= horizontal_offset
else:
x_axis += horizontal_offset
def attest(att_stmt: AttestationStatement, att_obj: AttestationObject,
auth_data: bytes,
client_data_hash: bytes) -> Tuple[AttestationType, TrustedPath]:
"""Attest an attestation object.
Args:
    att_stmt (AttestationStatement): The attestation statement.
att_obj (AttestationObject): The attestation object.
auth_data (bytes): The raw authenticator data.
client_data_hash (bytes): The client data hash.
Returns:
The attestation type and trusted path.
References:
* https://www.w3.org/TR/webauthn/#defined-attestation-formats
"""
raise UnimplementedError('{} attestation unimplemented'.format(
type(att_stmt)))
def Start(parser=None,
argv=sys.argv,
quiet=False,
add_pipe_options=True,
add_extract_options=False,
add_group_dedup_options=True,
add_sam_options=True,
add_umi_grouping_options=True,
return_parser=False):
"""set up an experiment.
The :py:func:`Start` method will set up a file logger and add some
default and some optional options to the command line parser. It
will then parse the command line and set up input/output
redirection and start a timer for benchmarking purposes.
The default options added by this method are:
``-v/--verbose``
the :term:`loglevel`
``timeit``
turn on benchmarking information and save to file
``timeit-name``
name to use for timing information,
``timeit-header``
output header for timing information.
``seed``
the random seed. If given, the python random
number generator will be initialized with this
seed.
Optional options added are:
Arguments
---------
    parser : :py:class:`U.OptionParser`
       instance with command line options.
argv : list
command line options to parse. Defaults to
:py:data:`sys.argv`
quiet : bool
set :term:`loglevel` to 0 - no logging
return_parser : bool
return the parser object, no parsing. Useful for inspecting
the command line options of a script without running it.
add_pipe_options : bool
add common options for redirecting input/output
add_extract_options : bool
add options for extracting barcodes
add_sam_options : bool
add options for SAM/BAM input
add_umi_grouping_options : bool
add options for barcode grouping
add_group_dedup_options : bool
add options for UMI grouping and deduping
Returns
-------
tuple
(:py:class:`U.OptionParser` object, list of positional
arguments)
"""
if not parser:
parser = OptionParser(
version="%prog version: $Id$")
global global_options, global_args, global_starting_time
# save default values given by user
user_defaults = copy.copy(parser.defaults)
global_starting_time = time.time()
if add_extract_options:
group = OptionGroup(parser, "fastq barcode extraction options")
group.add_option("--extract-method",
dest="extract_method", type="choice",
choices=["string", "regex"],
help=("How to extract the umi +/- cell barcodes, "
"Choose from 'string' or 'regex'"))
group.add_option("-p", "--bc-pattern", dest="pattern", type="string",
help="Barcode pattern")
group.add_option("--bc-pattern2", dest="pattern2", type="string",
help="Barcode pattern for paired reads")
group.add_option("--3prime", dest="prime3", action="store_true",
help="barcode is on 3' end of read.")
group.add_option("--read2-in", dest="read2_in", type="string",
help="file name for read pairs")
parser.add_option_group(group)
if add_sam_options:
group = OptionGroup(parser, "Barcode extraction options")
group.add_option("--extract-umi-method", dest="get_umi_method", type="choice",
choices=("read_id", "tag", "umis"), default="read_id",
help="how is the read UMI +/ cell barcode encoded? "
"[default=%default]")
group.add_option("--umi-separator", dest="umi_sep",
type="string", help="separator between read id and UMI",
default="_")
group.add_option("--umi-tag", dest="umi_tag",
type="string", help="tag containing umi",
default='RX')
group.add_option("--umi-tag-split", dest="umi_tag_split",
type="string",
help="split UMI in tag and take the first element",
default=None)
group.add_option("--umi-tag-delimiter", dest="umi_tag_delim",
type="string",
help="concatenate UMI in tag separated by delimiter",
default=None)
group.add_option("--cell-tag", dest="cell_tag",
type="string", help="tag containing cell barcode",
default=None)
group.add_option("--cell-tag-split", dest="cell_tag_split",
type="string",
help=("split cell barcode in tag and take the first element"
"for e.g 10X GEM tags"),
default='-')
group.add_option("--cell-tag-delimiter", dest="cell_tag_delim",
type="string",
help="concatenate cell barcode in tag separated by delimiter",
default=None)
parser.add_option_group(group)
if add_umi_grouping_options:
group = OptionGroup(parser, "UMI grouping options")
group.add_option("--method", dest="method", type="choice",
choices=("adjacency", "directional",
"percentile", "unique", "cluster"),
default="directional",
help="method to use for umi grouping [default=%default]")
group.add_option("--edit-distance-threshold", dest="threshold",
type="int",
default=1,
help="Edit distance theshold at which to join two UMIs "
"when grouping UMIs. [default=%default]")
group.add_option("--spliced-is-unique", dest="spliced",
action="store_true",
help="Treat a spliced read as different to an unspliced"
" one [default=%default]",
default=False)
group.add_option("--soft-clip-threshold", dest="soft_clip_threshold",
type="float",
help="number of bases clipped from 5' end before "
"read is counted as spliced [default=%default]",
default=4)
group.add_option("--read-length", dest="read_length",
action="store_true", default=False,
help="use read length in addition to position and UMI "
"to identify possible duplicates [default=%default]")
parser.add_option_group(group)
if add_sam_options:
group = OptionGroup(parser, "single-cell RNA-Seq options")
group.add_option("--per-gene", dest="per_gene", action="store_true",
default=False,
help="Group/Dedup/Count per gene. Must combine with "
"either --gene-tag or --per-contig")
group.add_option("--gene-tag", dest="gene_tag",
type="string",
help="Gene is defined by this bam tag [default=%default]",
default=None)
group.add_option("--assigned-status-tag", dest="assigned_tag",
type="string",
help="Bam tag describing whether read is assigned to a gene "
"By defualt, this is set as the same tag as --gene-tag",
default=None)
group.add_option("--skip-tags-regex", dest="skip_regex",
type="string",
help="Used with --gene-tag. "
"Ignore reads where the gene-tag matches this regex",
default="^(__|Unassigned)")
group.add_option("--per-contig", dest="per_contig", action="store_true",
default=False,
help="group/dedup/count UMIs per contig (field 3 in BAM; RNAME),"
" e.g for transcriptome where contig = gene")
group.add_option("--gene-transcript-map", dest="gene_transcript_map",
type="string",
help="File mapping transcripts to genes (tab separated)",
default=None)
group.add_option("--per-cell", dest="per_cell", action="store_true",
default=False,
help="group/dedup/count per cell")
parser.add_option_group(group)
if add_group_dedup_options:
group = OptionGroup(parser, "group/dedup options")
group.add_option("--buffer-whole-contig", dest="whole_contig",
action="store_true", default=False,
help="Read whole contig before outputting bundles: "
"guarantees that no reads are missed, but increases "
"memory usage")
group.add_option("--whole-contig", dest="whole_contig",
action="store_true", default=False,
help=optparse.SUPPRESS_HELP)
group.add_option("--multimapping-detection-method",
dest="detection_method", type="choice",
choices=("NH", "X0", "XT"),
default=None,
help="Some aligners identify multimapping using bam "
"tags. Setting this option to NH, X0 or XT will "
"use these tags when selecting the best read "
"amongst reads with the same position and umi "
"[default=%default]")
parser.add_option_group(group)
# options added separately here to maintain better output order
if add_sam_options:
group = OptionGroup(parser, "SAM/BAM options")
group.add_option("--mapping-quality", dest="mapping_quality",
type="int",
help="Minimum mapping quality for a read to be retained"
" [default=%default]",
default=0)
group.add_option("--output-unmapped", dest="output_unmapped", action="store_true",
default=False, help=optparse.SUPPRESS_HELP)
group.add_option("--unmapped-reads", dest="unmapped_reads",
type="choice",
choices=("discard", "use", "output"),
default="discard",
help=("How to handle unmapped reads. Options are "
"'discard', 'use' or 'correct' [default=%default]"))
group.add_option("--chimeric-pairs", dest="chimeric_pairs",
type="choice",
choices=("discard", "use", "output"),
default="use",
help=("How to handle chimeric read pairs. Options are "
"'discard', 'use' or 'correct' [default=%default]"))
group.add_option("--unpaired-reads", dest="unpaired_reads",
type="choice",
choices=("discard", "use", "output"),
default="use",
help=("How to handle unpaired reads. Options are "
"'discard', 'use' or 'correct' [default=%default]"))
group.add_option("--ignore-umi", dest="ignore_umi",
action="store_true", help="Ignore UMI and dedup"
" only on position", default=False)
group.add_option("--ignore-tlen", dest="ignore_tlen", action="store_true",
default=False,
help="Option to dedup paired end reads based solely on read1, "
"whether or not the template length is the same")
group.add_option("--chrom", dest="chrom", type="string",
help="Restrict to one chromosome",
default=None)
group.add_option("--subset", dest="subset", type="float",
help="Use only a fraction of reads, specified by subset",
default=None)
group.add_option("-i", "--in-sam", dest="in_sam", action="store_true",
help="Input file is in sam format [default=%default]",
default=False)
group.add_option("--paired", dest="paired", action="store_true",
default=False,
help="paired input BAM. [default=%default]")
group.add_option("-o", "--out-sam", dest="out_sam", action="store_true",
help="Output alignments in sam format [default=%default]",
default=False)
group.add_option("--no-sort-output", dest="no_sort_output",
action="store_true", default=False,
help="Don't Sort the output")
parser.add_option_group(group)
if add_pipe_options:
group = OptionGroup(parser, "input/output options")
group.add_option("-I", "--stdin", dest="stdin", type="string",
help="file to read stdin from [default = stdin].",
metavar="FILE")
group.add_option("-L", "--log", dest="stdlog", type="string",
help="file with logging information "
"[default = stdout].",
metavar="FILE")
group.add_option("-E", "--error", dest="stderr", type="string",
help="file with error information "
"[default = stderr].",
metavar="FILE")
group.add_option("-S", "--stdout", dest="stdout", type="string",
help="file where output is to go "
"[default = stdout].",
metavar="FILE")
group.add_option("--temp-dir", dest="tmpdir", type="string",
help="Directory for temporary files. If not set,"
" the bash environmental variable TMPDIR is used"
"[default = None].",
metavar="FILE")
group.add_option("--log2stderr", dest="log2stderr",
action="store_true", help="send logging information"
" to stderr [default = False].")
group.add_option("--compresslevel", dest="compresslevel", type="int",
help="Level of Gzip compression to use. Default (6) matches"
"GNU gzip rather than python gzip default (which is 9)")
parser.set_defaults(stderr=sys.stderr)
parser.set_defaults(stdout=sys.stdout)
parser.set_defaults(stdlog=sys.stdout)
parser.set_defaults(stdin=sys.stdin)
parser.set_defaults(tmpdir=None)
parser.set_defaults(log2stderr=False)
parser.set_defaults(compresslevel=6)
parser.add_option_group(group)
group = OptionGroup(parser, "profiling options")
group.add_option("--timeit", dest='timeit_file', type="string",
help="store timeing information in file [%default].")
group.add_option("--timeit-name", dest='timeit_name', type="string",
help="name in timing file for this class of jobs "
"[%default].")
group.add_option("--timeit-header", dest='timeit_header',
action="store_true",
help="add header for timing information [%default].")
parser.add_option_group(group)
group = OptionGroup(parser, "common options")
group.add_option("-v", "--verbose", dest="loglevel", type="int",
help="loglevel [%default]. The higher, the more output.")
group.add_option("-h", "--help", dest="short_help", action="callback",
callback=callbackShortHelp,
help="output short help (command line options only).")
group.add_option('--help-extended', action='help',
help='Output full documentation')
group.add_option("--random-seed", dest='random_seed', type="int",
help="random seed to initialize number generator "
"with [%default].")
parser.add_option_group(group)
# restore user defaults
parser.defaults.update(user_defaults)
if quiet:
parser.set_defaults(loglevel=0)
else:
parser.set_defaults(loglevel=1)
parser.set_defaults(
timeit_file=None,
timeit_name='all',
timeit_header=None,
random_seed=None,
)
if return_parser:
return parser
global_options, global_args = parser.parse_args(argv[1:])
if global_options.random_seed is not None:
random.seed(global_options.random_seed)
if add_pipe_options:
if global_options.stdout != sys.stdout:
global_options.stdout = openFile(global_options.stdout, "w")
if global_options.stderr != sys.stderr:
if global_options.stderr == "stderr":
                global_options.stderr = sys.stderr
else:
global_options.stderr = openFile(global_options.stderr, "w")
if global_options.stdlog != sys.stdout:
global_options.stdlog = openFile(global_options.stdlog, "a")
elif global_options.log2stderr:
global_options.stdlog = global_options.stderr
if global_options.stdin != sys.stdin:
global_options.stdin = openFile(global_options.stdin, "r")
else:
global_options.stderr = sys.stderr
global_options.stdout = sys.stdout
global_options.stdin = sys.stdin
if global_options.log2stderr:
global_options.stdlog = sys.stderr
else:
global_options.stdlog = sys.stdout
if global_options.loglevel >= 1:
global_options.stdlog.write(getHeader() + "\n")
global_options.stdlog.write(getParams(global_options) + "\n")
global_options.stdlog.flush()
# configure logging
# map from 0-10 to logging scale
# 0: quiet
    # 1: little verbosity
# >1: increased verbosity
if global_options.loglevel == 0:
lvl = logging.ERROR
elif global_options.loglevel == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
if global_options.stdout == global_options.stdlog:
format = '# %(asctime)s %(levelname)s %(message)s'
else:
format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(
level=lvl,
format=format,
stream=global_options.stdlog)
# set up multi-line logging
# Note that .handlers is not part of the API, might change
    # The solution is to configure handlers explicitly.
for handler in logging.getLogger().handlers:
handler.setFormatter(MultiLineFormatter(format))
return global_options, global_args
def sac(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=10000, epochs=10000, replay_size=int(1e6), gamma=0.99,
polyak=0.995, lr=1e-4, alpha=0.004, batch_size=256, start_steps=1000,
update_after=1000, update_every=1, num_test_episodes=0, max_ep_len=1000,
dynamic_skip = True,
logger_kwargs=dict(), save_freq=1):
"""
Soft Actor-Critic (SAC)
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``mu`` (batch, act_dim) | Computes mean actions from policy
| given states.
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``. Critical: must be differentiable
| with respect to policy parameters all
| the way through action sampling.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to SAC.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for both policy and value learning).
alpha (float): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.)
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
update_after (int): Number of env interactions to collect before
starting to do gradient descent updates. Ensures replay buffer
is full enough for useful updates.
update_every (int): Number of env interactions that should elapse
between gradient descent updates. Note: Regardless of how long
you wait between updates, the ratio of env steps to gradient steps
is locked to 1.
num_test_episodes (int): Number of episodes to test the deterministic
policy at the end of each epoch.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
tf.set_random_seed(seed)
np.random.seed(seed)
#TODO multithreading is a problem with deepmimic at the moment
#env, test_env = env_fn(), env_fn()
env = env_fn()
if dynamic_skip:
from spinup.env_wrappers.dynamic_skip_env import DynamicSkipEnv
env = DynamicSkipEnv(env)
#if num_test_episodes > 0:
# test_env = DynamicSkipEnv(test_env)
test_env = env
# Let's make sure that every incoming env can be treated as a multi agent env.
if not type(env.observation_space) is list:
from spinup.env_wrappers.single_agent_env import SingleAgentEnv
env = SingleAgentEnv(env)
if num_test_episodes > 0:
test_env = SingleAgentEnv(test_env)
n_agents = len(env.observation_space)
obs_dim = [space for space in env.observation_space]
act_dim = [space.shape[0] for space in env.action_space]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = [space.high[0] for space in env.action_space]
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, [None] * n_agents, [None] * n_agents)
# Main outputs from computation graph
replay_buffer = []
step_ops = []
target_init = []
mu, pi, q1, q2 = [], [], [], []
for i in range(n_agents):
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space[i]
with tf.variable_scope('main' + str(i)):
mu_, pi_, logp_pi, q1_, q2_ = actor_critic(x_ph[i], a_ph[i], **ac_kwargs)
mu.append(mu_)
pi.append(pi_)
q1.append(q1_)
q2.append(q2_)
with tf.variable_scope('main' + str(i), reuse=True):
# compose q with pi, for pi-learning
_, _, _, q1_pi, q2_pi = actor_critic(x_ph[i], pi_, **ac_kwargs)
# get actions and log probs of actions for next states, for Q-learning
_, pi_next, logp_pi_next, _, _ = actor_critic(x2_ph[i], a_ph[i], **ac_kwargs)
# Target value network
with tf.variable_scope('target' + str(i)):
# target q values, using actions from *current* policy
_, _, _, q1_targ, q2_targ = actor_critic(x2_ph[i], pi_next, **ac_kwargs)
# Experience buffer
#replay_buffer.append(ReplayBuffer(obs_dim_obj=obs_dim[i], act_dim=act_dim[i], size=replay_size))
spinup_replay_buffer = ReplayBuffer(obs_dim_obj=obs_dim[i], act_dim=act_dim[i], size=replay_size)
#TODO make AMP optional
replay_buffer.append(AMPReplayBuffer(inner_replay_buffer=spinup_replay_buffer, env_reward_weight=0.5, amp_env=env, logger=logger))
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in ['main' + str(i) + '/pi', 'main' + str(i) + '/q1', 'main' + str(i) + '/q2', 'main' + str(i)])
print('\nNumber of parameters agent ' + str(i) + ': \t pi: %d, \t q1: %d, \t q2: %d, \t total: %d\n'%var_counts)
# Min Double-Q:
min_q_pi = tf.minimum(q1_pi, q2_pi)
min_q_targ = tf.minimum(q1_targ, q2_targ)
# Entropy-regularized Bellman backup for Q functions, using Clipped Double-Q targets
q_backup = tf.stop_gradient(r_ph[i] + gamma*(1-d_ph[i])*(min_q_targ - alpha * logp_pi_next))
# Soft actor-critic losses
pi_loss = tf.reduce_mean(alpha * logp_pi - min_q_pi)
q1_loss = 0.5 * tf.reduce_mean((q_backup - q1_)**2)
q2_loss = 0.5 * tf.reduce_mean((q_backup - q2_)**2)
value_loss = q1_loss + q2_loss
# Policy train op
# (has to be separate from value train op, because q1_pi appears in pi_loss)
pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main' + str(i) + '/pi'))
# Value train op
# (control dep of train_pi_op because sess.run otherwise evaluates in nondeterministic order)
value_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
value_params = get_vars('main' + str(i) + '/q')
with tf.control_dependencies([train_pi_op]):
train_value_op = value_optimizer.minimize(value_loss, var_list=value_params)
# Polyak averaging for target variables
# (control flow because sess.run otherwise evaluates in nondeterministic order)
with tf.control_dependencies([train_value_op]):
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main' + str(i)), get_vars('target' + str(i)))])
# All ops to call during one training step
step_ops.append([pi_loss, q1_loss, q2_loss, q1_, q2_, logp_pi,
train_pi_op, train_value_op, target_update])
# Initializing targets to match main variables
target_init.append(tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main' + str(i)), get_vars('target' + str(i)))]))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(target_init)
# Setup model saving
dict_space = False
inputs, outputs = {}, {}
for i in range(n_agents):
# multi modality handling
if isinstance(x_ph[i], dict):
input_modalities = x_ph[i]
dict_space = True
else:
input_modalities = {'':x_ph[i]}
inputs.update({'x' + k + str(i):v for k,v in input_modalities.items()})
inputs.update({'a' + str(i): a_ph[i]})
outputs.update({'mu' + str(i): mu[i], 'pi' + str(i): pi[i], 'q1' + str(i): q1[i], 'q2' + str(i): q2[i]})
logger.setup_tf_saver(sess, inputs=inputs,
outputs=outputs)
def get_action(o, deterministic=False):
act_op = mu if deterministic else pi
feed_dict = {}
for i in range(n_agents):
if dict_space:
feed_dict.update({x_ph[i][modality]: o[i][modality].reshape(1,-1) for modality in x_ph[i]})
else:
feed_dict.update({x_ph[i]:o[i].reshape(1,-1)})
outputs = sess.run(act_op, feed_dict=feed_dict)
return np.array([out[0] for out in outputs])
def test_agent():
if num_test_episodes == 0:
return
test_env.reset()
for j in range(num_test_episodes):
o, d, ep_ret, ep_len = test_env.reset(), False, np.zeros(n_agents), 0
while not(np.any(d) or (ep_len == max_ep_len)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(o, True))
ep_ret += np.array(r)
ep_len += 1
logger.store(TestEpRet=np.average(ep_ret), TestEpLen=ep_len)
test_env.reset()
start_time = time.time()
o, ep_ret, ep_len = env.reset(), np.zeros(n_agents), 0
total_steps = steps_per_epoch * epochs
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
# Until start_steps have elapsed, randomly sample actions
# from a uniform distribution for better exploration. Afterwards,
# use the learned policy.
if t > start_steps:
a = get_action(o)
else:
a = np.array([space.sample() for space in env.action_space])
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += np.array(r)
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
        d = [False] * n_agents if ep_len == max_ep_len else d
# Store experience to replay buffer
for i in range(n_agents):
amp_obs = env.get_amp_obs(i)
state_amp_agent = amp_obs["state_amp_agent"]
state_amp_expert = amp_obs["state_amp_expert"]
# TODO remove
#print("\no[i]: " + str(o[i]))
#print("a[i]: " + str(a[i]))
#print("o2[i]: " + str(o2[i]))
#print("d[i]: " + str(d[i]))
#print("state_amp_agent: " + str(state_amp_agent))
#print("state_amp_expert: " + str(state_amp_expert))
replay_buffer[i].store(o[i], a[i], r[i], o2[i], d[i], state_amp_agent, state_amp_expert)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of trajectory handling
if np.any(d) or (ep_len == max_ep_len):
# TODO I think it would be better if I could reset for agents individually. Maybe the env could do this internally.
logger.store(EpRet=ep_ret, EpLen=ep_len)
            o, ep_ret, ep_len = env.reset(), np.zeros(n_agents), 0
# Update handling
if t >= update_after and t % update_every == 0:
for j in range(update_every):
feed_dict = {}
for i in range(n_agents):
batch = replay_buffer[i].sample_batch(batch_size)
feed_dict.update(replay_buffer[i].to_feed_dict(x_ph[i], batch['obs1']))
feed_dict.update(replay_buffer[i].to_feed_dict(x2_ph[i], batch['obs2']))
feed_dict.update({
a_ph[i]: batch['acts'],
r_ph[i]: batch['rews'],
d_ph[i]: batch['done'],
})
outs = sess.run(step_ops, feed_dict)
loss_pi, loss_q1, loss_q2, q1_vals, q2_vals, log_pi = [], [], [], [], [], []
for i in range(n_agents):
loss_pi.append(outs[i][0])
loss_q1.append(outs[i][1])
loss_q2.append(outs[i][2])
q1_vals.append(outs[i][3])
q2_vals.append(outs[i][4])
log_pi.append(outs[i][5])
logger.store(LossPi=np.average(loss_pi), LossQ1=np.average(loss_q1), LossQ2=np.average(loss_q2),
Q1Vals=np.average(q1_vals), Q2Vals=np.average(q2_vals), LogPi=np.average(log_pi))
# End of epoch wrap-up
if (t+1) % steps_per_epoch == 0:
epoch = (t+1) // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs):
logger.save_state({'env': env}, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
if num_test_episodes > 0:
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
if "LossAmp" in logger.epoch_dict:
logger.log_tabular('LossAmp', average_only=True)
if "AccExpertAMP" in logger.epoch_dict:
logger.log_tabular('AccExpertAMP', average_only=True)
if "AccAgentAMP" in logger.epoch_dict:
logger.log_tabular('AccAgentAMP', average_only=True)
if "AmpRew" in logger.epoch_dict:
logger.log_tabular('AmpRew', average_only=True)
if "AmpRewBatchMax" in logger.epoch_dict:
logger.log_tabular('AmpRewBatchMax', average_only=True)
if "AmpRewBatchMin" in logger.epoch_dict:
logger.log_tabular('AmpRewBatchMin', average_only=True)
logger.log_tabular('LogPi', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ1', average_only=True)
logger.log_tabular('LossQ2', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
| 9,442
|
def plot_progress_means(i, data, centroid_history, idx_history):
"""
Plot points on 2D plane
    :param int i: index of the iteration to plot
    :param np.array data: array containing points
    :param np.array centroid_history: saved centroid positions for every step
    :param np.array idx_history: saved cluster assignments for every step
"""
    K = centroid_history[0].shape[0]
    pyplot.gcf().clf()
    cmap = pyplot.cm.hsv
    for k in range(K):
        # Trajectory of centroid k over the first i+1 iterations (the loop
        # otherwise drew the same scatter K times; assumes numpy as np).
        current = np.stack([c[k, :] for c in centroid_history[:i + 1]], axis=0)
        pyplot.plot(current[:, 0], current[:, 1], '-Xk', lw=2, ms=10)
    pyplot.scatter(data[:, 0], data[:, 1],
                   c=idx_history[i],
                   cmap=cmap,
                   marker='o',
                   s=8 ** 2,
                   linewidths=1,
                   )
pyplot.grid(False)
pyplot.title('Iteration number %d' % (i + 1))
| 9,443
|
def flatten_list(a_list, parent_list=None):
"""Given a list/tuple as entry point, return a flattened list version.
EG:
>>> flatten_list([1, 2, [3, 4]])
[1, 2, 3, 4]
    NB: The `parent_list` kwarg is only for internal use of the function and
    should not be used by the caller.
"""
if parent_list is None:
parent_list = []
for element in a_list:
        if isinstance(element, (list, tuple)):
            flatten_list(element, parent_list=parent_list)
else:
parent_list.append(element)
return parent_list
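# Usage sketch for flatten_list (illustrative values):
# >>> flatten_list([1, (2, [3, 4]), 5])
# [1, 2, 3, 4, 5]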
| 9,444
|
def list_to_csv_str(input_list: List) -> Text:
"""
Concatenates the elements of the list, joining them by ",".
Parameters
----------
input_list : list
List with elements to be joined.
Returns
-------
str
Returns a string, resulting from concatenation of list elements,
        separated by ",".
Example
-------
>>> from pymove import conversions
>>> a = [1, 2, 3, 4, 5]
>>> conversions.list_to_csv_str(a)
    '1,2,3,4,5'
"""
return list_to_str(input_list)
| 9,445
|
def close_xray_safety_shutters():
"""Remote Frontend shutter"""
xray_safety_shutters_open.value = False
while not xray_safety_shutters_open.value == False and not task.cancelled:
sleep(0.2)
| 9,446
|
def scheme_listp(x):
"""Return whether x is a well-formed list. Assumes no cycles."""
while x is not nil:
if not isinstance(x, Pair):
return False
x = x.second
return True
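# Usage sketch, assuming the Pair class and nil sentinel from the surrounding
# Scheme interpreter:
# >>> scheme_listp(Pair(1, Pair(2, nil)))
# True
# >>> scheme_listp(Pair(1, 2))  # improper list: second is not a Pair or nil
# False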
| 9,447
|
def test_multisig_digprefix():
"""
Test multisig with self-addressing (digest) pre
"""
# Test sequence of events given set of secrets
secrets = [
'ArwXoACJgOleVZ2PY7kXn7rA0II0mHYDhc6WrBH8fDAc',
'A6zz7M08-HQSFq92sJ8KJOT2cZ47x7pXFQLPB0pckB3Q',
'AcwFTk-wgk3ZT2buPRIbK-zxgPx-TKbaegQvPEivN90Y',
'Alntkt3u6dDgiQxTATr01dy8M72uuaZEf9eTdM-70Gk8',
'A1-QxDkso9-MR1A8rZz_Naw6fgaAtayda8hrbkRVVu1E',
'AKuYMe09COczwf2nIoD5AE119n7GLFOVFlNLxZcKuswc',
'AxFfJTcSuEE11FINfXMqWttkZGnUZ8KaREhrnyAXTsjw',
'ALq-w1UKkdrppwZzGTtz4PWYEeWm0-sDHzOv5sq96xJY'
]
with openDB("controller") as conlgr, openDB("validator") as vallgr:
# create event stream
kes = bytearray()
# create signers
signers = [Signer(qb64=secret) for secret in secrets] # faster
        assert [signer.qb64 for signer in signers] == secrets
# Event 0 Inception Transferable (nxt digest not empty)
        # 2 of 3 multisig
keys = [signers[0].verfer.qb64, signers[1].verfer.qb64, signers[2].verfer.qb64]
nxtkeys = [signers[3].verfer.qb64, signers[4].verfer.qb64, signers[5].verfer.qb64]
sith = "2"
code = MtrDex.Blake3_256 # Blake3 digest of incepting data
serder = incept(keys=keys,
code=code,
sith=sith,
nxt=Nexter(keys=nxtkeys).qb64)
# create sig counter
count = len(keys)
counter = Counter(CtrDex.ControllerIdxSigs, count=count) # default is count = 1
# sign serialization
sigers = [signers[i].sign(serder.raw, index=i) for i in range(count)]
# create key event verifier state
kever = Kever(serder=serder, sigers=sigers, baser=conlgr)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
for siger in sigers:
kes.extend(siger.qb64b)
assert kes == bytearray(b'{"v":"KERI10JSON000144_","i":"EJPRBUSEdUuZnh9kRGg8y7uBJDxTGZdp4Y'
b'eUSqBv5sEk","s":"0","t":"icp","kt":"2","k":["DSuhyBcPZEZLK-fcw5t'
b'zHn2N46wRCG_ZOoeKtWTOunRA","DVcuJOOJF1IE8svqEtrSuyQjGTd2HhfAkt9y'
b'2QkUtFJI","DT1iAhBWCkvChxNWsby2J0pJyxBIxbAtbLA0Ljx-Grh8"],"n":"E'
b'9izzBkXX76sqt0N-tfLzJeRqj0W56p4pDQ_ZqNCDpyw","wt":"0","w":[],"c"'
b':[]}-AADAA74a3kHBjpaY2h3AzX8UursaGoW8kKU1rRLlMTYffMvKSTbhHHy96br'
b'GN2P6ehcmEW2nlUNZVuMf8zo6Qd8PkCgABIJfoSJejaDh1g-UZKkldxtTCwic7kB'
b'3s15EsDPKpm_6EhGcxVTt0AFXQUQMroKgKrGnxL0GP6gwEdmdu9dVRAgACtJFQBQ'
b'iRX5iqWpJQntfAZTx6VIv_Ghydg1oB0QCq7s8D8LuKH5n1S5t8AbbQPXv6Paf7AV'
b'JRFv8lhCT5cdx3Bg')
# Event 1 Rotation Transferable
keys = nxtkeys
sith = "2"
nxtkeys = [signers[5].verfer.qb64, signers[6].verfer.qb64, signers[7].verfer.qb64]
serder = rotate(pre=kever.prefixer.qb64,
keys=keys,
sith=sith,
dig=kever.serder.diger.qb64,
nxt=Nexter(keys=nxtkeys).qb64,
sn=1)
# create sig counter
count = len(keys)
counter = Counter(CtrDex.ControllerIdxSigs, count=count) # default is count = 1
# sign serialization
sigers = [signers[i].sign(serder.raw, index=i-count) for i in range(count, count+count)]
# update key event verifier state
kever.update(serder=serder, sigers=sigers)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
for siger in sigers:
kes.extend(siger.qb64b)
# Event 2 Interaction
serder = interact(pre=kever.prefixer.qb64,
dig=kever.serder.diger.qb64,
sn=2)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs, count=count) # default is count = 1
# sign serialization
sigers = [signers[i].sign(serder.raw, index=i-count) for i in range(count, count+count)]
# update key event verifier state
kever.update(serder=serder, sigers=sigers)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
for siger in sigers:
kes.extend(siger.qb64b)
        # Event 3 Interaction
serder = interact(pre=kever.prefixer.qb64,
dig=kever.serder.diger.qb64,
sn=3)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs, count=count) # default is count = 1
# sign serialization
sigers = [signers[i].sign(serder.raw, index=i-count) for i in range(count, count+count)]
# update key event verifier state
kever.update(serder=serder, sigers=sigers)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
for siger in sigers:
kes.extend(siger.qb64b)
        # Event 4 Rotation to null NonTransferable Abandon
# nxt digest is empty
keys = nxtkeys
serder = rotate(pre=kever.prefixer.qb64,
keys=keys,
sith="2",
dig=kever.serder.diger.qb64,
nxt="",
sn=4)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs, count=count) # default is count = 1
# sign serialization
sigers = [signers[i].sign(serder.raw, index=i-5) for i in range(5, 8)]
# update key event verifier state
kever.update(serder=serder, sigers=sigers)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
for siger in sigers:
kes.extend(siger.qb64b)
assert len(kes) == 2692
kevery = Kevery(db=vallgr)
kevery.process(ims=kes)
pre = kever.prefixer.qb64
assert pre in kevery.kevers
vkever = kevery.kevers[pre]
assert vkever.sn == kever.sn
assert vkever.verfers[0].qb64 == kever.verfers[0].qb64
assert vkever.verfers[0].qb64 == signers[5].verfer.qb64
assert not os.path.exists(kevery.db.path)
""" Done Test """
| 9,448
|
def main():
"""
Runs the test
"""
args = mujoco_arg_parser().parse_args()
logger.configure()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)
env = make_mujoco_env(args.env, args.seed)
model = PPO1(MlpPolicy, env, timesteps_per_actorbatch=2048, clip_param=0.2, entcoeff=0.0, optim_epochs=10,
optim_stepsize=3e-4, optim_batchsize=64, gamma=0.99, lam=0.95, schedule='linear')
model.learn(total_timesteps=args.num_timesteps)
model.save("ppo1")
# env.close()
del model # remove to demonstrate saving and loading
# env = make_mujoco_env(args.env, args.seed)
model = PPO1.load("ppo1")
logger.log("~!!!!!!!!")
episode_rew = 0
obs = env.reset()
while True:
action, _states = model.predict(obs)
ob, reward, done, info = env.step(action)
episode_rew += reward
env.render()
if done:
print(f'episode_rew={episode_rew}')
episode_rew = 0
obs = env.reset()
| 9,449
|
def check_license_analysis_message(context, message):
"""Check the message for the last license analysis."""
json_data = context.response.json()
actual = check_and_get_attribute(json_data, "message")
assert actual == message, \
"License service returns message {actual}, but other message {message} is expected" \
.format(actual=actual, message=message)
| 9,450
|
def main(args):
"""Extract a MSR-VTT captions dataframe from the annotation files."""
with open(args.raw_data_path) as data_file:
data = json.load(data_file)
df = pd.DataFrame(columns=['vid_id', 'sen_id', 'caption'])
df_idx = 0
if args.continue_converting:
        if os.path.isfile(args.interim_data_path + 'all_captions.csv'):
df = pd.read_csv(args.interim_data_path + 'all_captions.csv')
df_idx = len(df)
if df_idx == 0:
print('Number of captions: {}'.format(len(data['sentences'])))
else:
print('Number of captions remaining: {}'.format(
len(data['sentences']) - len(df)))
for i in range(len(data['sentences'])):
if i % 1000 == 0:
print('Converting json to csv: {}%\r'.format(
round(i / float(len(data['sentences'])) * 100, 2)), end='')
df.to_csv(args.interim_data_path + 'all_captions.csv', index=False)
df.loc[df_idx, 'vid_id'] = data['sentences'][i]['video_id']
df.loc[df_idx, 'sen_id'] = data['sentences'][i]['sen_id']
df.loc[df_idx, 'caption'] = unicode_to_ascii(
data['sentences'][i]['caption'])
df_idx += 1
df.to_csv(args.interim_data_path + 'all_captions.csv', index=False)
print('\nDone Converting')
print('Number of videos: {}'.format(df['vid_id'].nunique()))
# Get and shuffle video names
vid_names = df['vid_id'].unique()
shuffle(vid_names)
# Determine number of videos in training and development set
num_train_vids = int(len(vid_names) * args.train_pct)
num_dev_vids = int(len(vid_names) * args.dev_pct)
    # Partition videos into respective sets
train_vids = vid_names[:num_train_vids]
dev_vids = vid_names[num_train_vids:num_train_vids + num_dev_vids]
test_vids = vid_names[num_train_vids + num_dev_vids:]
print('Number of training videos: {}'.format(len(train_vids)))
print('Number of development videos: {}'.format(len(dev_vids)))
print('Number of testing videos: {}'.format(len(test_vids)))
for i, row in df.iterrows():
if i % 1000 == 0:
print('Assigning to sets: {}%\r'.format(
round(i / float(len(df)) * 100, 2)), end='')
df.to_csv(args.interim_data_path +
'partially_distributed_captions.csv', index=False)
if row['vid_id'] in train_vids:
df.loc[i, 'set'] = 'train'
elif row['vid_id'] in dev_vids:
df.loc[i, 'set'] = 'dev'
elif row['vid_id'] in test_vids:
df.loc[i, 'set'] = 'test'
df.to_csv(args.final_data_path + 'msrvtt_captions.csv', index=False)
print('Saved final distributed set to ' +
args.final_data_path + 'msrvtt_captions.csv')
print('\nDone')
| 9,451
|
def list_to_dict(config):
"""
Convert list based beacon configuration
into a dictionary.
"""
_config = {}
list(map(_config.update, config))
return _config
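# Usage sketch (illustrative beacon config):
# >>> list_to_dict([{'inotify': {'coalesce': True}}, {'interval': 30}])
# {'inotify': {'coalesce': True}, 'interval': 30}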
| 9,452
|
def get_model(model_name, in_channels = 3, input_size = 224, num_classes = 1000):
"""Get model
Args :
--model_name: model's name
--in_channels: default is 3
--input_size: default is 224
--num_classes: default is 1000 for ImageNet
return :
--model: model instance
"""
    if model_name == 'cmt_ti':
model = CMT_Ti(in_channels = in_channels, input_size = input_size, num_classes = num_classes)
elif model_name == 'cmt_xs':
model = CMT_XS(in_channels = in_channels, input_size = input_size, num_classes = num_classes)
elif model_name == 'cmt_s':
model = CMT_S(in_channels = in_channels, input_size = input_size, num_classes = num_classes)
elif model_name == 'cmt_b':
model = CMT_B(in_channels = in_channels, input_size = input_size, num_classes = num_classes)
else:
raise Exception('No other models!')
    print(model_name + ': \n', model)
total = sum(p.numel() for p in model.parameters())
print("Total params: %.2fM" % (total / 1e6))
return model
| 9,453
|
def check_edge_heights(
stack, shifts, height_resistance, shift_lines, height_arr, MIN_H, MAX_H,
RESOLUTION
):
"""
Check all edges and output an array indicating which ones are
0 - okay at minimum pylon height, 2 - forbidden, 1 - to be computed
NOTE: function not used here! only for test purposes
"""
# print(len(stack))
for i in range(len(stack)):
v_x = stack[-i - 1][0]
v_y = stack[-i - 1][1]
# so far height on in edges
for s in range(len(shifts)):
neigh_x = v_x + shifts[s][0]
neigh_y = v_y + shifts[s][1]
# get minimum heights of v_x,v_y dependent on incoming edge
bres_line = shift_lines[s] + np.array([v_x, v_y])
# required heights
S = int(
np.sqrt((v_x - neigh_x)**2 + (v_y - neigh_y)**2)
) * RESOLUTION
# left and right point
yA = height_resistance[v_x, v_y] + MIN_H
yB = height_resistance[neigh_x, neigh_y] + MIN_H
# compute lowest point of sag
x0 = S / 2 - ((yB - yA) * CAT_H / (CAT_W * S))
# compute height above x0 at left point
A_height = (CAT_W * x0**2) / (2 * CAT_H)
# print(height_bline)
# iterate over points on bres_line
stepsize = S / (len(bres_line) + 1)
heights_above = np.zeros(len(bres_line))
            for k, (px, py) in enumerate(bres_line):
                x = x0 - stepsize * (k + 1)
                cat = (CAT_W * x**2) / (2 * CAT_H)
                heights_above[k] = yA - A_height - height_resistance[px, py] + cat
# analyse heights_above:
if np.all(heights_above >= 11):
# whole cable is okay
fine_60 = 0
elif np.any(heights_above < -MAX_H - MIN_H):
# would not work with 80 - 80
fine_60 = 2
else:
# somewhere inbetween
fine_60 = 1
height_arr[s, neigh_x, neigh_y] = fine_60
return height_arr
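# Sketch of the sag geometry used above. The cable is modeled as the parabola
# y(x) = CAT_W * x**2 / (2 * CAT_H) measured from its lowest point, with the
# supports at signed offsets x0 (height yA) and x0 - S (height yB).
# Subtracting the two parabola equations gives
#     yB - yA = CAT_W * (S**2 - 2*S*x0) / (2 * CAT_H)
# and solving for the lowest point yields
#     x0 = S/2 - (yB - yA) * CAT_H / (CAT_W * S)
# which is exactly the expression computed in the loop; CAT_W and CAT_H are
# cable constants assumed to be defined at module level.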
| 9,454
|
def linear_int_ext(data_pts, p, scale=None, allow_extrap=False):
"""
Interpolate data points to find remaining unknown values absent from
`p` with optionally scaled axes. If `p` is not in the range and
    `allow_extrap` == True, a linear extrapolation is done using the two data
    points at the end corresponding to `p`.
Parameters
----------
data_pts : list_like(tuple)
[(a_1, ... a_n), ...] sorted on the required axis (either direction).
p : list_like
Required point to interpolate / extrapolate with at least a single
known component, i.e. :math:`(..., None, p_i, None, ...)`. If
more than one is supplied, the first is used.
scale :
Same as ``line_pt`` scale.
allow_extrap : bool, optional
If True linear extrapolation from the two adjacent endpoints is
permitted. Default = False.
Returns
-------
list :
Interpolated / extrapolated point :math:`[q_1, ..., q_n]` where
:math:`q_i = p_i` from above.
"""
if len(data_pts) < 2:
raise ValueError("At least two data points required.")
if scale is None:
scale = [None] * len(data_pts[0])
# Get working axis.
for ax, x in enumerate(p):
if x is not None:
break
else:
raise ValueError("Requested point must include at least one known "
"value.")
def on_axis(li): # Return value along required axis.
return li[ax]
# Get two adjacent points for straight line.
try:
# Try interpolation.
l_idx, r_idx = bracket_list(data_pts, p, key=on_axis)
except ValueError:
if not allow_extrap:
raise ValueError(f"Point not within data range.")
if ((on_axis(data_pts[0]) < on_axis(data_pts[-1])) != (
on_axis(p) < on_axis(data_pts[0]))):
l_idx, r_idx = -2, -1 # RHS extrapolation.
else:
l_idx, r_idx = 0, 1 # LHS extrapolation.
return line_pt(data_pts[l_idx], data_pts[r_idx], p, scale)
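# Usage sketch (illustrative values; assumes `line_pt` fills the unknown
# components by linear interpolation along the bracketing straight line, as
# its use above implies):
# >>> linear_int_ext([(0.0, 0.0), (10.0, 20.0)], [5.0, None])
# [5.0, 10.0]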
| 9,455
|
def eval_in_els_and_qp(expression, ig, iels, coors,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None):
"""
Evaluate an expression in given elements and points.
Parameters
----------
    expression : str
        The expression to evaluate.
    ig : int
        The element group index.
    iels : array
        The indices of the elements (within the group) to evaluate in.
    coors : array
        The coordinates of the evaluation points.
fields : dict
The dictionary of fields used in `variables`.
materials : Materials instance
The materials used in the expression.
variables : Variables instance
The variables used in the expression.
functions : Functions instance, optional
The user functions for materials etc.
mode : one of 'eval', 'el_avg', 'qp'
The evaluation mode - 'qp' requests the values in quadrature points,
'el_avg' element averages and 'eval' means integration over
each term region.
term_mode : str
The term call mode - some terms support different call modes
and depending on the call mode different values are
returned.
extra_args : dict, optional
Extra arguments to be passed to terms in the expression.
verbose : bool
If False, reduce verbosity.
kwargs : dict, optional
The variables (dictionary of (variable name) : (Variable
instance)) to be used in the expression.
Returns
-------
out : array
The result of the evaluation.
"""
weights = nm.ones_like(coors[:, 0])
integral = Integral('ie', coors=coors, weights=weights)
    domain = next(iter(fields.values())).domain
region = Region('Elements', 'given elements', domain, '')
region.cells = iels + domain.mesh.el_offsets[ig]
region.update_shape()
domain.regions.append(region)
    for field in fields.values():
        field.clear_mappings(clear_all=True)
        for ap in field.aps.values():
ap.clear_qp_base()
aux = create_evaluable(expression, fields, materials,
                           variables.values(), Integrals([integral]),
functions=functions,
mode=mode, extra_args=extra_args, verbose=verbose,
kwargs=kwargs)
equations, variables = aux
out = eval_equations(equations, variables,
preserve_caches=False,
mode=mode, term_mode=term_mode)
domain.regions.pop()
return out
| 9,456
|
def _add_tokenization_exceptions(language):
"""
Tons of tokenization exceptions for this dataset
:param language:
:return:
"""
#N2C2 2019 and Share 2013 Concept Normalization
language.tokenizer.add_special_case('empiricvancomycin', [{ORTH: "empiric"}, {ORTH: "vancomycin"}])
language.tokenizer.add_special_case('dobutamine-MIBI', [{ORTH: 'dobutamine'}, {ORTH: '-'}, {ORTH: 'MIBI'}])
language.tokenizer.add_special_case('cLVH', [{ORTH: 'c'}, {ORTH: 'LVH'}])
language.tokenizer.add_special_case('UPEP/Beta', [{ORTH: 'UPEP'}, {ORTH: '/'}, {ORTH: 'Beta'}])
language.tokenizer.add_special_case('constipation-related', [{ORTH: 'constipation'}, {ORTH: '-'}, {ORTH: 'related'}])
language.tokenizer.add_special_case('anteriordysplasia', [{ORTH: 'anterior'}, {ORTH: 'dysplasia'}])
language.tokenizer.add_special_case('F.', [{ORTH: 'F'}, {ORTH: '.'}])
language.tokenizer.add_special_case('extrapleural-pleural', [{ORTH: 'extrapleural'}, {ORTH: '-'}, {ORTH: 'pleural'}])
language.tokenizer.add_special_case('saphenous', [{ORTH: 'sap'}, {ORTH: 'henous'}])
language.tokenizer.add_special_case('T97.5', [{ORTH: 'T'}, {ORTH: '97.5'}])
language.tokenizer.add_special_case('P59', [{ORTH: 'P'}, {ORTH: '59'}])
language.tokenizer.add_special_case('RR20', [{ORTH: 'RR'}, {ORTH: '20'}])
language.tokenizer.add_special_case('Demerolprn', [{ORTH: 'Demerol'}, {ORTH: 'prn'}])
language.tokenizer.add_special_case('Carboplatin-Taxolchemo', [{ORTH: 'Carboplatin'}, {ORTH: '-'}, {ORTH: 'Taxol'}, {ORTH: 'chemo'}])
language.tokenizer.add_special_case('midepigastric', [{ORTH: 'mid'}, {ORTH: 'epigastric'}])
language.tokenizer.add_special_case('BRBPR/melena', [{ORTH: 'BRBPR'}, {ORTH: '/'}, {ORTH: 'melena'}])
language.tokenizer.add_special_case('CPAP+PS', [{ORTH: 'CPAP'}, {ORTH: '+'}, {ORTH: 'PS'}])
language.tokenizer.add_special_case('medications', [{ORTH: 'medication'}, {ORTH: 's'}])
language.tokenizer.add_special_case('mass-', [{ORTH: 'mass'}, {ORTH: '-'}])
language.tokenizer.add_special_case('1.ampullary', [{ORTH: '1'}, {ORTH: '.'}, {ORTH: 'ampullary'}])
language.tokenizer.add_special_case('membranes', [{ORTH: 'membrane'}, {ORTH: 's'}])
language.tokenizer.add_special_case('SOBx', [{ORTH: 'SOB'}, {ORTH: 'x'}])
language.tokenizer.add_special_case('Mass.', [{ORTH: 'Mass'}, {ORTH: '.'}])
language.tokenizer.add_special_case('Atroventnebulizer', [{ORTH: 'Atrovent'}, {ORTH: 'nebulizer'}])
    language.tokenizer.add_special_case('PCO227', [{ORTH: 'PC02'}, {ORTH: '27'}])
    language.tokenizer.add_special_case("MB's", [{ORTH: 'MB'}, {ORTH: "'s"}])
    language.tokenizer.add_special_case("Q's", [{ORTH: 'Q'}, {ORTH: "'s"}])
language.tokenizer.add_special_case('predischarge', [{ORTH: 'pre'}, {ORTH: 'discharge'}])
language.tokenizer.add_special_case('1. Diabetes mellitus type 2.', [{ORTH: '1. '}, {ORTH: 'Diabetes mellitus type 2'}, {ORTH: '.'}])
#N2C2 2018 NER
language.tokenizer.add_special_case('ons', [{ORTH: 'on'}, {ORTH: 's'}])
language.tokenizer.add_special_case('DAILY16', [{ORTH: 'DAILY'}, {ORTH: '16'}])
language.tokenizer.add_special_case('2uRBCs,', [{ORTH: '2u'}, {ORTH: 'RBCs'},{ORTH: ','}])
language.tokenizer.add_special_case('1.amlodipine', [{ORTH: '1'}, {ORTH: '.'},{ORTH: 'amlodipine'}])
language.tokenizer.add_special_case('2.fexofenadine', [{ORTH: '2'}, {ORTH: '.'},{ORTH: 'fexofenadine'}])
language.tokenizer.add_special_case('3.levothyroxine', [{ORTH: '3'}, {ORTH: '.'},{ORTH: 'levothyroxine'}])
language.tokenizer.add_special_case('4.omeprazole', [{ORTH: '4'}, {ORTH: '.'},{ORTH: 'omeprazole'}])
language.tokenizer.add_special_case('5.multivitamin', [{ORTH: '5'}, {ORTH: '.'},{ORTH: 'multivitamin'}])
language.tokenizer.add_special_case('6.tiotropium', [{ORTH: '6'}, {ORTH: '.'},{ORTH: 'tiotropium'}])
language.tokenizer.add_special_case('7.atorvastatin', [{ORTH: '7'}, {ORTH: '.'},{ORTH: 'atorvastatin'}])
language.tokenizer.add_special_case('8.docusate', [{ORTH: '8'}, {ORTH: '.'},{ORTH: 'docusate'}])
language.tokenizer.add_special_case('9.dofetilide', [{ORTH: '9'}, {ORTH: '.'},{ORTH: 'dofetilide'}])
language.tokenizer.add_special_case('10.albuterol', [{ORTH: '10'}, {ORTH: '.'},{ORTH: 'albuterol'}])
language.tokenizer.add_special_case('11.cholecalciferol', [{ORTH: '11'}, {ORTH: '.'},{ORTH: 'cholecalciferol'}])
language.tokenizer.add_special_case('12.fluticasone', [{ORTH: '12'}, {ORTH: '.'},{ORTH: 'fluticasone'}])
language.tokenizer.add_special_case('13.morphine', [{ORTH: '13'}, {ORTH: '.'},{ORTH: 'morphine'}])
language.tokenizer.add_special_case('14.morphine', [{ORTH: '14'}, {ORTH: '.'},{ORTH: 'morphine'}])
language.tokenizer.add_special_case('15.calcium', [{ORTH: '15'}, {ORTH: '.'},{ORTH: 'calcium'}])
language.tokenizer.add_special_case('16.warfarin', [{ORTH: '16'}, {ORTH: '.'},{ORTH: 'warfarin'}])
language.tokenizer.add_special_case('17.warfarin', [{ORTH: '17'}, {ORTH: '.'},{ORTH: 'warfarin'}])
language.tokenizer.add_special_case('18.Epogen', [{ORTH: '18'}, {ORTH: '.'},{ORTH: 'Epogen'}])
language.tokenizer.add_special_case('19.guaifenesin', [{ORTH: '19'}, {ORTH: '.'},{ORTH: 'guaifenesin'}])
language.tokenizer.add_special_case('20.bumetanide', [{ORTH: '20'}, {ORTH: '.'},{ORTH: 'bumetanide'}])
language.tokenizer.add_special_case('21.prednisone', [{ORTH: '21'}, {ORTH: '.'},{ORTH: 'prednisone'}])
language.tokenizer.add_special_case('22.ferrous', [{ORTH: '22'}, {ORTH: '.'},{ORTH: 'ferrous'}])
language.tokenizer.add_special_case('23.spironolactone', [{ORTH: '23'}, {ORTH: '.'},{ORTH: 'spironolactone'}])
language.tokenizer.add_special_case('1.lasix', [{ORTH: '1'}, {ORTH: '.'},{ORTH: 'lasix'}])
language.tokenizer.add_special_case('6.lasix', [{ORTH: '6'}, {ORTH: '.'},{ORTH: 'lasix'}])
language.tokenizer.add_special_case('10.citalopram', [{ORTH: '10'}, {ORTH: '.'},{ORTH: 'citalopram'}])
language.tokenizer.add_special_case('2.haloperidol', [{ORTH: '2'}, {ORTH: '.'},{ORTH: 'haloperidol'}])
language.tokenizer.add_special_case('4.tiotropium', [{ORTH: '4'}, {ORTH: '.'},{ORTH: 'tiotropium'}])
language.tokenizer.add_special_case('8.omeprazole', [{ORTH: '8'}, {ORTH: '.'},{ORTH: 'omeprazole'}])
language.tokenizer.add_special_case('3.tamsulosin', [{ORTH: '3'}, {ORTH: '.'},{ORTH: 'tamsulosin'}])
#language.tokenizer.add_special_case('.atorvastatin', [{ORTH:'.'},{ORTH: 'atorvastatin'}])
language.tokenizer.add_special_case('5.atorvastatin', [{ORTH: '5'}, {ORTH: '.'},{ORTH: 'atorvastatin'}])
language.tokenizer.add_special_case('9.aspirin', [{ORTH: '5'}, {ORTH: '.'},{ORTH: 'atorvastatin'}])
language.tokenizer.add_special_case('10.citalopram', [{ORTH: '10'}, {ORTH: '.'},{ORTH: 'citalopram'}])
language.tokenizer.add_special_case('1.fluticasone-salmeterol', [{ORTH: '1'}, {ORTH: '.'},{ORTH: 'fluticasone'},{ORTH:'-'}, {ORTH: 'salmeterol'}])
language.tokenizer.add_special_case('6.lisinopril', [{ORTH: '6'}, {ORTH: '.'}, {ORTH: 'lisinopril'}])
language.tokenizer.add_special_case('7.senna', [{ORTH: '7'}, {ORTH: '.'}, {ORTH: 'senna'}])
language.tokenizer.add_special_case('hours).', [{ORTH: 'hours'}, {ORTH: ')'}, {ORTH: '.'}])
language.tokenizer.add_special_case('.Talon', [{ORTH: '.'}, {ORTH: 'Talon'}])
language.tokenizer.add_special_case('RR<', [{ORTH: 'RR'}, {ORTH: '<'}])
language.tokenizer.add_special_case('(2', [{ORTH: '('}, {ORTH: '2'}])
language.tokenizer.add_special_case('IDDM:', [{ORTH: 'ID'}, {ORTH: 'DM'}, {ORTH: ':'}])
language.tokenizer.add_special_case('@HS,tramadol', [{ORTH: '@'}, {ORTH: 'HS'},{ORTH: ','}, {ORTH: 'tramadol'}])
language.tokenizer.add_special_case('1-2Lnc', [{ORTH: '1-2L'}, {ORTH: 'nc'}])
language.tokenizer.add_special_case('withantibiotic', [{ORTH: 'with'}, {ORTH: 'antibiotic'}])
language.tokenizer.add_special_case('startingKeppra,', [{ORTH: 'starting'}, {ORTH: 'Keppra'}])
language.tokenizer.add_special_case('Warfarin5', [{ORTH: 'Warfarin'}, {ORTH: '5'}])
language.tokenizer.add_special_case('IDDM', [{ORTH: 'I'}, {ORTH: 'DDM'}])
language.tokenizer.add_special_case('1u', [{ORTH: '1'}, {ORTH: 'u'}])
language.tokenizer.add_special_case('6U', [{ORTH: '6'}, {ORTH: 'U'}])
language.tokenizer.add_special_case('HSQ', [{ORTH: 'H'}, {ORTH: 'SQ'}])
language.tokenizer.add_special_case('GD20', [{ORTH: 'GD'}, {ORTH: '20'}])
language.tokenizer.add_special_case('FAFA', [{ORTH: 'FA'}, {ORTH: 'FA'}])
language.tokenizer.add_special_case('FACB', [{ORTH: 'FA'}, {ORTH: 'CB'}])
language.tokenizer.add_special_case('O3CB', [{ORTH: 'O3'}, {ORTH: 'CB'}])
language.tokenizer.add_special_case('O3FA', [{ORTH: '03'}, {ORTH: 'FA'}])
language.tokenizer.add_special_case('PND5', [{ORTH: 'PND'}, {ORTH: '5'}])
language.tokenizer.add_special_case('PND60:', [{ORTH: 'PND'}, {ORTH: '60'}, {ORTH: ':'}])
language.tokenizer.add_special_case('mice/treatment)', [{ORTH: 'mice'}, {ORTH: '/'}, {ORTH: 'treatment'}, {ORTH: ')'}])
#TAC 2018
language.tokenizer.add_special_case('Kunmingmouse', [{ORTH: 'Kunming'}, {ORTH: 'mouse'}])
language.tokenizer.add_special_case('24h', [{ORTH: '24'}, {ORTH: 'h'}])
language.tokenizer.add_special_case('72h', [{ORTH: '72'}, {ORTH: 'h'}])
language.tokenizer.add_special_case('[15N5]8-oxodG', [{ORTH: '[15N5]'}, {ORTH: '8-oxodG'}])
language.tokenizer.add_special_case('ratswerepermitted', [{ORTH: 'rats'}, {ORTH: 'were'} ,{ORTH: 'permitted'}])
language.tokenizer.add_special_case('mgTi', [{ORTH: 'mg'}, {ORTH: 'Ti'}])
language.tokenizer.add_special_case('ND60', [{ORTH: 'ND'}, {ORTH: '60'}])
# language.tokenizer.add_special_case('PND30–35', [{ORTH: 'PND'}, {ORTH: '30–35'}])
language.tokenizer.add_special_case('198Au', [{ORTH: '198'}, {ORTH: 'Au'}])
language.tokenizer.add_special_case('8weeks,', [{ORTH: '8'}, {ORTH: 'weeks'}, {ORTH:','}])
language.tokenizer.add_special_case('weeks:55', [{ORTH: 'weeks'}, {ORTH: ':'}, {ORTH:'55'}])
language.tokenizer.add_special_case('ininfected', [{ORTH: 'in'}, {ORTH: 'infected'}])
language.tokenizer.add_special_case('15days.', [{ORTH: '15'}, {ORTH: 'days'},{ORTH: '.'}])
language.tokenizer.add_special_case('GD18', [{ORTH: 'GD'},{ORTH: '18'}])
language.tokenizer.add_special_case('day).', [{ORTH: 'day'},{ORTH: ')'}, {ORTH: '.'}])
language.tokenizer.add_special_case('x11days).', [{ORTH: 'x11'},{ORTH: 'days'}, {ORTH: ')'},{ORTH: '.'}])
language.tokenizer.add_special_case('4.5hours', [{ORTH: '4.5'}, {ORTH: 'hours'}])
language.tokenizer.add_special_case('0.5mg', [{ORTH: '0.5'}, {ORTH: 'mg'}])
#N2C2 2010
language.tokenizer.add_special_case('periprosthetic', [{ORTH: 'peri'}, {ORTH: 'prosthetic'}])
language.tokenizer.add_special_case('MIER', [{ORTH: 'MI'}, {ORTH: 'ER'}])
#END 2017
language.tokenizer.add_special_case('PeripheralPeripheral', [{ORTH: 'Peripheral'}, {ORTH: 'Peripheral'}])
language.tokenizer.add_special_case('SeriousSerious', [{ORTH: 'Serious'}, {ORTH: 'Serious'}])
language.tokenizer.add_special_case('ADC-CD30', [{ORTH: 'ADC-CD'}, {ORTH: '30'}])
language.tokenizer.add_special_case('MCC-DM1', [{ORTH: 'MCC-DM'}, {ORTH: '1'}])
language.tokenizer.add_special_case('syndrome[see', [{ORTH: 'syndrome'}, {ORTH: '['}, {ORTH: 'see'}])
language.tokenizer.add_special_case('5.1Anaphylaxis', [{ORTH: '5.1'}, {ORTH: 'Anaphylaxis'}])
language.tokenizer.add_special_case('HIGHLIGHTSPEGINTRON', [{ORTH: 'HIGHLIGHTS'}, {ORTH: 'PEGINTRON'}])
language.tokenizer.add_special_case('HIGHLIGHTSRibavirin', [{ORTH: 'HIGHLIGHTS'}, {ORTH: 'Ribavirin'}])
language.tokenizer.add_special_case('COPEGUS[see', [{ORTH: 'COPEGUS'}, {ORTH: '[see'}])
#I2B2 2014
language.tokenizer.add_special_case('FAT', [{ORTH: 'F'}, {ORTH: 'A'}, {ORTH: 'T'}])
language.tokenizer.add_special_case('TTS', [{ORTH: 'T'}, {ORTH: 'T'}, {ORTH: 'S'}])
language.tokenizer.add_special_case('STTh', [{ORTH: 'S'}, {ORTH: 'T'}, {ORTH: 'Th'}])
language.tokenizer.add_special_case('TThSa', [{ORTH: 'T'}, {ORTH: 'h'}, {ORTH: 'Sa'}])
language.tokenizer.add_special_case('MWFS', [{ORTH: 'M'}, {ORTH: 'W'}, {ORTH: 'F'}, {ORTH: 'S'}])
language.tokenizer.add_special_case('MWF', [{ORTH: 'M'}, {ORTH: 'W'}, {ORTH: 'F'}])
language.tokenizer.add_special_case('ThisRoberta', [{ORTH: 'This'}, {ORTH: 'Roberta'}])
language.tokenizer.add_special_case('GambiaHome', [{ORTH: 'Gambia'}, {ORTH: 'Home'}])
language.tokenizer.add_special_case('SupervisorSupport', [{ORTH: 'Supervisor'}, {ORTH: 'Support'}])
language.tokenizer.add_special_case('inhartsville', [{ORTH: 'in'}, {ORTH: 'hartsville'}])
language.tokenizer.add_special_case('ELLENMRN:', [{ORTH: 'ELLEN'}, {ORTH: 'MRN:'}])
language.tokenizer.add_special_case('0.411/29/2088', [{ORTH: '0.4'}, {ORTH: '11/29/2088'}])
language.tokenizer.add_special_case('past11/29/2088', [{ORTH: 'past'}, {ORTH: '11/29/2088'}])
language.tokenizer.add_special_case('Hospital0021', [{ORTH: 'Hospital'}, {ORTH: '0021'}])
language.tokenizer.add_special_case('HospitalAdmission', [{ORTH: 'Hospital'}, {ORTH: 'Admission'}])
language.tokenizer.add_special_case('AvenueKigali,', [{ORTH: 'Avenue'}, {ORTH: 'Kigali,'}])
language.tokenizer.add_special_case('47798497-045-1949', [{ORTH: '47798'}, {ORTH: '497-045-1949'}])
language.tokenizer.add_special_case('.02/23/2077:', [{ORTH: '.'}, {ORTH: '02/23/2077:'}])
language.tokenizer.add_special_case('34712RadiologyExam', [{ORTH: '34712'}, {ORTH: 'RadiologyExam'}])
language.tokenizer.add_special_case('3041038MARY', [{ORTH: '3041038'}, {ORTH: 'MARY'}])
language.tokenizer.add_special_case('PLAN88F', [{ORTH: 'PLAN'}, {ORTH: '88'}, {ORTH: 'F'}])
language.tokenizer.add_special_case('~2112Hypothyroidism', [{ORTH: '~'}, {ORTH: '2112'}, {ORTH: 'Hypothyroidism'}])
language.tokenizer.add_special_case('97198841PGH', [{ORTH: '97198841'}, {ORTH: 'PGH'}])
language.tokenizer.add_special_case('5694653MEDIQUIK', [{ORTH: '5694653'}, {ORTH: 'MEDIQUIK'}])
language.tokenizer.add_special_case('0083716SNH', [{ORTH: '0083716'}, {ORTH: 'SNH'}])
language.tokenizer.add_special_case('20626842267', [{ORTH: '2062'}, {ORTH: '6842267'}])
language.tokenizer.add_special_case('0370149RSC', [{ORTH: '0370149'}, {ORTH: 'RSC'}])
language.tokenizer.add_special_case('4832978HOB', [{ORTH: '4832978'}, {ORTH: 'HOB'}])
language.tokenizer.add_special_case('0907307PCC', [{ORTH: '0907307'}, {ORTH: 'PCC'}])
language.tokenizer.add_special_case('LittletonColonoscopy', [{ORTH: 'Littleton'}, {ORTH: 'Colonoscopy'}])
language.tokenizer.add_special_case('34674TSH', [{ORTH: '34674'}, {ORTH: 'TSH'}])
language.tokenizer.add_special_case('b93D', [{ORTH: 'b'}, {ORTH: '93'}, {ORTH: 'D'}])
language.tokenizer.add_special_case('due22D', [{ORTH: 'due'}, {ORTH: '22'}, {ORTH: 'D'}])
language.tokenizer.add_special_case('33182William', [{ORTH: '33182'}, {ORTH: 'William'}])
#French 2014 NER
language.tokenizer.add_special_case('postopératoires', [{ORTH: 'post'}, {ORTH: 'opératoires'}])
custom_infixes = [r'\d+\.\d+','[P|p]RBCs?','cap|CAP','qhs|QHS|mg','tab|TAB|BPA', 'BB', 'yo','ASA','gtt|GTT','iv|IV', 'FFP',
'inh|INH', 'pf|PF', 'bid|BID|PND','prn|PRN','puffs?',r'\dL',
'QD|qd','Q?(AM|PM)','O2','MWF',r'q\d+', 'HS' , 'ye423zc',
'-|;|:|–|#|<|{|}', r'\-' ] + [r"\\", "/", "%", r"\+", r"\,", r"\(", r"\)", r"\.", r"\d\d/\d\d/\d\d\d\d", r"\d\d\d\d\d\d\d", r"\^"]
language.tokenizer.infix_finditer = compile_infix_regex(tuple(list(language.Defaults.infixes) + custom_infixes)).finditer
#language.tokenizer.infix_finditer = compile_infix_regex(custom_infixes).finditer
language.tokenizer.prefix_search = compile_prefix_regex(tuple(list(language.Defaults.prefixes)
+ ['-|:|_+', '/', '~','x|X',r'\dL', 'O2','VN',"Coumadin",
"HIGHLIGHTS","PROMPTCARE", "VISUDYNE","weeks", "week", 'ASA', 'pap',
"ZT", "BaP", "PND|BID", "BPA", "GD", 'BB', 'PBS',
"days", "day","Kx", 'mg', r"\d\.'", "Results", "RoeID", "at", r"\d/\d\d\d\d"])).search
language.tokenizer.suffix_search = compile_suffix_regex(tuple(list(language.Defaults.suffixes)
+ ['weeks|minutes|day|days|hours|year',
'Au',r"\[see", 'induced|IMA|HBMC|mice|MARY|SLO|RHM|solutions|—and|for|III|FINAL|The|Scattered|Intern|Left|Emergency|Staff|Chief',
'μl|μL|μg/L|AGH|HCC|RHN|MC|yM|GMH|Code|Hyperlipidemia|Adenomatous|greater|Drug|MEDIQUIK|and|Date|Procedure|Problems|Ordering|CLOSURE|Total|Status',
':|_+',r"\.\w+", 'U|D', "Mouse", r"\d\d/\d\d/\d\d\d\d", r"\d\d/\d\d"])).search
| 9,457
|
def analyze_disc_learning(logpath, figpath=None, show=False, **kwds):
"""
Plot learning curves of a discriminator loaded from `logpath`.
"""
disc = load_disc_log(logpath)
fig = disc.plot_all(**kwds)
fig.tight_layout()
if show:
pyplot.show()
if figpath:
fig.savefig(figpath)
| 9,458
|
def tracks2Dataframe(tracks):
"""
    Converts a list of Track objects to a pandas dataframe
Input:
tracks: List of Track objects
Output:
df: Pandas dataframe
"""
if(len(tracks) == 0):
print("Error saving to CSV. List of tracks is empty")
return
# Collect tracks into single dataframe
    # DataFrame.append was removed in pandas 2.0; build via concat instead
    df = pd.concat([t.toDataframe() for t in tracks], ignore_index=True)
df = df.sort_values(by=['frame', 'id'], ascending=[True, True])
return df
| 9,459
|
def sift_point_to_best(target_point, point, sift_dist):
"""
    Move a point along the segment to a target point, placing it at a given distance from the target. Based on Jensen's inequality formula.
Args:
target_point: A ndarray or tensor, the target point of pca,
point: A ndarray or tensor, point of pca,
        sift_dist: A float, the distance from the target at which the point is placed.
Returns:
new_points: A tuple, a couple of new updated points.
References:
https://en.wikipedia.org/wiki/Jensen%27s_inequality
"""
dist = np.sqrt(np.sum((point - target_point) ** 2))
a = sift_dist / dist
new_point = np.array([
point[0] * a + (1 - a) * target_point[0],
point[1] * a + (1 - a) * target_point[1]
])
new_points = (new_point[0], new_point[1])
return new_points
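# Worked example (illustrative values): with target (0, 0), point (3, 4) and
# sift_dist = 1, dist = 5 and a = 0.2, so the point lands at about (0.6, 0.8),
# i.e. exactly 1 unit from the target along the original segment:
# >>> sift_point_to_best(np.array([0.0, 0.0]), np.array([3.0, 4.0]), 1.0)
# (0.6, 0.8)  # up to floating-point rounding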
| 9,460
|
def jrandom_counts(sample, randoms, j_index, j_index_randoms, N_sub_vol, rp_bins, pi_bins,
period, num_threads, do_DR, do_RR):
"""
Count jackknife random pairs: DR, RR
"""
if do_DR is True:
DR = npairs_jackknife_xy_z(sample, randoms, rp_bins, pi_bins, period=period,
jtags1=j_index, jtags2=j_index_randoms,
N_samples=N_sub_vol, num_threads=num_threads)
DR = np.diff(np.diff(DR, axis=1), axis=2)
else:
DR = None
if do_RR is True:
RR = npairs_jackknife_xy_z(randoms, randoms, rp_bins, pi_bins, period=period,
jtags1=j_index_randoms, jtags2=j_index_randoms,
N_samples=N_sub_vol, num_threads=num_threads)
RR = np.diff(np.diff(RR, axis=1), axis=2)
else:
RR = None
return DR, RR
| 9,461
|
def cli():
"""Calliope: a multi-scale energy systems (MUSES) modeling framework"""
pass
| 9,462
|
def process_not_inferred_array(ex: pa.ArrowInvalid, values: Any) -> pa.Array:
"""Infer `pyarrow.array` from PyArrow inference exception."""
dtype = process_not_inferred_dtype(ex=ex)
if dtype == pa.string():
array: pa.Array = pa.array(obj=[str(x) for x in values], type=dtype, safe=True)
else:
raise ex # pragma: no cover
return array
| 9,463
|
def start_end(tf):
"""Find start and end indices of running streaks of True values"""
n = len(tf)
tf = np.insert(tf, [0, len(tf)], [False, False])
# 01 and 10 masks
start_mask = (tf[:-1] == 0) & (tf[1:] == 1)
end_mask = (tf[:-1] == 1) & (tf[1:] == 0)
# Locations
start_loc = np.where(start_mask)[0]
end_loc = np.minimum(np.where(end_mask)[0] - 1, n-1)
return start_loc, end_loc
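# Usage sketch: True streaks at indices 1-2 and 4-4.
# >>> start_end(np.array([False, True, True, False, True]))
# (array([1, 4]), array([2, 4]))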
| 9,464
|
@contextmanager  # the function yields, so it needs this; assumes `from contextlib import contextmanager`
def open_with_lock(fpath, mode="rb+", timeout=None, **kwargs):
"""Open file with lock."""
if timeout is None:
timeout = constants.lock_timeout
with Lock(fpath, mode, timeout=timeout, **kwargs) as file_handle:
try:
yield file_handle
finally:
file_handle.flush()
if "w" in mode or "+" in mode or "a" in mode:
try:
os.fsync(file_handle.fileno())
except OSError:
pass
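# Usage sketch (hypothetical path; the Lock class and `constants` come from
# the surrounding module):
# with open_with_lock("results.bin") as fh:
#     payload = fh.read()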
| 9,465
|
def add_metadata_from_dis_file(dis_file, res):
"""
This function parses the .dis file and populates relevant metadata terms for the MODFLOWModel-
InstanceResource object being passed. Data from the .dis file is used to populate portions of
    the StressPeriod, GridDimensions, and StudyArea terms. Documentation of the parts of the
    .dis file is available at: https://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?dis.htm
inputs:
dis_file: file object of .dis file
res: MODFLOWModelInstanceResource object
"""
lines = dis_file.resource_file.readlines()
first_line = True
ss = False
tr = False
total_y_length = None
total_x_length = None
study_area_info = dict()
stress_period_info = dict()
for l in lines:
l = l.strip()
l = l.split()
first_char = l[0].strip()
l = [c.lower() for c in l]
if not first_char.startswith('#'):
if first_line:
grid_dimension_info = dict(
numberOfLayers=l[0],
numberOfRows=l[1],
numberOfColumns=l[2],
)
lenuni = l[5]
first_line = False
unit_con_factor = get_unit_conversion_factor(int(lenuni))
if 'delr' in l:
if 'constant' in l:
grid_dimension_info['typeOfRows'] = 'Regular'
row_len = l[1]
if int(lenuni) > 0:
total_y_length = float(row_len) * float(
grid_dimension_info['numberOfRows']) * unit_con_factor
elif 'internal' in l:
grid_dimension_info['typeOfRows'] = 'Irregular'
if 'delc' in l:
if 'constant' in l:
grid_dimension_info['typeOfColumns'] = 'Regular'
col_len = l[1]
if int(lenuni) > 0:
total_x_length = float(col_len) * float(
grid_dimension_info['numberOfColumns']) * unit_con_factor
elif 'internal' in l:
grid_dimension_info['typeOfColumns'] = 'Irregular'
if 'ss' in l:
ss = True
if 'tr' in l:
tr = True
if total_y_length and total_x_length:
study_area_info['totalLength'] = max(total_y_length, total_x_length)
study_area_info['totalWidth'] = min(total_y_length, total_x_length)
if ss and not tr:
stress_period_info['stressPeriodType'] = 'Steady'
elif tr and not ss:
stress_period_info['stressPeriodType'] = 'Transient'
elif ss and tr:
stress_period_info['stressPeriodType'] = 'Steady and Transient'
create_or_update_from_package(res, modflow_models.StressPeriod, **stress_period_info)
create_or_update_from_package(res, modflow_models.GridDimensions, **grid_dimension_info)
create_or_update_from_package(res, modflow_models.StudyArea, **study_area_info)
| 9,466
|
def get_posts(positive_tags: List[str], negative_tags: Optional[List[str]] = None) -> Iterable[Post]:
"""Retrieve all post data that contains and doesn't contain certain tags.
Args:
positive_tags: The tags that the posts retrieved must contain.
negative_tags: Optional, blacklisted tags.
Yields:
A post in JSON format, which contains the positive tags and doesn't contain the negative
tags.
"""
if negative_tags is None:
negative_tags = list()
_LOGGER.debug('Retrieving posts with positive_tags=%s and negative_tags=%s',
str(positive_tags), str(negative_tags))
try:
positive_post_urls = _get_post_urls(positive_tags)
negative_post_urls = _get_post_urls(negative_tags)
relevant_post_urls = set(positive_post_urls) - set(negative_post_urls)
for post_url in relevant_post_urls:
post_data = requests.get(post_url).json()
_LOGGER.debug(post_data)
yield from_dict(data_class=Post, data=post_data)
except InvalidTagFormat:
raise
except Exception as ex:
_LOGGER.exception(ex)
raise
| 9,467
|
def remove_provinces(data, date_range):
"""
    Remove provinces by aggregating provincial records into country-level totals.
:param data: The Data received from the API
:param date_range: the date range of the data
:return: data after removing provinces
"""
countries_with_provinces = []
names_of_countries_with_prov = []
# get countries with provinces
for country in data[:]:
if country['province'] is not None:
if country['country'] not in names_of_countries_with_prov:
names_of_countries_with_prov.append(country['country'])
countries_with_provinces.append(data.pop(data.index(country)))
# deal with countries with provinces
for country_name in names_of_countries_with_prov[:]: # for each country,
countries = list(
filter(lambda x: x['country'] == country_name, countries_with_provinces))
names_of_countries_with_prov.remove(country_name)
# calculate total cases, deaths & recovered per day
cases = {}
recovered = {}
deaths = {}
for date in date_range:
cs = 0
dt = 0
rc = 0
# sum data up per province
for prov in countries:
cs += prov['timeline']['cases'][date]
dt += prov['timeline']['deaths'][date]
rc += prov['timeline']['recovered'][date]
cases[date] = cs
recovered[date] = rc
deaths[date] = dt
# return country after removing provinces
totals = ({'country': country_name, 'province': None, 'timeline': {
'cases': cases, 'deaths': deaths, 'recovered': recovered}})
data.append(totals)
return data
| 9,468
|
def testDisabled(component):
"""
    Raises an alert if a component is disabled.
Parameters
----------
component: Component
The component used for testing
"""
if "disabled" not in component.params:
return
if component.params['disabled'].val:
alert(4305, component, strFields={'name': component.params['name']})
| 9,469
|
def face_detection() -> None:
"""Initiates face recognition script and looks for images stored in named directories within ``train`` directory."""
support.flush_screen()
train_dir = 'train'
    if not os.path.isdir(train_dir):
        os.mkdir(train_dir)
speaker.speak(text='Initializing facial recognition. Please smile at the camera for me.', run=True)
sys.stdout.write('\rLooking for faces to recognize.')
try:
result = Face().face_recognition()
except CameraError:
support.flush_screen()
logger.error('Unable to access the camera.')
speaker.speak(text="I was unable to access the camera. Facial recognition can work only when cameras are "
"present and accessible.")
return
if not result:
sys.stdout.write('\rLooking for faces to detect.')
speaker.speak(text="No faces were recognized. Switching on to face detection.", run=True)
result = Face().face_detection()
if not result:
sys.stdout.write('\rNo faces were recognized nor detected.')
            speaker.speak(text='No faces were recognized nor detected. Please check if your camera is working, '
'and look at the camera when you retry.')
return
sys.stdout.write('\rNew face has been detected. Like to give it a name?')
speaker.speak(text='I was able to detect a face, but was unable to recognize it.')
Image.open('cv2_open.jpg').show()
speaker.speak(text="I've taken a photo of you. Preview on your screen. Would you like to give it a name, "
"so that I can add it to my database of known list? If you're ready, please tell me a name, "
"or simply say exit.", run=True)
phrase = listener.listen(timeout=3, phrase_limit=5)
if not phrase or 'exit' in phrase or 'quit' in phrase or 'Xzibit' in phrase:
os.remove('cv2_open.jpg')
speaker.speak(text="I've deleted the image.", run=True)
else:
phrase = phrase.replace(' ', '_')
# creates a named directory if it is not found already
if not os.path.exists(f'{train_dir}/{phrase}'):
os.makedirs(f'{train_dir}/{phrase}')
c_time = datetime.now().strftime("%I_%M_%p")
img_name = f"{phrase}_{c_time}.jpg" # adds current time to image name to avoid overwrite
os.rename('cv2_open.jpg', img_name) # renames the files
shutil.move(src=img_name, dst=f'{train_dir}/{phrase}') # move files into named directory within train_dir
speaker.speak(text=f"Image has been saved as {img_name}. I will be able to recognize {phrase} in future.")
else:
speaker.speak(text=f'Hi {result}! How can I be of service to you?')
| 9,470
|
def plot_publish(families, targets=None, identifiers=None, keys=None):
"""Parse and plot all plugins by families and targets
Args:
families (list): List of interested instance family names
targets (list, optional): List of target names
identifiers (list, optional): List of interested dict names, take
["context.data", "instance.data"] if not provided.
keys (list, optional): List of interested key names, return all dict
keys if not provided.
"""
if not targets:
targets = ["default"] + api.registered_targets()
plugins = api.discover()
plugins = logic.plugins_by_families(plugins, families)
plugins = logic.plugins_by_targets(plugins, targets)
reports = list()
for plugin in plugins:
report = plot_plugin(plugin, identifiers, keys)
if report:
reports.append(report)
return reports
| 9,471
|
def get_job_view(execution, prev_execution, stackstorm_url):
"""
Gets a job view from the specified execution and previous execution
:param execution: dict
:param prev_execution: dict
:param stackstorm_url: string
:return: dict
"""
current_time = datetime.datetime.utcnow()
hash_code = abs(hash(execution['action']['name'])) % (10 ** 8)
estimated_duration = ''
prev_time_elapsed_since = ''
if execution and 'start_timestamp' in execution:
start_time = datetime.datetime.strptime(execution['start_timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ')
elapsed_seconds = int((current_time - start_time).total_seconds())
else:
elapsed_seconds = 0
if prev_execution and 'elapsed_seconds' in prev_execution:
prev_elapsed_seconds = int(math.ceil(prev_execution['elapsed_seconds']))
else:
prev_elapsed_seconds = 0
if prev_execution:
prev_execution_id = prev_execution['id']
prev_build_name = prev_execution['id']
if 'end_timestamp' in prev_execution:
prev_end_time = datetime.datetime.strptime(prev_execution['end_timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ')
prev_time_elapsed_since = int((current_time - prev_end_time).total_seconds())
if 'elapsed_seconds' in prev_execution:
estimated_duration = '{}s'.format(int(math.ceil(prev_execution['elapsed_seconds'])))
else:
prev_execution_id = ''
prev_build_name = ''
prev_build_duration = estimated_duration
progress = 0
if execution['status'] == 'succeeded':
status = 'successful'
elif execution['status'] == 'failed':
status = 'failing'
elif execution['status'] == 'running':
if prev_execution and prev_execution['status'] == 'failed':
status = 'failing running'
elif prev_execution and prev_execution['status'] == 'succeeded':
status = 'successful running'
else:
status = 'unknown running'
if prev_execution and (prev_execution['status'] == 'failed' or prev_execution['status'] == 'succeeded'):
if prev_elapsed_seconds > 0:
progress = int(math.floor((float(elapsed_seconds) / float(prev_elapsed_seconds)) * 100))
if progress > 100:
progress = 100
else:
progress = 100
else:
progress = 100
else:
status = 'unknown'
job_view = {
'name': execution['action']['name'],
'url': '{}/#/history/{}/general'.format(stackstorm_url, execution['id']),
'status': status,
'hashCode': hash_code,
'progress': progress,
'estimatedDuration': estimated_duration,
'headline': '',
'lastBuild': {
"timeElapsedSince": str(prev_time_elapsed_since),
"duration": prev_build_duration,
"description": '',
"name": prev_build_name,
"url": '{}/#/history/{}/general'.format(stackstorm_url, prev_execution_id),
},
'debug': {
'elapsed_seconds': elapsed_seconds,
'prev_elapsed_seconds': prev_elapsed_seconds,
}
}
return job_view
| 9,472
|
def mdetr_resnet101_refcocoplus(pretrained=False, return_postprocessor=False):
"""
MDETR R101 with 6 encoder and 6 decoder layers.
Trained on refcoco+, achieves 79.52 val accuracy
"""
model = _make_detr("resnet101")
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://zenodo.org/record/4721981/files/refcoco%2B_resnet101_checkpoint.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
| 9,473
|
def launch_duo_report(related_genome_id, duo_relation, duo_affected,
proband_genome_id, proband_sex, score_indels,
accession_id):
"""Launch a family report. Return the JSON response.
"""
# Construct url and request
url = "{}/reports/".format(FABRIC_API_URL)
url_payload = {'report_type': "Duo",
'duo_relation_genome_id': int(related_genome_id),
'duo_relation': duo_relation,
'duo_affected': duo_affected == 'affected',
'proband_genome_id': int(proband_genome_id),
'proband_sex': ('f' if proband_sex == 'female' else 'm'),
'background': 'FULL',
'score_indels': bool(score_indels),
'accession_id': accession_id}
sys.stdout.write("Launching family report...\n")
result = requests.post(url, auth=auth, data=json.dumps(url_payload))
return result.json()
| 9,474
|
def readConfigFile(filePath):
""" Read the config file and generate a dictionnary containing an entry for
every modules of the installation. """
modules_attributes_list = []
confFile = open(filePath, "r")
    for line in confFile.readlines():
# Remove everything that is written after "#" character (comments)
line = line.split("#")[0]
line = line.split("//")[0]
line = line.split("$")[0]
# Remove special characters
line = re.sub('[!@#$\0\\n ]','',line)
# Get the MAC addresses and the modules number
words = line.split(",")
if len(words) == 4:
modID = int(words[0])
posY = int(words[1])
posX = int(words[2])
macAddr = words[3]
modules_attributes_list.append((modID, posY, posX, macAddr))
elif len(words) < 2:
pass
else :
raise AttributeError("Wrong formatting of the MAC file.")
return modules_attributes_list
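A minimal sketch of the expected file layout; the file name and MAC values are illustrative only:

with open("modules.conf", "w") as f:
    f.write("1,0,0,AABBCCDDEEFF  # top-left module\n")
    f.write("2,0,1,AABBCCDDEE00\n")
print(readConfigFile("modules.conf"))
# [(1, 0, 0, 'AABBCCDDEEFF'), (2, 0, 1, 'AABBCCDDEE00')]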
| 9,475
|
def test_date_rounding():
""" https://github.com/SheetJS/ssf/issues/32 """
dt = 4018.99999998843
cases = [("mm/dd/yyyy hh:mm:ss.000", "12/31/1910 23:59:59.999"),
("mm/dd/yyyy hh:mm:ss.00", "01/01/1911 00:00:00.00"),
("mm/dd/yyyy hh:mm:ss.0", "01/01/1911 00:00:00.0"),
("mm/dd/yyyy hh:mm:ss", "01/01/1911 00:00:00"),
("mm/dd/yyyy hh:mm", "01/01/1911 00:00"),
("mm/dd/yyyy hh", "01/01/1911 00"),
("[hh]:mm:ss.000", "96455:59:59.999"),
("[hh]:mm:ss.00", "96456:00:00.00"),
("[hh]:mm:ss.0", "96456:00:00.0"),
("[hh]:mm:ss", "96456:00:00"),
("[hh]:mm", "96456:00"),
("[hh]", "96456"),
("[mm]:ss.000", "5787359:59.999"),
("[mm]:ss.00", "5787360:00.00"),
("[mm]:ss.0", "5787360:00.0"),
("[mm]:ss", "5787360:00"),
("[ss].000", "347241599.999"),
("[ss].00", "347241600.00"),
("[ss].0", "347241600.0"),
("[ss]", "347241600"),
("General", "4019"),
]
for case in cases:
fmt, expected = case
assert ssf.format(fmt, dt) == expected
assert ssf.format('[ss]', -0.9999999) == '-86400'
| 9,476
|
def unpad_pkcs7(data):
"""
Strips PKCS#7 padding from data.
Raises ValueError if padding is invalid.
"""
if len(data) == 0:
raise ValueError("Error: Empty input.")
pad_value = data[-1]
if pad_value == 0 or pad_value > 16:
raise ValueError("Error: Invalid padding.")
for i in range(1, pad_value + 1):
if data[-i] != pad_value:
raise ValueError("Error: Invalid padding.")
unpadded = data[: (len(data) - pad_value)]
return unpadded
| 9,477
|
def create_output_channel(
mgr: sl_tag.TagManager, group: str, name: str, data_type: sl_tag.DataType
) -> sl_tag.TagData:
"""Create a FlexLogger output channel."""
# "Import" the channel into FlexLogger.
full_name = get_tag_prefix() + ".Import.Setpoint.{}.{}".format(group, name)
mgr.open(full_name, data_type, create=True)
# Once FlexLogger creates the channel, we'll interact with it as an "export" channel
# (for both reads and writes).
full_name = get_tag_prefix() + ".Export.Setpoint.{}".format(name)
# Go ahead and pre-create the output channel, for ease-of-use. Otherwise, when
# trying to read its value, we'd have to be prepared for an ApiException complaining
# that the tag doesn't exist.
mgr.open(full_name, data_type, create=True)
return sl_tag.TagData(full_name, data_type)
| 9,478
|
def slugify(value, allow_unicode=False):
    """
    adapted from https://github.com/django/django/blob/master/django/utils/text.py
    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
    dashes to single dashes. Remove characters that aren't alphanumerics,
    underscores, or hyphens. Convert to lowercase. Also strip leading and
    trailing whitespace, dashes, and underscores.
    """
    import unicodedata
    value = str(value)
    value = value.replace(":", "_")
    value = value.replace("/", "_")
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
    else:
        value = (unicodedata.normalize('NFKD', value)
                 .encode('ascii', 'ignore').decode('ascii'))
    value = re.sub(r'[^\w\s-]', '', value.lower())
    return re.sub(r'[-\s]+', '-', value).strip('-_')
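A couple of illustrative calls:

slugify("Hello World!")    # 'hello-world'
slugify("path/to:file")    # 'path_to_file' (slashes and colons become underscores)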
| 9,479
|
def _checkwavelet(wavelet):
"""Check that wavelet belongs to pywt.wavelist
"""
wavelist = pywt.wavelist(kind='discrete')
if wavelet not in wavelist:
raise ValueError("'%s' not in family set = %s" % (wavelet,
wavelist))
| 9,480
|
def data_process(raw_text_iter: dataset.IterableDataset) -> Tensor:
"""Converts raw text into a flat Tensor."""
data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]
return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
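A minimal sketch of the surrounding setup, assuming `vocab` and `tokenizer` are the module-level objects the function references (torchtext API; the sample text is illustrative only):

from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator

raw_text = ["the quick brown fox", "jumps over the lazy dog"]
tokenizer = get_tokenizer('basic_english')
vocab = build_vocab_from_iterator(map(tokenizer, raw_text), specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])
data = data_process(iter(raw_text))  # flat 1-D LongTensor of token ids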
| 9,481
|
def rna_view_redirect(request, upi, taxid):
"""Redirect from urs_taxid to urs/taxid."""
return redirect('unique-rna-sequence', upi=upi, taxid=taxid, permanent=True)
| 9,482
|
def skip_for_variants(meta: MetaData, variant_keys: AbstractSet[str]) -> bool:
"""Check if the recipe uses any given variant keys
Args:
        meta: Variant MetaData object
        variant_keys: Set of variant keys to check the recipe against
Returns:
True if any variant key from variant_keys is used
"""
# This is the same behavior as in
# conda_build.metadata.Metadata.get_hash_contents but without leaving out
# "build_string_excludes" (python, r_base, etc.).
dependencies = set(meta.get_used_vars())
trim_build_only_deps(meta, dependencies)
return not dependencies.isdisjoint(variant_keys)
| 9,483
|
def _is_safe_url(url, request):
"""Override the Django `is_safe_url()` to pass a configured list of allowed
hosts and enforce HTTPS."""
allowed_hosts = (
settings.DOMAIN,
urlparse(settings.EXTERNAL_SITE_URL).netloc,
)
require_https = request.is_secure() if request else False
return is_safe_url(url, allowed_hosts=allowed_hosts, require_https=require_https)
| 9,484
|
def bootstrap():
"""
Initialize all the infrastructure for the app.
:return:
"""
# Initialize the ORM
start_mappers()
| 9,485
|
def test_full_pipeline() -> None:
"""Test the full pipeline."""
# Define a class that can send messages and one that can receive them.
class TestClassS:
"""Test class incorporating send functionality."""
msg = _TestMessageSenderBoth()
def __init__(self, target: Union[TestClassRSync,
TestClassRAsync]) -> None:
self._target = target
@msg.send_method
def _send_raw_message(self, data: str) -> str:
"""Handle synchronous sending of raw json message data."""
# Just talk directly to the receiver for this example.
# (currently only support synchronous receivers)
assert isinstance(self._target, TestClassRSync)
return self._target.receiver.handle_raw_message(data)
@msg.send_async_method
async def _send_raw_message_async(self, data: str) -> str:
"""Handle asynchronous sending of raw json message data."""
# Just talk directly to the receiver for this example.
# (we can do sync or async receivers)
if isinstance(self._target, TestClassRSync):
return self._target.receiver.handle_raw_message(data)
return await self._target.receiver.handle_raw_message(data)
class TestClassRSync:
"""Test class incorporating synchronous receive functionality."""
receiver = _TestSyncMessageReceiver()
@receiver.handler
def handle_test_message_1(self, msg: _TMsg1) -> _TResp1:
"""Test."""
if msg.ival == 1:
raise CleanError('Testing Clean Error')
if msg.ival == 2:
raise RuntimeError('Testing Runtime Error')
return _TResp1(bval=True)
@receiver.handler
def handle_test_message_2(self,
msg: _TMsg2) -> Union[_TResp1, _TResp2]:
"""Test."""
del msg # Unused
return _TResp2(fval=1.2)
@receiver.handler
def handle_test_message_3(self, msg: _TMsg3) -> None:
"""Test."""
del msg # Unused
receiver.validate()
class TestClassRAsync:
"""Test class incorporating asynchronous receive functionality."""
receiver = _TestAsyncMessageReceiver()
@receiver.handler
async def handle_test_message_1(self, msg: _TMsg1) -> _TResp1:
"""Test."""
if msg.ival == 1:
raise CleanError('Testing Clean Error')
if msg.ival == 2:
raise RuntimeError('Testing Runtime Error')
return _TResp1(bval=True)
@receiver.handler
async def handle_test_message_2(
self, msg: _TMsg2) -> Union[_TResp1, _TResp2]:
"""Test."""
del msg # Unused
return _TResp2(fval=1.2)
@receiver.handler
async def handle_test_message_3(self, msg: _TMsg3) -> None:
"""Test."""
del msg # Unused
receiver.validate()
obj_r_sync = TestClassRSync()
obj_r_async = TestClassRAsync()
obj = TestClassS(target=obj_r_sync)
obj2 = TestClassS(target=obj_r_async)
# Test sends (of sync and async varieties).
response1 = obj.msg.send(_TMsg1(ival=0))
response2 = obj.msg.send(_TMsg2(sval='rah'))
response3 = obj.msg.send(_TMsg3(sval='rah'))
response4 = asyncio.run(obj.msg.send_async(_TMsg1(ival=0)))
# Make sure static typing lines up with what we expect.
if os.environ.get('EFRO_TEST_MESSAGE_FAST') != '1':
assert static_type_equals(response1, _TResp1)
assert static_type_equals(response3, None)
assert isinstance(response1, _TResp1)
assert isinstance(response2, (_TResp1, _TResp2))
assert response3 is None
assert isinstance(response4, _TResp1)
    # Remote CleanErrors should come across locally as the same.
    # (pytest.raises also fails the test if no exception is raised at all)
    with pytest.raises(CleanError, match='Testing Clean Error'):
        _response5 = obj.msg.send(_TMsg1(ival=1))
# Other remote errors should result in RemoteError.
with pytest.raises(RemoteError):
_response5 = obj.msg.send(_TMsg1(ival=2))
# Now test sends to async handlers.
response6 = asyncio.run(obj2.msg.send_async(_TMsg1(ival=0)))
assert isinstance(response6, _TResp1)
# Make sure static typing lines up with what we expect.
if os.environ.get('EFRO_TEST_MESSAGE_FAST') != '1':
assert static_type_equals(response6, _TResp1)
| 9,486
|
def bn_calibration_init(m):
""" calculating post-statistics of batch normalization """
if getattr(m, 'track_running_stats', False):
# reset all values for post-statistics
m.reset_running_stats()
# set bn in training mode to update post-statistics
m.training = True
# if use cumulative moving average
if getattr(FLAGS, 'cumulative_bn_stats', False):
m.momentum = None
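Typical usage, a sketch assuming `model` and `calibration_loader` exist; BN statistics are reset and then recomputed by forwarding calibration batches:

model.apply(bn_calibration_init)
with torch.no_grad():
    for images, _ in calibration_loader:
        model(images)  # forward passes update the running BN statistics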
| 9,487
|
def use_blackontrans_style():
"""Use blackontrans matplotlib style"""
plt.style.use(resource_filename("pynba", "blackontrans.mplstyle"))
| 9,488
|
def sanitize_file_lines(file):
"""Enumerate a line iterator and returns the pairs of (line number, line) that are cleaned.
:param iter[str] file: An iterable over the lines in a BEL Script
:rtype: iter[tuple[int,str]]
"""
line_iterator = sanitize_file_line_iter(file)
for line_number, line in line_iterator:
if line.endswith('\\'):
log.log(4, 'Multiline quote starting on line: %d', line_number)
line = line.strip('\\').strip()
next_line_number, next_line = next(line_iterator)
while next_line.endswith('\\'):
log.log(3, 'Extending line: %s', next_line)
line += " " + next_line.strip('\\').strip()
next_line_number, next_line = next(line_iterator)
line += " " + next_line.strip()
log.log(3, 'Final line: %s', line)
elif 1 == line.count('"'):
log.log(4, 'PyBEL013 Missing new line escapes [line: %d]', line_number)
next_line_number, next_line = next(line_iterator)
next_line = next_line.strip()
while not next_line.endswith('"'):
log.log(3, 'Extending line: %s', next_line)
line = '{} {}'.format(line.strip(), next_line)
next_line_number, next_line = next(line_iterator)
next_line = next_line.strip()
line = '{} {}'.format(line, next_line)
log.log(3, 'Final line: %s', line)
comment_loc = line.rfind(' //')
if 0 <= comment_loc:
line = line[:comment_loc]
yield line_number, line
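A sketch of the folding behaviour, assuming sanitize_file_line_iter yields (line_number, line) pairs with blank lines removed; the BEL statements are illustrative only:

script = [
    'SET Citation = {"PubMed", \\',
    '"12345"}',
    'p(HGNC:AKT1) -> p(HGNC:MTOR) // a trailing comment',
]
for number, line in sanitize_file_lines(script):
    print(number, line)
# the backslash continuation is folded into one line and the "//" comment is stripped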
| 9,489
|
def _server():
"""
Reconstitute the name of this Blueprint I/O Server.
"""
return urlparse.urlunparse((request.environ.get('wsgi.url_scheme',
'https'),
request.environ.get('HTTP_HOST',
'devstructure.com'),
'', '', '', ''))
| 9,490
|
def get_range_api(spreadsheetToken, sheet_id, range, valueRenderOption=False):
"""
该接口用于根据 spreadsheetToken 和 range 读取表格单个范围的值,返回数据限制为10M。
:return:
"""
range_fmt = sheet_id + '!' + range
get_range_url = cfg.get_range_url.format(spreadsheetToken=spreadsheetToken, range=range_fmt)
headers = {
"Authorization": "Bearer " + cfg.access_token,
"Content-Type": "application/json"
}
params = {
"valueRenderOption": "ToString" if valueRenderOption else None
}
result = get_http_request(get_range_url, headers=headers, params=params)
return result
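A hypothetical call; the token and sheet id are placeholders, and cfg.access_token is assumed to have been obtained already:

result = get_range_api("shtcnXXXXXXXX", "0b1e2f", "A1:C10", valueRenderOption=True)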
| 9,491
|
def rotate_rboxes90(rboxes: tf.Tensor,
image_width: int,
image_height: int,
rotation_count: int = 1) -> tf.Tensor:
"""Rotate oriented rectangles counter-clockwise by multiples of 90 degrees."""
image_width = tf.cast(image_width, dtype=tf.float32)
image_height = tf.cast(image_height, dtype=tf.float32)
rotation_count = rotation_count % 4
x, y, w, h, angle = tf.split(rboxes, 5, axis=1)
if rotation_count == 0:
return rboxes
elif rotation_count == 1:
angle = tf.where(angle < -90.0, angle + 270, angle - 90)
return tf.concat([y, image_width - x - 1, w, h, angle], axis=1)
elif rotation_count == 2:
angle = tf.where(angle < 0.0, angle + 180, angle - 180)
return tf.concat([image_width - x - 1, image_height - y - 1, w, h, angle],
axis=1)
else:
angle = tf.where(angle > 90.0, angle - 270, angle + 90)
return tf.concat([image_height - y - 1, x, w, h, angle], axis=1)
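A small sketch: one box centred at (10, 20) on a 100x50 image, rotated a quarter turn counter-clockwise:

boxes = tf.constant([[10.0, 20.0, 5.0, 3.0, 0.0]])  # x, y, w, h, angle
rotated = rotate_rboxes90(boxes, image_width=100, image_height=50)
# expected: [[20., 89., 5., 3., -90.]] -- the new x is the old y,
# the new y is image_width - x - 1, and the angle drops by 90 degrees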
| 9,492
|
def get_wrong_user_credentials():
"""
Monkeypatch GithubBackend.get_user_credentials to force the case where
    invalid credentials were provided
"""
return dict(username='invalid',
password='invalid',
token='invalid',
remember=False,
remember_token=False)
| 9,493
|
def get_points(coords, m, b=None, diagonal=False):
"""Returns all discrete points on a line"""
points = []
x1, y1, x2, y2 = coords[0], coords[1], coords[2], coords[3]
# vertical line
    if np.isnan(m):
# bottom to top
y = min(y1, y2)
while y <= max(y1, y2):
points.append((x1, y))
y += 1
# horizontal line
elif m == 0:
# left to right
x = min(x1, x2)
while x <= max(x1, x2):
points.append((x, y1))
x += 1
else:
# diagonal line
if diagonal:
x = x1
y = y1
if x1 < x2:
# left to right
while x <= x2:
points.append((x, y))
x += 1
y = m * x + b
else:
# right to left
while x >= x2:
points.append((x, y))
x -= 1
y = m * x + b
else:
return None
return points
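Two illustrative calls (NaN slope marks a vertical segment by convention):

print(get_points((0, 0, 0, 3), np.nan))
# [(0, 0), (0, 1), (0, 2), (0, 3)]
print(get_points((0, 0, 2, 2), 1, b=0, diagonal=True))
# [(0, 0), (1, 1), (2, 2)]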
| 9,494
|
def get_region_geo(region_id):
"""Get Geo/TopoJSON of a region.
Args:
region_id (str): Region ID (e.g. LK-1, LK-23)
Returns:
        Geo-spatial data for the region (an empty dict if the region is unknown)
"""
region_type = get_entity_type(region_id)
region_to_geo = _get_region_to_geo(region_type)
return region_to_geo.get(region_id, {})
| 9,495
|
def load_vocabulary(f):
"""
Load the vocabulary from file.
:param f: Filename or file object.
:type f: str or file
:return: Vocabulary
"""
v = Vocabulary()
if isinstance(f, str):
file_ = open(f, 'r')
else:
file_ = f
for line in file_:
wordid, word, wordcount = line.strip().split('\t')
wordid, wordcount = int(wordid), int(wordcount)
v.id2word[wordid] = word
v.word2id[word] = wordid
if wordcount != 0:
v.word_count[wordid] = wordcount
if isinstance(f, str):
file_.close()
return v
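A minimal sketch, assuming the Vocabulary class the function builds; each line of the file is "wordid<TAB>word<TAB>wordcount":

import io

vocab_file = io.StringIO("0\t<unk>\t0\n1\thello\t5\n2\tworld\t3\n")
v = load_vocabulary(vocab_file)
# v.id2word[1] == 'hello'; v.word_count[1] == 5 (zero counts are skipped)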
| 9,496
|
def populate_objects(phylodata_objects, project_name, path_to_species_trees, path_to_gene_trees, path_to_ranger_outputs):
"""
this function will try and associate each phylodata object with the correct
species_besttree
gene_bootstrap_trees
and rangerDTL output files (if they exist)
args:
list of phylodata objects
name of project
paths (to species trees, to bootstrap gene trees, to rangerDTL
returns
True if everything was associated
False if something has gone horribly awry
"""
#try and populate the species and gene files. should work.
for obj in phylodata_objects:
#print("Populating species trees")
obj.populate_species_tree(path_to_species_trees)
#print("Populating gene trees")
obj.populate_gene_boots(path_to_gene_trees)
#now try and populate ranger output, if not make directory and run run_rangerDTL
for obj in phylodata_objects:
#print("Checking for rangerDTL outputs")
exists = obj.populate_ranger_dtl_outputs(path_to_ranger_outputs)
if exists is False:
#run the program.
print("Running RangerDTL")
path_to_ranger_outputs, list_of_ranger_outputs = annotate_ranger.run_rangerDTL(obj, project_name)
#print("Checking for new rangerDTL outputs")
exists = obj.populate_ranger_dtl_outputs(path_to_ranger_outputs)
        if exists is False:
            print("error in rangerDTL output assignment")
            raise SystemExit
return True
| 9,497
|
def extract_peers_dataset(
work_dict,
scrub_mode='sort-by-date'):
"""extract_peers_dataset
Fetch the IEX peers data for a ticker and
return it as a pandas Dataframe
:param work_dict: dictionary of args
:param scrub_mode: type of scrubbing handler to run
"""
label = work_dict.get('label', 'extract')
df_type = iex_consts.DATAFEED_PEERS
df_str = iex_consts.get_datafeed_str(df_type=df_type)
req = copy.deepcopy(work_dict)
if 'redis_key' not in work_dict:
# see if it's get dataset dictionary
if 'peers' in work_dict:
req['redis_key'] = req['peers']
req['s3_key'] = req['peers']
# end of support for the get dataset dictionary
log.debug(f'{label} - {df_str} - start')
return extract_utils.perform_extract(
df_type=df_type,
df_str=df_str,
work_dict=req,
scrub_mode=scrub_mode)
| 9,498
|
def check_pipeline_can_be_updated(client, version_name, pipeline_name, project_name, data):
"""
    Check if the pipeline can be updated:
    - If the desired input/output has changed, check whether other versions exist (besides the current one)
:param ubiops.CoreApi client: the core API client to make requests to the API
:param str version_name: the name of the pipeline version
:param str pipeline_name: the name of the pipeline
:param str project_name: the name of the project
:param dict data: the pipeline input/output data to update to
"""
if data:
versions = client.pipeline_versions_list(project_name=project_name, pipeline_name=pipeline_name)
if versions and len(versions) > 0 and (len(versions) > 1 or versions[0].version != version_name):
raise Exception(
"It is not possible to update the input/output fields/type of the pipeline because it contains "
"multiple versions"
)
| 9,499
|