| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def weight_variable_glorot(input_dim, output_dim, name=""):
"""Create a weight variable with Glorot & Bengio (AISTATS 2010)
initialization.
"""
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,
maxval=init_range, dtype=tf.float32)
return tf.Variable(initial, name=name)
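# Added usage sketch (not from the original source): assumes the TensorFlow 1.x
# API implied by tf.random_uniform above. Builds a Glorot-initialized weight for
# a hypothetical 784 -> 256 dense layer and applies it to a dummy batch.
import numpy as np
import tensorflow as tf

x = tf.ones((32, 784))
W_enc = weight_variable_glorot(784, 256, name="enc_w1")
h = tf.matmul(x, W_enc)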
| 14,800
|
def pareto_plot(column: pd.Series,
use_given_index: bool = False,
figsize: Tuple[int, int] = (12, 8),
return_freq_df: bool = False):
"""
Draw Pareto plot for categorical variable
Arguments:
----------
    column: pd.Series
        Categorical input
    use_given_index: bool
        If True, treat ``column`` as pre-computed frequency counts indexed by label;
        otherwise frequencies are computed with ``value_counts()``
    figsize: Tuple
        size of the figure
    return_freq_df: bool
        Returns frequency dataframe if True
Example:
--------
>>> pareto_plot(df['state'], figsize=(20, 10))
>>> df = pareto_plot(df['area code'], return_freq_df=True)
>>> df
label frequency cumpercentage
0 415 1655 49.654965
1 510 840 74.857486
2 408 838 100.000000
"""
freq = column.copy()
    if not use_given_index:
freq = column.value_counts().sort_values(ascending=False)
freq_df = pd.DataFrame({'label': freq.index,
'frequency': freq.values})
freq_df['cumpercentage'] = freq_df['frequency'].cumsum()/freq_df['frequency'].sum()*100
# plot
fig, ax = plt.subplots(figsize=figsize)
ax.bar(freq_df.index, freq_df['frequency'],
color='C0')
ax2 = ax.twinx()
ax2.plot(freq_df.index, freq_df['cumpercentage'],
color='C1', marker='D', ms=7)
ax2.yaxis.set_major_formatter(PercentFormatter())
ax.set_xticks(freq_df.index)
ax.set_xticklabels(freq_df['label'], fontsize=10,
rotation=35)
ax.tick_params(axis='y', colors='C0')
ax2.tick_params(axis='y', colors='C1')
plt.show()
if return_freq_df:
return freq_df
| 14,801
|
def get_openshift_installer(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the OpenShift installer binary, if not already present.
Update env. PATH and get path of the openshift installer binary.
Args:
version (str): Version of the installer to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force installer download even if already present
Returns:
str: Path to the installer binary
"""
version = version or config.DEPLOYMENT["installer_version"]
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
installer_filename = "openshift-install"
installer_binary_path = os.path.join(bin_dir, installer_filename)
if os.path.isfile(installer_binary_path) and force_download:
delete_file(installer_binary_path)
if os.path.isfile(installer_binary_path):
log.debug(f"Installer exists ({installer_binary_path}), skipping download.")
# TODO: check installer version
else:
version = expose_ocp_version(version)
log.info(f"Downloading openshift installer ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
tarball = f"{installer_filename}.tar.gz"
url = get_openshift_mirror_url(installer_filename, version)
download_file(url, tarball)
run_cmd(f"tar xzvf {tarball} {installer_filename}")
delete_file(tarball)
# return to the previous working directory
os.chdir(previous_dir)
installer_version = run_cmd(f"{installer_binary_path} version")
log.info(f"OpenShift Installer version: {installer_version}")
return installer_binary_path
| 14,802
|
async def activate_prompt_toolkit_async_mode() -> None:
"""Configure prompt toolkit to use the asyncio event loop.
Needs to be async, so we use the right event loop in py 3.5"""
global ACTIVATED_ASYNC_MODE
if not is_prompt_toolkit_3():
# Tell prompt_toolkit to use asyncio for the event loop.
from prompt_toolkit.eventloop import use_asyncio_event_loop
use_asyncio_event_loop()
ACTIVATED_ASYNC_MODE = True
| 14,803
|
def parse_arguments():
"""
    Parse the input arguments.
Returns:
Parsed arguments.
"""
parser = argparse.ArgumentParser(description='Generates a lexicon for gender recognition.')
parser.add_argument('dataset', help='file with JSON objects to be processed')
parser.add_argument('--faces', action='store_true', help='apply facial recognition over profile images')
parser.add_argument('--confidence', metavar='N', type=float, default=0.75,
help="minimal confidence for a valid recognition (default=0.75)")
parser.add_argument('--lexicon-percentage', metavar='N', type=float, default=0.5,
help="Percentage of words to get from the generated lexicon")
parser.add_argument('--surnames', action='store_true', help='require fullnames (at least one surname)')
parser.add_argument('--remove-outliers', action='store_true',
                        help='remove outliers before generating the training and test datasets')
return parser.parse_args()
| 14,804
|
def remove_external_id(
role_name: str,
dir_path: Optional[str],
session=None,
client=None,
backup_policy: Optional[str] = "",
bucket: Optional[str] = None,
) -> Dict:
"""The remove_external_id method takes a role_name as a string
to allow the removal of an externalId condition.
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
if backup_policy:
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
arpd["Statement"][0]["Condition"] = {}
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
return arpd
except ClientError as error:
raise error
| 14,805
|
def find_binaries(fw_path):
"""
    Gets a list of possible binaries within a firmware sample.
The list might contain false positives, angr will ignore them.
:param fw_path: firmware path
:return: a list of binaries
"""
cmd = "find \""+ fw_path + "\""
cmd += " -executable -type f -exec file {} \; | grep -iv image | grep -iv text | awk -F':' '{print $1}'"
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
o, e = p.communicate()
if o:
return o.split('\n')
return []
| 14,806
|
def get_core_count():
"""
Find out how many CPU cores this system has.
"""
try:
cores = str(compat.enum_cpus()) # 3.4 and up
except NotImplementedError:
cores = "1" # 3.2-3.3
else:
if compat.enum_cpus() is None:
cores = "1"
return cores
| 14,807
|
def test_artefact_update() -> None:
"""Test updating a const artefact."""
db = Redis()
store = RedisStorage(db)
art = _graph.constant_artefact(db, store, b"bla bla")
with pytest.raises(TypeError):
_graph.set_data(db, store, art.hash, b"b", _graph.ArtefactStatus.done)
| 14,808
|
def ASTTailrec(func):
"""
This approach involves modifying the ast tree so we can just stick a decorator on such as
```
@ASTTailrec
def fac(n, k=1):
if n == 1: return k
return fac(n-1, k*n)
```
    This function has been heavily inspired by Robin Hilliard's pipeop library at
    https://github.com/robinhilliard/pipes, which was used as a reference when developing this decorator.
"""
if isclass(func):
raise TypeError("Cannot apply tail recursion to a class")
in_context = func.__globals__
new_context = {"Tailrec": Tailrec, "Tailcall": Tailcall}
# these need to be included in the imports else we're gonna have some trouble
# if they've already been imported, let that import hold precedence.
new_context.update(in_context)
# now let's try and get the source
source = getsource(func)
# we get the tree
tree = ast.parse(dedent(source))
# update for debugger
first_line_number = func.__code__.co_firstlineno
ast.increment_lineno(tree, first_line_number - 1)
# let's grab the name of the function here. func.__name__ is not reliable in case
# of other decorators and no use of `functools.wraps`
func_name = tree.body[0].name
# we want to replace with the standard tailrec decorator here
replace_decorator(tree)
# now every time we find the function, let's replace with func_name.recur
# as in the standard case
tree = TailTransformer(func_name).visit(tree)
# now the tree has been modified satisfactorily, let's compile
code = compile(tree, filename=new_context['__file__'], mode='exec')
# exec the code in the scope of the new_context
exec(code, new_context)
# and return the function
return new_context[func_name]
| 14,809
|
def vec2str(vec):
""" transform the vector to captcha str"""
_str = ""
for i in range(4):
v = vec[i*43: (i+1)*43]
_str += chr(np.argwhere(v == 1)[0][0] + ord('0'))
return _str
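# Added illustration (assumption: each 43-wide block encodes one character as an
# offset from '0', so index 0 -> '0' and index 17 -> 'A'). Decodes a synthetic
# one-hot captcha vector with vec2str above.
import numpy as np

vec = np.zeros(4 * 43)
for i, idx in enumerate([1, 2, 17, 18]):  # '1', '2', 'A', 'B'
    vec[i * 43 + idx] = 1
print(vec2str(vec))  # -> "12AB"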
| 14,810
|
def mnist(path=None, batchsize=20, xpreptrain=None, ypreptrain=None, dataset="train", **kwargs):
"""
Legacy MNIST loader.
:type path: str
:param path: Path to MNIST pickle file.
:type batchsize: int
:param batchsize: Batch size (no shit sherlock)
:type xpreptrain: prepkit.preptrain
:param xpreptrain: Train of preprocessing functions on X. See preptrain's documentation in prepkit.
:type ypreptrain: prepkit.preptrain
:param ypreptrain: Train of preprocessing functions on Y. Can be set to -1 to channel X,Y through xpreptrain.
:type dataset: str
:param dataset: Which dataset to use ('train', 'test' or 'validate')
:rtype: tincan
"""
# Compatibility patch
if "preptrain" in kwargs.keys():
xpreptrain = kwargs["preptrain"]
# Parse path
if path is None:
path = "/Users/nasimrahaman/Documents/Python/DeepBrain/Datasets/mnist.pkl"
# Unpickle data
    data = pkl.load(open(path, "rb"))
# Load the correct X and Y data
assert dataset in ["train", "test", "validate"], "Dataset can be either of the three strings: " \
"'train', 'test', 'validate'. "
    datindex = 0 if dataset == "train" else 1 if dataset == "test" else 2
X, Y = data[datindex]
# Generate MNIST tincan
return tincan(data=(X, Y), numclasses=10, batchsize=batchsize, xpreptrain=xpreptrain, ypreptrain=ypreptrain,
xhowtransform=['b', 1, 's', 's'], yhowtransform=['b', 'nc', 1, 1])
| 14,811
|
def aseta_hiiri_kasittelija(kasittelija):
"""
Asettaa funktion, jota käytetään hiiren klikkausten käsittelyyn.
Käsittelijää kutsutaan aina, kun hiiren nappi painetaan alas missä tahansa
peli-ikkunan sisällä. Käsittelijän tulee olla funktio, jolla on tasan neljä
parametria: x, y, nappi sekä muokkausnäppäimet. Näistä x ja y määrittävät
klikkauksen sijainnin ruudulla ja nappi kertoo mitä nappia painettiin (saa
arvoja HIIRI_VASEN, HIIRI_KESKI, HIIRI_OIKEA). Muokkausnäppäimet on
selitetty moduulin dokumentaatiossa ja niitä ei pitäisi tarvita
perustoteutuksessa.
Eli koodissasi sinun tulee määritellä funktio
def hiiri_kasittelija(x, y, nappi, muokkausnapit):
# asioita tapahtuu
ja sen jälkeen rekisteröidä se:
haravasto.aseta_hiiri_kasittelija(hiiri_kasittelija)
Tällä tavalla pystyt vastaanottamaan hiiren klikkaukset koodissasi.
:param function kasittelija: käsittelijäfunktio klikkauksille
"""
if grafiikka["ikkuna"]:
grafiikka["ikkuna"].on_mouse_press = kasittelija
else:
print("Ikkunaa ei ole luotu!")
| 14,812
|
def rand_pad(ctvol):
"""Introduce random padding between 0 and 15 pixels on each of the 6 sides
of the <ctvol>"""
randpad = np.random.randint(low=0,high=15,size=(6))
ctvol = np.pad(ctvol, pad_width = ((randpad[0],randpad[1]), (randpad[2],randpad[3]), (randpad[4], randpad[5])),
mode = 'constant', constant_values = np.amin(ctvol))
return ctvol
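# Added sanity check (illustration only): each of the 6 sides gains 0-14 voxels,
# so every dimension grows by at most 28.
import numpy as np

vol = np.zeros((10, 10, 10))
padded = rand_pad(vol)
assert all(10 <= s <= 10 + 28 for s in padded.shape)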
| 14,813
|
def images(stack_ref, region, output, field, hide_older_than, show_instances):
"""Show all used AMIs and available Taupage AMIs"""
stack_refs = get_stack_refs(stack_ref)
region = get_region(region)
check_credentials(region)
ec2 = boto3.resource("ec2", region)
instances_by_image = collections.defaultdict(list)
for inst in ec2.instances.all():
if inst.state["Name"] == "terminated":
# do not count TERMINATED EC2 instances
continue
stack_name = get_tag(inst.tags, "aws:cloudformation:stack-name")
if not stack_refs or matches_any(stack_name, stack_refs):
instances_by_image[inst.image_id].append(inst)
images = {}
for image in ec2.images.filter(ImageIds=list(instances_by_image.keys())):
images[image.id] = image
if not stack_refs:
for channel in taupage.CHANNELS.values():
filters = [
{"Name": "name", "Values": [channel.ami_wildcard]},
{"Name": "state", "Values": ["available"]},
]
for image in ec2.images.filter(Filters=filters):
images[image.id] = image
rows = []
cutoff = datetime.datetime.now() - datetime.timedelta(days=hide_older_than)
for image in images.values():
row = image.meta.data.copy()
creation_time = parse_time(image.creation_date)
row["creation_time"] = creation_time
row["instances"] = ", ".join(sorted(i.id for i in instances_by_image[image.id]))
row["total_instances"] = len(instances_by_image[image.id])
stacks = set()
for instance in instances_by_image[image.id]:
stack_name = get_tag(instance.tags, "aws:cloudformation:stack-name")
# EC2 instance might not be part of a CF stack
if stack_name:
stacks.add(stack_name)
row["stacks"] = ", ".join(sorted(stacks))
if creation_time is None:
continue
if creation_time > cutoff.timestamp() or row["total_instances"]:
rows.append(row)
rows.sort(key=lambda x: x.get("Name"))
with OutputFormat(output):
instances_column_name = "instances" if show_instances else "total_instances"
columns = filter_output_columns(
[
"ImageId",
"Name",
"OwnerId",
"Description",
"stacks",
instances_column_name,
"creation_time",
],
field,
)
print_table(columns, rows, titles=TITLES, max_column_widths=MAX_COLUMN_WIDTHS)
| 14,814
|
def store_event(event: AddProcessStatus) -> None:
"""Store an :class:`.AddProcessStatus` event."""
try:
db.session.add(ProcessStatusEvent(
created=event.created,
event_id=event.event_id,
submission_id=event.submission_id,
process_id=event.process_id,
process=event.process,
status=event.status,
reason=event.reason,
agent_type=event.creator.agent_type,
agent_id=event.creator.native_id
))
db.session.commit()
except OperationalError as e:
db.session.rollback()
raise Unavailable('Caught op error') from e
| 14,815
|
def test_nbconvert(container, test_file, output_format):
"""Check if nbconvert is able to convert a notebook file"""
host_data_dir = os.path.join(THIS_DIR, "data")
cont_data_dir = "/home/jovyan/data"
output_dir = "/tmp"
LOGGER.info(
f"Test that the example notebook {test_file} can be converted to {output_format} ..."
)
command = f"jupyter nbconvert {cont_data_dir}/{test_file}.ipynb --output-dir {output_dir} --to {output_format}"
c = container.run(
volumes={host_data_dir: {"bind": cont_data_dir, "mode": "ro"}},
tty=True,
command=["start.sh", "bash", "-c", command],
)
rv = c.wait(timeout=30)
logs = c.logs(stdout=True).decode("utf-8")
LOGGER.debug(logs)
assert rv == 0 or rv["StatusCode"] == 0, f"Command {command} failed"
expected_file = f"{output_dir}/{test_file}.{output_format}"
assert expected_file in logs, f"Expected file {expected_file} not generated"
| 14,816
|
def _improve(tour: np.ndarray, matrix: np.ndarray, neighbours: np.ndarray, dlb: np.ndarray,
it1: int, t1: int, solutions: set, k: int) -> Tuple[float, np.ndarray]:
""" Последовательный 2-opt для эвристики Лина-Кернига
tour: список городов
matrix: матрица весов
neighbours: набор кандидатов
dlb: don't look bits
it1, t1: индекс, значение города, с которого начинать
solutions: полученные ранее туры
set_x, set_y: наборы удаленных, добавленных ребер
k: k-opt, k - кол-во сколько можно сделать последовательных улучшений
return: выигрыш, новый тур
"""
around_t1 = around(tour, it1)
for it2, t2 in around_t1:
set_x = {make_pair(t1, t2)}
for t3 in neighbours[t2]:
gain = matrix[t1][t2] - matrix[t2][t3]
if t3 == around_t1[0][1] or t3 == around_t1[1][1] or not gain > 1.e-10:
continue
set_y = {make_pair(t2, t3)}
it3 = np.where(tour == t3)[0][0]
_gain, _tour = __choose_t4(tour, matrix, it1, it2, it3, neighbours, gain, set_x, set_y, dlb, solutions, k)
if _gain > 1.e-10:
return _gain, _tour
return 0., tour
| 14,817
|
def plot_step_with_errorbar(lefts, widths, y_coords, y_errs,
errors_enabled=True, use_errorrects_for_legend=False, **kwargs):
"""Makes a step plot with error bars."""
lefts.append(lefts[-1] + widths[-1])
y_coords.append(y_coords[-1])
# prevent that we have labels for the step and the errorbar,
# otherwise we have two legend entries per data set
step_kwargs = dict(kwargs)
rect_kwargs = dict(kwargs)
if errors_enabled and "label" in kwargs:
if use_errorrects_for_legend:
del step_kwargs["label"]
else:
del rect_kwargs["label"]
# delete kw args that are not defined for plt.step
try:
del step_kwargs["hatch"]
except KeyError:
pass
step_result = plt.step(lefts, y_coords, where='post', **step_kwargs)
if errors_enabled:
try:
ecolor = rect_kwargs["color"]
del rect_kwargs["color"]
except KeyError:
ecolor = plt.gca().lines[-1].get_color() # do not use the next color from the color cycle
try:
del rect_kwargs["marker"]
except KeyError:
pass
try:
del rect_kwargs["zorder"]
except KeyError:
pass
zorder = plt.gca().lines[-1].get_zorder() - 1 # make sure it's drawn below
errorrects_result = plot_errorrects(lefts, y_coords, y_errs, ecolor, zorder, **rect_kwargs)
# x_mids = [left + width / 2.0 for left, width in zip(lefts[:-1], widths)]
# plt.errorbar(x_mids, y_coords[:-1], fmt='none', yerr=y_errs, ecolor=ecolor)
else:
errorrects_result = None
return step_result, errorrects_result
| 14,818
|
def giveKarma(bot, trigger):
"""Increases/decreases a user's karma - no spaces allowed"""
nick = trigger.group(1)
nickdb = nick.lower()
change = [0,1][trigger.group(2) == '++']
# command user matches karma target - or multiple karma in single line
if (nickdb == trigger.nick.lower()) or trigger.count(['--','++'][change]) > 1 or ('--' in trigger and '++' in trigger):
bot.reply("Nice try.")
return
try:
current_plus = int(bot.db.get_nick_value(nickdb, KPLUS, 0))
current_minus = int(bot.db.get_nick_value(nickdb, KMINUS, 0))
except NameError:
current_plus, current_minus = 0, 0
if change > 0:
bot.db.set_nick_value(nickdb, KPLUS, current_plus + 1)
karma_val = int(current_plus - current_minus + 1)
else:
bot.db.set_nick_value(nickdb, KMINUS, current_minus + 1)
karma_val = int(current_plus - current_minus - 1)
bot.say("%s's karma is now %d" % (nick, karma_val))
| 14,819
|
def intensity_modification(x):
""" Intensity modification
Parameters
x: Tensor
Returns
x: Tensor
"""
x = x + tf.random.uniform(shape=[], minval=-0.05, maxval=0.05, dtype=tf.dtypes.float32)
return x
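# Added usage sketch: jitter a constant image tensor; assumes TensorFlow 2.x
# eager execution so the shifted values can be inspected directly.
import tensorflow as tf

img = tf.ones((2, 2))
print(intensity_modification(img))  # all values shifted by the same offset in [-0.05, 0.05)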
| 14,820
|
def show_learning_curve(
est: BaseEstimator,
conf_mat_labels: List,
X_train: DataFrame,
y_train: Series,
X_test: DataFrame,
y_test: Series,
scoring_metric: str = "f1_micro",
cv: StratifiedKFold = StratifiedKFold(n_splits=12),
sizes: np.linspace = np.linspace(0.3, 1.0, 10),
fig_size: Tuple = (8, 8),
savefig: Path = Path().cwd() / "reports" / "figures" / "cm.png",
) -> None:
"""Plot the learning curve"""
fig, ax = plt.subplots(figsize=fig_size)
    cm = LearningCurve(
        est, cv=cv, scoring=scoring_metric, train_sizes=sizes, n_jobs=-1, ax=ax
    )
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
cm.finalize()
if not savefig.is_file():
fig.savefig(savefig, bbox_inches="tight", dpi=300)
| 14,821
|
def test_pype_get_arguments_group_str_interpolate():
"""Parse group as interpolated str input from context."""
context = Context({
'group': 'gr',
'pype': {
'name': 'pipe name',
'groups': '{group}',
}
})
with get_arb_pipeline_scope(context):
(pipeline_name,
args,
out,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
groups,
success_group,
failure_group,
py_dir,
parent) = pype.get_arguments(context)
assert pipeline_name == 'pipe name'
assert args is None
assert out is None
assert use_parent_context
assert isinstance(use_parent_context, bool)
assert pipe_arg is None
assert skip_parse
assert isinstance(skip_parse, bool)
assert raise_error
assert isinstance(raise_error, bool)
assert loader is None
assert groups == ['gr']
assert success_group is None
assert failure_group is None
assert py_dir is None
assert parent is None
| 14,822
|
def FormIdProperty(expression, **kwargs):
"""
Create a StringProperty that references a form ID. This is necessary because
form IDs change when apps are copied so we need to make sure we update
any references to the them.
:param expression: jsonpath expression that can be used to find the field
:param kwargs: arguments to be passed to the underlying StringProperty
"""
path_expression = parse(expression)
assert isinstance(path_expression, jsonpath.Child), "only child path expressions are supported"
field = path_expression.right
assert len(field.fields) == 1, 'path expression can only reference a single field'
form_id_references.append(path_expression)
return StringProperty(**kwargs)
| 14,823
|
def remember_subreddit(name=None):
"""Add current subreddit to history."""
if name:
last = wf.cached_data('--last', max_age=0, session=True) or {}
sr = last.get(name)
if not sr: # must be a multi
sr = dict(name=name, title=name, type="public",
url=subreddit_url(name))
else:
sr = subreddit_from_env()
if not sr:
log.debug('no subreddit to save to history')
return
subreddits = wf.cached_data('__history', max_age=0) or []
log.debug('%d subreddit(s) in history', len(subreddits))
for d in subreddits:
if sr['name'].lower() == d['name'].lower():
log.debug('%r already in history', sr['name'])
return
subreddits.append(sr)
wf.cache_data('__history', subreddits)
log.debug('added %r to history', sr['name'])
log.debug('%d subreddit(s) in history', len(subreddits))
| 14,824
|
def gen_lang(lang, queue):
"""generate data for a language"""
try:
start_time = time.time()
print("Lang: %s: generating..." % lang)
# list(str)
all_words = init_words(lang)
output_filename = "output/%s.csv" % lang
make_file_dirs(output_filename)
with open(output_filename, "w") as output:
last_start_level = 1
used_words = set()
for batch, levels_conf in enumerate(conf.LEVELS):
gen_levels(
lang,
all_words,
output,
used_words,
batch + 1,
last_start_level,
levels_conf
)
last_start_level = levels_conf[0] + 1
used_time = time.time() - start_time
print("Lang: %s: generated in:%.2fs." % (lang, used_time))
queue.put(True)
except Exception as e:
print("Lang: %s: EXCEPTION: %s!" % (lang, e))
queue.put(False)
| 14,825
|
def do_roll(dice: int, sides: int, _: int):
"""Given an amount of dice and the number of sides per die, simulate a dice roll and return
a list of ints representing the outcome values.
Modifier is ignored.
"""
dice = dice or 1
sides = sides or 1
values = sorted(((secrets.randbelow(sides) + 1) for _ in range(0, dice)), reverse=True)
return values
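# Added example call: roll three six-sided dice. The third argument (modifier)
# is ignored by do_roll, so 0 is passed as a placeholder.
import secrets

print(do_roll(3, 6, 0))  # e.g. [6, 4, 2] -- outcomes sorted in descending order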
| 14,826
|
def compute_logp_independent_block(X, alpha=None):
"""Compute the analytical log likelihood of a matrix under the
assumption of independence.
"""
if alpha is None: alpha = np.ones(X.shape[1])
logp_ib = gammaln(alpha.sum()) - (gammaln(alpha)).sum()
logp_ib += gammaln(X.sum(0) + alpha).sum() - gammaln(X.sum() + alpha.sum())
logp_ib += gammaln(X.sum(1) + 1).sum() - gammaln(X + 1).sum()
return logp_ib
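# Added minimal usage sketch: log-likelihood of a small count matrix under the
# independence assumption, with the default flat Dirichlet prior (alpha of ones).
import numpy as np
from scipy.special import gammaln

X = np.array([[10, 2],
              [3, 9]])
print(compute_logp_independent_block(X))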
| 14,827
|
def check_validity_label(labels):
"""
Check to see whether it makes a valid tuple
Parameters:
-----------
    labels: A tuple of labels (Object_1, Object_2, Object_3, Event, Preposition)
    Return:
    -------
    bool: True if the labels form a valid combination, False otherwise
"""
# Event is None -> All other values are None
if labels[3] == 0:
        for i in range(5):
if labels[i] != 0:
return False
return True
# If two roles have the same object return False
    for i in range(3):
        for j in range(3):
if i != j and labels[i] == labels[j] and labels[i] != 0:
return False
# If there is a Theme, there needs to be a Preposition and vice versa
if labels[2] != 0 and labels[4] == 0:
return False
if labels[2] == 0 and labels[4] != 0:
return False
return True
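# Added illustrative checks, assuming the 5-tuple layout
# (Object_1, Object_2, Object_3, Event, Preposition) inferred from the code above.
print(check_validity_label((0, 0, 0, 0, 0)))  # True  -- no event, everything else None
print(check_validity_label((1, 1, 0, 2, 0)))  # False -- two roles share the same object
print(check_validity_label((1, 0, 3, 2, 4)))  # True  -- theme present together with a preposition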
| 14,828
|
def flat_proj(v1, v2):
""" Returns the flat projection of direction unit vector, v1 onto v2 """
temp1 = np.cross(v1, v2)
temp2 = np.cross(temp1, v1)
return proj(temp2, v2)
| 14,829
|
def marching_cubes_naive(
volume_data_batch: torch.Tensor,
isolevel: Optional[float] = None,
spacing: int = 1,
return_local_coords: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Runs the classic marching cubes algorithm, iterating over
the coordinates of the volume_data and using a given isolevel
for determining intersected edges of cubes of size `spacing`.
Returns vertices and faces of the obtained mesh.
This operation is non-differentiable.
This is a naive implementation, and is not optimized for efficiency.
Args:
volume_data_batch: a Tensor of size (N, D, H, W) corresponding to
a batch of 3D scalar fields
isolevel: the isosurface value to use as the threshold to determine
whether points are within a volume. If None, then the average of the
maximum and minimum value of the scalar field will be used.
spacing: an integer specifying the cube size to use
return_local_coords: bool. If True the output vertices will be in local coordinates in
the range [-1, 1] x [-1, 1] x [-1, 1]. If False they will be in the range
[0, W-1] x [0, H-1] x [0, D-1]
Returns:
verts: [(V_0, 3), (V_1, 3), ...] List of N FloatTensors of vertices.
faces: [(F_0, 3), (F_1, 3), ...] List of N LongTensors of faces.
"""
volume_data_batch = volume_data_batch.detach().cpu()
batched_verts, batched_faces = [], []
D, H, W = volume_data_batch.shape[1:]
volume_size_xyz = volume_data_batch.new_tensor([W, H, D])[None]
if return_local_coords:
        # Convert from local coordinates in the range [-1, 1] to
# world coordinates in the range [0, D-1], [0, H-1], [0, W-1]
local_to_world_transform = Translate(
x=+1.0, y=+1.0, z=+1.0, device=volume_data_batch.device
).scale((volume_size_xyz - 1) * spacing * 0.5)
# Perform the inverse to go from world to local
world_to_local_transform = local_to_world_transform.inverse()
for i in range(len(volume_data_batch)):
volume_data = volume_data_batch[i]
curr_isolevel = (
((volume_data.max() + volume_data.min()) / 2).item()
if isolevel is None
else isolevel
)
edge_vertices_to_index = {}
vertex_coords_to_index = {}
verts, faces = [], []
# Use length - spacing for the bounds since we are using
# cubes of size spacing, with the lowest x,y,z values
# (bottom front left)
for x in range(0, W - spacing, spacing):
for y in range(0, H - spacing, spacing):
for z in range(0, D - spacing, spacing):
cube = Cube((x, y, z), spacing)
new_verts, new_faces = polygonise(
cube,
curr_isolevel,
volume_data,
edge_vertices_to_index,
vertex_coords_to_index,
)
verts.extend(new_verts)
faces.extend(new_faces)
if len(faces) > 0 and len(verts) > 0:
verts = torch.tensor(verts, dtype=torch.float32)
# Convert vertices from world to local coords
if return_local_coords:
verts = world_to_local_transform.transform_points(verts[None, ...])
verts = verts.squeeze()
batched_verts.append(verts)
batched_faces.append(torch.tensor(faces, dtype=torch.int64))
return batched_verts, batched_faces
| 14,830
|
def get_reshaped_ann_input(begin_state, new_state, action, pieces_player_begin, dice):
""" save STATE and ACTION into 1-dimensional np.array. This should be an input to a ANN """
# look for the position of the given pawn before and after a move
current_player = 0
input_ann = np.array(begin_state)
input_ann = input_ann.reshape((240, 1))
"""TODO: To estimate the $Q(s,a)$ with a neural network,
it is needed for its input to consist the information of transitioning from the previous to the next state with
visible action taken.
Every action is represented as a tuple
    (x_0 / 59, x_f / 59), where x_0 is the initial position and x_f is the
    final position. The components are divided by 59 in order
    to obtain a number between 0 and 1
"""
tile_piece_before, tile_piece_after = get_before_after_tile_id(pieces_player_begin, begin_state, new_state, action, dice)
# action_tuple = (begin_state[current_player][action] / 60, new_state[current_player][action] / 60)
action_tuple = (tile_piece_before / 59, tile_piece_after / 59)
# print(input_ann.shape)
input_ann = np.append(input_ann, action_tuple)
return input_ann
| 14,831
|
def _test_series(case, site, url, expected):
"""
Helper to use in site-specific test cases. See test_kissmanga.py for usage.
"""
resp = site.get_manga_seed_page(url)
if resp.status_code != 200:
raise Exception('Failed to download series html')
html = resp.text
series = site.series_info(html)
chapters = expected.pop('chapters', None)
for key, val in expected.items():
case.assertEqual(getattr(series, key), val)
if chapters is None:
return
case.assertEqual(series.chapters[-1], chapters['first'])
if 'last' in chapters:
case.assertEqual(series.chapters[0], chapters['last'])
| 14,832
|
def gc_resnet101(num_classes):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(GCBottleneck, [3, 4, 23, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
| 14,833
|
def _write_data(x, y, sett, jtv=None):
""" Format algorithm output.
Args:
jtv (torch.tensor, optional): Joint-total variation image, defaults to None.
Returns:
dat_y (torch.tensor): Reconstructed image data, (dim_y, C).
pth_y ([str, ...]): Paths to reconstructed images.
label : (dim_y) tensor: Reconstructed label image
pth_label : str, Paths to reconstructed label image.
"""
# Output orientation matrix
mat = y[0].mat
# Output directory
dir_out = sett.dir_out
if dir_out is None:
# No output directory given, use directory of input data
if x[0][0].direc is None:
dir_out = 'UniRes-output'
else:
dir_out = x[0][0].direc
print(dir_out)
if sett.write_out and not os.path.isdir(dir_out):
os.makedirs(dir_out, exist_ok=True)
prefix_y = sett.prefix
pth_y = []
pth_label = None
label = None
for c in range(len(x)):
dat = y[c].dat
mn = inf
mx = -inf
for n in range(len(x[c])):
if torch.min(x[c][n].dat) < mn:
mn = torch.min(x[c][n].dat)
if torch.max(x[c][n].dat) > mx:
mx = torch.max(x[c][n].dat)
dat[dat < mn] = mn
dat[dat > mx] = mx
if sett.write_out and sett.mat is None:
# Write reconstructed images (as separate niftis, because given as separate niftis)
if x[c][0].nam is None:
nam = str(c) + '.nii.gz'
else:
nam = x[c][0].nam
fname = os.path.join(dir_out, prefix_y + nam)
pth_y.append(fname)
_write_image(dat, fname, bids=sett.bids, mat=mat, file=x[c][0].file)
if y[c].label is not None:
# Do label image
pth_label = os.path.join(dir_out, prefix_y + 'label_' + nam)
label = y[c].label
_write_image(label, pth_label, bids=sett.bids, mat=mat,
file=x[c][0].label[1])
if c == 0:
dat_y = dat[..., None].clone()
else:
dat_y = torch.cat((dat_y, dat[..., None]), dim=3)
if sett.write_out and sett.mat is not None:
# Write reconstructed images as 4D volume (because given as 4D volume)
c = 0
if x[c][0].nam is None:
nam = str(c) + '.nii.gz'
else:
nam = x[c][0].nam
fname = os.path.join(dir_out, prefix_y + nam)
pth_y.append(fname)
_write_image(dat_y, fname, bids=sett.bids, mat=mat, file=x[c][0].file)
if sett.write_jtv and jtv is not None:
# Write JTV
if x[c][0].nam is None:
nam = str(c) + '.nii.gz'
else:
nam = x[c][0].nam
fname = os.path.join(dir_out, 'jtv_' + prefix_y + nam)
_write_image(jtv, fname, bids=sett.bids, mat=mat)
return dat_y, pth_y, label, pth_label
| 14,834
|
def compute_seatable_votes(votes, votetypes):
"""Compute the seatable votes.
Parameters
----------
votes: pandas.DataFrame
        the vote counts to be allocated into seats.
votetypes: dict
the information of the different types of vote variables.
Returns
-------
    seatable_votes: pandas.DataFrame
the variables which have votes which can be transformed into seats.
"""
votetypes = prepare_votetypes(votes, votetypes)
seatable_votes = votes[votetypes['seatable']]
return seatable_votes
| 14,835
|
async def read_cookie(refresh_token: Optional[str] = Cookie(None)) -> JSONResponse:
"""Reads a cookie.
Args:
refresh_token: Name of the cookie.
Returns:
JSONResponse:
Returns the value of the cookie as a json blurb.
"""
if refresh_token:
return JSONResponse(
content={
"refresh_token": refresh_token
},
status_code=200,
headers=RESET_HEADERS
)
else:
return JSONResponse(
content={
"refresh_token": status.HTTP_404_NOT_FOUND
},
status_code=404,
headers=RESET_HEADERS
)
| 14,836
|
def test_sort_values_simple_no_order():
""" It should sort dataframe """
data = pd.DataFrame(
[
{'variable': 'toto', 'Category': 2, 'value': 300},
{'variable': 'toto', 'Category': 3, 'value': 100},
{'variable': 'toto', 'Category': 4, 'value': 250},
{'variable': 'toto', 'Category': 1, 'value': 450},
]
)
expected = [450, 300, 100, 250]
output = sort(data, 'Category')
assert output['value'].tolist() == expected
| 14,837
|
def get_role_output(role_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRoleResult]:
"""
Use this data source to access information about an existing resource.
"""
...
| 14,838
|
def readAirfoilFile(fileName, bluntTe=False, bluntTaperRange=0.1, bluntThickness=0.002):
"""Load the airfoil file"""
f = open(fileName)
line = f.readline() # Read (and ignore) the first line
r = []
try:
r.append([float(s) for s in line.split()])
except Exception:
pass
while 1:
line = f.readline()
if not line:
break # end of file
if line.isspace():
break # blank line
r.append([float(s) for s in line.split()])
rr = np.array(r)
x = rr[:, 0]
y = rr[:, 1]
npt = len(x)
xMin = min(x)
    # There are 4 possibilities we have to deal with:
# a. Given a sharp TE -- User wants a sharp TE
# b. Given a sharp TE -- User wants a blunt TE
# c. Given a blunt TE -- User wants a sharp TE
# d. Given a blunt TE -- User wants a blunt TE
# (possibly with different TE thickness)
# Check for blunt TE:
if bluntTe is False:
if y[0] != y[-1]:
print("Blunt Trailing Edge on airfoil: %s" % (fileName))
print("Merging to a point over final %f ..." % (bluntTaperRange))
yAvg = 0.5 * (y[0] + y[-1])
xAvg = 0.5 * (x[0] + x[-1])
yTop = y[0]
yBot = y[-1]
xTop = x[0]
xBot = x[-1]
# Indices on the TOP surface of the wing
indices = np.where(x[0 : npt // 2] >= (1 - bluntTaperRange))[0]
for i in range(len(indices)):
fact = (x[indices[i]] - (x[0] - bluntTaperRange)) / bluntTaperRange
y[indices[i]] = y[indices[i]] - fact * (yTop - yAvg)
x[indices[i]] = x[indices[i]] - fact * (xTop - xAvg)
# Indices on the BOTTOM surface of the wing
indices = np.where(x[npt // 2 :] >= (1 - bluntTaperRange))[0]
indices = indices + npt // 2
for i in range(len(indices)):
fact = (x[indices[i]] - (x[-1] - bluntTaperRange)) / bluntTaperRange
y[indices[i]] = y[indices[i]] - fact * (yBot - yAvg)
x[indices[i]] = x[indices[i]] - fact * (xBot - xAvg)
elif bluntTe is True:
# Since we will be rescaling the TE regardless, the sharp TE
# case and the case where the TE is already blunt can be
# handled in the same manner
# Get the current thickness
curThick = y[0] - y[-1]
# Set the new TE values:
xBreak = 1.0 - bluntTaperRange
# Rescale upper surface:
for i in range(0, npt // 2):
if x[i] > xBreak:
s = (x[i] - xMin - xBreak) / bluntTaperRange
y[i] += s * 0.5 * (bluntThickness - curThick)
# Rescale lower surface:
for i in range(npt // 2, npt):
if x[i] > xBreak:
s = (x[i] - xMin - xBreak) / bluntTaperRange
y[i] -= s * 0.5 * (bluntThickness - curThick)
return x, y
| 14,839
|
def batchRenderBegin(info, userData, *args, **kwargs):
"""
Hook called before a render begins. The render will be blocked
until this function returns.
:param info: Empty dictionary for now. Might have parameters in the future.
:param userData: Object that will be carried over into the render end hooks.
This can be used by the hook to pass black box data around.
:note: This hook is available in Flame 2019.1 and up only.
"""
import sgtk
engine = sgtk.platform.current_engine()
# We can't do anything without the Shotgun engine.
# The engine is None when the user decides to not use the plugin for the project.
if engine is None:
return
engine.clear_export_info()
engine.trigger_batch_callback("batchRenderBegin", info)
| 14,840
|
def get_args():
"""
Return the args from the arg parser.
:return: args (arg parser object).
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d',
dest='debug',
action='store_true',
default=False,
help='Enable debug mode for logging messages')
arg_parser.add_argument('-q',
dest='queuename',
required=True,
                            help='Queue name (e.g., AGLT2_TEST-condor)')
arg_parser.add_argument('-w',
dest='workdir',
required=False,
default=os.getcwd(),
help='Working directory')
arg_parser.add_argument('--scopes',
dest='scopes',
required=True,
                            help='List of Rucio scopes (e.g., mc16_13TeV,mc16_13TeV)')
arg_parser.add_argument('--lfns',
dest='lfns',
required=True,
                            help='LFN list (e.g., filename1,filename2)')
arg_parser.add_argument('--eventtype',
dest='eventtype',
required=True,
help='Event type')
arg_parser.add_argument('--localsite',
dest='localsite',
required=True,
help='Local site')
arg_parser.add_argument('--remotesite',
dest='remotesite',
required=True,
help='Remote site')
arg_parser.add_argument('--produserid',
dest='produserid',
required=True,
help='produserid')
arg_parser.add_argument('--jobid',
dest='jobid',
required=True,
help='PanDA job id')
arg_parser.add_argument('--taskid',
dest='taskid',
required=True,
help='PanDA task id')
arg_parser.add_argument('--jobdefinitionid',
dest='jobdefinitionid',
required=True,
help='Job definition id')
arg_parser.add_argument('--eventservicemerge',
dest='eventservicemerge',
type=str2bool,
default=False,
help='Event service merge boolean')
arg_parser.add_argument('--usepcache',
dest='usepcache',
type=str2bool,
default=False,
help='pcache boolean from queuedata')
arg_parser.add_argument('--no-pilot-log',
dest='nopilotlog',
action='store_true',
default=False,
help='Do not write the pilot log to file')
return arg_parser.parse_args()
| 14,841
|
def run(
input_file=parser.get_default("input_file"),
event_level=parser.get_default("event_level"),
):
"""
The inner loop for sending syslog lines as events and breadcrumbs to Sentry.
Expects the Sentry Python logging integration to be initialized before being
called.
"""
for syslog_line in input_file:
try:
log_syslog_line(syslog_line[:-1], event_level)
except Exception:
logger.exception(
"Exception raised while tyring to log syslog line:\n%s", syslog_line
)
| 14,842
|
def depListToArtifactList(depList):
"""Convert the maven GAV to a URL relative path"""
regexComment = re.compile('#.*$')
#regexLog = re.compile('^\[\w*\]')
artifactList = []
for nextLine in depList:
nextLine = regexComment.sub('', nextLine)
nextLine = nextLine.strip()
gav = maven_repo_util.parseGATCVS(nextLine)
if gav:
artifactList.append(MavenArtifact.createFromGAV(gav))
return artifactList
| 14,843
|
def configure_plugins_plugin_install_to_version(request, pk, version):
"""
View rendering for the install to version modal interface
:param request: Request
:param pk: The primary key for the plugin
:param version: The version to install
:return: a renderer
"""
plugin = get_object_or_404(Plugin, pk=pk)
action = reverse(
"api_dispatch_install_to_version",
kwargs={
"api_name": "v1",
"resource_name": "plugin",
"pk": pk,
"version": version,
},
)
_installVersionedName = Plugin(name=plugin.name, version=version).versionedName()
ctx = RequestContext(
request,
{
"method": "POST",
"action": action,
"i18n": {
"title": ugettext_lazy(
"configure_plugins_plugin_install_to_version.title"
), # 'Confirm Install Plugin'
"confirmmsg": ugettext_lazy(
"configure_plugins_plugin_install_to_version.messages.confirmmsg.singular"
)
% { # 'Are you sure you want to install %(versionedName)s?'
"versionedName": _installVersionedName
},
"submit": ugettext_lazy(
"configure_plugins_plugin_install_to_version.action.submit"
), # 'Yes, Upgrade!'
"cancel": ugettext_lazy("global.action.modal.cancel"),
"submitmsg": ugettext_lazy(
"configure_plugins_plugin_install_to_version.messages.submitmsg"
), # 'Now upgrading, please wait.'
},
},
)
return render_to_response(
"rundb/configure/modal_confirm_plugin_install_to_version.html",
context_instance=ctx,
)
| 14,844
|
def save_model(model, model_filepath):
"""Stores the model in a pickle file."""
pkl_filename = model_filepath
with open(pkl_filename, 'wb') as file:
pickle.dump(model, file)
| 14,845
|
def test_certificates(host, site):
"""Validate that the letsencrypt certificates are set up """
assert host.file(f"/etc/letsencrypt/live/{site}/fullchain.pem").exists
assert host.file(f"/etc/letsencrypt/live/{site}/privkey.pem").exists
site_conf = host.file(f"/etc/nginx/sites-available/{site}")
assert site_conf.exists
assert site_conf.contains(
f"/etc/letsencrypt/live/{site}/fullchain.pem")
assert site_conf.contains(f"/etc/letsencrypt/live/{site}/privkey.pem")
| 14,846
|
def _parameters_to_vector(parameters):
"""
This fix is required for pytorch >= 1.6.0, due to the change
in memory format promotion rule.
For more info, check:
* https://github.com/pytorch/pytorch/pull/37968
* https://github.com/pytorch/pytorch/releases/tag/v1.6.0
and search "Note: BC-breaking memory format changes"
"""
parameters = [p.contiguous() for p in parameters]
return th.nn.utils.parameters_to_vector(parameters)
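# Added usage sketch: flatten the parameters of a small model. Assumes torch is
# imported as `th`, matching the alias used inside the function above.
import torch as th

model = th.nn.Linear(4, 2)
vec = _parameters_to_vector(model.parameters())
print(vec.shape)  # torch.Size([10]) -- 8 weights + 2 biases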
| 14,847
|
def create_app(config, enable_config_file=False):
"""
    Create the application
    :param config: configuration object
    :param enable_config_file: whether config files in the runtime environment may override the already-loaded configuration
    :return: the application
"""
app = create_flask_app(config, enable_config_file)
    # Create the Snowflake ID worker
from utils.snowflake.id_worker import IdWorker
app.id_worker = IdWorker(app.config['DATACENTER_ID'],
app.config['WORKER_ID'],
app.config['SEQUENCE'])
    # Rate limiter
from utils.limiter import limiter as lmt
lmt.init_app(app)
    # Configure logging
from utils.logging import create_logger
create_logger(app)
    # Register URL converters
from utils.converters import register_converters
register_converters(app)
from redis.sentinel import Sentinel
_sentinel = Sentinel(app.config['REDIS_SENTINELS'])
app.redis_master = _sentinel.master_for(app.config['REDIS_SENTINEL_SERVICE_NAME'])
app.redis_slave = _sentinel.slave_for(app.config['REDIS_SENTINEL_SERVICE_NAME'])
from rediscluster import StrictRedisCluster
app.redis_cluster = StrictRedisCluster(startup_nodes=app.config['REDIS_CLUSTER'])
# rpc
# app.rpc_reco = grpc.insecure_channel(app.config['RPC'].RECOMMEND)
# Elasticsearch
app.es = Elasticsearch(
app.config['ES'],
# sniff before doing anything
sniff_on_start=True,
# refresh nodes after a node fails to respond
sniff_on_connection_fail=True,
# and also every 60 seconds
sniffer_timeout=60
)
# socket.io
# app.sio = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)
    # Initialize the MySQL database connection
from models import db
db.init_app(app)
    # # Add request hooks
# from utils.middlewares import jwt_authentication
# app.before_request(jwt_authentication)
    # Register the user module blueprint
from .resources.user import user_bp
app.register_blueprint(user_bp)
    # Register the news module blueprint
from .resources.news import news_bp
app.register_blueprint(news_bp)
    # Register the notification module blueprint
from .resources.notice import notice_bp
app.register_blueprint(notice_bp)
    # Search module
from .resources.search import search_bp
app.register_blueprint(search_bp)
return app
| 14,848
|
def select_variables(expr):
"""When called on an expression, will yield selectors to the variable.
A selector will either return the variable (or equivalent fragment) in
an expression, or will return an entirely new expression with the
fragment replaced with the value of `swap`.
e.g.
>>> from qiime2.core.type.tests.test_grammar import (MockTemplate,
... MockPredicate)
>>> Example = MockTemplate('Example', fields=('x',))
>>> Foo = MockTemplate('Foo')
>>> Bar = MockPredicate('Bar')
>>> T = TypeMatch([Foo])
>>> U = TypeMatch([Bar])
>>> select_u, select_t = select_variables(Example[T] % U)
>>> t = select_t(Example[T] % U)
>>> assert T is t
>>> u = select_u(Example[T] % U)
>>> assert U is u
>>> frag = select_t(Example[Foo] % Bar)
>>> assert frag is Foo
>>> new_expr = select_t(Example[T] % U, swap=frag)
>>> assert new_expr == Example[Foo] % U
"""
if type(expr) is TypeVarExp:
def select(x, swap=None):
if swap is not None:
return swap
return x
yield select
return
if type(expr) is not TypeExp:
return
if type(expr.full_predicate) is TypeVarExp:
def select(x, swap=None):
if swap is not None:
return x.duplicate(predicate=swap)
return x.full_predicate
yield select
for idx, field in enumerate(expr.fields):
for sel in select_variables(field):
# Without this closure, the idx in select will be the last
# value of the enumerate, same for sel
# (Same problem as JS with callbacks inside a loop)
def closure(idx, sel):
def select(x, swap=None):
if swap is not None:
new_fields = list(x.fields)
new_fields[idx] = sel(x.fields[idx], swap)
return x.duplicate(fields=tuple(new_fields))
return sel(x.fields[idx])
return select
yield closure(idx, sel)
| 14,849
|
def get_login(name_p: str, pass_p: str, auth_error: bytes = b'') -> Callable:
"""Decorator to ensure a player's login information is correct."""
# NOTE: this function does NOT verify whether the arguments have
# been passed into the connection, and assumes you have already
# called the appropriate decorator above, @required_x.
def wrapper(f: Callable) -> Callable:
# modify the handler code to get the player
# object before calling the handler itself.
@wraps(f)
async def handler(conn: Connection) -> Optional[bytes]:
# args may be provided in regular args
# or multipart, but only one at a time.
argset = conn.args or conn.multipart_args
if not (
p := await glob.players.get_login(
name = unquote(argset[name_p]),
pw_md5 = argset[pass_p]
)
):
# player login incorrect
return auth_error
# login verified, call the handler
return await f(p, conn)
return handler
return wrapper
| 14,850
|
def map_visualize(df: gpd.GeoDataFrame,
lyrs='s',
scale=0.5,
figsize = (12,9),
color = "red",
ax = None,
fig=None,
*args, **kwargs):
"""Draw the geodataframe with the satellite image as the background
Args:
        `df` (gpd.GeoDataFrame): the gpd.GeoDataFrame to plot
        `ax`: the ax to draw on
        `lyrs` (str, optional): [ m roadmap; t terrain; p terrain with labels; s satellite; y satellite with labels; h label layer (road names, place names, etc.) ]. Defaults to 's'.
        `scale` (float): border percentage
        `color`: the color of the drawn geometry
Returns:
[ax]: [description]
"""
# lyrs='y';scale=0.5;figsize = (12,9); color = "red";ax = None;fig=None;
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
df.plot(color = color, ax=ax, zorder=1, *args, **kwargs)
# df.plot(color = color, zorder=1)
[x0, x1], [y0, y1] = plt.xlim(), plt.ylim()
gap_x, gap_y = (x1-x0), (y1-y0)
[a, b, c, d] = df.total_bounds
if a == c:
x0, x1 = a - 0.001, c + 0.001
gap_x = x1- x0
if b == d:
y0, y1 = b - 0.001, d + 0.001
gap_y = y1 - y0
if not 0.4 <= gap_y / gap_x <= 2.5:
mid_x, mid_y = (x1+x0)/2, (y1+y0)/2
gap = max(gap_x, gap_y) * (1 + scale) / 2
[x0, y0, x1, y1] = [mid_x - gap, mid_y - gap, mid_x + gap, mid_y + gap]
else:
[x0, y0, x1, y1] = [x0-(x1-x0) * scale, y0+(y0-y1) * scale,
x1+(x1-x0) * scale, y1-(y0-y1) * scale]
zoom = 15 - int(math.log2(haversine((x0, y1), (x1, y0))/3))
# print([x0, x1], [y0, y1], haversine((x0, y1), (x1, y0))/3)
    # warning: if zoom is bigger than 19 then something will go wrong
zoom = 19 if zoom > 19 else zoom
img = tile.Tiles()
f_lst, img_bbox = img.get_tiles_by_bbox([x0, y1, x1, y0], zoom, lyrs)
to_image = merge_tiles(f_lst)
background, _ = clip_background( to_image, img_bbox, [x0, y1, x1, y0], False)
ax.imshow(background, extent=[x0, x1, y0, y1], alpha=.6, zorder=0)
plt.xlim(x0, x1)
plt.ylim(y0, y1)
    # Remove scientific notation (axis offset)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
# set_major_locator
# ax.xaxis.set_major_locator(plt.NullLocator())
# ax.yaxis.set_major_locator(plt.NullLocator())
return fig, ax
| 14,851
|
def unorm_to_byte(x):
"""float x in [0, 1] to an integer [0, 255]"""
return min(int(256 * x), 255)
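# Added quick check of the endpoint behaviour.
print(unorm_to_byte(0.0), unorm_to_byte(0.5), unorm_to_byte(1.0))  # 0 128 255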
| 14,852
|
def eigh(a, largest: bool = False):
"""
Get eigenvalues / eigenvectors of hermitian matrix a.
Args:
a: square hermitian float matrix
largest: if True, return order is based on descending eigenvalues, otherwise
ascending.
Returns:
w: [m] eigenvalues
v: [m, m] eigenvectors
"""
return _eigh(a, largest)
| 14,853
|
def test_param_reorder():
"""Parameters can be reordered and doesn't affect the outcome."""
standard = pytest.mark.parametrize(
argnames="foo,bar,baz",
argvalues=[
(3, "something", 777),
(None, -100, "aaaaa"),
([10, 20, 30], ..., 0),
],
)
wrapped = parametrize_cases(
Case(foo=3, bar="something", baz=777),
Case(bar=-100, baz="aaaaa", foo=None),
Case(baz=0, bar=..., foo=[10, 20, 30]),
)
assert wrapped == standard
| 14,854
|
def lowpass(x, dt, fc, order=5):
"""
Low pass filter data signal x at cut off frequency fc, blocking harmonic content above fc.
Parameters
----------
x : array_like
Signal
dt : float
Signal sampling rate (s)
fc : float
Cut off frequency (Hz)
order : int, optional
Butterworth filter order. Default 5.
Returns
-------
array
Filtered signal
See Also
--------
scipy.signal.butter, scipy.signal.filtfilt
"""
nyq = 0.5 * 1. / dt # nyquist frequency
normal_cutoff = fc / nyq # normalized cut off frequency
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, x)
return y
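# Added usage sketch: suppress a 50 Hz component riding on a 2 Hz signal sampled
# at 500 Hz, using the scipy.signal helpers the function above relies on.
import numpy as np
from scipy.signal import butter, filtfilt

dt = 1.0 / 500.0
t = np.arange(0.0, 2.0, dt)
x = np.sin(2 * np.pi * 2 * t) + 0.3 * np.sin(2 * np.pi * 50 * t)
y = lowpass(x, dt, fc=10.0)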
| 14,855
|
def doRunFixPlanets(msName):
"""Generate code for running fixplanets on fields with (0,0) coordinates"""
print('\n*** doRunFixPlanets ***')
fieldIds = sfsdr.getFieldsForFixPlanets(msName)
if len(fieldIds) != 0:
casaCmd = ''
mytb = aU.createCasaTool(tbtool)
mytb.open(msName+'/FIELD')
fieldNames = mytb.getcol('NAME')
mytb.close()
fieldNames = ['%s' %fieldNames[i] for i in fieldIds]
fieldNames = ','.join(fieldNames)
fieldIds = ['%s' %i for i in fieldIds]
fieldIds = ','.join(fieldIds)
casaCmd = casaCmd + "fixplanets(vis = '"+msName+"',\n"
casaCmd = casaCmd + " field = '"+fieldIds+"', # "+fieldNames+"\n"
casaCmd = casaCmd + " fixuvw = True)\n"
return casaCmd
| 14,856
|
def add_name_suffix(
suffix, obj_names=None, filter_type=None, add_underscore=False, search_hierarchy=False,
selection_only=True, **kwargs):
"""
Add prefix to node name
:param suffix: str, string to add to the end of the current node
:param obj_names: str or list(str), name of list of node names to rename
:param filter_type: str, name of object type to filter the objects to apply changes ('Group, 'Joint', etc)
:param add_underscore: bool, Whether or not to add underscore before the suffix
:param search_hierarchy: bool, Whether to search objects in hierarchies
:param selection_only: bool, Whether to search only selected objects or all scene objects
:param kwargs:
"""
rename_shape = kwargs.get('rename_shape', True)
if filter_type:
return name.add_suffix_by_filter(
suffix=suffix, filter_type=filter_type, add_underscore=add_underscore, rename_shape=rename_shape,
search_hierarchy=search_hierarchy, selection_only=selection_only, dag=False, remove_maya_defaults=True,
transforms_only=True)
else:
return name.add_suffix(
suffix=suffix, obj_names=obj_names, add_underscore=add_underscore, rename_shape=rename_shape)
| 14,857
|
def checksum(hdpgroup: list,
algorithm: str = 'CRC32',
chktag: str = '\'α') -> list:
"""List of checksums-like for detection of Non-intentional data corruption
See https://en.wikipedia.org/wiki/Cksum
See https://en.wikipedia.org/wiki/Checksum
Args:
hdpgroup (list): list of HDP-like objects
        algorithm (str): The type of checksum algorithm
        chktag (str): select only by special tags (for complex documents) mixing
several hashings. See hashable()
Returns:
list: List of strings optimized to be used as input for hashing
>>> import hxlm.core as HXLm
>>> UDUR_LAT = HXLm.util.load_file(HXLm.HDATUM_UDHR + '/udhr.lat.hdp.yml')
>>> checksum(UDUR_LAT)
['(CRC32 \\'\\'α "3839021470")']
>>> UDUR_RUS = HXLm.util.load_file(HXLm.HDATUM_UDHR + '/udhr.rus.hdp.yml')
>>> checksum(UDUR_RUS)
['(CRC32 \\'\\'α "3839021470")']
"""
if algorithm != 'CRC32':
raise NotImplementedError('algorithm [' +
str(algorithm) + '] not implemented')
# Escape ' is not an walk in the park. Just to simplify, we will replace
# double '' with '
if chktag.find("''") > -1:
chktag = chktag.replace("''", "'")
result = []
for hsilo in hdpgroup:
hashable_str = hashable([hsilo])[0]
hashable_code = _get_checksum(hashable_str, chktag=chktag)
result.append(hashable_code)
return result
| 14,858
|
def run_job(runner, runner_thread, queue, function, input):
""" Call this to start a new job """
runner.job_function = function
runner.job_input = input
runner.comm_queue = queue
runner_thread.start()
| 14,859
|
def install():
"""Install Storyboard Pro specific functionality of avalon-core.
This function is called automatically on calling
`api.install(storyboardpro)`.
"""
print("Installing Avalon Storyboard Pro...")
pyblish.api.register_host("storyboardpro")
| 14,860
|
def dsum(i0,i1,step = 1, box=[]):
""" for a range of fits files
compute the mean and dispersion from the mean
"""
for i in range(i0,i1+1,step):
ff = 'IMG%05d.FIT' % i
h1, d1 = getData(ff,box)
#very specific for 16 bit data, since we want to keep the data in uint16
bzero = h1['BZERO']
bscale = h1['BSCALE']
if i == i0:
sum0 = 1.0
sum1 = d1*bscale+bzero
sum2 = sum1*sum1
#sum1 = d1
#sum2 = d1*d1
h = h1
nx = d1.shape[1]
ny = d1.shape[0]
nz = i1 + 1 - i0
c = np.zeros((nz, ny, nx))
c[0,:,:] = d1.reshape(ny,nx)
else:
sum0 = sum0 + 1.0
sum1 = sum1 + (d1 * bscale + bzero)
sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)
#sum2 = sum2+d1*d1
c[i - i0,:,:] = d1.reshape(ny,nx)
sum1 = sum1 / sum0
sum2 = sum2 / sum0 - sum1*sum1
print (type(sum1), type(sum2))
return (h,sum1,np.sqrt(sum2),c)
| 14,861
|
def test_sync_buckets(db):
"""
Test that bucket syncing only pulls study buckets
"""
client = boto3.client("s3")
bucket1 = client.create_bucket(Bucket="not-a-study")
bucket2 = client.create_bucket(Bucket="kf-dev-sd-00000000")
assert Bucket.objects.count() == 0
sync_buckets()
assert Bucket.objects.count() == 1
assert Bucket.objects.first().name == "kf-dev-sd-00000000"
| 14,862
|
def _env_translate_obs(obs):
"""
This should only be used for the Tiger ENV.
Parameters
----------
obs : list or array-like
The observation to be translated.
Returns
-------
str
A representation of the observation in English.
"""
if obs[0] == 1:
return 'GROWL_LEFT'
elif obs[1] == 1:
return 'GROWL_RIGHT'
elif obs[2] == 1:
return 'START'
elif obs[3] == 1:
return 'END'
else:
        raise ValueError('Invalid observation: {}'.format(obs))
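# Added example: decode a one-hot Tiger observation.
print(_env_translate_obs([0, 1, 0, 0]))  # 'GROWL_RIGHT'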
| 14,863
|
def voter(address):
"""
Returns voter credentials.
Parameters:
address: address
Returns:
        list of three values: address (str), is_voter (bool),
voted (bool).
"""
return contract.functions.voters(address).call()
| 14,864
|
def E_disp_z(m, N, j_star=3.):
"""Vertical displacement as a function of vertical wavenumber."""
num = E0*b**3*N0**2
den = 2*j_star*np.pi*N**2 * (1 + m/beta_star(N, j_star))**2
return num/den
| 14,865
|
def get_stereo_image():
"""Retrieve one stereo camera image
Returns:
(mat): cv2 image
"""
img = core.get_stereo_image()
if img is not None:
return img
else:
return None
| 14,866
|
def f():
"""<caret>
class Class:
"""
bar
"""
| 14,867
|
def request_set_arm_state(token: str, arm_state: str):
"""Request set arm state."""
headers = {
'Authorization': 'Bearer %s' % token,
'Content-Type': 'application/json'
}
payload = {
"Created": int(time.time()),
"AppVersion": APP_VERSION,
"AppType": APPTYPE,
"App": APP
}
response = requests.post(
_build_url('Location/{}'.format(arm_state)),
headers=headers,
json=payload)
response.raise_for_status()
return response.json()
| 14,868
|
def get_all_config(filename=None):
"""
    Set default configuration options for configparser.
    Uses default settings if no file is passed.
    Also provides default sections and default keys for options missing from the config.
    :param filename: optional config file to read
:return: configparser object with default config for missing sections
"""
_config = parse_config2(filename)
default_config = set_defaults()
# Verify each section in default_config
for s in range(len(default_config.sections())):
section = default_config.sections()[s]
# Add the missing section to the config obtained
if not _config.has_section(section):
_config.add_section(section)
# Add missing keys to config obtained
for key in default_config[section]:
if not _config.has_option(section, key):
_config[section][key] = default_config[section][key]
return _config
| 14,869
|
def check_for_end_or_abort(e):
"""Return a closure checking for END or ABORT notifications
Arguments:
e -- event to signal when the action is completed
(will be set when an END or ABORT occurs)
"""
def check(notification, e = e):
print("EVENT : " + \
Base_pb2.ActionEvent.Name(notification.action_event))
if notification.action_event == Base_pb2.ACTION_END \
or notification.action_event == Base_pb2.ACTION_ABORT:
e.set()
return check
| 14,870
|
def check_vacancy_at_cell(house_map, cell):
"""
Return True if the given cell is vacant.
Vacancy is defined as a '0' in the house map at the given coordinates.
(i.e. there is no wall at that location)
"""
x = cell[0]
y = cell[1]
if not 0 <= x < MAP_WIDTH:
return False
if not 0 <= y < MAP_HEIGHT:
return False
return house_map[y][x] == '0'
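# Added illustration on a 3x3 map whose centre is a wall. MAP_WIDTH and
# MAP_HEIGHT are assumed to be module-level constants matching the map size.
MAP_WIDTH, MAP_HEIGHT = 3, 3
house_map = [['0', '0', '0'],
             ['0', '1', '0'],
             ['0', '0', '0']]
print(check_vacancy_at_cell(house_map, (1, 1)))  # False -- wall
print(check_vacancy_at_cell(house_map, (0, 2)))  # True  -- vacant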
| 14,871
|
def extract_text_and_vertices(x: Dict[str, str]):
"""Extracts all annotations and bounding box vertices from a single OCR
output from Google Cloud Vision API.
The first element is the full OCR. It's equivalent to the output of
`extract_full_text_annotation` for the same OCR output.
Args:
x (Dict[str, str]): whole OCR output.
Returns:
list where each item is a tuple where the first element is the text and
the second are the 4 vertices of the corresponding bounding box.
"""
blocks = []
for annotation in x["textAnnotations"]:
text = annotation['description']
vertices = [
tuple(x.values()) for x in annotation['boundingPoly']['vertices']
]
blocks.append((text, vertices))
return blocks
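# Hedged usage sketch with a minimal fake Cloud Vision response; only the fields the
# function reads ("textAnnotations", "description", "boundingPoly"/"vertices") are included.
ocr_output = {
    "textAnnotations": [
        {
            "description": "HELLO",
            "boundingPoly": {
                "vertices": [
                    {"x": 0, "y": 0}, {"x": 50, "y": 0},
                    {"x": 50, "y": 20}, {"x": 0, "y": 20},
                ]
            },
        }
    ]
}
print(extract_text_and_vertices(ocr_output))
# [('HELLO', [(0, 0), (50, 0), (50, 20), (0, 20)])]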
| 14,872
|
def _set(name, par, val):
"""Set a source parameter."""
import sherpa.astro.ui as sau
sau.set_par('{name}.{par}'.format(**locals()), val)
# try:
# exec(name + '.' + par + '=' + str(val))
# except Exception as e:
# print e
| 14,873
|
def test_grid_queue():
"""Test GridQueue"""
queue = GridQueue(40, 2, "B")
push1 = queue.push()
push2 = queue.push()
with pytest.raises(GridSiteException):
queue.push()
pop1 = queue.pop()
pop2 = queue.pop()
with pytest.raises(GridSiteException):
queue.pop()
assert push1 == pop1
assert push2 == pop2
| 14,874
|
def cspace3(obs, bot, theta_steps):
"""
    Compute the 3D (x, y, yaw) configuration space obstacle for a list of convex 2D obstacles given by [obs] and a convex 2D robot given by vertices in [bot] at a variety of theta values.
obs should be a 3D array of size (2, vertices_per_obstacle, num_obstacles)
bot should be a 2d array of size (2, num_bot_vertices)
theta_steps can either be a scalar, in which case it specifies the number of theta values, evenly spaced between -pi and +pi; or it can be a vector of theta values.
"""
bot = -np.array(bot)
if np.isscalar(theta_steps):
thetas = np.linspace(-np.pi, np.pi, num=theta_steps)
else:
thetas = theta_steps
c_obs = []
for k in range(obs.shape[2]):
for j in range(len(thetas)-1):
th0 = thetas[j]
th1 = thetas[j+1]
bot_rot0 = rotmat(th0).dot(bot)
c_obs0 = minkowski_sum(bot_rot0, obs[:,:,k])
bot_rot1 = rotmat(th1).dot(bot)
c_obs1 = minkowski_sum(bot_rot1, obs[:,:,k])
c_pts = np.vstack((np.hstack((c_obs0, c_obs1)),
np.hstack((th0 + np.zeros(c_obs0.shape[1]),
th1 + np.zeros(c_obs1.shape[1])))))
c_obs.append(c_pts)
if len(c_obs) == 0:
return np.zeros((3, bot.shape[1] * 2, 0))
max_n_vert = max((x.shape[1] for x in c_obs))
    # Pass a list (not a generator) to np.dstack; newer NumPy versions reject generator input.
    return np.dstack([np.pad(c, pad_width=((0, 0), (0, max_n_vert - c.shape[1])), mode='edge') for c in c_obs])
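# Hedged usage sketch: a unit-square robot and a single square obstacle, 8 theta samples.
# rotmat() and minkowski_sum() are module-level helpers of the original code and are assumed
# to be available alongside cspace3; numpy is assumed to be imported as np, as in that module.
bot = np.array([[-0.5,  0.5, 0.5, -0.5],
                [-0.5, -0.5, 0.5,  0.5]])                # shape (2, num_bot_vertices)
obs = np.array([[1.0, 2.0, 2.0, 1.0],
                [1.0, 1.0, 2.0, 2.0]]).reshape(2, 4, 1)  # shape (2, verts, num_obstacles)
c_obs = cspace3(obs, bot, 8)
print(c_obs.shape)  # (3, max_vertices_per_slab, (theta_steps - 1) * num_obstacles)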
| 14,875
|
def Graph(backend:Optional[str]=None) -> BaseGraph:
"""Returns an instance of an implementation of :class:`~pyzx.graph.base.BaseGraph`.
By default :class:`~pyzx.graph.graph_s.GraphS` is used.
Currently ``backend`` is allowed to be `simple` (for the default),
or 'graph_tool' and 'igraph'.
This method is the preferred way to instantiate a ZX-diagram in PyZX.
Example:
To construct an empty ZX-diagram, just write::
g = zx.Graph()
"""
if backend is None: backend = 'simple'
if backend not in backends:
raise KeyError("Unavailable backend '{}'".format(backend))
if backend == 'simple': return GraphS()
if backend == 'graph_tool':
return GraphGT()
if backend == 'igraph': return GraphIG()
if backend == 'quizx-vec': return quizx.VecGraph() # type: ignore
return GraphS()
| 14,876
|
def describe_stack_events(StackName=None, NextToken=None):
"""
Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, go to Stacks in the AWS CloudFormation User Guide.
See also: AWS API Documentation
:example: response = client.describe_stack_events(
StackName='string',
NextToken='string'
)
:type StackName: string
:param StackName: The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
Running stacks: You can specify either the stack's name or its unique stack ID.
Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken: A string that identifies the next page of events that you want to retrieve.
:rtype: dict
:return: {
'StackEvents': [
{
'StackId': 'string',
'EventId': 'string',
'StackName': 'string',
'LogicalResourceId': 'string',
'PhysicalResourceId': 'string',
'ResourceType': 'string',
'Timestamp': datetime(2015, 1, 1),
'ResourceStatus': 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'DELETE_SKIPPED'|'UPDATE_IN_PROGRESS'|'UPDATE_FAILED'|'UPDATE_COMPLETE',
'ResourceStatusReason': 'string',
'ResourceProperties': 'string',
'ClientRequestToken': 'string'
},
],
'NextToken': 'string'
}
"""
pass
| 14,877
|
def set_param(component, param, value):
"""
Sets the specified parameter to a particular value.
Args:
component (`BondGraphBase`): The particular component.
param: The parameter to set
value: The value to assign it to, may be None
"""
component.set_param(param, value)
| 14,878
|
def get_carb_data(data, offset=0):
""" Load carb information from an issue report cached_carbs dictionary
Arguments:
data -- dictionary containing cached carb information
offset -- the offset from UTC in seconds
Output:
    3 lists in (carb_start_dates, carb_values, carb_absorption_times)
    format
"""
carb_values = [float(dict_.get("quantity")) for dict_ in data]
start_dates = [
datetime.strptime(
dict_.get("startDate"),
" %Y-%m-%d %H:%M:%S %z"
) + timedelta(seconds=offset)
for dict_ in data
]
absorption_times = [
float(dict_.get("absorptionTime")) / 60
if dict_.get("absorptionTime") is not None
else None for dict_ in data
]
assert len(start_dates) == len(carb_values) == len(absorption_times),\
"expected input shapes to match"
return (start_dates, carb_values, absorption_times)
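# Hedged usage sketch with a single hypothetical cached-carb entry; note that the
# "startDate" string must carry the leading space expected by the strptime format above.
data = [{
    "quantity": "30",
    "startDate": " 2023-05-01 08:15:00 +0000",
    "absorptionTime": "10800",  # seconds; converted to minutes by the function
}]
starts, values, absorptions = get_carb_data(data, offset=3600)
print(values)       # [30.0]
print(absorptions)  # [180.0]
print(starts[0])    # 2023-05-01 09:15:00+00:00 (shifted by the one-hour offset)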
| 14,879
|
def _discover_bounds(cdf, tol=1e-7):
"""
Uses scipy's general continuous distribution methods
which compute the ppf from the cdf, then use the ppf
to find the lower and upper limits of the distribution.
"""
class DistFromCDF(stats.distributions.rv_continuous):
def cdf(self, x):
return cdf(x)
dist = DistFromCDF()
# the ppf is the inverse cdf
lower = dist.ppf(tol)
upper = dist.ppf(1. - tol)
return lower, upper
| 14,880
|
def ml_app_instances_ml_app_instance_id_get(ml_app_instance_id): # noqa: E501
"""ml_app_instances_ml_app_instance_id_get
# noqa: E501
:param ml_app_instance_id: MLApp instance identifier
:type ml_app_instance_id: str
:rtype: None
"""
return 'do some magic!'
| 14,881
|
def transcribe_file_with_word_time_offsets(speech_file,output_path):
"""Transcribe the given audio file synchronously and output the word time
offsets."""
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
client = speech.SpeechClient()
with io.open(speech_file, 'rb') as audio_file:
content = audio_file.read()
audio = types.RecognitionAudio(content=content)
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=44100,
language_code='es-CL',
enable_word_time_offsets=True)
response = client.recognize(config, audio)
file_name = basename(speech_file)
file_name = file_name[:-4]+'.txt'
transcription_folder = join(output_path,'transcription')
confidence_folder = join(output_path,'confidence')
word_offset_folder = join(output_path,'word_offset')
if not isdir(transcription_folder):
mkdir(transcription_folder)
if not isdir(confidence_folder):
mkdir(confidence_folder)
if not isdir(word_offset_folder):
mkdir(word_offset_folder)
transcription = open(join(output_path,'transcription',file_name),'w')
confidence = open(join(output_path,'confidence',file_name),'w')
word_offset = open(join(output_path,'word_offset',file_name),'w')
for result in response.results:
alternative = result.alternatives[0]
        transcription.write(alternative.transcript)
confidence.write(str(alternative.confidence))
for word_info in alternative.words:
word = word_info.word
start_time = word_info.start_time
end_time = word_info.end_time
a_str = ",".join([word,str(start_time.seconds + start_time.nanos * 1e-9),str(end_time.seconds + end_time.nanos * 1e-9)]).encode('utf-8')+'\n'
word_offset.write(a_str)
transcription.close()
confidence.close()
word_offset.close()
| 14,882
|
def plot_distribution2D(results,n_qubits,savepath,title = None, clear_fig = True):
"""plots diffusion for 2D data"""
plt.rcParams.update({'figure.figsize': (10,10)})
y,x,probability_density = results["dimension_0"],results["dimension_1"],results["probability_density"]
axes_limit = (2**n_qubits)-1
if title is None:
title = "diffusion on an {0}x{0} grid".format(axes_limit+1)
if sys.version_info >= (3, 7):
alpha = [i**(1/4) for i in probability_density]
else:
alpha = None
plt.cla()
plt.scatter(x,y,alpha=alpha,linewidths=[20*i**0.5 for i in probability_density], s = [400*i**0.5 for i in probability_density])
plt.xlim(0,axes_limit)
plt.ylim(0,axes_limit)
plt.xlabel('X')
plt.ylabel('Y')
plt.title(title)
plt.savefig(savepath,dpi = 300)
if clear_fig:
plt.cla()
| 14,883
|
def generate_name(style: str = 'underscore', seed: int = None) -> str:
"""Generate a random name."""
if seed is not None:
random.seed(seed)
return format_names(random_names(), style=style)
| 14,884
|
def argunique(a, b):
"""
找出a--b对应体中的唯一对应体,即保证最终输出的aa--bb没有重复元素,也没有多重对应
:param a:
:param b:
:return: aaa, bbb 使得aaa-bbb是唯一对
"""
# 先对a中元素进行逐个检查,如果第一次出现,那么添加到aa中,如果不是第一次,那么检查是否一致,不一致则设置成-1
# 设置成-1,代表a中当前元素i有过一对多纪录,剔除。同时-1也不会被再匹配到
seta = {}
for i, j in zip(a, b):
if i not in seta:
seta[i] = j
elif seta[i] != j:
seta[i] = -1
aa = [i for i in seta if seta[i] != -1]
bb = [seta[i] for i in seta if seta[i] != -1]
    # Do the same pass in the other direction, indexed by b, to drop duplicates
setb = {}
for i, j in zip(aa, bb):
if j not in setb:
setb[j] = i
elif setb[j] != i:
setb[j] = -1
aaa = [setb[j] for j in setb if setb[j] != -1]
bbb = [j for j in setb if setb[j] != -1]
return aaa, bbb
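# Hedged usage sketch: element 3 maps to both 30 and 31, so that pair is dropped; the
# surviving correspondences are strictly one-to-one.
a = [1, 1, 2, 3, 3]
b = [10, 10, 20, 30, 31]
print(argunique(a, b))  # ([1, 2], [10, 20])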
| 14,885
|
def create_config_flow(hass: core.HomeAssistant, host: str) -> None:
"""Start a config flow."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={"host": host},
)
)
| 14,886
|
def move(obj, direction):
"""
Moves object by (dx, dy).
Returns true if move succeeded.
"""
goal = obj.pos + direction
if (goal.x < 0 or goal.y < 0 or
goal.x >= obj.current_map.width or
goal.y >= obj.current_map.height):
# try_ catches this for the player, but need to
# check here for NPCs
return False
if not obj.current_map.is_blocked_from(obj.pos, goal):
obj.pos = goal
if obj.fighter:
obj.fighter.exhaustion += MOVE_EXHAUSTION
return True
return False
| 14,887
|
def addMedicine(medicine: object):
"""Data required are "name", "description", "price", "quantity", "medicalId" """
return mr.makePostRequest(mr.API + "/medicine/", medicine)
| 14,888
|
def get_train():
""" Training data generator """
for file in train_files:
print("Train File: ", file)
img = ( np.array(nc.Dataset(file, "r")["ims"][0:1])[0], np.array(nc.Dataset(file, "r")["migrants"][0:1]) )
yield img
| 14,889
|
def begin(command, project, ename, group):
"""
Begin a run in the database log.
Args:
command: The command that will be executed.
        project: The project name we belong to.
ename: The experiment name we belong to.
group: The run group we belong to.
Returns:
(run, session), where run is the generated run instance and session the
associated transaction for later use.
"""
from benchbuild.utils.db import create_run
from benchbuild.utils import schema as s
from benchbuild.settings import CFG
from datetime import datetime
db_run, session = create_run(command, project, ename, group)
db_run.begin = datetime.now()
db_run.status = 'running'
log = s.RunLog()
log.run_id = db_run.id
log.begin = datetime.now()
log.config = repr(CFG)
session.add(log)
session.commit()
return db_run, session
| 14,890
|
def test_ultrasound_distance() -> None:
"""Test that we can read an ultrasound distance."""
backend = SBArduinoConsoleBackend(
"TestBoard",
console_class=MockConsole,
)
backend._console.next_input = "1.23" # type: ignore
metres = backend.get_ultrasound_distance(3, 4)
assert metres is not None
assert isclose(metres, 1.23)
# Check backend updated its view of what modes the pins are in now.
assert backend.get_gpio_pin_mode(3) is GPIOPinMode.DIGITAL_OUTPUT
assert backend.get_gpio_pin_digital_state(3) is False
assert backend.get_gpio_pin_mode(4) is GPIOPinMode.DIGITAL_INPUT
| 14,891
|
def fortran_library_item(lib_name,
sources,
**attrs
): #obsolete feature
""" Helper function for creating fortran_libraries items. """
build_info = {'sources':sources}
known_attrs = ['module_files','module_dirs',
'libraries','library_dirs']
for key,value in attrs.items():
if key not in known_attrs:
            raise TypeError(
                "fortran_library_item() got an unexpected keyword "
                "argument '%s'" % key)
build_info[key] = value
return (lib_name,build_info)
| 14,892
|
def drawPolygon(t,r,n):
"""
draw a polygon of n sides, centered at (0,0)
r is radius of the circle that would circumscribe the polygon
leave turtle at position (0,0) facing right
"""
# pick up the pen, move to the starting point, and put down the pen
t.up(); t.goto(r,0); t.down()
# connect the dots for all the points
    for i in range(1,n+1): # visits points 1 through n-1, then wraps to point 0 to close the polygon
t.goto( ithOfNPointsOnCircleX(i%n,n,r),
ithOfNPointsOnCircleY(i%n,n,r) )
# Now pick up the pen, and move back to 0,0, facing east
t.up(); t.goto(0,0); t.setheading(0);
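# Hedged usage sketch. ithOfNPointsOnCircleX/Y are helpers from the original module, assumed
# to return the x/y coordinate of point i of n equally spaced points on a circle of radius r;
# minimal stand-ins are defined here only so the example runs on its own.
import math
import turtle
def ithOfNPointsOnCircleX(i, n, r):
    return r * math.cos(2 * math.pi * i / n)
def ithOfNPointsOnCircleY(i, n, r):
    return r * math.sin(2 * math.pi * i / n)
t = turtle.Turtle()
drawPolygon(t, 100, 6)  # draw a hexagon circumscribed by a circle of radius 100
turtle.done()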
| 14,893
|
def copy_generator(generator):
"""Copy an existing numpy (random number) generator.
Parameters
----------
generator : numpy.random.Generator or numpy.random.RandomState
The generator to copy.
Returns
-------
numpy.random.Generator or numpy.random.RandomState
In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator``.
Both are copies of the input argument.
"""
if isinstance(generator, np.random.RandomState):
return _copy_generator_np116(generator)
return _copy_generator_np117(generator)
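# Hedged usage sketch: a copied generator should reproduce the same draws as the original.
# The version-specific helpers (_copy_generator_np116/117) belong to the original module and
# are assumed to be importable next to copy_generator.
rng = np.random.RandomState(42)
rng_copy = copy_generator(rng)
assert np.allclose(rng.random_sample(3), rng_copy.random_sample(3))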
| 14,894
|
def uptime():
"""Returns a datetime.timedelta instance representing the uptime in a Windows 2000/NT/XP machine"""
import os, sys
import subprocess
if not sys.platform.startswith('win'):
        raise RuntimeError("This function is to be used in windows only")
    cmd = "net statistics server"
    p = subprocess.Popen(cmd, shell=True,
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        universal_newlines=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
lines = child_stdout.readlines()
child_stdin.close()
child_stdout.close()
lines = [line.strip() for line in lines if line.strip()]
date, time, ampm = lines[1].split()[2:5]
#print date, time, ampm
m, d, y = [int(v) for v in date.split('/')]
H, M = [int(v) for v in time.split(':')]
if ampm.lower() == 'pm':
H += 12
import datetime
now = datetime.datetime.now()
then = datetime.datetime(y, m, d, H, M)
diff = now - then
return diff
| 14,895
|
def sort_from_avro(df: 'pd.DataFrame', cur_filename: str, order_folder: str) -> 'pd.DataFrame':
"""Shuffle a dataframe with the given seed
:param df: the input dataframe
:type df: pandas.DataFrame
:param cur_filename: the initial file name
:type cur_filename: str
:param order_folder: the order_folder path
:type order_folder: str
:return: the shuffled dataframe
:rtype: pandas.DataFrame
"""
real_filename = cur_filename.split(".", 1)[0].replace("results_", "")
ord_df = None
for root, _, files in walk(order_folder):
for file_ in files:
if file_.find(real_filename) != -1:
ord_df = pd.read_csv(path.join(root, file_))
ord_df.rename(columns={'FileName': "Filename"}, inplace=True)
if ord_df is None:
return None
print(
f"{STATUS_ARROW}[File:{STATUS_WARNING(cur_filename)}][Order dataframe with avro indexes]")
df_mask = df.Filename.duplicated(keep=False)
ord_df_mask = ord_df.Filename.duplicated(keep=False)
# Add counter number for unique indexes
df.loc[df_mask, 'Filename'] += "_#" + \
df.groupby('Filename').cumcount().add(1).astype(str)
ord_df.loc[ord_df_mask, 'Filename'] += "_#" + \
ord_df.groupby('Filename').cumcount().add(1).astype(str)
# Change indexes
df = df.set_index("Filename")
ord_df = ord_df.set_index("Filename")
# Reindex
new_index = df.reindex_like(ord_df, method=None).dropna()
df.set_index(new_index.index, inplace=True)
df.reset_index(inplace=True)
ord_df.reset_index(inplace=True)
# Remove duplicate counters
    df.Filename = df.Filename.apply(lambda elm: elm.rsplit("_#", 1)[
        0] if elm.find("_#") != -1 else elm)
    ord_df.Filename = ord_df.Filename.apply(lambda elm: elm.rsplit("_#", 1)[
        0] if elm.find("_#") != -1 else elm)
if not all(ord_df.Filename.eq(df.Filename)):
print("File name not equal...")
exit(-1)
return df
| 14,896
|
def test_pages_kingdom_successful(args, protein_gen_success, cazy_home_url, monkeypatch):
"""Test parse_family_by_kingdom() when all is successful."""
test_fam = Family("famName", "CAZyClass", "http://www.cazy.org/GH14.html")
def mock_get_pag(*args, **kwargs):
return ["http://www.cazy.org/GH14_all.html"]
def mock_get_pages(*args, **kwargs):
return protein_gen_success
monkeypatch.setattr(get_cazy_pages, "get_pagination_pages_kingdom", mock_get_pag)
monkeypatch.setattr(get_cazy_pages, "get_html_page", mock_get_pages)
get_cazy_pages.parse_family_by_kingdom(
family=test_fam,
cazy_home=cazy_home_url,
args=args["args"],
kingdoms=["Bacteria"],
)
| 14,897
|
def convert_yolo(
df: pd.DataFrame,
root: Union[str, os.PathLike, PosixPath],
copy_images: bool = False,
save_under: Optional[str] = None,
output_dir: Optional[Union[str, os.PathLike, PosixPath]] = None,
):
"""converts to yolo from master dataframe
Args:
df (pd.DataFrame): the master df
root (Union[str, os.PathLike, PosixPath]): root directory of the source format
        copy_images (bool, optional): Whether to copy the images to a different directory. Defaults to False.
        save_under (str, optional): Name of the folder to save the target annotations. Defaults to "yolo".
output_dir (Optional[Union[str, os.PathLike, PosixPath]], optional): Output directory for the target
annotation. Defaults to ``None``.
"""
save_under = ifnone(save_under, "yolo")
output_imagedir, output_labeldir = _makedirs(root, save_under, output_dir)
splits = df.split.unique().tolist()
lbl = LabelEncoder()
dataset = dict()
for split in splits:
output_subdir = output_labeldir / split if len(splits) > 1 else output_labeldir
output_subdir.mkdir(parents=True, exist_ok=True)
split_df = df.query("split == @split").copy()
# drop images missing width or height information
hw_missing = split_df[pd.isnull(split_df["image_width"]) | pd.isnull(split_df["image_height"])]
if len(hw_missing) > 0:
warnings.warn(
f"{hw_missing['image_id'].nunique()} has height/width information missing in split `{split}`. "
+ f"{len(hw_missing)} annotations will be removed."
)
split_df = split_df[pd.notnull(split_df["image_width"]) & pd.notnull(split_df["image_height"])]
split_df["x_center"] = split_df["x_min"] + split_df["width"] / 2
split_df["y_center"] = split_df["y_min"] + split_df["height"] / 2
# normalize
split_df["x_center"] = split_df["x_center"] / split_df["image_width"]
split_df["y_center"] = split_df["y_center"] / split_df["image_height"]
split_df["width"] = split_df["width"] / split_df["image_width"]
split_df["height"] = split_df["height"] / split_df["image_height"]
split_df["class_index"] = lbl.fit_transform(split_df["category"])
split_df["yolo_string"] = (
split_df["class_index"].astype(str)
+ " "
+ split_df["x_center"].astype(str)
+ " "
+ split_df["y_center"].astype(str)
+ " "
+ split_df["width"].astype(str)
+ " "
+ split_df["height"].astype(str)
)
ds = split_df.groupby("image_id")["yolo_string"].agg(lambda x: "\n".join(x)).reset_index()
image_ids = ds["image_id"].tolist()
yolo_strings = ds["yolo_string"].tolist()
dataset[split] = str(Path(root) / "images" / split)
for image_id, ystr in tqdm(zip(image_ids, yolo_strings), total=len(image_ids), desc=f"split: {split}"):
write_yolo_txt(image_id, output_subdir, ystr)
if copy_images:
dest_dir = output_imagedir / split
dest_dir.mkdir(parents=True, exist_ok=True)
_fastcopy(split_df["image_path"].unique().tolist(), dest_dir)
dataset["nc"] = len(lbl._map)
dataset["names"] = list(lbl._map.keys())
with open(Path(output_labeldir).joinpath("dataset.yaml"), "w") as f:
yaml.dump(dataset, f, default_flow_style=None, allow_unicode=True)
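# Worked sketch of the normalization performed above for one hypothetical box
# (x_min=50, y_min=100, width=200, height=100) on a 640x480 image.
x_center = (50 + 200 / 2) / 640    # 0.234375
y_center = (100 + 100 / 2) / 480   # 0.3125
w = 200 / 640                      # 0.3125
h = 100 / 480                      # ~0.2083
print(0, x_center, y_center, w, h)  # class index 0 followed by the normalized box, one line per annotation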
| 14,898
|
def _truncate_and_pad_token_ids(token_ids, max_length):
"""Truncates or pads the token id list to max length."""
token_ids = token_ids[:max_length]
padding_size = max_length - len(token_ids)
if padding_size > 0:
token_ids += [0] * padding_size
return token_ids
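# Hedged usage sketch: shorter inputs are right-padded with zeros, longer ones are truncated.
print(_truncate_and_pad_token_ids([5, 6, 7], 5))           # [5, 6, 7, 0, 0]
print(_truncate_and_pad_token_ids([1, 2, 3, 4, 5, 6], 4))  # [1, 2, 3, 4]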
| 14,899
|