content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def scored_ngrams(
    docs: Documents,
    n: int = 2,
    metric: str = "pmi",
    tokenizer: Tokenizer = DEFAULT_TOKENIZER,
    preprocessor: CallableOnStr = None,
    stopwords: Union[str, Collection[str]] = None,
    min_freq: int = 0,
    fuse_tuples: bool = False,
    sep: str = " ",
) -> Series:
    """Get Series of collocations and scores.

    Parameters
    ----------
    docs : str or iterable of str
        Documents to scan for ngrams.
    n : int, optional
        Size of collocations, by default 2. Must be 2, 3, or 4.
    metric : str, optional
        Scoring metric to use. Valid options include:
        'raw_freq', 'pmi', 'mi_like', 'likelihood_ratio',
        'jaccard', 'poisson_stirling', 'chi_sq', 'student_t'.
        See nltk.BigramAssocMeasures, nltk.TrigramAssocMeasures,
        and nltk.QuadgramAssocMeasures for additional size-specific
        options.
    tokenizer : callable, optional
        Callable for tokenizing docs.
    preprocessor : callable, optional
        Callable for preprocessing docs before tokenization, by default None.
    stopwords : str or collection of str, optional
        Name of known stopwords set or collection of stopwords to remove
        from docs. By default None.
    min_freq : int, optional
        Drop ngrams below this frequency, by default 0.
    fuse_tuples : bool, optional
        Join ngram tuples with `sep`, by default False.
    sep : str, optional
        Separator to use for joining ngram tuples, by default " ".
        Only relevant if `fuse_tuples=True`.

    Returns
    -------
    Series
        Series {ngrams -> scores}. NOTE(review): if one or zero ngrams
        survive filtering, a one-column DataFrame is returned unsqueezed.
    """
    _validate_strings(docs)
    # Validate `n` and select the collocation finder/measures for that size.
    if not isinstance(n, int):
        raise TypeError(f"Expected `n` to be int, got {type(n)}.")
    if not 2 <= n <= 4:
        raise ValueError(f"Valid `n` values are 2, 3, and 4. Got {n}.")
    finder = NGRAM_FINDERS[n]
    measures = NGRAM_METRICS[n]()
    # Build the preprocessing pipeline: preprocess -> tokenize -> de-stopword.
    pre_pipe = []
    if preprocessor is not None:
        pre_pipe.append(preprocessor)
    pre_pipe.append(tokenizer)
    if stopwords is not None:
        # Fetch a named stopwords set if passed as str
        if isinstance(stopwords, str):
            stopwords = fetch_stopwords(stopwords)
        pre_pipe.append(partial(remove_stopwords, stopwords=stopwords))
    docs = chain_processors(docs, pre_pipe)
    # Find and score collocations
    ngrams = finder.from_documents(docs)
    ngrams.apply_freq_filter(min_freq)
    ngram_score = ngrams.score_ngrams(getattr(measures, metric))
    # Put the results in a DataFrame, squeeze into Series
    kind = {2: "bigram", 3: "trigram", 4: "quadgram"}[n]
    ngram_score = pd.DataFrame(ngram_score, columns=[kind, "score"])
    if fuse_tuples:
        # Join ngram tuples into single strings
        ngram_score[kind] = ngram_score[kind].str.join(sep)
    ngram_score.set_index(kind, inplace=True)
    if ngram_score.shape[0] > 1:
        ngram_score = ngram_score.squeeze()
    return ngram_score
def parse_command_line_arguments() -> argparse.Namespace:
    """Build the command line parser and return the parsed arguments
    as an :class:`argparse.Namespace`."""
    arg_parser = argparse.ArgumentParser(
        description=f"supreme-pancake v{__version__}")
    # (flags, add_argument keyword arguments) — one entry per option.
    option_specs = [
        (("-c", "--credentials"),
         dict(action="store", help="Credential JSON file")),
        (("-k", "--sheet-key"),
         dict(action="store", help="Google Sheet key")),
        (("-l", "--logging-level"),
         dict(action="store", default="INFO",
              help='Logging level, either "DEBUG", "INFO", "WARNING", "ERROR", '
                   'or "CRITICAL"')),
        (("--one-shot",),
         dict(action="store_true", default=False,
              help="Runs all queries once and exit")),
        (("-s", "--secret"),
         dict(action="append", default=[],
              help='Adds a secret. Example: "-s PASS=123456789". Can be used '
                   'multiple times')),
    ]
    for flags, kwargs in option_specs:
        arg_parser.add_argument(*flags, **kwargs)
    return arg_parser.parse_args()
def imurl(image_url, return_as_array=False, **kwargs):
    """
    Read an image from a url and return it as a PIL image or ndarray.

    Parameters
    ----------
    image_url: http / https url of image
    return_as_array: Convert image directly to numpy array
        default: False
    kwargs:
        Keyword arguments of imread can be passed for image modification.
        Example:
            imurl(image_url, return_as_array=True, resize=(224, 224),
                  color_mode='rgb', dtype='float32')
        Note: kwargs only works with return_as_array=True

    Returns
    -------
    PIL Image by default.
    If return_as_array is True, the image is returned as a numpy array;
    additional params like resize, color_mode, dtype, return_original can
    also be passed in order to refine the image.

    Raises
    ------
    ImportError: if the requests library is not installed.
    ValueError: if the url is not an http / https url.
    """
    # `request_image` is the lazily-imported requests module (None if the
    # import failed at module load time).
    if request_image is None:
        raise ImportError('requests library is required for reading image from url. '
                          'Install it using pip install requests')
    if not image_url.startswith('http'):
        raise ValueError(f'invalid url found. Required http or https url but got {image_url} instead')
    image_response = request_image.get(image_url)
    imbytes = BytesIO(image_response.content)
    if return_as_array:
        return imread(imbytes, **kwargs)
    return pilimage.open(imbytes)
def generate_command(config, work_dir, output_analysis_id_dir, errors, warnings):
    """Build the main command line command to run.

    Args:
        config (GearToolkitContext.config): run-time options from config.json
        work_dir (path): scratch directory where non-saved files can be put
        output_analysis_id_dir (path): directory where output will be saved
        errors (list of str): error messages (appended to in place)
        warnings (list of str): warning messages (appended to in place)
    Returns:
        cmd (list of str): command to execute
    """
    # start with the command itself:
    cmd = [
        BIDS_APP,
        str(work_dir / "bids"),
        str(output_analysis_id_dir),
        ANALYSIS_LEVEL,
    ]
    # 3 positional args: bids path, output dir, 'participant'
    # This should be done here in case there are nargs='*' arguments
    # These follow the BIDS Apps definition (https://github.com/BIDS-Apps)
    # editme: add any positional arguments that the command needs
    # get parameters to pass to the command by skipping gear config parameters
    # (which start with "gear-").
    command_parameters = {}
    for key, val in config.items():
        # these arguments are passed directly to the command as is
        if key == "bids_app_args":
            bids_app_args = val.split(" ")
            for baa in bids_app_args:
                cmd.append(baa)
        elif not key.startswith("gear-"):
            command_parameters[key] = val
    # editme: Validate the command parameter dictionary - make sure everything is
    # ready to run so errors will appear before launching the actual gear
    # code. Add descriptions of problems to errors & warnings lists.
    # print("command_parameters:", json.dumps(command_parameters, indent=4))
    if "bad_arg" in cmd:
        errors.append("A bad argument was found in the config.")
    # NOTE(review): assumes num-things is numeric when present — confirm
    # the manifest enforces that, or the > comparison may raise.
    num_things = command_parameters.get("num-things")
    if num_things and num_things > 41:
        warnings.append(
            f"The num-things config value should not be > 41. It is {command_parameters['num-things']}."
        )
    cmd = build_command_list(cmd, command_parameters)
    # editme: fix --verbose argparse argument
    for ii, cc in enumerate(cmd):
        if cc.startswith("--verbose"):
            # handle a 'count' argparse argument where manifest gives
            # enumerated possibilities like v, vv, or vvv
            # e.g. replace "--verbose=vvv' with '-vvv'
            cmd[ii] = "-" + cc.split("=")[1]
        elif " " in cc:  # then it is a space-separated list so take out "="
            # this allows argparse "nargs" to work properly
            cmd[ii] = cc.replace("=", " ")
    log.info("command is: %s", str(cmd))
    return cmd
def ip(
    context,
    api_client,
    api_key,
    input_file,
    output_file,
    output_format,
    verbose,
    ip_address,
):
    """Query GreyNoise for all information on a given IP."""
    addresses = get_ip_addresses(context, input_file, ip_address)
    results = []
    for address in addresses:
        results.append(api_client.ip(ip_address=address))
    return results
def test_start_end_attack(asset_address, raiden_chain, deposit):
    """ An attacker can try to steal assets from a hub or the last node in a
    path.

    The attacker needs to use two addresses (A1 and A2) and connect both to the
    hub H. Once connected, a mediated transfer is initialized from A1 to A2
    through H. Once node A2 receives the mediated transfer, the attacker
    uses its known secret to close and settle the channel H-A2,
    without revealing the secret to H's raiden node.

    The intention is to make the hub transfer the asset while being
    unable to acquire the asset from A1.
    """
    amount = 30
    asset = asset_address[0]
    app0, app1, app2 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking
    # the attacker owns app0 and app2 and creates a transfer through app1
    secret = pending_mediated_transfer(
        raiden_chain,
        asset,
        amount,
        1  # TODO: fill in identifier
    )
    hashlock = sha3(secret)
    attack_channel = channel(app2, app1, asset)
    attack_transfer = get_received_transfer(attack_channel, 0)
    attack_contract = attack_channel.external_state.netting_channel.address
    hub_contract = channel(app1, app0, asset).external_state.netting_channel.address
    # the attacker can create a merkle proof of the locked transfer
    lock = attack_channel.our_state.balance_proof.get_lock_by_hashlock(hashlock)
    unlock_proof = attack_channel.our_state.balance_proof.compute_proof_for_lock(secret, lock)
    # start the settle counter
    attack_channel.netting_channel.close(
        app2.raiden.address,
        attack_transfer,
        None
    )
    # wait until the last block to reveal the secret, hopefully we are not
    # missing a block during the test
    wait_until_block(app2.raiden.chain, attack_transfer.lock.expiration - 1)
    # since the attacker knows the secret he can net the lock
    attack_channel.netting_channel.unlock(
        [(unlock_proof, attack_transfer.lock, secret)],
    )
    # XXX: verify that the secret was publicized
    # at this point the hub might not know yet the secret, and won't be able to
    # claim the asset from the channel A1 - H
    # the attacker settles the contract
    app2.raiden.chain.next_block()
    attack_channel.netting_channel.settle(asset, attack_contract)
    # at this point the attacker has the "stolen" funds
    attack_contract = app2.raiden.chain.asset_hashchannel[asset][attack_contract]
    assert attack_contract.participants[app2.raiden.address]['netted'] == deposit + amount
    assert attack_contract.participants[app1.raiden.address]['netted'] == deposit - amount
    # and the hub's channel A1-H doesn't
    hub_contract = app1.raiden.chain.asset_hashchannel[asset][hub_contract]
    assert hub_contract.participants[app0.raiden.address]['netted'] == deposit
    assert hub_contract.participants[app1.raiden.address]['netted'] == deposit
    # to mitigate the attack the Hub _needs_ to use a lower expiration for the
    # locked transfer between H-A2 than A1-H, since for A2 to acquire the asset
    # it needs to make the secret public in the block chain we publish the
    # secret through an event and the Hub will be able to require its funds
    app1.raiden.chain.next_block()
    # XXX: verify that the Hub has found the secret, close and settle the channel
    # the hub has acquired its asset
    hub_contract = app1.raiden.chain.asset_hashchannel[asset][hub_contract]
    assert hub_contract.participants[app0.raiden.address]['netted'] == deposit + amount
    assert hub_contract.participants[app1.raiden.address]['netted'] == deposit - amount
def get_layer(neurons, neuron_loc, depth=None, return_closest: bool = False):
    """Obtain the layer of neurons at a specific depth.

    Args:
        neurons: array of neuron data, one neuron per column (axis 1).
        neuron_loc: location array; row 2 holds each neuron's depth.
        depth: depth of the layer to extract (required).
        return_closest: if True and `depth` matches no existing layer,
            use the closest layer instead of raising.

    Returns:
        The columns of `neurons` whose depth equals the selected layer.

    Raises:
        ValueError: if `depth` is None, or matches no layer while
            `return_closest` is False.
    """
    layers = np.unique(neuron_loc[2, :])
    if depth is None:
        # Bug fix: previously depth=None was silently compared elementwise
        # against the depth row, always returning an empty selection.
        raise ValueError('A depth must be provided.')
    if depth not in layers:
        if return_closest:
            # Snap to the nearest existing layer depth.
            depth = layers[np.argmin(np.abs(layers - depth))]
        else:
            # ValueError (subclass of Exception) replaces the generic
            # Exception, so existing broad handlers still work.
            raise ValueError('Provided depth does not correspond to layer.')
    neuron_mask = neuron_loc[2, :] == depth
    return neurons[:, neuron_mask]
def train_single_env(
    algo: AlgoProtocol,
    env: gym.Env,
    buffer: Buffer,
    explorer: Optional[Explorer] = None,
    n_steps: int = 1000000,
    n_steps_per_epoch: int = 10000,
    update_interval: int = 1,
    update_start_step: int = 0,
    random_steps: int = 0,
    eval_env: Optional[gym.Env] = None,
    eval_epsilon: float = 0.0,
    save_metrics: bool = True,
    save_interval: int = 1,
    experiment_name: Optional[str] = None,
    with_timestamp: bool = True,
    logdir: str = "d3rlpy_logs",
    verbose: bool = True,
    show_progress: bool = True,
    tensorboard_dir: Optional[str] = None,
    timelimit_aware: bool = True,
    callback: Optional[Callable[[AlgoProtocol, int, int], None]] = None,
) -> None:
    """Start training loop of online deep reinforcement learning.

    Args:
        algo: algorithm object.
        env: gym-like environment.
        buffer : replay buffer.
        explorer: action explorer.
        n_steps: the number of total steps to train.
        n_steps_per_epoch: the number of steps per epoch.
        update_interval: the number of steps per update.
        update_start_step: the steps before starting updates.
        random_steps: the steps for the initial random exploration.
        eval_env: gym-like environment. If None, evaluation is skipped.
        eval_epsilon: :math:`\\epsilon`-greedy factor during evaluation.
        save_metrics: flag to record metrics. If False, the log
            directory is not created and the model parameters are not saved.
        save_interval: the number of epochs before saving models.
        experiment_name: experiment name for logging. If not passed,
            the directory name will be ``{class name}_online_{timestamp}``.
        with_timestamp: flag to add timestamp string to the last of
            directory name.
        logdir: root directory name to save logs.
        verbose: flag to show logged information on stdout.
        show_progress: flag to show progress bar for iterations.
        tensorboard_dir: directory to save logged information in
            tensorboard (additional to the csv data). if ``None``, the
            directory will not be created.
        timelimit_aware: flag to turn ``terminal`` flag ``False`` when
            ``TimeLimit.truncated`` flag is ``True``, which is designed to
            incorporate with ``gym.wrappers.TimeLimit``.
        callback: callable function that takes ``(algo, epoch, total_step)``
            , which is called at the end of epochs.
    """
    # setup logger
    if experiment_name is None:
        experiment_name = algo.__class__.__name__ + "_online"
    logger = D3RLPyLogger(
        experiment_name,
        save_metrics=save_metrics,
        root_dir=logdir,
        verbose=verbose,
        tensorboard_dir=tensorboard_dir,
        with_timestamp=with_timestamp,
    )
    algo.set_active_logger(logger)
    # initialize algorithm parameters
    _setup_algo(algo, env)
    observation_shape = env.observation_space.shape
    # 3-dimensional observations (H, W, C) are treated as images
    is_image = len(observation_shape) == 3
    # prepare stacked observation (frame stacking for image inputs)
    if is_image:
        stacked_frame = StackedObservation(observation_shape, algo.n_frames)
    # save hyperparameters
    algo.save_params(logger)
    # switch based on show_progress flag
    xrange = trange if show_progress else range
    # setup evaluation scorer
    eval_scorer: Optional[Callable[..., float]]
    if eval_env:
        eval_scorer = evaluate_on_environment(eval_env, epsilon=eval_epsilon)
    else:
        eval_scorer = None
    # start training loop
    observation, reward, terminal = env.reset(), 0.0, False
    rollout_return = 0.0
    clip_episode = False
    for total_step in xrange(1, n_steps + 1):
        with logger.measure_time("step"):
            # stack observation if necessary
            if is_image:
                stacked_frame.append(observation)
                fed_observation = stacked_frame.eval()
            else:
                observation = observation.astype("f4")
                fed_observation = observation
            # sample exploration action
            with logger.measure_time("inference"):
                if total_step < random_steps:
                    action = env.action_space.sample()
                elif explorer:
                    x = fed_observation.reshape((1,) + fed_observation.shape)
                    action = explorer.sample(algo, x, total_step)[0]
                else:
                    action = algo.sample_action([fed_observation])[0]
            # store observation
            buffer.append(
                observation=observation,
                action=action,
                reward=reward,
                terminal=terminal,
                clip_episode=clip_episode,
            )
            # get next observation
            if clip_episode:
                # episode ended on the previous step: reset the environment
                observation, reward, terminal = env.reset(), 0.0, False
                clip_episode = False
                logger.add_metric("rollout_return", rollout_return)
                rollout_return = 0.0
                # for image observation
                if is_image:
                    stacked_frame.clear()
            else:
                with logger.measure_time("environment_step"):
                    observation, reward, terminal, info = env.step(action)
                    rollout_return += reward
                # special case for TimeLimit wrapper
                if timelimit_aware and "TimeLimit.truncated" in info:
                    clip_episode = True
                    terminal = False
                else:
                    clip_episode = terminal
            # pseudo epoch count
            epoch = total_step // n_steps_per_epoch
            if total_step > update_start_step and len(buffer) > algo.batch_size:
                if total_step % update_interval == 0:
                    # sample mini-batch
                    with logger.measure_time("sample_batch"):
                        batch = buffer.sample(
                            batch_size=algo.batch_size,
                            n_frames=algo.n_frames,
                            n_steps=algo.n_steps,
                            gamma=algo.gamma,
                        )
                    # update parameters
                    with logger.measure_time("algorithm_update"):
                        loss = algo.update(batch)
                    # record metrics
                    for name, val in loss.items():
                        logger.add_metric(name, val)
            # call callback if given
            if callback:
                callback(algo, epoch, total_step)
        if epoch > 0 and total_step % n_steps_per_epoch == 0:
            # evaluation
            if eval_scorer:
                logger.add_metric("evaluation", eval_scorer(algo))
            if epoch % save_interval == 0:
                logger.save_model(total_step, algo)
            # save metrics
            logger.commit(epoch, total_step)
    # clip the last episode
    buffer.clip_episode()
async def test_init_ignores_tolerance(hass, setup_comp_3):
    """Test if tolerance is ignored on initialization."""
    calls = await _setup_switch(hass, True)
    _setup_sensor(hass, 39)
    await hass.async_block_till_done()
    # Exactly one service call must have been fired: turn off the switch.
    assert len(calls) == 1
    turn_off_call = calls[0]
    assert turn_off_call.domain == HASS_DOMAIN
    assert turn_off_call.service == SERVICE_TURN_OFF
    assert turn_off_call.data["entity_id"] == ENT_SWITCH
def apply_wa_title(tree):
    """
    Replace page's ``<title>`` contents with a contents of
    ``<wa-title>`` element and remove ``<wa-title>`` tag.

    WebAnnotator > 1.14 allows annotation of ``<title>`` contents;
    it is stored after body in ``<wa-title>`` elements.
    """
    for wa_title in tree.xpath('//wa-title'):
        titles = tree.xpath('//title')
        if not titles:
            # No <title> to replace: just discard the <wa-title> element.
            wa_title.drop_tree()
            return
        title = titles[0]
        head = title.getparent()
        # Move <wa-title> into place of the old <title> inside <head>.
        head.insert(head.index(title), wa_title)
        title.drop_tree()
        wa_title.tag = 'title'
        # Bug fix: the original popped attributes while iterating over
        # `wa_title.attrib`, mutating the mapping during iteration.
        # Iterate a snapshot of the keys instead.
        for attr in list(wa_title.attrib):
            wa_title.attrib.pop(attr)
        return
def capacitor_charge_curve():
    """ Plots the charging of a capacitor over time (RC step response).

    Simulates a 5V source -> 100 Ohm resistor -> node -> 10 uF capacitor ->
    ground circuit, then plots the simulated node voltage against the ideal
    first-order response V(t) = V0 * (1 - exp(-t / RC)).
    """
    r = 100
    c = 10 * Units.u
    DURATION = 0.01
    # Sample times at the solver's configured step size.
    ts = np.array(irangef(0, DURATION, Config.time_step))
    # Model
    graph = Graph()
    node_5 = Node(graph, value=5, fixed=True, source=True)
    node_gnd = Node(graph, value=0, fixed=True, source=True)
    node = Node(graph)
    # Resistor
    edge = Edge(graph, node_5, node)
    graph.add_component(Resistor(graph, r, node_5, node, edge))
    # Capacitor
    edge = Edge(graph, node, node_gnd)
    graph.add_component(Capacitor(graph, c, node, node_gnd, edge))
    vs = []
    # Bug fix: the original loop called `np.append(ts, t)` and discarded the
    # result (np.append returns a new array) — a no-op that has been removed.
    for _ in ts:
        graph.solve()
        vs.append(node.value())
    plt.title("Capacitor Charge Curve (R = 100 Ohms, C = 10 uF, V = 5 V)")
    plt.xlabel("Time (s)")
    plt.ylabel("Voltage (V)")
    plt.plot(ts, vs, label="Model")
    # Plot ideal
    vs_ideal = 5 * (1 - np.exp(-ts / (r * c)))
    plt.plot(ts, vs_ideal, label="Ideal")
    plt.legend()
    plt.show()
def _format_unpack_code_level(message,
                              signal_names,
                              variable_lines,
                              helper_kinds):
    """Format one unpack level in a signal tree.

    `signal_names` mixes plain signal names (str) with multiplexer subtrees
    (dict). Plain signals are emitted into `body_lines`; multiplexer code is
    collected separately in `muxes_lines` and appended after the signals.
    `variable_lines` and `helper_kinds` are accumulated in place by the
    helper functions.
    """
    body_lines = []
    muxes_lines = []

    for signal_name in signal_names:
        if isinstance(signal_name, dict):
            # Multiplexer subtree: recurse via the mux formatter.
            mux_lines = _format_unpack_code_mux(message,
                                               signal_name,
                                               body_lines,
                                               variable_lines,
                                               helper_kinds)

            if muxes_lines:
                muxes_lines.append('')

            muxes_lines += mux_lines
        else:
            # Plain signal: appended to body_lines in place.
            _format_unpack_code_signal(message,
                                       signal_name,
                                       body_lines,
                                       variable_lines,
                                       helper_kinds)

    # Separate signal code from mux code with a blank line.
    if body_lines:
        if body_lines[-1] != '':
            body_lines.append('')

    if muxes_lines:
        muxes_lines.append('')

    body_lines = body_lines + muxes_lines

    # Leading blank line when this level produced any output at all.
    if body_lines:
        body_lines = [''] + body_lines

    return body_lines
def find_phones():
    """
    Broadcast on the LAN to the Shexter ports and look for a reply from a phone.

    :return: (IP, Port) tuple representing the phone the user selects.
        None if no phone was found or the user chose manual configuration.
    """
    sock_sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock_sender.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    try:
        rejected_hosts = []
        broadcast_addrs = _get_broadcast_addrs()
        if not broadcast_addrs:
            print('There was a problem running the phone finder. You will have to configure manually.')
            return None
        print('Ready to search for phones.')
        manual = input('Press Enter when the app is open on your phone, or type "m" to skip to manual configuration.\n')
        if manual.lower() == 'm':
            return None
        for port in range(PORT_MIN, PORT_MAX + 1):
            # Attempts per port; earlier ports are more likely to be right.
            tries = 2
            count = 0
            print('Searching on port ' + str(port), end="")
            while count < tries:
                count += 1
                print('.', end='')
                stdout.flush()
                # Send on ALL the interfaces (required by Windows!)
                for broadcast_addr in broadcast_addrs:
                    discover_bytes = bytes(DISCOVER_REQUEST, ENCODING)
                    sock_sender.sendto(discover_bytes, (broadcast_addr, port))
                sock_recvr = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                # Bug fix: the original created a receiver socket every
                # iteration but closed only the last one, leaking descriptors.
                try:
                    sock_recvr.bind(('', port))
                    # Wait briefly for the phone to respond.
                    ready = select([sock_recvr], [], [sock_sender, sock_recvr], 0.25)
                    if ready[0]:
                        # Buffsize must match ConnectionInitThread.BUFFSIZE
                        data, other_host = sock_recvr.recvfrom(256)
                        data = data.decode(ENCODING).rstrip(' \0')
                        if not data.startswith(DISCOVER_CONFIRM):
                            print('Received a strange response: ' + data)
                            continue
                        # Skip over rejected hosts
                        if other_host[0] not in rejected_hosts:
                            print()
                            print('Got a response from ' + str(other_host))
                            # Response lines: confirm, phone info, TCP port#.
                            print('Phone info: ' + data.splitlines()[1])
                            confirm = input('Is this your phone? y/N: ')
                            if confirm.lower() == 'y':
                                tcp_port_str = data.splitlines()[2]
                                tcp_port = port_str_to_int(tcp_port_str)
                                if not tcp_port:
                                    # Cannot recover from this; it's a server bug.
                                    # Bug fix: message previously called .format()
                                    # with no placeholder, dropping the bad value.
                                    print('Received invalid port "{}" from phone; cannot continue.'.format(tcp_port_str))
                                    return None
                                return other_host[0], tcp_port
                            else:
                                rejected_hosts.append(other_host[0])
                    if ready[2]:
                        # Bug fix: ready[2] is a list; str() it before concat.
                        print('There was an error selecting ' + str(ready[2]))
                finally:
                    sock_recvr.close()
            print()
        return None
    finally:
        # Bug fix: the sender socket was never closed on any path.
        sock_sender.close()
def fileGDB_schema() -> StructType:
    """Schema for dummy FileGDB."""
    # id / category / geometry columns of the dummy FileGDB fixture.
    fields = [
        StructField("id", LongType()),
        StructField("category", StringType()),
        StructField("geometry", BinaryType()),
    ]
    return StructType(fields)
def set_atom_stereo_parities(sgr, atm_par_dct):
    """ Set atom stereo parities.

    :param sgr: stereo graph
    :param atm_par_dct: mapping of atom keys to new parity values
    :returns: a new graph rebuilt from the updated atoms and original bonds
    """
    # Write each parity into the ATM_STE_PAR_POS slot of the atom tuples.
    atm_dct = mdict.set_by_key_by_position(atoms(sgr), atm_par_dct,
                                           ATM_STE_PAR_POS)
    return _create.from_atoms_and_bonds(atm_dct, bonds(sgr))
def clean(expr):
    """
    Clean up an expression string by replacing caret exponentiation
    (``^``) with Python's ``**`` operator.

    Arguments:
        expr: string, expression
    """
    return expr.replace("^", "**")
def atom_stereo_keys(sgr):
    """ Keys to atom stereo-centers.

    :param sgr: stereo graph
    :returns: keys of atoms with an assigned stereo parity
    """
    # A parity of True/False marks an assigned stereo-center; atoms with
    # parity None (unassigned / non-stereogenic) are excluded.
    atm_ste_keys = dict_.keys_by_value(_atom_stereo_parities(sgr),
                                       lambda x: x in [True, False])
    return atm_ste_keys
def scale(query, pdfs):
    """Scale PDF files to a given page size.

    Args:
        query (str): target page size as "WxH" in inches, e.g. "8.5x11".
        pdfs (iterable of str): paths of the PDF files to scale.

    Each scaled copy is written next to the original as
    "<name> (scaled).pdf". Pages are scaled uniformly (aspect ratio
    preserved) so both dimensions fit inside the target page.
    Encrypted or malformed inputs abort with a user notification.
    """
    try:
        for pdf in pdfs:
            reader = PdfFileReader(pdf, strict=False)
            if reader.isEncrypted:
                raise FileEncryptedError
            writer = PdfFileWriter()
            # Convert the "WxH" query (inches) to points (1 inch = 72 pt).
            w, h = [float(i) * 72 for i in query.split('x')]
            for i in xrange(reader.numPages):
                inp_page = reader.getPage(i)
                inp_page_w = float(inp_page.mediaBox[2])
                inp_page_h = float(inp_page.mediaBox[3])
                # Renamed from `scale`, which shadowed this function's name.
                scale_factor = min(w / inp_page_w, h / inp_page_h)
                out_page = PageObject.createBlankPage(None, w, h)
                out_page.mergeScaledTranslatedPage(inp_page, scale_factor, 0, 0)
                writer.addPage(out_page)
            noextpath = os.path.splitext(pdf)[0]
            out_file = '{} (scaled).pdf'.format(noextpath)
            with open(out_file, 'wb') as f:
                writer.write(f)
    except FileEncryptedError:
        notify.notify('Alfred PDF Tools',
                      'Scale action cannot handle an encrypted PDF file.')
    except PdfReadError:
        notify.notify('Alfred PDF Tools',
                      'Cannot scale a malformed PDF file.')
def get_node_rd(graph, k=3):
    """
    Get k nodes to defend based on Recalculated Degree (RD) Removal :cite:`holme2002attack`.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :return: a list of nodes to defend
    """
    # Defense and attack use the same RD node selection, so delegate.
    return get_node_rd_attack(graph, k)
def get_frog():
    """Returns the interface object to frog NLP. (There should only be one
    instance, because it spawns a frog process that consumes a lot of RAM.)
    """
    # Lazily create the module-level singleton on first use.
    global FROG
    if FROG is None:
        # Only tokenization, lemmatization and multi-word-unit detection are
        # enabled; the heavier components (NER, parsing, ...) are off.
        FROG = frog.Frog(frog.FrogOptions(
            tok=True, lemma=True, morph=False, daringmorph=False, mwu=True,
            chunking=False, ner=False, parser=False
        # NOTE(review): hard-coded absolute config path — breaks outside this
        # deployment; consider making it configurable.
        ), "/home/rahiel/hortiradar/venv/share/frog/nld/frog.cfg")
    return FROG
def fetch(url: str, **kwargs) -> Selector:
    """
    Send HTTP request and parse the response as a DOM selector.

    Args:
        url (str): The url of the site.
        **kwargs: forwarded to ``requests.get``. Two keys are handled
            specially: ``headers`` defaults to DEFAULT_HEADERS, and
            ``encoding`` (default DEFAULT_ENCODING) sets the response
            encoding and is NOT forwarded to requests.

    Returns:
        Selector: allows you to select parts of HTML text using CSS or
        XPath expressions, or ``None`` if the request failed (the error
        is printed).
    """
    # Bug fix: ``encoding`` was previously read with .get() but left in
    # kwargs, so requests.get() would raise TypeError when it was passed.
    encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
    kwargs.setdefault('headers', DEFAULT_HEADERS)
    try:
        res = requests.get(url, **kwargs)
        res.encoding = encoding
        res.raise_for_status()
    except requests.RequestException as e:
        # Best-effort: report the error and return None (previous behavior).
        print(e)
        return None
    return Selector(text=res.text)
def calc_most_populare_per_company(df):
    """For each company, find the car it ordered most often and print
    the resulting table.

    Arguments:
        df {dict} -- A pandas dataframe which is a dict-like container for series.
    """
    header_text = "Table 3: Most populare car per company"
    # Count orders per (company, car), rank by count, keep the top row
    # of each company.
    counts = df.groupby(['company_id', 'car'])['car'].agg(['count'])
    ranked = counts.sort_values(by='count', ascending=False).reset_index()
    mpc = ranked.drop_duplicates('company_id', keep='first')
    print_result(mpc, header_text)
def growth(x, a, b):
    """ Growth model. a is the value at t=0. b is the so-called R number
    (growth factor per generation), with a fixed generation time of 4 time
    units: value(x) = a * b**(x / 4).

    Bug fix: the original formula raised ``a * 0.5`` (instead of 0.5) to the
    half-life exponent, which gives growth(0) != a; the docstring itself
    said "Doesnt work. FIX IT".
    """
    return a * np.power(b, x / 4)
def __to_localdatetime(val):
    """Convert val into a local datetime for tz Europe/Amsterdam.

    Returns None when `val` is not a string in the expected format
    (e.g. "2019-02-03T19:20:00") — callers treat that as "no timestamp".
    """
    try:
        # "timestamp": "2019-02-03T19:20:00",
        dt = datetime.strptime(val, __DATE_FORMAT)
        # localize() attaches the timezone without shifting the wall time.
        dt = pytz.timezone(__TIMEZONE).localize(dt)
        return dt
    except (ValueError, TypeError):
        # ValueError: bad format; TypeError: val is not a string.
        return None
def skip_pfcwd_test(duthost, trigger_pfcwd):
    """
    Skip PFC watchdog tests that may cause fake alerts.

    PFC watchdog on Broadcom devices use some approximation techniques to detect
    PFC storms, which may cause some fake alerts. Therefore, we skip test cases
    whose trigger_pfcwd is False for Broadcom devices.

    Args:
        duthost (obj): device to test
        trigger_pfcwd (bool): if PFC watchdog is supposed to trigger

    Returns:
        N/A
    """
    # pytest_require skips the current test when the condition is False.
    pytest_require(trigger_pfcwd is True or is_broadcom_device(duthost) is False,
                   'Skip trigger_pfcwd=False test cases for Broadcom devices')
def imgMinMaxScaler(img, scale_range):
    """Linearly rescale image intensities into ``scale_range``.

    :param img: image (array-like) to be rescaled
    :param scale_range: (tuple) (min, max) of the desired rescaling
    :returns: uint8 ndarray with values rounded into ``scale_range``
    :raises ValueError: if ``img`` is constant (max == min), which
        previously produced a division by zero and NaNs silently cast
        to uint8 garbage (masked by a global warnings filter).
    """
    img = np.asarray(img, dtype="float64")
    lo = np.min(img)
    hi = np.max(img)
    if hi == lo:
        # Explicit failure instead of NaN garbage; this also removes the
        # need for the original module-wide warnings.filterwarnings("ignore")
        # side effect.
        raise ValueError("Cannot rescale a constant image (max == min).")
    img_std = (img - lo) / (hi - lo)
    img_scaled = img_std * float(scale_range[1] - scale_range[0]) + float(
        scale_range[0]
    )
    # round to the closest integer and convert to uint8
    return np.rint(img_scaled).astype("uint8")
def conditional_vff(Xnew, inducing_variable, kernel, f, *,
                    full_cov=False, full_output_cov=False, q_sqrt=None, white=False):
    """
    - Xnew are the points of the data or minibatch, size N x D (tf.array, 2d)
    - feat is an instance of features.InducingFeature that provides `Kuu` and `Kuf` methods
      for Fourier features, this contains the limits of the bounding box and the frequencies
    - f is the value (or mean value) of the features (i.e. the weights)
    - q_sqrt (default None) is the Cholesky factor of the uncertainty about f
      (to be propagated through the conditional as per the GPflow inducing-point implementation)
    - white (defaults False) specifies whether the whitening has been applied
      (NOTE: only white=False is implemented here)

    Given the GP represented by the inducing points specified in `feat`, produce the mean and
    (co-)variance of the GP at the points Xnew.

    Xnew :: N x D
    Kuu :: M x M
    Kuf :: M x N
    f :: M x K, K = 1
    q_sqrt :: K x M x M, with K = 1
    """
    if full_output_cov:
        raise NotImplementedError
    # num_data = tf.shape(Xnew)[0]  # M
    num_func = tf.shape(f)[1]  # K
    Kuu = cov.Kuu(inducing_variable, kernel)  # this is now a LinearOperator
    Kuf = cov.Kuf(inducing_variable, kernel, Xnew)  # still a Tensor
    # Kuu^{-1} Kuf via the LinearOperator solve (avoids forming the inverse)
    KuuInv_Kuf = Kuu.solve(Kuf)
    # compute the covariance due to the conditioning
    if full_cov:
        fvar = kernel(Xnew) - tf.matmul(Kuf, KuuInv_Kuf, transpose_a=True)
        shape = (num_func, 1, 1)
    else:
        # diag(Kuf^T Kuu^{-1} Kuf) without materializing the full N x N matrix
        KufT_KuuInv_Kuf_diag = tf.reduce_sum(Kuf * KuuInv_Kuf, axis=-2)
        fvar = kernel(Xnew, full=False) - KufT_KuuInv_Kuf_diag
        shape = (num_func, 1)
    fvar = tf.expand_dims(fvar, 0) * tf.ones(shape, dtype=gpflow.default_float())  # K x N x N or K x N
    # another backsubstitution in the unwhitened case
    if white:
        raise NotImplementedError
    A = KuuInv_Kuf
    # construct the conditional mean
    fmean = tf.matmul(A, f, transpose_a=True)
    if q_sqrt is not None:
        if q_sqrt.get_shape().ndims == 2:
            # LTA = A * tf.expand_dims(q_sqrt, 2)  # K x M x N
            # won't work # make ticket for this?
            raise NotImplementedError
        elif q_sqrt.get_shape().ndims == 3:
            # L = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # K x M x M
            # K x M x N
            # A_tiled = tf.expand_dims(A.get(), 0) * tf.ones((num_func, 1, 1), dtype=float_type)
            # LTA = tf.matmul(L, A_tiled, transpose_a=True)  # K x M x N
            # TODO the following won't work for K > 1
            assert q_sqrt.shape[0] == 1
            # LTA = (A.T @ DenseMatrix(q_sqrt[:,:,0])).T.get()[None, :, :]
            ATL = tf.matmul(A, q_sqrt, transpose_a=True)
        else:
            raise ValueError("Bad dimension for q_sqrt: %s" %
                             str(q_sqrt.get_shape().ndims))
        if full_cov:
            # fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True)  # K x N x N
            fvar = fvar + tf.matmul(ATL, ATL, transpose_b=True)  # K x N x N
        else:
            # fvar = fvar + tf.reduce_sum(tf.square(LTA), 1)  # K x N
            fvar = fvar + tf.reduce_sum(tf.square(ATL), 2)  # K x N
    fvar = tf.transpose(fvar)  # N x K or N x N x K
    return fmean, fvar
def compute_stat(a_basedata_dir, a_dir1, a_dir2, a_ptrn="",
                 a_cmp=BINARY_OVERLAP):
    """Compare markables in two annotation directories.

    Args:
        a_basedata_dir (str): directory containing basedata for MMAX project
        a_dir1 (str): directory containing markables for the first annotator
        a_dir2 (str): directory containing markables for the second annotator
        a_ptrn (str): shell pattern for markable files
        a_cmp (int): mode for comparing two annotation spans

    Returns:
        (void) — results are accumulated in the module-level STATISTICS
        via _update_stat().
    """
    global STATISTICS
    # find annotation files from first directory
    # NOTE: glob yields full paths while os.listdir yields bare names; both
    # work below because only the basename is used to re-join with a_dir1.
    if a_ptrn:
        dir1_iterator = glob.iglob(os.path.join(a_dir1, a_ptrn))
    else:
        dir1_iterator = os.listdir(a_dir1)
    # iterate over files from the first directory
    f1 = f2 = ""
    basename1 = ""
    base_key = ""
    fd1 = fd2 = None
    t1 = t2 = None
    for f1 in dir1_iterator:
        # get name of second file
        basename1 = os.path.basename(f1)
        # print("Processing file '{:s}'".format(f1), file=sys.stderr)
        f2 = os.path.join(a_dir2, basename1)
        # open both files for reading
        fd1 = open(os.path.join(a_dir1, basename1), 'r')
        try:
            t1 = ET.parse(fd1)
        except (IOError, ET.ParseError):
            # unreadable/malformed file from annotator 1 -> treated as absent
            t1 = None
        finally:
            fd1.close()
        # read XML information from second file ignoring non-existent, empty,
        # and wrong formatted files
        try:
            fd2 = open(f2, 'r')
            try:
                t2 = ET.parse(fd2)
            finally:
                fd2.close()
        except (IOError, ET.ParseError):
            t2 = None
        if t1 is None and t2 is None:
            continue
        # determine the name of the markable for which we should calculate
        # annotations
        mname = MRKBL_NAME_RE.match(basename1).group(1).lower()
        base_key = MARK_SFX_RE.sub("", basename1)
        # compare two XML trees
        _update_stat(t1, t2, base_key, mname, a_cmp)
def gtzan_music_speech_download(dst='gtzan_music_speech'):
    """Download the GTZAN music and speech dataset.

    Parameters
    ----------
    dst : str, optional
        Location to put the GTZAN music and speech dataset.
    """
    url = 'http://opihi.cs.uvic.ca/sound/music_speech.tar.gz'
    download_and_extract_tar(url, dst)
def xor(text, key):
    """Return ``text`` XORed character-wise with a repeating ``key``.

    Applying the same key twice restores the original text.

    :param text: string to transform
    :param key: non-empty key string, repeated/truncated to len(text)
    :raises ValueError: if ``key`` is empty while ``text`` is not
        (the original ``key += key`` loop hung forever in that case).
    """
    if text and not key:
        raise ValueError("key must be non-empty")
    # cycle() repeats the key lazily; zip truncates it to len(text).
    return "".join(chr(ord(a) ^ ord(b)) for a, b in zip(text, cycle(key)))
def get_emojis_voc_counts(path):
    """
    Generate a value count of words for every emoji present in the csv files
    found in the child directories of "path".

    Args:
        path (str): parent path of the csv files
    Return:
        em2vocab (dict of dict): maps each emoji to a {word: count} dict
    """
    root = Path(path)
    em2vocab = {}
    # Fixed: the loop variable no longer shadows the `path` argument.
    for csv_path in root.glob("**/[0-9]*.csv"):
        df = pd.read_csv(csv_path)
        for em in (col for col in df.columns if col in EMOJIS):
            vocab = em2vocab.setdefault(em, {})
            # Series.items() replaces iteritems(), which was removed in pandas 2.0.
            for word, count in df[em].value_counts().items():
                vocab[word] = vocab.get(word, 0) + count
    return em2vocab
async def test_camera_snapshot_connection_closed(driver):
    """Test camera snapshot when the other side closes the connection.

    The mocked transport reports ``is_closing() == True``, so even though
    the accessory produces a snapshot, nothing may be written back to the
    peer and the response task must still complete without raising.
    """
    loop = MagicMock()
    transport = MagicMock()
    # Simulate the peer having already closed the connection.
    transport.is_closing = Mock(return_value=True)
    connections = {}
    async def _async_get_snapshot(*_):
        return b"fakesnap"
    acc = Accessory(driver, "TestAcc")
    acc.async_get_snapshot = _async_get_snapshot
    driver.add_accessory(acc)
    hap_proto = hap_protocol.HAPServerProtocol(loop, connections, driver)
    hap_proto.connection_made(transport)
    hap_proto.hap_crypto = MockHAPCrypto()
    hap_proto.handler.is_encrypted = True
    with patch.object(hap_proto.transport, "write") as writer:
        # Request a 640x360 snapshot of accessory aid 1411620844.
        hap_proto.data_received(
            b'POST /resource HTTP/1.1\r\nHost: HASS\\032Bridge\\032BROZ\\0323BF435._hap._tcp.local\r\nContent-Length: 79\r\nContent-Type: application/hap+json\r\n\r\n{"image-height":360,"resource-type":"image","image-width":640,"aid":1411620844}'  # pylint: disable=line-too-long
        )
        hap_proto.close()
        await hap_proto.response.task
        await asyncio.sleep(0)
    # The closed transport must never have been written to.
    assert writer.call_args_list == []
    hap_proto.close()
def test_create_influxdb_sink() -> None:
    """Test create influxdb-sink connector with default configuration."""
    result = CliRunner().invoke(
        main, ["create", "influxdb-sink", "--dry-run", "t1"]
    )
    assert result.exit_code == 0
    # This query is built by InfluxConfig.update_influx_kcql()
    expected_kcql = (
        '"connect.influx.kcql": '
        '"INSERT INTO t1 SELECT * FROM t1 WITHTIMESTAMP sys_time()"'
    )
    assert expected_kcql in result.output
    # Topics are added by ConnectConfig.update_topics()
    assert '"topics": "t1"' in result.output
def write_pic(svg_path, filebuffer, picture_width, picture_height, vbox_h, mcv):
    """Write picture data.

    Loads the SVG template (first existing of TEMPLATE_PATH_1 /
    TEMPLATE_PATH_2), substitutes all placeholders with the provided
    values in a single regex pass, and writes the result to ``svg_path``.
    Exits the process if no template can be located.
    """
    if os.path.isfile(TEMPLATE_PATH_1):
        template_file = TEMPLATE_PATH_1
    elif os.path.isfile(TEMPLATE_PATH_2):
        template_file = TEMPLATE_PATH_2
    else:
        # Plain string: the old f-string had no placeholders.
        sys.exit("Error! Cannot locate svg template")
    with open(template_file, "r") as template_fh:
        template = template_fh.read()
    # Vertical offset for the viewBox; never positive.
    vbox_val = min(0, MAX_VERTICAL_SPACE / 2 - vbox_h)
    # replace all placeholders with our values
    rep = {"FILE_BUFFER": str(filebuffer),
           "PICTURE_WIDTH": str(picture_width),
           "PICTURE_HEIGHT": str(picture_height),
           "VBOX": str(vbox_val),
           "EXON_ANC_STYLE": EXON_ANC_STYLE,
           "EXON_NON_ANC_STYLE": EXON_NON_ANC_STYLE,
           "STOP_LABEL_FONTSIZE": str(STOP_LABEL_FONTSIZE),
           "FONT_FAMILY": FONTFAMILY,
           "MOUSEOVERCOUNTER": str(mcv)}
    rep_escaped = {re.escape(k): v for k, v in rep.items()}
    pattern = re.compile("|".join(rep_escaped.keys()))
    text = pattern.sub(lambda m: rep_escaped[re.escape(m.group(0))], template)
    # Context manager closes the handle even if write() raises
    # (the original leaked the descriptor on error).
    with open(svg_path, "w") as out_fh:
        out_fh.write(text)
def test_requisites_watch_any(state, state_tree):
    """
    Call an sls file containing several ``watch_any`` requisites.
    Ensure that some of the watched states fail and that the execution
    order is right (the watchers A and E run after their watched states).
    """
    # Platform-appropriate always-succeeding / always-failing commands.
    if salt.utils.platform.is_windows():
        cmd_true = "exit"
        cmd_false = "exit /B 1"
    else:
        cmd_true = "true"
        cmd_false = "false"
    sls_contents = """
    A:
      cmd.wait:
        - name: '{cmd_true}'
        - watch_any:
          - cmd: B
          - cmd: C
          - cmd: D
    B:
      cmd.run:
        - name: '{cmd_true}'
    C:
      cmd.run:
        - name: '{cmd_false}'
    D:
      cmd.run:
        - name: '{cmd_true}'
    E:
      cmd.wait:
        - name: '{cmd_true}'
        - watch_any:
          - cmd: F
          - cmd: G
          - cmd: H
    F:
      cmd.run:
        - name: '{cmd_true}'
    G:
      cmd.run:
        - name: '{cmd_false}'
    H:
      cmd.run:
        - name: '{cmd_false}'
    """.format(
        cmd_true=cmd_true, cmd_false=cmd_false
    )
    # __run_num__ encodes the execution order salt must follow.
    expected_result = {
        "cmd_|-A_|-{}_|-wait".format(cmd_true): {
            "__run_num__": 4,
            "comment": 'Command "{}" run'.format(cmd_true),
            "result": True,
            "changes": True,
        },
        "cmd_|-B_|-{}_|-run".format(cmd_true): {
            "__run_num__": 0,
            "comment": 'Command "{}" run'.format(cmd_true),
            "result": True,
            "changes": True,
        },
        "cmd_|-C_|-{}_|-run".format(cmd_false): {
            "__run_num__": 1,
            "comment": 'Command "{}" run'.format(cmd_false),
            "result": False,
            "changes": True,
        },
        "cmd_|-D_|-{}_|-run".format(cmd_true): {
            "__run_num__": 2,
            "comment": 'Command "{}" run'.format(cmd_true),
            "result": True,
            "changes": True,
        },
        "cmd_|-E_|-{}_|-wait".format(cmd_true): {
            "__run_num__": 9,
            "comment": 'Command "{}" run'.format(cmd_true),
            "result": True,
            "changes": True,
        },
        "cmd_|-F_|-{}_|-run".format(cmd_true): {
            "__run_num__": 5,
            "comment": 'Command "{}" run'.format(cmd_true),
            "result": True,
            "changes": True,
        },
        "cmd_|-G_|-{}_|-run".format(cmd_false): {
            "__run_num__": 6,
            "comment": 'Command "{}" run'.format(cmd_false),
            "result": False,
            "changes": True,
        },
        "cmd_|-H_|-{}_|-run".format(cmd_false): {
            "__run_num__": 7,
            "comment": 'Command "{}" run'.format(cmd_false),
            "result": False,
            "changes": True,
        },
    }
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        result = normalize_ret(ret.raw)
        assert result == expected_result
def _as_scalar(res, dtype=None):
    """Return None or a TensorVariable whose type is in T.float_scalar_types

    Strips DimShuffle ops wrapping an all-broadcastable (scalar-like)
    variable, dimshuffles it down to a true scalar, and for int/uint
    inputs casts to `dtype` only when the cast cannot change the value.
    Returns None (implicitly) when `res` is not scalar-like, or when an
    int/uint scalar cannot be safely cast.
    """
    if dtype is None:
        dtype = config.floatX
    # Only scalar-like variables qualify: every dimension broadcastable.
    if numpy.all(res.type.broadcastable):
        while res.owner and isinstance(res.owner.op, T.DimShuffle):
            res = res.owner.inputs[0]
        # may still have some number of True's
        if res.type.broadcastable:
            # Drop the remaining broadcastable dims to get a 0-d variable.
            rval = res.dimshuffle()
        else:
            rval = res
        if rval.type.dtype[:3] in ('int', 'uin'):
            # We check that the upcast of res and dtype won't change dtype.
            # If dtype is float64, we will cast int64 to float64.
            # This is valid when res is a scalar used as input to a dot22
            # as the cast of the scalar can be done before or after the dot22
            # and this will give the same result.
            if theano.scalar.upcast(res.dtype, dtype) == dtype:
                return T.cast(rval, dtype)
            else:
                return None
        return rval
def partition_dataset():
    """Partition MNIST across the participating distributed workers.

    Returns the DataLoader over this rank's partition together with the
    per-worker batch size (128 split evenly across the world size).
    """
    mnist = datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ]))
    world_size = dist.get_world_size()
    # Keep the global batch size at 128 regardless of worker count.
    bsz = 128 / float(world_size)
    fractions = [1.0 / world_size for _ in range(world_size)]
    partitioner = DataPartitioner(mnist, fractions)
    local_partition = partitioner.use(dist.get_rank())
    train_set = torch.utils.data.DataLoader(
        local_partition, batch_size=int(bsz), shuffle=True)
    return train_set, bsz
def add_size_to_nus(demo_graph, pop, time_left):
    """
    Return either nu, or [nu0, growth_rate], where nu0 is the size at
    the beginning of the current epoch.

    time_left is how much of the epoch remains; tt = T - time_left is
    the time already elapsed and is used to project nu0 forward.

    Returns None when the node specifies none of the recognized size
    parameter combinations (preserves the original fall-through).
    """
    attrs = demo_graph.nodes[pop]
    if 'nu' in attrs:
        # Constant size for the whole epoch.
        return attrs['nu']
    tt = attrs['T'] - time_left
    if 'nu0' in attrs and 'nuF' in attrs:
        # Growth rate implied by the start/end sizes over the epoch.
        growth_rate = np.log(attrs['nuF'] / attrs['nu0']) / attrs['T']
        nu0 = attrs['nu0'] * np.exp(growth_rate * tt)
        return [nu0, growth_rate]
    if 'growth_rate' in attrs and 'nuF' in attrs:
        growth_rate = attrs['growth_rate']
        # Bug fix: the original referenced an undefined local
        # `growth_rate` here (NameError); use the node's growth rate.
        nu0_pop = attrs['nuF'] * np.exp(-growth_rate * attrs['T'])
        nu0 = nu0_pop * np.exp(growth_rate * tt)
        return [nu0, growth_rate]
    if 'growth_rate' in attrs and 'nu0' in attrs:
        growth_rate = attrs['growth_rate']
        nu0 = attrs['nu0'] * np.exp(growth_rate * tt)
        return [nu0, growth_rate]
def _rand_lognormals(logs, sigma):
"""Mock-point"""
return np.random.lognormal(mean=logs, sigma=sigma, size=logs.shape) | 32,738 |
def hostMicContinuous():
    """
    Sample continuously from the host microphone and run keyword spotting.

    Fills an MFCC feature buffer from live audio frames until `n_frames`
    have been collected, then slides the buffer one frame at a time
    through `model.predict`, printing any keyword whose prediction
    exceeds `threshold`.  Runs forever; interrupt to stop.

    NOTE(review): relies on module-level names (fs, frame_length,
    frame_len, frame_step, frame_count, fft_len, num_mel_bins,
    lower_edge_hertz, upper_edge_hertz, mel_mtx_scale, num_mfcc,
    n_frames, input_shape, model, keywords, mfu, plt, np) — grounded
    only by their usage here; confirm against the rest of the module.
    """
    import sounddevice as sd
    fig = plt.figure()
    ax1 = fig.add_subplot(1,1,1)
    sd.default.samplerate = fs
    sd.default.channels = 1
    # Minimum prediction score for a keyword to be reported.
    threshold = 0.8
    mic_data = np.array([], dtype='int16')
    net_input = np.array([], dtype='int16')
    init = 1
    frame_ctr = 0
    mfcc = np.array([])
    last_pred = 0
    with sd.Stream() as stream:
        print('Filling buffer...')
        while True:
            frame, overflowed = stream.read(frame_length)
            # print('read frame of size', len(frame), 'and type', type(frame), 'overflow', overflowed)
            # Scale the float frame to int16 range and keep channel 0.
            frame = ((2**16/2-1)*frame[:,0]).astype('int16')
            frame_ctr += 1
            nSamples = frame_length
            o_mfcc = mfu.mfcc_mcu(frame, fs, nSamples, frame_len, frame_step, frame_count, fft_len,
                num_mel_bins, lower_edge_hertz, upper_edge_hertz, mel_mtx_scale)
            # Keep the first num_mfcc coefficients of every MFCC frame.
            data_mfcc = np.array([x['mfcc'][:num_mfcc] for x in o_mfcc])
            ax1.clear()
            ax1.plot(frame)
            plt.draw()
            if init == 1:
                # Still filling the buffer: append until n_frames collected.
                net_input = np.append(net_input.ravel(), data_mfcc)
                if (frame_ctr >= n_frames):
                    print('Live!')
                    init = 0
            else:
                # Sliding window: predict, then shift in the newest frame.
                net_input = np.array(net_input.reshape([1]+input_shape), dtype='float32')
                host_pred = model.predict(net_input)[0]
                net_input = np.append(data_mfcc, net_input.ravel()[:-num_mfcc])
                # progress = int(100*host_pred)*'+' + (100-int(100*host_pred))*'-'
                # print('\rprediction: %.3f %s' %(host_pred, progress) , end=" ")
                # print('')
                if (host_pred.max() > threshold):
                    spotted_kwd = keywords[np.argmax(host_pred)]
                    print('Spotted', spotted_kwd)
                last_pred = host_pred
def add_start_end_qualifiers(statement, startVal, endVal):
    """Decorate a statement with start/end time qualifiers, or return None.

    @param statement: The statement to decorate
    @type statement: WD.Statement
    @param startVal: An ISO date string for the starting point
    @type startVal: str, unicode, or None
    @param endVal: An ISO date string for the end point
    @type endVal: str, unicode, or None
    @return: A statement decorated with start/end qualifiers
    @rtype: WD.Statement, or None
    """
    if not isinstance(statement, WD.Statement):
        raise pywikibot.Error(u'Non-statement recieved: %s' % statement)
    if statement.isNone():
        return None
    # Attach a qualifier for each endpoint that was actually supplied,
    # start first, then end.
    if startVal:
        statement.addQualifier(
            WD.Qualifier(
                P=START_P,
                itis=iso_to_WbTime(startVal)))
    if endVal:
        statement.addQualifier(
            WD.Qualifier(
                P=END_P,
                itis=iso_to_WbTime(endVal)))
    return statement
def _CheckFilter(text):
    """Check whether a string could be a filter.

    @rtype: bool
    """
    # A candidate filter contains at least one filter-detection character.
    return not frozenset(text).isdisjoint(FILTER_DETECTION_CHARS)
def test_check_axis_angle():
    """Test input validation for axis-angle representation."""
    # A plain list is converted to a float64 ndarray.
    a_list = [1, 0, 0, 0]
    a = pr.check_axis_angle(a_list)
    assert_array_almost_equal(a_list, a)
    assert_equal(type(a), np.ndarray)
    assert_equal(a.dtype, np.float64)
    # Random axis and an angle well outside [0, pi].
    random_state = np.random.RandomState(0)
    a = np.empty(4)
    a[:3] = pr.random_vector(random_state, 3)
    a[3] = random_state.randn() * 4.0 * np.pi
    a2 = pr.check_axis_angle(a)
    pr.assert_axis_angle_equal(a, a2)
    # After validation the axis is normalized and the angle lies in (0, pi).
    assert_almost_equal(np.linalg.norm(a2[:3]), 1.0)
    assert_greater(a2[3], 0)
    assert_greater(np.pi, a2[3])
    # Wrong shapes must be rejected with a descriptive error.
    assert_raises_regexp(
        ValueError, "Expected axis and angle in array with shape",
        pr.check_axis_angle, np.zeros(3))
    assert_raises_regexp(
        ValueError, "Expected axis and angle in array with shape",
        pr.check_axis_angle, np.zeros((3, 3)))
def listProxyServers():
    """return a list of proxy servers as a list of lists.
    E.g. [['nodename','proxyname'], ['nodename','proxyname']].
    Typical usage:
    for (nodename,proxyname) in listProxyServers():
        callSomething(nodename,proxyname)
    """
    # Proxy servers are simply servers of the PROXY_SERVER type.
    server_type = "PROXY_SERVER"
    return listServersOfType(server_type)
def test_blackbody_emission_value_array():
    """Tests the return value given a temperature array."""
    expected = np.array([1.82147825366e-15, 3.06550295038e-15, 3.78860400626e-15])
    computed = blackbody_emission(TEMPERATURE_ARRAY, FREQUENCY)
    # Absolute tolerance chosen well below the expected magnitudes.
    assert computed == pytest.approx(expected, abs=1e-20)
def api_version(func):
    """
    Decorator that validates the API version before calling `func`.

    Expects the wrapped view to receive a `version` keyword argument.
    Raises ApiVersionException (status code 400, message
    "api version is invalid") when verification fails.

    :param func: view function to wrap
    :return: the wrapped function
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Validate the api version before dispatching to the view.
        if not verify_version(kwargs.get('version')):
            raise ApiVersionException()
        return func(*args, **kwargs)
    return wrapper
def test_prewitt_h_horizontal():
    """Horizontal prewitt on an edge should be a horizontal line."""
    i, j = np.mgrid[-5:6, -5:6]
    # Step edge along the i == 0 row.
    edge_image = (i >= 0).astype(float)
    response = filters.prewitt_h(edge_image)
    # Check if result match transform direction: full response on the
    # edge row, (numerically) zero away from it.
    assert np.all(response[i == 0] == 1)
    assert_allclose(response[np.abs(i) > 1], 0, atol=1e-10)
def count_words(filename):
    """Count the approximate number of words in a file.

    Prints either the approximate word count or an error message when
    the file is missing; returns None in both cases.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as f_obj:
            contents = f_obj.read()
    except FileNotFoundError:
        print("Sorry, the file " + filename + " does not exist.")
    else:
        # Whitespace-splitting gives an approximate word count.
        print("The file " + filename + " has about "
              + str(len(contents.split())) + " words.")
def is_prefix(a: List[Union[int, str]], b: List[Union[int, str]]):
    """Check if `a` is a proper prefix of `b` (equal lists do not count)."""
    # `a` must be strictly shorter and match `b` element-for-element.
    return len(a) < len(b) and a == b[:len(a)]
def shm_data_find(ifo, ldr_type, start, stride, directory='.', verbose=False):
    """a routine to automate discovery of frames within /dev/shm

    Returns (frame_path, gps_start, duration) triples for every frame
    whose span overlaps [start, start+stride].
    """
    end = start + stride
    pattern = shm_glob_tmp % (directory, ifo, ifo, ldr_type)
    discovered = []
    for frame_path in sorted(glob.glob(pattern)):
        gps_start, duration = utils.extract_start_dur(frame_path, suffix=".gwf")
        # keep the frame if there is some overlap with the window
        if gps_start <= end and gps_start + duration > start:
            discovered.append((frame_path, gps_start, duration))
    return discovered
def backward_inference(protocol, subsys_x, t_x, subsys_y, t_y, silent=True):
    """
    Backward inference answers the question:
    Given a measurement result of 'subsys_y' at the end of the protocol,
    what can I say about the result an Agent would have received had she
    done a measurement of 'subsys_x' before running the protocol?
    """
    # Invert the forward-inference table: every output maps back to the
    # list of inputs that can produce it.
    forward_table = forward_inference(protocol, subsys_x, t_x, subsys_y, t_y, silent)['table']
    inverted = {}
    for inpt, possible_outputs in forward_table.items():
        for output in possible_outputs:
            inverted.setdefault(output, []).append(inpt)
    return InferenceTable(subsys_y, t_y,
                          subsys_x, t_x,
                          inverted)
def rainfall_interception_hbv(Rainfall, PotEvaporation, Cmax, InterceptionStorage):
    """
    HBV-style canopy interception for one timestep.

    Returns:
        TF, Interception, IntEvap, InterceptionStorage
    """
    # Interception is capped by the remaining canopy capacity [mm/timestep].
    Interception = pcr.min(Rainfall, Cmax - InterceptionStorage)
    InterceptionStorage += Interception  # current interception storage
    # Throughfall: rainfall the canopy could not capture.
    TF = Rainfall - Interception
    # Evaporation from the interception store, limited by what it holds.
    IntEvap = pcr.min(InterceptionStorage, PotEvaporation)
    InterceptionStorage -= IntEvap
    return TF, Interception, IntEvap, InterceptionStorage
def learn_mspn(
    data,
    ds_context,
    cols="rdc",
    rows="kmeans",
    min_instances_slice=200,
    threshold=0.3,
    max_sampling_threshold_cols=10000,
    max_sampling_threshold_rows=100000,
    ohe=False,
    leaves=None,
    memory=None,
    rand_gen=None,
    cpus=-1
):
    """
    Adapts normal learn_mspn to use custom identity leafs and use sampling for structure learning.
    :param max_sampling_threshold_rows: row cap forwarded to get_splitting_functions
    :param max_sampling_threshold_cols: column cap forwarded to get_splitting_functions
    :param data: training data handed to learn_structure
    :param ds_context: dataset context describing the data
    :param cols: column-splitting method (default "rdc")
    :param rows: row-splitting/clustering method (default "kmeans")
    :param min_instances_slice: slice size threshold used by get_next_operation
    :param threshold: threshold forwarded to get_splitting_functions
    :param ohe: one-hot-encoding flag forwarded to get_splitting_functions
    :param leaves: leaf-creation function; defaults to create_custom_leaf
    :param memory: optional object with a .cache(fn) method (e.g. joblib.Memory)
        used to cache the whole learning run
    :param rand_gen: random state; defaults to np.random.RandomState(17)
    :param cpus: worker count forwarded to get_splitting_functions (-1 by default)
    :return: the learned SPN root node
    """
    if leaves is None:
        leaves = create_custom_leaf
    if rand_gen is None:
        # Fixed seed keeps structure learning reproducible by default.
        rand_gen = np.random.RandomState(17)
    from rspn.learning.structure_learning import get_next_operation, learn_structure
    def l_mspn(data, ds_context, cols, rows, min_instances_slice, threshold, ohe):
        # Inner function so that, when `memory` is given, the entire
        # learning run (splits + structure) can be cached as one unit.
        split_cols, split_rows = get_splitting_functions(max_sampling_threshold_rows, max_sampling_threshold_cols, cols,
                                                        rows, ohe, threshold, rand_gen, cpus)
        nextop = get_next_operation(min_instances_slice)
        node = learn_structure(data, ds_context, split_rows, split_cols, leaves, next_operation=nextop)
        return node
    if memory:
        l_mspn = memory.cache(l_mspn)
    spn = l_mspn(data, ds_context, cols, rows, min_instances_slice, threshold, ohe)
    return spn
def _sanitize_filename(dfile, no_symlink=True):
"""Check and sanitize 'dfile' for use as a target file.
"""
dirname, basename = os.path.split(dfile)
dirname = os.path.abspath(dirname)
dfile = os.path.join(dirname, basename)
if no_symlink:
if os.path.islink(dfile):
msg = ('{} is a symlink and will be changed into a regular file if '
'the compiler writes a compiled file to it')
raise FileExistsError(msg.format(dfile))
elif os.path.exists(dfile) and not os.path.isfile(dfile):
msg = ('{} is a non-regular file and will be changed into a regular '
'one if the compiler writes a compiled file to it')
raise FileExistsError(msg.format(dfile))
os.makedirs(dirname, exist_ok=True)
return dfile | 32,753 |
def _get_replies(
    base_path: Path, message: Dict[str, Any], channel_id: str, channel_info: str
) -> None:
    """
    Fetch a message's reply thread and persist it as JSON.

    Parameters
    ----------
    base_path : Path
        Base output directory for the saved thread.
    message : Dict[str, Any]
        The message whose thread is fetched (must carry "thread_ts").
    channel_id : str
        Channel ID.
    channel_info : str
        Human-readable channel description, used for logging.
    """
    replies = get_replies(channel_id, message)
    if not replies:
        return
    # NOTE: '.' in timestamps like '1638883139.000600' is not
    # filename-safe, so it is replaced with '_'.
    thread_ts = message["thread_ts"].replace(".", "_")
    out_path = base_path / f"{thread_ts}.json"
    logger.info(f"save replies message. {channel_info}, path: {out_path}")
    _save_to_json(replies, out_path)
def generate_image_anim(img, interval=200, save_path=None):
    """
    Given CT img, return an animation across the axial slices.

    img: [D,H,W] or [D,H,W,3]
    interval: delay between slices in ms, default 200
    save_path: path to save the animation if not None, default None
    return: matplotlib.animation.Animation
    """
    fig = plt.figure()
    # One artist list per slice, stepping along the first (axial) axis.
    artist_frames = [[plt.imshow(slice_, animated=True)] for slice_ in img]
    anim = animation.ArtistAnimation(fig, artist_frames, interval=interval, blit=True,
                                     repeat_delay=1000)
    if save_path:
        Writer = animation.writers['ffmpeg']
        # NOTE(review): `writer` is constructed but not passed to
        # anim.save() — preserved as in the original; confirm intent.
        writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
        anim.save(save_path)
    return anim
def get_routes(config, prefix=None, group_by=None):
    """Executes the helper script that extracts the routes out of the
    pyramid app.

    Parameters:
        config (str): path to the pyramid ini config (``~`` is expanded).
        prefix (str): optional URL prefix forwarded to the helper.
        group_by (str): optional grouping forwarded to the helper.

    Returns:
        The JSON-decoded route description printed by the helper script.
    """
    python = sys.executable
    script = os.path.join(os.path.dirname(__file__), "extract.py")
    config = os.path.expanduser(config)
    args = [python, script, config]
    if group_by:
        args.append("--group=" + group_by)
    if prefix:
        args.append("--prefix=" + prefix)
    # subprocess.run replaces the manual Popen/communicate pair; the
    # return code is deliberately not checked, matching prior behavior.
    proc = subprocess.run(args, stdout=subprocess.PIPE)
    return json.loads(proc.stdout.decode("utf-8"))
def square_root(s):
    """Compute the square root of a non-negative number using the
    Babylonian (Heron's) method.

    Raises:
        ValueError: if ``s`` is negative (the original loop would never
            converge).

    Notes:
        Convergence is reached quickly; once the error drops below
        machine precision the value stops changing between steps, so we
        stop when the iterate is unchanged — or when it oscillates
        between two neighbouring floats, which exact-equality
        termination alone would never exit.
    """
    if s < 0:
        raise ValueError("square_root() requires a non-negative argument")
    if s == 0:
        # Guard: the original divided by x == 0 when s == 0.
        return 0.0
    x = s / 2
    if x == 0:  # subnormal s: keep the iterate non-zero
        x = s
    previous = None
    while True:
        nxt = (x + s / x) / 2
        if nxt == x or nxt == previous:
            return nxt
        previous, x = x, nxt
def quisort(uslist, lo=None, hi=None):
    """Sort in-place an unsorted list or slice of a list.

    lo and hi are the inclusive start/stop indices of the slice to sort;
    by default the whole list is sorted. Uses recursive quicksort with a
    Lomuto partition around the last element of the slice.
    """
    lo = 0 if lo is None else lo
    hi = len(uslist) - 1 if hi is None else hi
    if lo >= hi:
        return
    # Partition: move everything <= pivot to the front of the slice.
    pivot = uslist[hi]
    boundary = lo
    for j in range(lo, hi):
        if uslist[j] <= pivot:
            uslist[boundary], uslist[j] = uslist[j], uslist[boundary]
            boundary += 1
    # Put the pivot between the two halves.
    uslist[boundary], uslist[hi] = uslist[hi], uslist[boundary]
    quisort(uslist, lo, boundary - 1)
    quisort(uslist, boundary + 1, hi)
def datetime_range(datetime_start, datetime_end, step_timedelta):
    """Yield a datetime range, in the range [datetime_start; datetime_end[,
    with step step_timedelta."""
    # Validate inputs: both endpoints UTC, positive step, non-empty range.
    assert_is_utc_datetime(datetime_start)
    assert_is_utc_datetime(datetime_end)
    ras(isinstance(step_timedelta, datetime.timedelta))
    ras(datetime_start < datetime_end)
    ras(step_timedelta > datetime.timedelta(0))
    current = datetime_start
    # start < end is guaranteed above, so the start is always yielded.
    while current < datetime_end:
        yield current
        current += step_timedelta
def is_generator(f):
    """Return True if a function is a generator."""
    # CO_GENERATOR is set on code objects compiled from functions
    # containing `yield`.
    return bool(f.__code__.co_flags & CO_GENERATOR)
def reward(sample_solution, use_cuda=True, name='reward'):
    """
    Compute the reward for a batch of sampled solutions.

    Args:
        sample_solution seq_len of [batch_size]

    For the 'CH' branch, each batch element's points are fed
    incrementally to scipy's ConvexHull and the maximum `hull.area`
    seen over the prefixes is that element's reward.

    NOTE(review): for 2-D points scipy's ConvexHull.area is the hull
    *perimeter*; the enclosed area is ConvexHull.volume. Confirm which
    quantity `poly_area` is meant to hold.
    NOTE(review): `reward` is unbound (NameError at the return) if
    `name` contains neither 'TSP' nor 'CH'; `use_cuda` is unused in
    the active branch.
    """
    '''
    if 'TSP' in name:
        batch_size = sample_solution[0].size(0)
        n = len(sample_solution)
        tour_len = Variable(torch.zeros([batch_size]))
        if use_cuda:
            tour_len = tour_len.cuda()
        for i in range(n - 1):
            distance = torch.norm(sample_solution[i] - sample_solution[i + 1], dim=1)
            tour_len += distance
        distance = torch.norm(sample_solution[n - 1] - sample_solution[0], dim=1)
        tour_len += distance
        reward = tour_len
    '''
    if 'CH' in name:
        batch_size = sample_solution[0].size(0)
        n = len(sample_solution)
        #print "batch_size batch_size batch_size"
        #print batch_size
        #print "n n n"
        #print n
        #tour_area = Variable(torch.zeros([batch_size]))
        vec_area = Variable(torch.zeros([batch_size]))
        #if use_cuda:
        #area = area.cuda()
        for s in range(batch_size):
            points = []
            poly_area = 0
            for t in range(n):
                # Grow the point set one step at a time; a hull needs
                # at least 3 points before it can be computed.
                points.append(sample_solution[t][s].tolist())
                if t >= 2:
                    hull = ConvexHull(points)
                    poly_area = max (hull.area,poly_area)
            vec_area[s] = poly_area
        #for i in range(n - 1):
        #area = torch.norm(sample_solution[i] - sample_solution[i + 1], dim=1)
        #tour_area += area
        #area = torch.norm(sample_solution[n - 1] - sample_solution[0], dim=1)
        #tour_area += area
        #reward = tour_area
        reward = vec_area
    return reward
def addRegionEntry(Id: int, parentId: int, name: str, RegionType: RegionType, alias=''):
    """
    Add a custom address (region) entry to the geocoding service.

    :param Id: ID of the address entry
    :param parentId: parent ID of the address; must already exist
    :param name: name of the address
    :param RegionType: type of the address (RegionType)
    :param alias: alias of the address, default=''
    :return: True if the entry was added, False otherwise
    """
    geocoding = jpype.JClass('io.patamon.geocoding.Geocoding')
    try:
        geocoding.addRegionEntry(Id, parentId, name, RegionType, alias)
        return True
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; JVM-side failures still return False.
        return False
def check_movement(pagination):
    """Check for ability to navigate backward or forward between pages.

    Inspects the pagination widget's page buttons: a leading "<" button
    means we can move back, a trailing ">" button means we can move
    forward. Returns [can_move_back, can_move_forward] as booleans.

    NOTE(review): uses the deprecated find_element_by_* selenium API,
    and the broad `except Exception` blocks are deliberate best-effort
    probing (missing buttons or wrong labels both mean "can't move").
    """
    pagination_movements = pagination.find_element_by_xpath(
        './/div[@class="search_pagination_right"]'
    ).find_elements_by_class_name("pagebtn")
    # Check for ability to move back
    try:
        move_back_a = pagination_movements[0]
        assert move_back_a.text == "<"
        can_move_back = True
        print("Can move back, ", end="")
    except Exception:
        can_move_back = False
        print("Can not move back, ", end="")
    # Check for ability to move forward
    try:
        move_forward_a = pagination_movements[-1]
        assert move_forward_a.text == ">"
        can_move_forward = True
        print("Can move forward")
    except Exception:
        can_move_forward = False
        print("Can not move forward, ", end="")
    return [can_move_back, can_move_forward]
def glint_correct_image(imarr, glintarr, nir_band=7):
    """
    Apply the sunglint removal algorithm from section III of Lyzenga et al.
    2006 to a multispectral image array.
    Parameters
    ----------
    imarr : numpy array (RxCxBands shape)
        The multispectral image array. See `OpticalRS.RasterDS` for more info.
    glintarr : numpy array
        A subset of `imarr` from an optically deep location with sun glint.
    nir_band : int (Default value = 7)
        The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
        imagery. If you're working with a different type of imagery, you will
        need figure out the appropriate value to use instead. This is a zero
        indexed number (the first band is 0, not 1).
    Returns
    -------
    numpy array
        A de-glinted copy of `imarr`.
    Notes
    -----
    This deglinting method may not work well on WorldView-2 imagery because the
    bands are not captured exactly concurrently. See section II B of Eugenio et
    al. 2015 [1]_ for more information and a different sunglint correction
    algorithm that may be more appropriate.
    References
    ----------
    .. [1] Eugenio, F., Marcello, J., Martin, J., 2015. High-Resolution Maps of
       Bathymetry and Benthic Habitats in Shallow-Water Environments Using
       Multispectral Remote Sensing Imagery. IEEE Transactions on Geoscience
       and Remote Sensing 53, 3539–3549. doi:10.1109/TGRS.2014.2377300
    """
    # calculate the covariance ratios
    cov_rats = cov_ratios(glintarr, nir_band)
    # get the NIR mean
    nirm = nir_mean(glintarr, nir_band)
    # we don't want to try to apply the correction to the NIR band.
    # Bug fix: range objects have no .remove() on Python 3 — build the
    # band list explicitly instead.
    nbands = imarr.shape[-1]
    bands = [b for b in range(nbands) if b != nir_band]
    outarr = imarr.copy()
    for i, band in enumerate(bands):
        outarr[:, :, band] = imarr[:, :, band] - cov_rats[i] * (imarr[:, :, nir_band] - nirm)
    # this will leave the NIR band unchanged
    return outarr
def draw_reconstructions(ins, outs, states, shape_in, shape_state, Nh):
    """Visualize inputs alongside their reconstructions and hidden-layer states.

    ins -- input vectors
    outs -- reconstructed vectors
    states -- hidden layer state vectors
    shape_in -- input image dimensions, e.g. (28, 28)
    shape_state -- dimensions for a 2D view of the states (e.g. (10, 10) for 100 states)
    Nh -- number of hidden units shown
    """
    plt.figure(figsize=(8, 12 * 4))
    # One row of (input, reconstruction, states) per sample, 20 samples.
    for idx in range(20):
        base = 4 * idx
        plt.subplot(20, 4, base + 1)
        plt.imshow(ins[idx].reshape(shape_in), vmin=0, vmax=1, interpolation="nearest")
        plt.title("Test input")
        plt.subplot(20, 4, base + 2)
        plt.imshow(outs[idx][0:784].reshape(shape_in), vmin=0, vmax=1, interpolation="nearest")
        plt.title("Reconstruction")
        plt.subplot(20, 4, base + 3)
        plt.imshow(states[idx][0:Nh].reshape(shape_state), vmin=0, vmax=1, interpolation="nearest")
        plt.title("States")
    plt.tight_layout()
def get_named_game(id):
    """Get specific game from GB API.

    Queries the Giant Bomb API for the game with the given id and
    returns a single record (not a list), without a guaranteed fallback.
    """
    url = f"{GB_GAME_URL}{id}?format=json&api_key={API_KEY}"
    return query_for_goty(url, expect_list=False, always_return_something=False)
def isoweek_datetime(year, week, timezone='UTC', naive=False):
    """
    Returns a datetime matching the starting point of a specified ISO week
    in the specified timezone (default UTC). Returns a naive datetime in
    UTC if requested (default False).
    >>> isoweek_datetime(2017, 1)
    datetime.datetime(2017, 1, 2, 0, 0, tzinfo=<UTC>)
    >>> isoweek_datetime(2017, 1, 'Asia/Kolkata')
    datetime.datetime(2017, 1, 1, 18, 30, tzinfo=<UTC>)
    >>> isoweek_datetime(2017, 1, 'Asia/Kolkata', naive=True)
    datetime.datetime(2017, 1, 1, 18, 30)
    >>> isoweek_datetime(2008, 1, 'Asia/Kolkata')
    datetime.datetime(2007, 12, 30, 18, 30, tzinfo=<UTC>)
    """
    # Midnight on Monday of the requested ISO week, still timezone-naive.
    week_start = datetime.combine(isoweek.Week(year, week).day(0), datetime.min.time())
    tz = pytz.timezone(timezone) if isinstance(timezone, str) else timezone
    utc_dt = tz.localize(week_start).astimezone(pytz.UTC)
    return utc_dt.replace(tzinfo=None) if naive else utc_dt
def mkdirs(path, filepath=None):
    """
    mkdirs(path)
    Make directory tree for path. If path is a file with an extension, only the folders are created.
    Parameters
    ----------
    path: str
        Full path as directory tree or file
    filepath: bool
        Force to think of path as a file (happens also if a '.' is included in the path)
    Raises
    ------
    OSError
        If the directory tree does not exist and could not be created.
    """
    if filepath or '.' in os.path.basename(path):
        # Treat `path` as a file: only create its parent directories.
        basepath = os.path.dirname(path)
    else:
        basepath = path
    try:
        os.makedirs(basepath)
    except OSError:
        # Already-existing directories are fine; anything else is fatal.
        if not os.path.isdir(basepath):
            # Fixed quoting: the original "''{}''" collapsed to bare {}.
            raise OSError("Path '{}' does not exist and was not created".format(path))
def map_new_program_rest_to_new_control_rest(request):
    """Map Control to Program object via REST API return response from server.

    Pytest fixture: delegates to _common_fixtures keyed by the requesting
    fixture's name and yields its result.
    """
    yield _common_fixtures(request.fixturename)
def test_config_check(global_integration_cli_args, local_config):
    """
    Smoke-test `datacube -v system check`: it should exit cleanly and
    echo the configured host and user.

    :type global_integration_cli_args: tuple[str]
    :type local_config: datacube.config.LocalConfig
    """
    # This is not a very thorough check, we just check to see that
    # it prints something vaguely related and does not error-out.
    opts = list(global_integration_cli_args) + ['-v', 'system', 'check']
    result = _run_cli(
        datacube.scripts.cli_app.cli,
        opts
    )
    assert result.exit_code == 0
    assert 'Host: {}'.format(local_config.db_hostname) in result.output
    assert 'User: {}'.format(local_config.db_username) in result.output
def returned(n):
    """Generate a random walk and return True if the walker has returned to
    the origin after taking `n` steps.

    NOTE(review): uses `xrange` and an fn.py-style `>>` pipeline
    (`drop`/`takei`), so this module appears to target Python 2 with a
    functional-composition library — confirm before porting.
    """
    ## `takei` yield lazily so we can short-circuit and avoid computing the rest of the walk
    # drop(1) skips the starting position (the origin itself); takei then
    # inspects at most n-1 subsequent positions.
    for pos in randwalk() >> drop(1) >> takei(xrange(n-1)):
        if pos == Origin:
            return True
    return False
def test_trie_can_insert_str(trie):
    """Test if we can insert str into tree."""
    trie.insert('hello')
    # The inserted word must then be reported as contained.
    assert trie.contains('hello')
def get_ego_as_agent(frame: np.ndarray) -> np.ndarray:
    """Get a valid agent with information from the AV. Ford Fusion extent is used.

    :param frame: The frame from which the Ego states are extracted
    :return: An agent numpy array of the Ego states (length 1)
    """
    ego = np.zeros(1, dtype=AGENT_DTYPE)
    record = ego[0]  # structured-array element: assignments write through
    record["centroid"] = frame["ego_translation"][:2]
    record["yaw"] = rotation33_as_yaw(frame["ego_rotation"])
    record["extent"] = np.asarray((EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, EGO_EXTENT_HEIGHT))
    return ego
def _handle_get_api_stream(handler, path_match, data):
    """ Provide a streaming interface for the event bus.

    Sends server-sent events ("data: ..." frames) for every bus event,
    optionally restricted to a comma-separated list of event types, and
    pings the client every STREAM_PING_INTERVAL. The loop exits when a
    write fails (broken client) or on EVENT_HOMEASSISTANT_STOP.
    """
    gracefully_closed = False
    hass = handler.server.hass
    wfile = handler.wfile
    # Serializes writes from the ping loop and the bus-listener callback.
    write_lock = threading.Lock()
    # Set to signal that the stream should terminate.
    block = threading.Event()
    restrict = data.get('restrict')
    if restrict:
        restrict = restrict.split(',')
    def write_message(payload):
        """ Writes a message to the output. """
        with write_lock:
            msg = "data: {}\n\n".format(payload)
            try:
                wfile.write(msg.encode("UTF-8"))
                wfile.flush()
            except IOError:
                # Client went away: signal the main loop to stop.
                block.set()
    def forward_events(event):
        """ Forwards events to the open request. """
        nonlocal gracefully_closed
        # Skip once closing, for time ticks, and for filtered-out types.
        if block.is_set() or event.event_type == EVENT_TIME_CHANGED or \
                restrict and event.event_type not in restrict:
            return
        elif event.event_type == EVENT_HOMEASSISTANT_STOP:
            gracefully_closed = True
            block.set()
            return
        write_message(json.dumps(event, cls=rem.JSONEncoder))
    handler.send_response(HTTP_OK)
    handler.send_header('Content-type', 'text/event-stream')
    handler.end_headers()
    hass.bus.listen(MATCH_ALL, forward_events)
    # Ping periodically; block.wait doubles as the ping interval timer.
    while True:
        write_message(STREAM_PING_PAYLOAD)
        block.wait(STREAM_PING_INTERVAL)
        if block.is_set():
            break
    if not gracefully_closed:
        _LOGGER.info("Found broken event stream to %s, cleaning up",
                     handler.client_address[0])
    hass.bus.remove_listener(MATCH_ALL, forward_events)
def import_dir(path):
    """
    Imports all XML files in the given directory.

    Indexing is disabled while importing (for speed) and the index is
    rebuilt once afterwards.
    """
    os.environ['DISABLE_INDEXING_DURING_IMPORT'] = 'True'
    try:
        for _file in os.listdir(path):
            test_utils.import_xml_or_zip("%s%s" % (path, _file))
    finally:
        # Robustness fix: re-enable indexing even if an import raised,
        # so later imports are not silently left unindexed.
        os.environ['DISABLE_INDEXING_DURING_IMPORT'] = 'False'
    update_index.Command().handle(using=[settings.TEST_MODE_NAME,])
def load_users(dir="private/users"):
    """load_users will load up all of the user json files in the dir.

    Returns a dict mapping user_id -> user record. Files that cannot be
    decoded, or that lack a "user_id", are reported and skipped —
    previously all such records collapsed onto a single None key,
    silently overwriting each other.
    """
    users = {}
    for filename in get_files_in_dir(dir):
        filepath = join(dir, filename)
        with open(filepath) as file:
            try:
                user = json.load(file)
            except json.JSONDecodeError:
                print("Could not decode file {0}".format(filepath))
                continue
            except UnicodeDecodeError:
                print("Could not decode unicode in {0}".format(filepath))
                continue
        user_id = user.get("user_id")
        if user_id is None:
            print("Missing user_id in {0}".format(filepath))
            continue
        users[user_id] = user
    return users
def _scale_annots_dict(annot, new_sz, ann_im_sz):
"""Scale annotations to the new_sz, provided the original ann_im_sz.
:param annot: bounding box in dict format
:param new_sz: new size of image (after linear transforms like resize)
:param ann_im_sz: original size of image for which the bounding boxes were given.
:return:
"""
d = {}
for k, v in annot.items():
if k.startswith('x'):
v_ = new_sz[0] * v / ann_im_sz[0]
elif k.startswith('y'):
v_ = new_sz[1] * v / ann_im_sz[1]
else:
# don't destroy other keys
v_ = v
d.update({k: v_})
return d | 32,777 |
def test_process_children(cbcsdk_mock, get_summary_response, guid, expected_num_children):
    """Testing Process.children property.

    Verifies every child is a Process instance and the number of children
    matches the fixture's expectation.
    """
    # mock the search validation
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # mock the POST of a process search
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the POST of a summary search (using same Job ID)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check process search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
                             "search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check summary search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                             "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SUMMARY_RESP)
    # mock the GET to get process search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                             "search_jobs/2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to get summary search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                             "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920/results"),
                             get_summary_response)
    api = cbcsdk_mock.api
    process = api.select(Process, guid)
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary", get_summary_response)
    # if there's children, check that Process.children returns the right objects
    if isinstance(process.summary.children, list):
        assert isinstance(process.children, list)
        # BUGFIX: `assert [isinstance(...) for ...]` was always truthy for a
        # non-empty list, so it could never fail; `all(...)` actually checks.
        assert all(isinstance(child, Process) for child in process.children)
    else:
        assert process.children == []
    assert len(process.children) == expected_num_children
def get_handlers_in_instance(inst: Any) -> Tuple[List[Handler], List[Handler]]:
    """Collect the registration and subscription handlers of an instance.

    Args:
        inst: Instance whose callable members are inspected.

    Returns:
        2-tuple ``(registrations, subscriptions)`` containing every handler
        found on the instance's callable members.

    Raises:
        TypeError: If ``inst`` is a class rather than an instance.
    """
    if inspect.isclass(inst):
        raise TypeError("expected instance, not class. "
                        "Please create an instance of your template class first")
    registrations: List[Handler] = []
    subscriptions: List[Handler] = []
    for _, member in inspect.getmembers(inst, callable):
        # Bound methods need the bound-handler extraction path.
        extract = get_bound_handlers if inspect.ismethod(member) else get_handlers
        registration, subscription = extract(member)
        if registration is not None:
            registrations.append(registration)
        if subscription is not None:
            subscriptions.append(subscription)
    return registrations, subscriptions
def list(show_deleted: bool, verbose: bool):
    """
    List submitted jobs.
    Note that, in the non-verbose output, `Last Result` is reporting according to
    whether Automate could successfully submit the job. It's possible for Transfer
    to run into errors attempting to run your submission, which timer/Automate are not
    aware of.
    CHECK THE --verbose OUTPUT TO BE CERTAIN YOUR TRANSFERS ARE WORKING.
    """
    # NOTE: `list` shadows the builtin, but the function name is the CLI
    # command name and cannot be changed without breaking the interface.
    # The docstring doubles as the command's help text, so it is left as-is.
    response = job_list(show_deleted=show_deleted)
    show_job_list(response, verbose=verbose)
def django_op_to_flag(op):
    """
    Converts a django admin operation string to the matching
    grainy permission flag

    Arguments:
    - op <str>

    Returns:
    - int: matching flag, or 0 when the operation is unknown
    """
    # EAFP form of DJANGO_OP_TO_FLAG.get(op, 0).
    try:
        return DJANGO_OP_TO_FLAG[op]
    except KeyError:
        return 0
def rgb2gray(images):
    """Convert RGB images to grayscale.

    Uses the luma weights Y' = 0.299 R + 0.587 G + 0.114 B; see
    https://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale

    :param images: array whose last axis holds the colour channels; only
        the first three channels are used.
    :return: array with the channel axis contracted away.
    """
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(images[..., :3], luma_weights)
def suppress_tensorflow_warnings():
    """
    Suppresses tensorflow warnings

    Sets the TF_CPP_MIN_LOG_LEVEL environment variable and silences the
    tensorflow.compat.v1 logging/debugging facilities.
    """
    import os
    # "1" filters out INFO-level messages from TensorFlow's C++ backend.
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
    # Imported lazily/defensively so tensorflow is only required when the
    # suppression is actually requested.
    _tf = _minimal_package_import_check("tensorflow.compat.v1")
    _tf.logging.set_verbosity(_tf.logging.ERROR)
    _tf.debugging.set_log_device_placement(False)
def wav2vec2_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt

    Args:
        ckpt (str): PATH to the checkpoint file.
        *args, **kwargs: forwarded to the upstream expert.

    Returns:
        The instantiated upstream expert.

    Raises:
        FileNotFoundError: if ``ckpt`` does not point to an existing file.
    """
    # Raise a real exception instead of `assert`, which is stripped under -O.
    if not os.path.isfile(ckpt):
        raise FileNotFoundError(f"No such checkpoint file: {ckpt}")
    return _UpstreamExpert(ckpt, *args, **kwargs)
def mIou(y_true, y_pred, n_classes):
    """
    Mean Intersect over Union metric.
    Computes the one versus all IoU for each class and returns the average.
    Classes that do not appear in the provided set are not counted in the average.
    Args:
        y_true (1D-array): True labels
        y_pred (1D-array): Predicted labels
        n_classes (int): Total number of classes
    Returns:
        mean IoU (float); NaN when no class appears in either input
        (e.g. both inputs are empty).
    """
    # Convert once instead of np.array(...) on every loop iteration.
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    iou_sum = 0.0
    n_observed = n_classes
    for cls in range(n_classes):
        truth = (y_true == cls)
        pred = (y_pred == cls)
        inter = np.sum(truth & pred)
        union = np.sum(truth | pred)
        if union == 0:
            # Class absent from both arrays: exclude it from the average.
            n_observed -= 1
        else:
            iou_sum += inter / union
    # BUGFIX: previously divided by zero when no class was observed at all.
    if n_observed == 0:
        return float("nan")
    return iou_sum / n_observed
def drop_last(iterable, n=1):
    """Yield every item of *iterable* except the last *n*."""
    behind, ahead = tee(iterable)
    # Advancing one copy by n makes the zip stop n items before the end.
    return (item for item, _ in zip(behind, islice(ahead, n, None)))
def assert_called_once_with(mocked, *args, **kwargs):
    """Assert that *mocked* was called exactly once, with the given arguments."""
    assert_called_once(mocked)
    expected_call = mock.call(*args, **kwargs)
    assert mocked.call_args == expected_call
def density(height: float) -> float:
    """
    Returns the air density in slug/ft^3 based on altitude.
    Equations from https://www.grc.nasa.gov/www/k-12/rocket/atmos.html
    :param height: Altitude in feet
    :return: Density in slugs/ft^3
    """
    def _rho(pressure, temp_f):
        # Ideal-gas relation; temperature converted to degrees Rankine.
        return pressure/(1718*(temp_f+459.7))

    if height < 36152.0:
        # Troposphere: temperature falls linearly with altitude.
        temp = 59 - 0.00356 * height
        return _rho(2116 * ((temp + 459.7)/518.6)**5.256, temp)
    if 36152 <= height < 82345:
        # Lower stratosphere: constant temperature, exponential pressure.
        return _rho(473.1*np.exp(1.73 - 0.000048*height), -70)
    # Upper stratosphere.
    temp = -205.05 + 0.00164 * height
    return _rho(51.97*((temp + 459.7)/389.98)**-11.388, temp)
def parse(opts):
    """
    Entry point for XML Schema parsing into an OME Model.

    Parses each schema file twice — with a SAX handler to build the model
    objects, and with ElementTree for raw schema access — then hands both
    to OMEModel.process().

    :param opts: options object carrying `args` (schema filenames),
        `namespace` and `lang` attributes.
    :return: result of OMEModel.process().
    :raises ModelProcessingError: if the SAX handler produced no model root.
    """
    # The following two statements are required to "prime" the generateDS
    # code and ensure we have reasonable namespace support.
    filenames = opts.args
    namespace = opts.namespace
    schemas = dict()
    logging.debug("Namespace: %s" % namespace)
    set_type_constants(namespace)
    generateDS.generateDS.XsdNameSpace = namespace
    logging.debug("Type map: %s" % opts.lang.type_map)
    parser = sax.make_parser()
    ch = XschemaHandler()
    parser.setContentHandler(ch)
    for filename in filenames:
        parser.parse(filename)
        # Keep a second, ElementTree view of each schema, keyed by its
        # basename without extension.
        schemaname = os.path.split(filename)[1]
        schemaname = os.path.splitext(schemaname)[0]
        schema = ElementTree.parse(filename)
        schemas[schemaname] = schema
    root = ch.getRoot()
    if root is None:
        raise ModelProcessingError(
            "No model objects found, have you set the correct namespace?")
    root.annotate()
    return OMEModel.process(ch, schemas, opts)
async def test_sensor(hass, stream_reader_writer):
    """Test that sensor works.

    Sets up a config entry against a fully patched (no real socket)
    connection and asserts that at least one entity state exists after
    startup.
    """
    # Patch out all real I/O: closing the stream writer, opening the TCP
    # connection, and the client library's async send.
    with patch("asyncio.StreamWriter.close", return_value=None), patch(
        "asyncio.open_connection",
        return_value=stream_reader_writer,
    ), patch(
        "FoldingAtHomeControl.serialconnection.SerialConnection.send_async",
        return_value=AsyncMock(),
    ):
        entry = MockConfigEntry(
            domain=DOMAIN,
            data={
                CONF_ADDRESS: "localhost",
                CONF_PORT: 36330,
                CONF_PASSWORD: "CONF_UNIT_SYSTEM_IMPERIAL",
            },
        )
        entry.add_to_hass(hass)
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        # The start event triggers the integration's background updates.
        hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
        await hass.async_block_till_done()
        # Give the background task a moment to create entities.
        await asyncio.sleep(1)
        assert len(hass.states.async_all()) > 0
def has_paired_before() -> bool:
    """Simple check for whether a device has previously been paired.

    Only checks that the stored identity has a non-empty UUID.  This does
    not verify that the pairing information is valid or up to date.  The
    assumption being - if it's previously paired, then it has previously
    connected to the internet.
    """
    return IdentityManager.get().uuid != ""
def caller_linkedin(user_input: dict) -> dict:
    """
    Call LinkedIn scraping methods to get info about found and potential subjects.

    Args:
        `user_input`: user input represented as a dictionary.

    Returns:
        `dict`: the dictionary with information about found or potential subjects.
    """
    searcher = LinkedinSearchSubjects(user_input)
    searcher.linkedin_search()
    searcher.linkedin_find_ids()
    searcher.linkedin_search_for_info()
    if searcher.found_subjects_info:
        linkedin_results = {"found_subjects": searcher.found_subjects_info}
    else:
        # No confirmed subjects; fall back to the filtered potential matches.
        linkedin_results = {
            "potential_subjects_after_filtering":
                searcher.potential_subjects_info_after_filtering
        }
    return {"linkedin": linkedin_results}
def service_request_eqf(stub_response):
    """
    Return a function to be used as the value matching a ServiceRequest in
    :class:`EQFDispatcher`.

    :param stub_response: response substituted for the concrete HTTP
        request effect produced by the service request intent.
    """
    def resolve_service_request(service_request_intent):
        # Turn the abstract ServiceRequest intent into a concrete effect
        # chain (authentication + HTTP request) using throwaway
        # collaborators; the throttler is a no-op.
        eff = concretize_service_request(
            authenticator=object(),
            log=object(),
            service_configs=make_service_configs(),
            throttler=lambda stype, method, tid: None,
            tenant_id='000000',
            service_request=service_request_intent)
        # "authenticate"
        eff = resolve_authenticate(eff)
        # make request
        return resolve_effect(eff, stub_response)
    return resolve_service_request
def ScheduleTestPlanCronJob(test_plan_id):
    """Schedules a cron job for a test plan.

    If the test plan is not for cron, no cron job will be scheduled.

    Args:
        test_plan_id: a test plan ID.
    """
    # Thin wrapper: all scheduling logic lives in the kicker module.
    test_plan_kicker.ScheduleCronKick(test_plan_id)
def get_nearest_point_distance(points, wire1, wire2):
    """
    Return the second-smallest combined wire distance among *points*.

    The distance of a point is the number of steps both wires take to
    reach it, walking each wire segment by segment.

    >>> get_nearest_point_distance([(0, 0), (158, -12), (146, 46), (155, 4), (155, 11)], [((0, 0), (75, 0)), ((75, 0), (75, -30)), ((75, -30), (158, -30)), ((158, -30), (158, 53)), ((158, 53), (146, 53)), ((146, 53), (146, 4)), ((146, 4), (217, 4)), ((217, 4), (217, 11)), ((217, 11), (145, 11))], [((0, 0), (0, 62)), ((0, 62), (66, 62)), ((66, 62), (66, 117)), ((66, 117), (100, 117)), ((100, 117), (100, 46)), ((100, 46), (155, 46)), ((155, 46), (155, -12)), ((155, -12), (238, -12))])
    610
    >>> get_nearest_point_distance([(0, 0), (107, 47), (124, 11), (157, 18), (107, 71), (107, 51)], [((0, 0), (98, 0)), ((98, 0), (98, 47)), ((98, 47), (124, 47)), ((124, 47), (124, -16)), ((124, -16), (157, -16)), ((157, -16), (157, 71)), ((157, 71), (95, 71)), ((95, 71), (95, 51)), ((95, 51), (128, 51)), ((128, 51), (128, 104)), ((128, 104), (179, 104))], [((0, 0), (0, 98)), ((0, 98), (91, 98)), ((91, 98), (91, 78)), ((91, 78), (107, 78)), ((107, 78), (107, 11)), ((107, 11), (147, 11)), ((147, 11), (147, 18)), ((147, 18), (162, 18)), ((162, 18), (162, 24)), ((162, 24), (169, 24))])
    410
    """
    def get_distance(point):
        # Combined Manhattan walking distance along both wires to `point`.
        d = 0
        for wire in (wire1, wire2):
            for part in wire:
                intersection = get_intersection_point(part, (point, point))
                if intersection == []:
                    # Segment does not touch the point: add its full length.
                    d += abs(part[0][0] - part[1][0]) + abs(part[0][1] - part[1][1])
                else:
                    # Segment reaches the point: add the partial length and
                    # stop walking this wire.
                    d += abs(part[0][0] - point[0]) + abs(part[0][1] - point[1])
                    break
        return d
    # NOTE(review): sorts the caller's list in place.  points[1] is returned
    # because points[0] appears to be the origin itself (distance 0) —
    # confirm with callers before relying on this.
    points.sort(key=get_distance)
    return get_distance(points[1])
def sample_hyperparameters():
    """
    Generate an endless stream of random hyperparameter configurations.

    Categorical values are drawn with np.random.choice and continuous ones
    uniformly from fixed ranges.
    """
    while True:
        # NOTE: the draw order is kept stable so seeded runs reproduce.
        config = {
            "rnn_size": np.random.choice([64, 128, 256]).item(),
            "learning_schedule": np.random.choice(["RMSprop", "adagrad", "adam"]).item(),
            "grad_clip": np.random.uniform(7, 12),
            "learning_rate": np.random.uniform(0.001, 0.01),
            "decay_rate": np.random.uniform(0.7, 1),
            "lambda_param": np.random.uniform(0.0001, 0.001),
            "dropout": np.random.uniform(0.3, 1),
            "embedding_size": np.random.choice([64, 128, 256]).item(),
            "neighborhood_size": np.random.choice([8, 16, 32, 64]).item(),
            "grid_size": np.random.choice([2, 4, 8, 16]).item(),
        }
        yield config
def strip_tokens(tokenized: str) -> str:
    """Replaces all tokens with the token's arguments."""
    parts = []
    last_end = 0
    # finditer walks non-overlapping matches left to right, exactly like the
    # original repeated-search loop.
    for match in RX_TOKEN.finditer(tokenized):
        parts.append(tokenized[last_end:match.start()])
        parts.append(match.group('argument'))
        last_end = match.end()
    parts.append(tokenized[last_end:])
    return ''.join(parts)
def test_check_orders_new_order_above_our(worker, other_orders):
    """ Someone put order above ours, own order must be moved

    Places counter-orders from a second account just above/below our top
    bid/ask, then checks that check_orders() puts our orders back on top.
    """
    worker2 = other_orders
    worker.place_orders()
    own_buy_orders = worker.get_own_buy_orders()
    own_sell_orders = worker.get_own_sell_orders()
    log.debug('KOTH orders: {}'.format(worker.own_orders))
    own_top_bid_price_before = own_buy_orders[0]['price']
    own_top_ask_price_before = own_sell_orders[0]['price']
    # Place top orders from another account
    buy_price = own_top_bid_price_before * 1.1
    sell_price = own_top_ask_price_before / 1.1
    order = worker2.place_market_buy_order(10, buy_price)
    buy_price_actual = order['price']
    order = worker2.place_market_sell_order(10, sell_price)
    # NOTE(review): sell order price is inverted here — presumably the
    # exchange quotes sell orders in the inverse rate; confirm against
    # place_market_sell_order.
    sell_price_actual = order['price'] ** -1
    worker.check_orders()
    own_buy_orders = worker.get_own_buy_orders()
    own_sell_orders = worker.get_own_sell_orders()
    own_top_bid_price_after = own_buy_orders[0]['price']
    own_top_ask_price_after = own_sell_orders[0]['price']
    assert len(worker.own_orders) == 2
    # Our orders are on top
    assert own_top_bid_price_after > buy_price_actual
    assert own_top_ask_price_after < sell_price_actual
def circular(P=365, K=0.1, T=0, gamma=0, t=None):
    """
    circular() simulates the radial velocity signal of a planet in a
    circular orbit around a star.

    Parameters:
        P = period in days
        K = semi-amplitude of the signal
        T = velocity at zero phase
        gamma = average velocity of the star
        t = observation times (required)
    Returns:
        t = time
        RV = rv signal generated
    Raises:
        ValueError: if no observation times are given.
    """
    if t is None:
        # Previously this only printed a warning and then crashed while
        # iterating over None; fail loudly instead.
        raise ValueError('Time needed')
    RV = [K*_np.sin(2*_np.pi*x/P - T) + gamma for x in t]
    return t, RV
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.