content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def check_records(msg: dict) -> int:
    """Return the number of records carried by an SQS message.

    Raises:
        ValueError: if the message does not contain exactly one record.
    """
    record_count = 0 if msg is None else len(msg[0])
    if record_count != 1:
        raise ValueError("Not expected single record")
    return record_count
def get_processes_from_tags(test):
    """Extract process slugs from tags."""
    prefix = '{}.'.format(TAG_PROCESS)
    return {
        tag[len(prefix):]
        for tag in getattr(test, 'tags', set())
        if tag.startswith(prefix)
    }
from src.common_paths import get_submissions_version_path
import logging
import os
import json
def get_best_score(version):
    """
    Given an existing version, retrieves the alias and score of the best score obtained
    :param version: version to be evaluated (str|unicode)
    :return: alias, score (str, float)
    """
    logger = logging.getLogger(__name__)
    logger.info("Request best submission evaluation for version {0}".format(version))
    filepath = os.path.join(get_submissions_version_path(version), "upload_history.jl")
    if not os.path.exists(filepath):
        # No upload history yet: no alias, worst possible score.
        # Fix: the original returned np.Inf but numpy was never imported in
        # this module, so this path raised a NameError at runtime.
        return None, float("inf")
    with open(filepath) as f:
        upload_history = [json.loads(x) for x in f.read().strip().split("\n")]
    # Lower scores are better: pick the entry with the minimum score.
    best_submission = min(upload_history, key=lambda x: x["score"])
    alias, score = best_submission["alias"], best_submission["score"]
    logger.info("Best submission found: {0}, {1}, {2}".format(version, alias, score))
    return alias, score
import random
def eight_ball():
    """Shake the magic eight ball.

    :return: A random answer.
    :rtype: str
    """
    responses = (
        'It is certain', 'It is decidedly so', 'Not a fucking chance!', 'without a doubt', 'Yes definitely',
        'I suppose so', 'Maybe', ' No fucking way!', 'Sure :D', 'hahahaha no you plank! :P ', 'Ohhh yes!',
        'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes', 'Signs point to yes',
        'Try again', 'Ask again later', 'Better not tell you now as you may cry like a little girl',
        'Cannot predict now', 'Fucking dead right!', 'Ohhhh most definitely',
        'Concentrate and ask again', 'Don\'t count on it', 'My reply is no', 'My sources say no',
        'Outlook not so good', 'Very doubtful', 'Possibly, but I think you need to chillout!'
    )
    return random.choice(responses)
def head(line, n: int):
    """returns the first `n` lines"""
    global counter
    counter = counter + 1
    if counter > n:
        # signal the pipeline to stop (StopIteration would also work)
        raise cbox.Stop()
    return line
def onehot(x, numclasses=None):
    """Convert integer class labels (starting at 0!) to one-hot encoding.

    If ``numclasses`` is not provided, it defaults to the largest class
    index occurring in the labels array plus 1.  The output has the
    input's shape plus one trailing axis of length ``numclasses``
    containing the one-hot encoded labels.
    """
    if x.shape == ():
        # promote a scalar label to a 1-element array
        x = x[np.newaxis]
    if numclasses is None:
        numclasses = x.max() + 1
    result = np.zeros(list(x.shape) + [numclasses])
    for label in range(numclasses):
        # set a 1 in channel `label` wherever the input equals that label
        result[..., label][x == label] = 1
    return result
import numpy
def draw_graph(image, graph):
    """
    Render a graph onto an image: edges first, then nodes on top.

    Args:
       | *image* : the image where the graph needs to be drawn
       | *graph* : the *.txt file containing the graph information
    Returns:
        the image with the graph drawn onto it
    """
    with_edges = draw_edges(image, graph)
    # scale node size with the larger image dimension, but never below 1
    node_size = int(numpy.ceil(max(image.shape) / float(NODESIZESCALING)))
    return draw_nodes(with_edges, graph, max(node_size, 1))
def valid_octet(oct):
    """ Validates a single IP address octet.

    Args:
        oct (int): The octet to validate

    Returns:
        bool: True if the octet is in the range 0..255, otherwise False
    """
    return 0 <= oct <= 255
def load_clean_dictionaries():
    """Load the combilex data into two lookup dictionaries.

    :return: (grapheme_dict, phonetic_dict) mapping word -> list of phone
        sequences and phone sequence -> list of words, respectively.
    """
    grapheme_dict = {}
    phonetic_dict = {}
    with open(COMBILEX_PATH, encoding='utf-8') as combilex_file:
        for line in combilex_file:
            # skip commented lines
            if line.startswith(';;;'):
                continue
            word, phone = line.strip().split('\t')
            if not should_skip_seq(word):
                grapheme_dict.setdefault(word, []).append(phone)
            if not should_skip_seq(phone):
                phonetic_dict.setdefault(phone, []).append(word)
    return grapheme_dict, phonetic_dict
import traceback
import itertools
import operator
def build_missing_wheels(
    packages_and_envts,
    build_remotely=False,
    with_deps=False,
    dest_dir=THIRDPARTY_DIR,
):
    """
    Build all wheels in a list of tuple (Package, Environment) and save in
    `dest_dir`. Return a list of tuple (Package, Environment), and a list of
    built wheel filenames.
    """
    not_built = []
    built_filenames = []
    # Group (Package, Environment) pairs by package so each package is built
    # once for all of its target environments.
    packages_and_envts = itertools.groupby(
        sorted(packages_and_envts), key=operator.itemgetter(0))
    for package, pkg_envts in packages_and_envts:
        # Materialize the group immediately: groupby returns one-shot
        # sub-iterators that are invalidated/consumed on first traversal.
        envts = [envt for _pkg, envt in pkg_envts]
        python_versions = sorted(set(e.python_version for e in envts))
        operating_systems = sorted(set(e.operating_system for e in envts))
        built = None
        try:
            built = build_wheels(
                requirements_specifier=package.specifier,
                with_deps=with_deps,
                build_remotely=build_remotely,
                python_versions=python_versions,
                operating_systems=operating_systems,
                verbose=False,
                dest_dir=dest_dir,
            )
            print('.')
        except Exception:
            print('#############################################################')
            print('############# WHEEL BUILD FAILED ######################')
            traceback.print_exc()
            print()
            print('#############################################################')
        if not built:
            # Fix: iterate the materialized `envts` list. The original looped
            # over `pkg_envts`, the groupby sub-iterator already exhausted by
            # the comprehension above (and whose items are (package, envt)
            # tuples anyway), so build failures were never recorded here.
            for envt in envts:
                not_built.append((package, envt))
        else:
            for bfn in built:
                print(f' --> Built wheel: {bfn}')
                built_filenames.append(bfn)
    return not_built, built_filenames
import os
def load_reads(path, format='bed', paired=False, shift=100, name=None):
    """Load sequencing reads from a file.

    Parameters
    ----------
    path : str
        Path to load the reads.
    format : str, optional
        File format, default='bed'.
    paired : bool, optional
        Whether the reads are paired-end or not, default=False.
    shift : int, optional
        Shift size for single-end reads, default=100.
    name : str, optional
        Sample name. If not specified, the basename of the file will be used.

    Returns
    -------
    reads : `Reads`
        Loaded sequencing reads.
    """
    logger.info(f"Loading reads from {path} [{format}]")
    # Reject format/mode combinations that make no sense.
    if format == 'bed' and paired:
        raise FormatModeConflictError('bed', 'paired-end')
    if format == 'bedpe' and not paired:
        raise FormatModeConflictError('bedpe', 'single-end')
    sample_name = name if name is not None else os.path.splitext(os.path.basename(path))[0]
    reads = Reads(name=sample_name)
    parser = get_read_parser(format)(path)
    for chrom, pos in parser.parse(paired=paired, shift=shift):
        reads.add(chrom, pos)
    reads.sort()
    logger.info(f"Loaded {reads.size:,} reads")
    return reads
import modulefinder
def sketch_blocks(modulepaths, pkg_dirs):
    """Creates a graph of all the modules in `modulepaths` that are related to each other by their
    imports. The directories used to resolve an import is `pkg_dirs`

    Args:
        modulepaths (List[str]): list of modules filepaths to analyze.
        pkg_dirs (List[str]): list of directories used to resolve the imports

    Returns:
        networkx.Graph: graph of the modules as nodes with their imports as edges.
    """
    # Seed graph/attribute state from the project-level init() helper.
    attributes = init(pkg_dirs)
    graph = attributes['graph']
    Python = 'python'
    # NOTE(review): add_node(node, attr_dict) with a positional attribute dict
    # is the networkx 1.x calling convention; 2.x requires keyword attributes —
    # confirm against the pinned networkx version.
    graph.add_node(Python, attributes[Python])
    for filepath in modulepaths:
        # bug - if the finder is not reinitialized, the previous modules.values()
        # are kept, thus been useless
        finder = modulefinder.ModuleFinder(path=pkg_dirs)
        print('processing:\t', filepath)
        # Calculate complexity and maintainability indexes
        with open(filepath) as source:
            size, color = scientist.get_size_color(source.read(), initsize=80)
        # Insert current module info
        module_info = {'shape':'square', 'name':filepath, 'size':size, 'color':color}
        graph.add_node(filepath, module_info)
        # Find module imports
        finder.run_script(filepath)
        # One edge per resolved (modules) or unresolved (badmodules) import.
        for edge in scientist.compute_edges(filepath, Python, finder.modules.values(),
                                            finder.badmodules.keys()):
            graph.add_edge(*edge)
    return graph
def str_to_bool(string):
    """
    Parse a string into a boolean.

    Case-insensitive; only "true" and "yes" map to True. Every other value
    (including "1" or "on") maps to False, matching the original behavior.
    """
    # Fix: replace the redundant `True if ... else False` ternary with a
    # direct membership test.
    return string.lower() in ("true", "yes")
def rerotateExtremaPoints(minSepPoints_x, minSepPoints_y, maxSepPoints_x, maxSepPoints_y,
        lminSepPoints_x, lminSepPoints_y, lmaxSepPoints_x, lmaxSepPoints_y,
        Phi, Op, yrealAllRealInds):
    """ Rotate the extrema points from (the projected ellipse centered at the origin
    and x-axis aligned with semi-major axis) to the original projected ellipse

    Args:
        minSepPoints_x, minSepPoints_y (numpy array):
            first quadrant coordinates of the minimum separations (length n)
        maxSepPoints_x, maxSepPoints_y (numpy array):
            first quadrant coordinates of the maximum separations (length n)
        lminSepPoints_x, lminSepPoints_y (numpy array):
            first quadrant coordinates of the local minimum separations
            (same length as yrealAllRealInds)
        lmaxSepPoints_x, lmaxSepPoints_y (numpy array):
            first quadrant coordinates of the local maximum separations
            (same length as yrealAllRealInds)
        Phi (numpy array):
            angle from X-axis to semi-minor axis of projected ellipse
        Op (numpy array):
            the geometric center of the projected ellipse
        yrealAllRealInds (numpy array):
            indices of planets which have min, max, local min, local max

    Returns:
        tuple: the eight derotated coordinate arrays, in the same order as the
        inputs (min x/y, max x/y, local min x/y, local max x/y).
    """
    # Fix: removed eight dead np.zeros(...) preallocations that were
    # immediately overwritten below — they also referenced `np`, which is
    # never imported here, making them a latent NameError.
    # Global extrema exist for every planet: rotate by Phi, translate by Op.
    minSepPoints_x_dr, minSepPoints_y_dr = rerotateEllipsePoints(minSepPoints_x, minSepPoints_y, Phi, Op[0], Op[1])
    maxSepPoints_x_dr, maxSepPoints_y_dr = rerotateEllipsePoints(maxSepPoints_x, maxSepPoints_y, Phi, Op[0], Op[1])
    # Local extrema exist only for the planets indexed by yrealAllRealInds.
    lminSepPoints_x_dr, lminSepPoints_y_dr = rerotateEllipsePoints(lminSepPoints_x, lminSepPoints_y, Phi[yrealAllRealInds], Op[0][yrealAllRealInds], Op[1][yrealAllRealInds])
    lmaxSepPoints_x_dr, lmaxSepPoints_y_dr = rerotateEllipsePoints(lmaxSepPoints_x, lmaxSepPoints_y, Phi[yrealAllRealInds], Op[0][yrealAllRealInds], Op[1][yrealAllRealInds])
    return minSepPoints_x_dr, minSepPoints_y_dr, maxSepPoints_x_dr, maxSepPoints_y_dr,\
        lminSepPoints_x_dr, lminSepPoints_y_dr, lmaxSepPoints_x_dr, lmaxSepPoints_y_dr
import glob
def does_name_exist(name):
    """Check whether a photo file with this base name already exists."""
    matches = glob.glob('./photos/' + name + '.*')
    return bool(matches)
def cont_hires(npoints, elecs, start_timestamp=0):
    """Retrieve hires data (sampled at 2 kHz).

    Thin wrapper around ``_cont_base``; parameters and outputs match the
    ``cont_raw`` function.

    Args:
        npoints: number of datapoints to retrieve
        elecs: list of electrodes to sample
        start_timestamp: NIP timestamp to start data at, or most recent if 0
    """
    return _cont_base(
        _c.xl_cont_hires, npoints, elecs, start_timestamp)
import torch
def batch_grid_subsampling_kpconv_gpu(points, batches_len, features=None, labels=None, sampleDl=0.1, max_p=0):
    """
    Same as batch_grid_subsampling, but implemented in GPU. This is a hack by using Minkowski
    engine's sparse quantization functions
    Note: This function is not deterministic and may return subsampled points
    in a different ordering, which will cause the subsequent steps to differ slightly.

    :param points: point coordinates for all batches, concatenated along dim 0
        (assumed (N, 3) — TODO confirm against callers)
    :param batches_len: per-batch point counts; cumulative sums partition `points`
    :param features: unsupported; must be None
    :param labels: unsupported; must be None
    :param sampleDl: grid cell size used for voxel quantization
    :param max_p: unsupported; must be 0 (no per-batch point cap)
    :return: (s_points, s_len) — subsampled points and per-batch lengths
    """
    if labels is not None or features is not None:
        raise NotImplementedError('subsampling not implemented for features and labels')
    if max_p != 0:
        raise NotImplementedError('subsampling only implemented by considering all points')
    B = len(batches_len)
    # Prefix sums padded with a leading 0 give [start, end) index ranges per batch.
    batch_start_end = torch.nn.functional.pad(torch.cumsum(batches_len, 0), (1, 0))
    device = points[0].device
    # Divide by sampleDl so integer quantization corresponds to the grid size.
    coord_batched = ME.utils.batched_coordinates(
        [points[batch_start_end[b]:batch_start_end[b + 1]] / sampleDl for b in range(B)], device=device)
    # UNWEIGHTED_AVERAGE averages the features (here: the raw coordinates) of
    # all points that map to the same voxel — presumably yielding voxel
    # centroids; verify against the MinkowskiEngine docs for the pinned version.
    sparse_tensor = ME.SparseTensor(
        features=points,
        coordinates=coord_batched,
        quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE
    )
    s_points = sparse_tensor.features
    # Per-batch subsampled sizes, recovered from the decomposed sparse tensor.
    s_len = torch.tensor([f.shape[0] for f in sparse_tensor.decomposed_features], device=device)
    return s_points, s_len
import configparser
import re
def hot_word_detection(lang='en'):
    """
    Hot word (wake word / background listen) detection

    What is Hot word detection?
    ANSWER: Hot word listens for specific key words chosen to activate the "OK Google" voice interface. ...
    Voice interfaces use speech recognition technologies to allow user input through spoken commands.

    You can set your custom HOT WORD just by calling setup(). Your bot_name is your Hot word

    :param lang: str
        default 'en'
    :return: Bool, str
        status, command
    """
    try:
        config = configparser.ConfigParser()
        config.read('config/config.ini')
        bot_name = config['default']['bot_name']
    except Exception as e:
        # Fix: chain the original error so the root cause (e.g. KeyError for a
        # missing section/option) is preserved instead of being swallowed.
        raise DefaultFileNotFound from e
    try:
        r = sr.Recognizer()
        with sr.Microphone() as source:
            print("Background listening")
            r.pause_threshold = 1
            r.adjust_for_ambient_noise(source, duration=1)
            audio = r.listen(source)
            command = r.recognize_google(audio, language=lang).lower()
            # Wake when the configured bot name appears anywhere in the speech.
            if re.search(bot_name, command):
                print("Waking up...")
                return True, command
            else:
                return False, False
    except Exception:
        # Best-effort: any recognition/microphone failure means "not woken".
        return False, None
def parse_ph5_length(length):
    """
    Method for parsing length argument.

    :param length: length
    :type: str, numeric, or None
    :returns: length value as a float
    :type: float or None
    """
    return str_to_pos_float(length, "Invalid length value. %s" % (length))
import torch
import os
from typing import Iterator
def _generate(payload: ModelSpec, is_udf: bool = True):
    """Construct a UDF to run pytorch model.

    Parameters
    ----------
    payload : ModelSpec
        the model specifications object
    is_udf : bool, default True
        When True, wrap the inference function as a Spark pandas UDF whose
        inputs/outputs are pickled binary; when False, return the plain
        generator function operating on unpickled values.

    Returns
    -------
    A Spark Pandas UDF.
    """
    model = payload.model_type
    # Prefer GPU when available, unless overridden via options["device"].
    default_device = "gpu" if torch.cuda.is_available() else "cpu"
    options = payload.options
    use_gpu = options.get("device", default_device) == "gpu"
    num_workers = int(
        options.get("num_workers", min(os.cpu_count(), DEFAULT_NUM_WORKERS))
    )
    batch_size = int(options.get("batch_size", DEFAULT_BATCH_SIZE))
    return_type = Iterator[pd.Series]
    def torch_inference_udf(
        iter: Iterator[pd.DataFrame],
    ) -> return_type:
        # Executed on the worker: load the model once per task, then stream.
        device = torch.device("cuda" if use_gpu else "cpu")
        model.load_model(payload, device=device)
        if isinstance(model, AnonymousModelType):
            # We will remove them after AnonymousModelType deprecation
            model.model.eval()
            model.model.to(device)
        try:
            with torch.no_grad():
                for series in iter:
                    dataset = PandasDataset(
                        series,
                        transform=model.transform(),
                        unpickle=is_udf,
                        use_pil=True,
                    )
                    results = []
                    for batch in DataLoader(
                        dataset,
                        batch_size=batch_size,
                        num_workers=num_workers,
                    ):
                        batch = move_tensor_to_device(batch, device)
                        predictions = model(batch)
                        # In UDF mode, pickle each prediction so Spark can
                        # carry it as a binary column.
                        bin_predictions = [
                            _pickler.dumps(p) if is_udf else p
                            for p in predictions
                        ]
                        results.extend(bin_predictions)
                    yield pd.Series(results)
        finally:
            # Release GPU resources even if inference raises mid-stream.
            if use_gpu:
                model.release()
    if is_udf:
        return pandas_udf(torch_inference_udf, returnType=BinaryType())
    else:
        return torch_inference_udf
from typing import Tuple
def check_proper_torsion(
    torsion: Tuple[int, int, int, int], molecule: "Ligand"
) -> bool:
    """
    Check that the given torsion is valid for the molecule graph.

    A proper torsion is valid when every consecutive pair of atoms in the
    4-tuple is connected by a bond.
    """
    for first, second in zip(torsion[:-1], torsion[1:]):
        try:
            molecule.get_bond_between(atom1_index=first, atom2_index=second)
        except TopologyMismatch:
            return False
    return True
def find_all_occurrences_and_indexes(seq):
    """
    Find all recurring patterns in a note sequence and where they occur.

    :param seq: array-like of pretty_midi Note
    :return: (list_patterns, list_indexes) — each pattern paired with the
        list of indexes at which it occurs.
    """
    list_patterns = []
    list_indexes = []
    res = []
    seq_x = seq
    # Repeatedly extract the next recurring pattern until none remain.
    # Fix: use `is not None` instead of `!= None` (PEP 8 E711) for the
    # sentinel checks below.
    while res is not None:
        seq_x, res, indexes = find_occurrences_and_indexes(seq_x)
        if res is not None:
            list_patterns.append(res)
            list_indexes.append(indexes)
    for i in range(len(seq_x)):
        # special case for non recurring patterns: notes that appear only once
        if seq_x[i] is not None:
            list_patterns.append([seq_x[i]])
            list_indexes.append([i])
    return list_patterns, list_indexes
from functools import reduce
from .model_store import get_model_file
import os
def get_drn(blocks,
            simplified=False,
            model_name=None,
            pretrained=False,
            root=os.path.join('~', '.chainer', 'models'),
            **kwargs):
    """
    Create DRN-C or DRN-D model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    simplified : bool, default False
        Whether to use simplified scheme (D architecture).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.

    Returns:
    -------
    DRN
        The constructed network, with pretrained weights loaded when requested.

    Raises:
    ------
    ValueError
        If `blocks` is unsupported, or `pretrained` is set without `model_name`.
    """
    # Map the total block count to per-stage layer counts. D-architecture
    # ("simplified") variants are 22/38/54/105 blocks; C variants are 26/42/58.
    # Paired C/D variants share the same layer layout.
    if blocks == 22:
        assert simplified
        layers = [1, 1, 2, 2, 2, 2, 1, 1]
    elif blocks == 26:
        layers = [1, 1, 2, 2, 2, 2, 1, 1]
    elif blocks == 38:
        assert simplified
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 42:
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 54:
        assert simplified
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 58:
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 105:
        assert simplified
        layers = [1, 1, 3, 4, 23, 3, 1, 1]
    else:
        raise ValueError("Unsupported DRN with number of blocks: {}".format(blocks))
    # Small models use basic residual units everywhere; larger ones use
    # bottleneck units (and wider channels) in the middle stages.
    if blocks < 50:
        channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512]
        bottlenecks_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
    else:
        channels_per_layers = [16, 32, 256, 512, 1024, 2048, 512, 512]
        bottlenecks_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    if simplified:
        simplifieds_per_layers = [1, 1, 0, 0, 0, 0, 1, 1]
        residuals_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    else:
        simplifieds_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
        residuals_per_layers = [1, 1, 1, 1, 1, 1, 0, 0]
    dilations_per_layers = [1, 1, 1, 1, 2, 4, 2, 1]
    downsample = [0, 1, 1, 1, 0, 0, 0, 0]
    def expand(property_per_layers):
        # Expand a per-stage property into nested per-unit lists, starting a
        # new sub-list at each downsampling stage and otherwise merging the
        # stage's units into the current sub-list.
        return reduce(
            lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
            zip(property_per_layers, layers, downsample),
            [[]])
    channels = expand(channels_per_layers)
    dilations = expand(dilations_per_layers)
    bottlenecks = expand(bottlenecks_per_layers)
    residuals = expand(residuals_per_layers)
    simplifieds = expand(simplifieds_per_layers)
    init_block_channels = channels_per_layers[0]
    net = DRN(
        channels=channels,
        init_block_channels=init_block_channels,
        dilations=dilations,
        bottlenecks=bottlenecks,
        simplifieds=simplifieds,
        residuals=residuals,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def get_uas_volume_admin(volume_id):
    """Get volume info for volume ID

    Get volume info for volume_id

    :param volume_id:
    :type volume_id: str
    :rtype: AdminVolume
    """
    if volume_id:
        return UasManager().get_volume(volume_id=volume_id)
    # Missing/empty id: return an error string rather than querying.
    return "Must provide volume_id to get."
def cyclic_learning_rate(global_step,
                         learning_rate=0.01,
                         max_lr=0.1,
                         step_size=50000.,
                         gamma=0.99994,
                         max_steps=100000.,
                         scale_rate=0.9,
                         mode='triangular',
                         policy=None,
                         name=None):
    """Cyclic learning rate (CLR).
    This method is revised from [TensorFlow pull request: Add support for Cyclic Learning Rate](https://github.com/tensorflow/tensorflow/pull/20758)
    From the paper:
    Smith, Leslie N. "Cyclical learning
    rates for training neural networks." 2017.
    [https://arxiv.org/pdf/1506.01186.pdf]
    This method lets the learning rate cyclically
    vary between reasonable boundary values
    achieving improved classification accuracy and
    often in fewer iterations.
    This code varies the learning rate linearly between the
    minimum (learning_rate) and the maximum (max_lr).
    It returns the cyclic learning rate. It is computed as:
    ```python
    cycle = floor( 1 + global_step / ( 2 * step_size ) )
    x = abs( global_step / step_size – 2 * cycle + 1 )
    clr = learning_rate + ( max_lr – learning_rate ) * max( 0 , 1 - x )
    ```
    Modes:
    'triangular':
    Default, linearly increasing then linearly decreasing the
    learning rate at each cycle.
    'triangular2':
    The same as the triangular policy except the learning
    rate difference is cut in half at the end of each cycle.
    This means the learning rate difference drops after each cycle.
    'exp_range':
    The learning rate varies between the minimum and maximum
    boundaries and each boundary value declines by an exponential
    factor of: gamma^global_step.
    Args:
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
    Global step to use for the cyclic computation. Must not be negative.
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
    Python number. The initial learning rate which is the lower bound
    of the cycle (default = 0.1).
    max_lr: A scalar. The maximum learning rate boundary.
    step_size: A scalar. The number of iterations in half a cycle.
    The paper suggests step_size = 2-8 x training iterations in epoch.
    gamma: constant in 'exp_range' mode:
    gamma**(global_step)
    max_steps: A scalar. The number of total iterations.
    scale_rate: A scale factor for decreasing the learning rate after the completion of one cycle.
    Must be between 0 and 1.
    mode: one of {triangular, triangular2, exp_range}.
    Default 'triangular'.
    Values correspond to policies detailed above.
    policy: one of {None, one-cycle}.
    Default 'None'.
    name: String. Optional name of the operation. Defaults to
    'CyclicLearningRate'.
    Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The cyclic
    learning rate.
    Raises:
    ValueError: if `global_step` is not supplied.
    """
    if global_step is None:
        raise ValueError("global_step is required for cyclic_learning_rate.")
    with ops.name_scope(name, "CyclicLearningRate",
                        [learning_rate, global_step]) as name:
        learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
        dtype = learning_rate.dtype
        # Cast the step/size scalars to the learning rate's dtype so the
        # graph-mode math ops below operate on a single dtype.
        global_step = math_ops.cast(global_step, dtype)
        step_size = math_ops.cast(step_size, dtype)
        max_steps = math_ops.cast(max_steps, dtype)
        def cyclic_lr():
            """Helper to recompute learning rate; most helpful in eager-mode."""
            # computing: cycle = floor( 1 + global_step / ( 2 * step_size ) )
            double_step = math_ops.multiply(2., step_size)
            global_div_double_step = math_ops.divide(global_step, double_step)
            cycle = math_ops.floor(math_ops.add(1., global_div_double_step))
            # computing: x = abs( global_step / step_size – 2 * cycle + 1 )
            double_cycle = math_ops.multiply(2., cycle)
            global_div_step = math_ops.divide(global_step, step_size)
            tmp = math_ops.subtract(global_div_step, double_cycle)
            x = math_ops.abs(math_ops.add(1., tmp))
            # computing: clr = learning_rate + ( max_lr – learning_rate ) * max( 0, 1 - x )
            a1 = math_ops.maximum(0., math_ops.subtract(1., x))
            a2 = math_ops.subtract(max_lr, learning_rate)
            clr = math_ops.multiply(a1, a2)
            if mode == 'triangular2':
                # halve the amplitude after each completed cycle
                clr = math_ops.divide(clr, math_ops.cast(math_ops.pow(2, math_ops.cast(
                    cycle-1, tf.int32)), tf.float32))
            if mode == 'exp_range':
                # exponentially decay the amplitude with the global step
                clr = math_ops.multiply(math_ops.pow(gamma, global_step), clr)
            return math_ops.add(clr, learning_rate, name=name)
        def after_cycle():
            # Past the single cycle (one_cycle policy): decay linearly from
            # scale_rate*learning_rate toward 1% of it as global_step
            # approaches max_steps (temp goes 1 -> 0.01 as cur_percent 0 -> 1).
            gap = math_ops.subtract(global_step, math_ops.multiply(2., step_size))
            cur_percent = math_ops.divide(gap, math_ops.subtract(max_steps, math_ops.multiply(2., step_size)))
            temp = math_ops.add(1., math_ops.multiply(cur_percent, -0.99))
            next_lr = math_ops.multiply(learning_rate, math_ops.multiply(temp, scale_rate))
            return next_lr
        if policy == 'one_cycle':
            # tf.cond takes the two branch callables; note the docstring says
            # policy is {None, one-cycle} but the code checks 'one_cycle'.
            cyclic_lr = tf.cond(tf.less(global_step, 2*step_size), cyclic_lr , after_cycle)
        else:
            cyclic_lr = cyclic_lr()
        return cyclic_lr
def getElementsOnFirstLevelExceptTag(parent, element):
    """Return all elements below *parent* except for the ones tagged *element*.

    :param parent: the parent dom object
    :param element: the tag-name of elements **not** to return
    """
    excluded = element.lower()
    return [child for child in getElements(parent)
            if child.parentNode == parent and child.tagName.lower() != excluded]
def get_first_child_element(node, tag_name):
    """Get the first child element node with a given tag name.

    :param node: Parent node.
    :type node: xml.dom.Node
    :returns: the first child element node with the given tag name.
    :rtype: xml.dom.Node
    :raises NodeNotFoundError:
        if no child node with the given tag name was found.
    """
    for candidate in node.childNodes:
        is_element = candidate.nodeType == node.ELEMENT_NODE
        if is_element and candidate.tagName == tag_name:
            return candidate
    raise NodeNotFoundError('no child element node with tag %s was found' %
                            (tag_name))
import re
def parse_iso(filename='iso.log'):
    """ parse the isotropy output file
    Args:
        filename: the isotropy output file name
    Returns:
        lname: list of irreps
        lpt: list of atom coordinate
        lpv: list of distortion vectors, might be multi-dimensional
    """
    #read in the isotropy output
    try:
        with open(filename,'r') as f:
            read_data = f.read()
    except BaseException:
        # best-effort: missing/unreadable file just prints a hint and
        # implicitly returns None
        print('the output of isotropy is required here')
        return
    #parse the isotropy output
    #pt - atom coordinates (kind of weird definition, pt = original reduced coordinate * supercell matrix)
    #pv - distortion vectors
    #lpt, lpv - list of wy, pt, pv
    #lname - name of modes
    #nmode - number of modes
    nmode = 0
    lname = []
    lpt = []
    lpv = []
    # mode header (e.g. "GM1+"), parenthesized coordinate tuples, and
    # parenthesized numeric-only vector tuples, respectively
    pattern_name = re.compile(r"^[A-Z0-9\+\-]+(?=\s)")
    pattern_coor = re.compile(r"(?<=\().*?(?=\))")
    pattern_vec = re.compile(r"(?<=\()[0-9,\.\-]*(?=\))")
    for line in read_data.split('\n'):
        if pattern_name.search(line):
            # new mode header: flush the previous mode's data first
            if nmode>0:
                lpt.append(pt)
                lpv.append(pv)
            pt = []
            pv = []
            nmode += 1
            lname.append(pattern_name.search(line).group())
        if nmode==0:
            # skip preamble lines before the first mode header
            continue
        if re.search(r"Irrep|Enter", line):
            continue
        find = pattern_coor.findall(line)
        find2 = pattern_vec.findall(line)
        if (len(find)!=len(find2)):
            # line starts a new record: first tuple is the atom coordinate,
            # the rest are distortion-vector components
            npv = 0
            for element in find:
                coor = list(map(float, element.split(',')))
                if npv==0:
                    pt.append(coor)
                if npv==1:
                    pv.append([coor])
                if npv>1:
                    pv[-1].append(coor)
                npv += 1
        else:
            # continuation line: keep extending the current record, reusing
            # `npv` carried over from the previous loop iteration.
            # NOTE(review): if the very first data line hits this branch,
            # `npv` is referenced before assignment (NameError) — confirm the
            # isotropy output format guarantees a record line comes first.
            for element in find:
                coor = list(map(float, element.split(',')))
                if npv==1:
                    pv.append([coor])
                if npv>1:
                    pv[-1].append(coor)
                npv += 1
    # flush the final mode (assumes at least one mode header was found)
    lpt.append(pt)
    lpv.append(pv)
    return lname, lpt, lpv
def revoke_database(cursor: Cursor, user: str, db: str) -> Result:
    """
    Remove any permissions for the user to create, manage and delete this database.
    """
    # Escape literal '%' so it is not treated as a parameter placeholder.
    escaped_db = db.replace("%", "%%")
    statement = _format("REVOKE ALL ON {}.* FROM %s@'%%'", escaped_db)
    return Result(_truthy(query(cursor, statement, user)))
def get_ex1():
    """Load array A for example 1 and its TruncatedSVD with top 10 components.

    Uk, Sk, Vk = argmin || A - Uk*diag(Sk)*Vk ||  over orthonormal Uk
    (20000, 10), non-negative Sk (10,), and orthonormal Vk (10, 8000).

    Returns
    -------
    A : numpy.ndarray
        array of size (20000, 8000)
    Uk : numpy.ndarray
        orthonormal array of size (20000, 10); top 10 left singular vectors of `A`
    Sk : numpy.ndarray
        array of size (10, ); top 10 singular values of `A`
    Vk : numpy.ndarray
        transposed orthonormal array of size (10, 8000); top 10 right
        singular vectors of `A`
    """
    try:
        Uk, Sk, Vk = (load_np_file('ex1_{}.npy'.format(part))
                      for part in ('Uk', 'Sk', 'Vk'))
        return _make_a_ex1(), Uk, Sk, Vk
    except FileNotFoundError:
        raise FileNotFoundError("A, Uk, Sk, Vk cannot be loaded. Try make_ex1()")
from typing import Any
def render_variable(context: 'Context', raw: Any):
    """
    Render the raw input. Does recursion with dict and list inputs, otherwise renders
    string.

    :param raw: The value to be rendered.
    :return: The rendered value as literal type.
    """
    if raw is None:
        return None
    elif isinstance(raw, str):
        # Fix: the original called render_string here without returning, then
        # fell through to a trailing `return render_string(...)`, rendering
        # the same string twice (wasted work, and doubled side effects).
        return render_string(context, raw)
    elif isinstance(raw, dict):
        # render both keys and values recursively
        return {
            render_string(context, k): render_variable(context, v)
            for k, v in raw.items()
        }
    elif isinstance(raw, list):
        return [render_variable(context, v) for v in raw]
    # any other type passes through unchanged
    return raw
from pathlib import Path
import os
def resolve_ssh_config(ssh_config_file: str) -> str:
    """
    Resolve ssh configuration file from provided string

    If provided string is empty (`""`) try to resolve system ssh config files located at
    `~/.ssh/config` or `/etc/ssh/ssh_config`.

    Args:
        ssh_config_file: string representation of ssh config file to try to use

    Returns:
        str: string to path fro ssh config file or an empty string

    Raises:
        N/A

    """
    # Try the user-supplied path first, then the standard system locations.
    candidates = (
        ssh_config_file,
        os.path.expanduser("~/.ssh/config"),
        "/etc/ssh/ssh_config",
    )
    for candidate in candidates:
        if Path(candidate).is_file():
            return str(Path(candidate))
    return ""
import requests
import sys
def check_kafka_rest_ready(host, port, service_timeout):
    """Waits for Kafka REST Proxy to be ready.

    Args:
        host: Hostname where Kafka REST Proxy is hosted.
        port: Kafka REST Proxy port.
        service_timeout: Time in secs to wait for the service to be available.

    Returns:
        False, if the timeout expires and Kafka REST Proxy is unreachable, True otherwise.
    """
    # First, make sure the endpoint accepts connections at all.
    if not wait_for_service(host, port, service_timeout):
        print("%s cannot be reached on port %s." % (str(host), str(port)), file=sys.stderr)
        return False
    # The port is open; confirm the service answers a basic topic-list request.
    # NOTE: this will only test ZK <> REST Proxy interaction.
    url = "http://%s:%s/topics" % (host, port)
    r = requests.get(url)
    if r.status_code // 100 == 2:
        return True
    print("Unexpected response with code: %s and content: %s" % (str(r.status_code), str(r.text)), file=sys.stderr)
    return False
def is_p2wpkh_output(cscript: CScript) -> bool:
    """Check whether a script is a P2WPKH output script.

    A P2WPKH output is exactly 22 bytes: OP_0 followed by a 20-byte
    pubkey hash (plus the 1-byte push opcode).

    :param cscript: Script to be analyzed.
    :type cscript: CScript
    :return: True if the passed in bitcoin CScript is a p2wpkh output script.
    :rtype: bool
    """
    return len(cscript) == 22 and cscript[0] == script.OP_0
def check_update_needed(db_table_object, repository_name, pushed_at):
    """
    Return True when the GitHub repository should be (re-)cloned: either it
    has never been downloaded, or its `pushed_at` timestamp is newer than the
    recorded `downloaded_at` epoch in the database table.
    """
    logger.info(f"This is the repo name from check_update <<{repository_name}>> and db_table <<{db_table_object}>>")
    stored = get_single_repository(db_table_object, repository_name)
    logger.info(stored)
    if not stored:
        logger.info("result not found")
        return True
    logger.info("result found")
    logger.info(f"This is the result {stored}")
    # Current pushed_at timestamp of the repository, as a unix epoch.
    pushed_epoch = date_parse(pushed_at).timestamp()
    logger.info(f"Comparing {int(pushed_epoch)} and {stored['downloaded_at']} for {repository_name}")
    return int(pushed_epoch) > int(stored["downloaded_at"])
def return_one(result):
    """Build the source text of a Python ``return`` statement for *result*."""
    prefix = " return "
    return prefix + result
def inv(a):
    """Return the inverse rotation: the negation of *a*."""
    negated = -a
    return negated
from App import Proxys
def wrapCopy(object):
    """Return a wrapped copy of *object* by serializing it and re-evaluating
    the serialized form in the Proxys namespace."""
    # NOTE(review): eval() on serialized data — safe only as long as
    # serialize() output is fully trusted.
    payload = serialize(object)
    return eval(payload, Proxys.__dict__)
def return_edges(paths, config, bidirectional=False):
    """
    Build graph edges from OSM paths.

    :param paths: dictionary {osm_way_id: {osmid: x, nodes:[a,b], osmtags: vals}}
    :param config: genet.inputs_handler.osm_reader.Config object
    :param bidirectional: if True, every path is read as two-way
    :return: list of (edge, attribute-dict) tuples
    """
    def collect_edge_data(way_data, edge_list):
        # Attach the useful OSM tags (minus 'oneway') to every edge of the way.
        tags = (set(config.USEFUL_TAGS_PATH) | {'osmid', 'modes'}) - {'oneway'}
        attrs = {tag: way_data[tag] for tag in tags if tag in way_data}
        return [(edge, attrs) for edge in edge_list]

    # Values OSM uses in its 'oneway' tag to denote True.
    osm_oneway_values = ['yes', 'true', '1', '-1', 'reverse']
    edges = []
    for way_data in paths.values():
        tagged_one_way = 'oneway' in way_data and way_data['oneway'] in osm_oneway_values
        is_roundabout = 'junction' in way_data and way_data['junction'] == 'roundabout'
        if tagged_one_way and not bidirectional:
            if way_data['oneway'] in ['-1', 'reverse']:
                # A one-way value of -1/reverse means one-way in the reverse
                # direction of the nodes' order (see OSM documentation).
                way_data['nodes'] = list(reversed(way_data['nodes']))
            edge_list = return_edge(way_data, one_way=True)
        elif is_roundabout and not bidirectional:
            # Roundabouts are one-way even though they are not tagged as such.
            edge_list = return_edge(way_data, one_way=True)
        else:
            # Not one-way, or we are building a bidirectional (e.g. walking)
            # network where both directions are traversable.
            edge_list = return_edge(way_data, one_way=False)
        edges.extend(collect_edge_data(way_data, edge_list))
    return edges
def pi_eq_func(ylag,pilag,v,s,slag,alpha,h,b,phi,gamma):
    """ equilibrium value for inflation

    Args:
        ylag (float): lagged output
        pilag (float): lagged inflation
        v (float): demand disturbance
        s (float): supply disturbance
        slag (float): lagged supply disturbance
        alpha (float): sensitivity of demand to real interest rate
        h (float): coefficient on inflation in Taylor rule
        b (float): coefficient on output in Taylor rule
        phi (float): degree of sticiness in inflation expectations
        gamma (float): effect of output on inflation in SRAS
    Returns:
        (float): equilibrium value for inflation
    """
    # Denominator shared by the demand/supply adjustment term.
    denom = alpha*b+alpha*gamma*h+1
    # Combined effect of lagged states and current shocks.
    shocks = -pilag*alpha*h+alpha*gamma*h*phi*ylag+alpha*h*phi*slag-alpha*h*s+v
    # Adjustment subtracted from the demand disturbance.
    adjustment = 1/denom*(alpha*b+1)*shocks
    return 1/(alpha*h)*(v-adjustment)
import time
def set_mode(vehicle, mode):
    """
    Set the vehicle's flight modes. 200ms period state validation.

    Polls the vehicle every 200 ms until the requested mode is confirmed.
    Gives up after 45 polls (~9 s) and flags an abort; re-sends the mode
    request every 15 polls (~3 s) in case the first command was lost.

    Args:
        vehicle(dronekit.Vehicle): the vehicle to be controlled.
        mode(str): flight mode string, supported by the firmware.
    Returns:
        bool: True if success, False if failed.
        Failure will set shared.status['abort'].
    """
    util.log_info("Setting %s." % mode)
    shared.status['manual_mode'] = mode
    vehicle.mode = VehicleMode(mode)
    wait_count = 0
    while True:
        time.sleep(.2)
        wait_count = wait_count + 1
        if vehicle.mode.name == mode :
            return True
        elif wait_count >= 45:
            # ~9 seconds without confirmation: assume the link is lost.
            util.log_warning("Unable to set %s. Assume link lost." % mode)
            shared.status['abort'] = True
            return False
        elif wait_count % 15 == 0 :
            # Re-issue the mode change every ~3 seconds while waiting.
            util.log_warning("Retry setting %s" % mode)
            vehicle.mode = VehicleMode(mode)
import joblib
import tqdm
import sys
def apply_parallel(data_frame, num_procs, func, *args, progress_bar=False, backend='loky'):
    """ This function parallelizes applying a function to the rows of a data frame using the
    joblib library. The function is called on each row individually.

    This function is best used when func does not have much overhead compared to
    the row-specific processing. For example, this function is more appropriate than
    apply_parallel_split when all of the processing in func is dependent only on the
    values in the data rows.

    Args:
        data_frame (pandas.DataFrame): A data frame
        num_procs (int): The number of processors to use
        func (function pointer): The function to apply to each row in the data frame
        args (variable number of arguments): The other arguments to pass to func
        progress_bar (bool): If True, wrap the row iterator in a tqdm progress bar
        backend (str): The joblib parallel backend (e.g. 'loky', 'threading')

    Returns:
        list: the values returned from func for each row (in the order specified by
            joblib.Parallel)

    Imports:
        joblib
        tqdm, if progress_bar is True
    """
    if len(data_frame) == 0:
        return []
    # iterrows() yields (index, row) pairs; only the row (row[1]) is passed to func.
    if progress_bar:
        ret_list = joblib.Parallel(n_jobs=num_procs, backend=backend)(joblib.delayed(func)(row[1], *args)
            for row in tqdm.tqdm(data_frame.iterrows(), total=len(data_frame),
            leave=True, file=sys.stdout))
    else:
        ret_list = joblib.Parallel(n_jobs=num_procs, backend=backend)(joblib.delayed(func)(row[1], *args)
            for row in data_frame.iterrows())
    return ret_list
def _setdoc(super):  # @ReservedAssignment
    """Decorator factory that copies the docstring of the same-named attribute
    on *super* onto the decorated function. Not really needed for Python 3.5,
    due to new behavior of inspect.getdoc, but still doesn't hurt."""
    def deco(func):
        parent_attr = getattr(super, func.__name__, None)
        func.__doc__ = getattr(parent_attr, "__doc__", None)
        return func
    return deco
def test_module(client: Client) -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.

    :type client: ``Client``
    :param client: client to use

    :return: 'ok' if test passed.
    :rtype: ``str``
    """
    # This should validate all the inputs given in the integration configuration panel,
    # either manually or by using an API that uses them.
    if client.client_credentials:
        # Fix: the two string literals previously concatenated without a space,
        # producing "...auth-testcommand" in the user-facing message.
        raise DemistoException("When using a self-deployed configuration, run the !microsoft-365-defender-auth-test "
                               "command in order to test the connection")
    test_connection(client)
    return "ok"
import functools
import warnings
def warns(message, category=None):
    """Decorator factory: emit *message* as a warning whenever the wrapped
    function is called.

    :param message: warning text to emit
    :param category: warning class (None means the default UserWarning)
    :return: a decorator that preserves the wrapped function's metadata
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # stacklevel=2 attributes the warning to the caller, not the wrapper.
            warnings.warn(message, category, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
import urllib
def handle_exceptions(func) -> object:
    """
    Decorator that converts known pytube/network failures into user-facing
    status messages instead of crashes.

    This is needed since pytube current version is
    quite unstable and can raise some unexpected errors.

    Note: when an exception is handled, the wrapper returns None.
    """
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyError as e:
            # pytube cipher-extraction failures surface as KeyError.
            # NOTE(review): `window` appears to be a module-level UI object
            # with an s_append status method — confirm.
            window.s_append('An error with the cipher has ocurred. '
                            'See documentation in GitHub to resolve: '
                            'https://github.com/f4ll-py/ytdownloader.')
        except pytube.exceptions.RegexMatchError:
            window.s_append('Could not find any YouTube videos with that URL.')
        except urllib.error.HTTPError:
            window.s_append('This video is not available. Try again later.')
        except PermissionError:
            window.s_append('Permission denied for the current path.')
    return wrapper
def templated_sequence_component(location_descriptor_tpm3):
    """Create test fixture for templated sequence component"""
    return TemplatedSequenceComponent(
        component_type="templated_sequence",
        region=location_descriptor_tpm3.dict(exclude_none=True),
        strand="+",
    )
def joint_probability(people, one_gene, two_genes, have_trait):
    """
    Compute and return a joint probability.

    The probability returned should be the probability that
        * everyone in set `one_gene` has one copy of the gene, and
        * everyone in set `two_genes` has two copies of the gene, and
        * everyone not in `one_gene` or `two_gene` does not have the gene, and
        * everyone in set `have_trait` has the trait, and
        * everyone not in set` have_trait` does not have the trait.
    """
    joint_p = 1
    # zero_genes = set(people.keys()) - two_genes - one_gene
    for person in people:
        # Calculate probability to have the genes of interest
        this_genes = get_nbr_genes(person, one_gene, two_genes)
        if people[person]['mother'] is None:  # Assumes both parents info, or nothing
            # No parental information: use the unconditional gene distribution.
            gene_prob = PROBS['gene'][this_genes]
        else:  # If there is parent's info
            # Probability that each parent passes a copy of the gene.
            prob_mother = get_parent_prob(people[person]['mother'], one_gene, two_genes)
            prob_father = get_parent_prob(people[person]['father'], one_gene, two_genes)
            if this_genes == 0:
                gene_prob = (1 - prob_mother) * (1 - prob_father)  # None can transmit
            elif this_genes == 1:
                gene_prob = (1 - prob_mother) * prob_father + prob_mother * (1 - prob_father)  # Two possibilities
            else:
                gene_prob = prob_father * prob_mother  # Both need to transmit
        # Calculate probability to have trait, given genes of interest
        trait = get_trait(person, have_trait)  # Trait for this person
        trait_prob = PROBS['trait'][this_genes][trait]
        joint_p *= gene_prob * trait_prob  # Accumulates joint probability of all people
    return joint_p
import os
def _is_child_path(path, parent_path, link_name=None):
    """ Checks that path is a path within the parent_path specified.

    The comparison is done on bytes (surrogate_or_strict encoding) so that
    paths with unusual encodings compare safely.

    :param path: path to test; if link_name is given and path is relative,
        it is treated as a symlink target relative to link_name's directory.
    :param parent_path: directory that must contain path.
    :param link_name: optional path of the symlink whose target is ``path``.
    :return: True when path equals parent_path or lies beneath it.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if link_name and not os.path.isabs(b_path):
        # If link_name is specified, path is the source of the link and we need to resolve the absolute path.
        b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
        b_path = os.path.abspath(os.path.join(b_link_dir, b_path))
    b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')
    # Appending the separator prevents '/foo' from matching '/foobar'.
    return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))
def rf_predict_img_win(win_arr, trained_classifier, prob=True):
    """Run a trained sklearn classifier over one raster window.

    Args:
        win_arr (numpy.ndarray): Window in rasterio order (bands, rows, cols).
            The last band is treated as a segment band and excluded from the
            classifier features.
        trained_classifier: Fitted sklearn model used for predictions.
        prob (bool, optional): If True, return the positive-class probability
            per pixel; otherwise return the hard class prediction.
            Defaults to True.

    Returns:
        numpy.ndarray: (rows, cols) array of predictions, zeroed wherever all
        feature bands are zero (no-data pixels).
    """
    n_bands, n_rows, n_cols = win_arr.shape
    segment_band = n_bands - 1
    # Flatten to (pixels, bands) for the classifier.
    pixels = np.transpose(win_arr.reshape(n_bands, -1))
    feature_bands = [band for band in range(n_bands) if band != segment_band]
    pixels = pixels[:, feature_bands]
    # Pixels whose feature bands are all zero are treated as no-data.
    valid_mask = np.any(pixels, axis=1).astype("uint8")
    if prob:
        # Keep only the positive (e.g. forest) class probability column.
        predictions = trained_classifier.predict_proba(pixels)[:, 1:]
    else:
        predictions = trained_classifier.predict(pixels)
    predictions = predictions.reshape(n_rows, n_cols)
    valid_mask = valid_mask.reshape(n_rows, n_cols)
    # Zero out no-data pixels so they never read as positive predictions.
    return predictions * valid_mask
from typing import Dict
import os
def get_schema(passed_schema: 'Schema' = None, _cached_schema: Dict[str, Schema] = {}) -> 'Schema':
    """If passed a schema (not None) it returns it. If passed none,
    it checks if the default schema has been initialized. If not
    initialized, it initializes it. Then it returns the default schema.

    Note: ``_cached_schema`` deliberately uses a mutable default argument as a
    process-wide memo; callers should never supply it.

    :raises ValueError: if no schema was passed and the default schema could
        not be loaded from disk or the internet.
    """
    if passed_schema:
        return passed_schema

    if not _cached_schema:
        # Try to load the local file first
        try:
            schema_file = os.path.join(os.path.dirname(os.path.realpath(__file__)))
            schema_file = os.path.join(schema_file, "reference_files/schema.csv")
            _cached_schema['schema'] = Schema(schema_file=schema_file)
        except IOError:
            # Try to load from the internet
            try:
                _cached_schema['schema'] = Schema()
            except (HTTPError, URLError):
                raise ValueError("Could not load a BMRB schema from the internet or from the local repository.")

    return _cached_schema['schema']
# NOTE: Python 2 syntax (lldb debugger macro) — print statements are intentional.
def ShowIPC(cmd_args=None):
    """ Routine to print data for the given IPC space
        Usage: showipc <address of ipc space>
    """
    if not cmd_args:
        print "No arguments passed"
        print ShowIPC.__doc__
        return False
    # Interpret the first argument as an ipc_space pointer in the kernel target.
    ipc = kern.GetValueFromAddress(cmd_args[0], 'ipc_space *')
    if not ipc:
        print "unknown arguments:", str(cmd_args)
        return False
    print PrintIPCInformation.header
    # Print a summary only: no per-port entries, no port-set members.
    PrintIPCInformation(ipc, False, False)
import logging
def dqn(env, n_episodes=1001, max_t=1000 * FRAME_SKIP, eps_start=1.0,
        eps_end=0.001, eps_decay=0.995, solution_threshold=13.0,
        checkpointfn='checkpoint.pth', load_checkpoint=False,
        reload_every=None):
    """Function that uses Deep Q Networks to learn environments.

    Parameters
    ----------
    env:
        Unity ML-Agents environment (first brain is used).
    n_episodes: int
        maximum number of training episodes
    max_t: int
        maximum number of timesteps per episode
    eps_start: float
        starting value of epsilon, for epsilon-greedy action selection
    eps_end: float
        minimum value of epsilon
    eps_decay: float
        multiplicative factor (per episode) for decreasing epsilon
    solution_threshold: float
        100-episode average score at which the environment counts as solved
    checkpointfn: str
        filename used to save/load agent checkpoints
    load_checkpoint: bool
        if True, try to resume from an existing checkpoint
    reload_every: int or None
        if set, close the env and restart the process every this many episodes

    Returns
    -------
    The trained Agent.
    """
    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]
    env_info = env.reset(train_mode=True)[brain_name]
    action_size = brain.vector_action_space_size
    state = env_info.vector_observations[0]
    state_size = len(state)
    # NOTE(review): use_visual / initial_state are only defined when there are
    # no vector observations (state_size == 0); the loop below references both
    # unconditionally, so this function appears to assume visual observations
    # — confirm with callers.
    if state_size == 0:
        use_visual = True
        initial_state = get_state(env_info, use_visual)
        state_size = list(initial_state.shape)
        state_size.insert(2, STACK_SIZE)
        state_size = tuple(state_size)
    if load_checkpoint:
        try:
            agent = Agent.load(checkpointfn, use_visual)
        except Exception:
            # Fall back to a fresh agent if the checkpoint cannot be read.
            logging.exception('Failed to load checkpoint. Ignoring...')
            agent = Agent(state_size, action_size, 0, use_visual)
    else:
        agent = Agent(state_size, action_size, 0, use_visual)
    # Resume epsilon at the value it would have had after `episode` decays.
    # NOTE(review): (eps_start * eps_decay) ** episode only equals the
    # iteratively-decayed value when eps_start == 1.0 — confirm intended.
    if agent.episode:
        eps = (eps_start * eps_decay) ** agent.episode
    else:
        eps = eps_start
    for i_episode in range(agent.episode, n_episodes):
        state_deque = reset_deque(initial_state)
        env_info = env.reset(train_mode=True)[brain_name]
        state = get_state(env_info, use_visual)
        state_deque.append(state)
        score = 0
        for t in range(max_t):
            # Stack the most recent frames into one observation tensor.
            state = np.stack(state_deque, axis=-1) \
                .squeeze(axis=0).transpose(0, -1, 1, 2)
            action = agent.act(state, eps)
            env_info = env.step(action)[brain_name]
            next_state = get_state(env_info, use_visual)
            state_deque.append(next_state)
            next_state = np.stack(state_deque, axis=-1) \
                .squeeze(axis=0).transpose(0, -1, 1, 2)
            reward = env_info.rewards[0]
            done = env_info.local_done[0]
            agent.step(
                state,
                action,
                reward,
                next_state,
                done,
            )
            score += reward
            if done:
                break
        agent.scores.append(score)
        eps = max(eps_end, eps_decay*eps)  # decrease epsilon
        agent.episode += 1
        logging.debug(
            'Episode {}\tAverage Score: {:.2f}\tCurrent Score: {:.2f}\tEpsilon: {:.4f}'
            .format(i_episode, np.mean(agent.scores[-100:]), score, eps)
        )
        # Periodic progress report and checkpoint every 100 episodes.
        if (i_episode + 1) % 100 == 0:
            logging.info(
                'Episode {}\tAverage Score: {:.2f}'
                .format(i_episode, np.mean(agent.scores[-100:]))
            )
            logging.info(
                'Saving checkpoint file...'
            )
            agent.save(checkpointfn)
        # Stop early once the trailing-100-episode average clears the threshold.
        if np.mean(agent.scores[-100:]) >= solution_threshold:
            logging.info(
                'Environment solved in {:d} episodes!'
                .format(i_episode - 99)
            )
            logging.info(
                'Saving checkpoint file at %s', checkpointfn
            )
            agent.save(checkpointfn)
            break
        # Optionally restart the whole process to work around env instability.
        if reload_every and i_episode and (i_episode + 1) % reload_every == 0:
            env.close()
            reload_process()
    return agent
def get_mesh_faces(edge_array):
    """
    Uses an edge array of mesh to generate the faces of the mesh. For each triangle in the mesh this returns the list of indices
    contained in it as a tuple (index1, index2, index3)

    :param edge_array: iterable of (v1, v2) vertex-index pairs.
    :return: list of sorted [i, j, k] vertex-index triples, one per face,
        in first-encountered order.
    """
    triangles = []
    seen = set()  # tuple keys give O(1) duplicate detection (was O(n) list scan)
    neibs = neibs_from_edges(edge_array)
    for edge in edge_array:
        for vert in get_opposite_verts(neibs, edge):
            # Sort once and reuse (the original re-sorted on append).
            triangle = sorted([edge[0], edge[1], vert])
            key = tuple(triangle)
            if key not in seen:
                seen.add(key)
                triangles.append(triangle)
    return triangles
def _make_attribution_from_nodes(mol: Mol, nodes: np.ndarray,
                                 global_vec: np.ndarray) -> GraphsTuple:
  """Makes an attribution from node information.

  Builds a single-graph GraphsTuple whose senders/receivers come from the
  molecule (via _get_mol_sender_receivers) and whose node and global features
  are the given float arrays.

  Args:
    mol: molecule object passed to _get_mol_sender_receivers for connectivity.
    nodes: per-node attribution/feature array (cast to float32).
    global_vec: graph-level attribution/feature vector (cast to float32).

  Returns:
    A GraphsTuple containing exactly one graph.
  """
  senders, receivers = _get_mol_sender_receivers(mol)
  data_dict = {
      'nodes': nodes.astype(np.float32),
      'senders': senders,
      'receivers': receivers,
      'globals': global_vec.astype(np.float32)
  }
  return graph_nets.utils_np.data_dicts_to_graphs_tuple([data_dict])
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # Subrepo state is tracked in .hgsubstate, so only revisions that touch
    # that file can affect subrepos.
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # Yield the names that match the user-supplied pattern.
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # Status of .hgsubstate between the first parent and this revision.
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # Consider subrepos present in either revision; report a match
            # only if a matching subrepo's recorded state actually changed.
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
def format_output(item, show_url=False):
    """ takes a voat post and returns a formatted string

    Note: mutates *item* in place, adding the derived display fields
    (Title, link, timesince, comments, points, warning).
    \\x02 is the IRC bold control character.
    """
    if not item["Title"]:
        # Fall back to the link description when the post has no title.
        item["Title"] = formatting.truncate(item["Linkdescription"], 70)
    else:
        item["Title"] = formatting.truncate(item["Title"], 70)

    item["link"] = voat_fill_url.format(item["Subverse"], item["Id"])

    raw_time = isodate.parse_date(item['Date'])
    item["timesince"] = timeformat.time_since(raw_time, count=1, simple=True)

    item["comments"] = formatting.pluralize(item["CommentCount"], 'comment')
    item["points"] = formatting.pluralize(item["Likes"], 'point')

    # Type 2 posts are link submissions; flag them in the output.
    if item["Type"] == 2:
        item["warning"] = " \x02Link\x02"
    else:
        item["warning"] = ""

    if show_url:
        return "\x02{Title} : {Subverse}\x02 - {comments}, {points}" \
               " - \x02{Name}\x02 {timesince} ago - {link}{warning}".format(**item)
    else:
        return "\x02{Title} : {Subverse}\x02 - {comments}, {points}" \
               " - \x02{Name}\x02, {timesince} ago{warning}".format(**item)
def get_compiled_table_name(engine, schema, table_name):
    """Returns a table name quoted in the manner that SQLAlchemy would use to query the table

    Args:
        engine (sqlalchemy.engine.Engine):
        schema (str, optional): The schema name for the table
        table_name (str): The name of the table

    Returns:
        str: The compiled table name

    Examples:
        >>> from sqlalchemy import create_engine
        >>> get_compiled_table_name(create_engine('greenplum://u:p@s'), 'a_schema', 'a_table') == six.text_type('a_schema.a_table')
        True
        >>> get_compiled_table_name(create_engine('greenplum://u:p@s'), 'a_schema-1', 'a_table-1') == six.text_type('"a_schema-1"."a_table-1"')
        True
        >>> get_compiled_table_name(create_engine('greenplum://u:p@s'), None, 'a_table-1') == six.text_type('"a_table-1"')
        True
        >>> get_compiled_table_name(create_engine('greenplum://u:p@s'), '', 'a_table-1') == six.text_type('"a_table-1"')
        True
    """
    # A detached Table (fresh MetaData) is enough for the dialect's
    # identifier preparer to produce the properly quoted name.
    target = sqlalchemy.Table(table_name, sqlalchemy.MetaData(), schema=schema)
    return engine.dialect.identifier_preparer.format_table(target)
def build(filepath):
    """Return the popup window confirming where the file was saved.

    The layout is: custom title bar, heading, spacing, the confirmation
    message (with *filepath* appended), spacing, the done button, and a
    bottom spacer — all centered.
    """
    rows = [
        [titlebar.build()],
        [heading.build(HEADING_TITLE)],
        [fillers.horizontal_filler(2, colors.BACKGROUND)],
        [sg.Text(
            text=MESSAGE_TEXT + filepath,
            font=MESSAGE_FONT,
            text_color=colors.BLACK,
            background_color=colors.BACKGROUND,
            justification='c',
            pad=(10, None)  # adds space between l/r borders.
        )],
        # adds space between message and button
        [fillers.horizontal_filler(1, colors.BACKGROUND)],
        # the key is not needed
        [button.build(BUTTON_TEXT, '', BUTTON_FONT, BUTTON_SIZE)],
        [fillers.horizontal_filler(2, colors.BACKGROUND)],
    ]
    return sg.Window(
        title='',
        no_titlebar=True,
        keep_on_top=True,
        layout=rows,
        element_justification='c'
    )
def format_float(digit=0, is_pct=False):
    """
    Number display format for pandas

    Args:
        digit: number of digits to keep
            if negative, add one space in front of positive pct
        is_pct: % display

    Returns:
        function formatting floats, e.g.
        format_float(0)(1e5) -> '100,000'
        format_float(-1, True)(.2) -> ' 20.0%'
        (zero formats as '-' padded with abs(digit) spaces; NaN as 'NaN')
    """
    if is_pct:
        pad = ' ' if digit < 0 else ''
        pct_fmt = f'{{:{pad}.{abs(int(digit))}%}}'

        def _fmt_pct(value):
            return 'NaN' if np.isnan(value) else pct_fmt.format(value)
        return _fmt_pct

    def _fmt_num(value):
        if np.isnan(value):
            return 'NaN'
        if value:
            return f'{{:,.{digit}f}}'.format(value)
        return '-' + ' ' * abs(digit)
    return _fmt_num
def get_overexpressed_genes(
        matrix: ExpMatrix, cell_labels: pd.Series,
        exp_thresh: float = 0.05, ignore_outliers: bool = True,
        num_genes: int = 20) -> pd.DataFrame:
    """Determine most over-expressed genes for each cluster.

    For every cluster, computes each gene's fold change of the cluster's mean
    (scaled) expression over the average of the other clusters, with values
    clipped from below at ``exp_thresh`` before the ratio is taken, and
    reports the top ``num_genes`` genes.

    Args:
        matrix: expression matrix (genes x cells).
        cell_labels: cluster label per cell, aligned with matrix columns.
        exp_thresh: floor applied to mean expression before fold change.
        ignore_outliers: if True, drop cells labelled "Outliers".
        num_genes: number of top genes reported per cluster.

    Returns:
        DataFrame indexed by (cluster, gene) with columns
        'Fold change' and 'Mean expression (TP10K)'.
    """
    # make sure matrix and cell_labels are aligned
    matrix = matrix.loc[:, cell_labels.index]

    if ignore_outliers:
        # ignore the cluster named "Outliers", if it exists
        sel = (cell_labels != 'Outliers')
        matrix = matrix.loc[:, sel]
        cell_labels = cell_labels.loc[sel]

    _LOGGER.info('Ignoring mean expression values below %.3f', exp_thresh)

    data = []

    # scale matrix
    matrix = matrix.scale()

    # determine fold-changes for all clusters
    vc = cell_labels.value_counts()
    clusters = vc.index.tolist()
    X = np.zeros((len(clusters), matrix.num_genes), dtype=np.float32)
    cluster_mean = ExpMatrix(genes=matrix.genes, cells=clusters, data=X.T)
    for l in clusters:
        sel = (cell_labels == l)
        cluster_mean.loc[:, l] = matrix.loc[:, sel].mean(axis=1)

    # in calculation of fold change,
    # ignore all expression values below exp_thresh
    thresh_cluster_mean = cluster_mean.copy()
    thresh_cluster_mean[thresh_cluster_mean < exp_thresh] = exp_thresh

    # calculate fold change relative to average of other clusters
    X = np.ones((len(clusters), matrix.num_genes), dtype=np.float32)
    fold_change = ExpMatrix(genes=matrix.genes, cells=clusters, data=X.T)
    for l in clusters:
        sel = (thresh_cluster_mean.cells != l)
        fold_change.loc[:, l] = thresh_cluster_mean.loc[:, l] / \
                (thresh_cluster_mean.loc[:, sel].mean(axis=1))

    markers = []
    for l in clusters:
        # Top num_genes genes by fold change for this cluster.
        change = fold_change.loc[:, l].sort_values(ascending=False)
        change = change[:num_genes]

        # scale mean expression values to 10K transcripts
        mean = cluster_mean.loc[change.index, l]
        mean = (10000 / cluster_mean.loc[:, l].sum()) * mean

        cluster_index = [l] * num_genes
        gene_index = change.index
        index = pd.MultiIndex.from_arrays(
            [cluster_index, gene_index], names=['cluster', 'gene'])

        data = np.c_[change.values, mean.values]

        markers.append(
            pd.DataFrame(
                index=index,
                columns=['Fold change', 'Mean expression (TP10K)'],
                data=data))

    markers = pd.concat(markers, axis=0)

    #markers = markers.swaplevel(0, 1).sort_index(
    #    level=1, sort_remaining=False).swaplevel(0, 1)

    return markers
def heat_degree_day(Tcolumn):
    """
    Compute daily heating degree day (HDD) values from outdoor temperatures.

    Args:
        Tcolumn: pandas Series of outdoor temperature in Fahrenheit with a
            datetime index (resampled here to daily means).

    Returns:
        hdd: pandas Series named 'hdd' of daily heating degree day values,
            computed as BASE minus the daily mean temperature.

    This function provides the heating degree day value of a given list of outdoor temperature data
    (in Fahrenheit) with an accompanying datetime object list, needed for the definition of a heating degree day (https://www.weather.gov/key/climate_heat_cool).

    NOTE(review): BASE is a module-level constant (e.g. 65°F) defined
    elsewhere in this module — confirm its value.
    """
    # Daily mean temperature.
    Temp = Tcolumn.groupby(pd.Grouper(freq = 'D')).mean()
    hdd = BASE - Temp
    hdd.name='hdd'
    return hdd
def quantile_loss(y_true, y_pred, taus):
    """
    Combined quantile (pinball) loss for a list of quantiles: the sum of the
    skewed absolute errors contributed by each quantile in *taus*, where
    column i of *y_pred* holds the prediction for taus[i].
    """
    total = skewed_absolute_error(
        K.flatten(y_true), K.flatten(y_pred[:, 0]), taus[0])
    for column, tau in enumerate(taus[1:], start=1):
        total += skewed_absolute_error(K.flatten(y_true),
                                       K.flatten(y_pred[:, column]),
                                       tau)
    return total
from typing import Tuple
from typing import Sequence
import string
def parse_a3m(a3m_string: str) -> Tuple[Sequence[str], DeletionMatrix]:
"""Parses sequences and deletion matrix from a3m format alignment.
Args:
a3m_string: The string contents of a a3m file. The first sequence in the
file should be the query sequence.
Returns:
A tuple of:
* A list of sequences that have been aligned to the query. These
might contain duplicates.
* The deletion matrix for the alignment as a list of lists. The element
at `deletion_matrix[i][j]` is the number of residues deleted from
the aligned sequence i at residue position j.
"""
sequences, _ = parse_fasta(a3m_string)
deletion_matrix = []
for msa_sequence in sequences:
deletion_vec = []
deletion_count = 0
for j in msa_sequence:
if j.islower():
deletion_count += 1
else:
deletion_vec.append(deletion_count)
deletion_count = 0
deletion_matrix.append(deletion_vec)
# Make the MSA matrix out of aligned (deletion-free) sequences.
deletion_table = str.maketrans('', '', string.ascii_lowercase)
aligned_sequences = [s.translate(deletion_table) for s in sequences]
return aligned_sequences, deletion_matrix | 5b1f5f9cfc54cd55602e1d73b92460fbc99b3594 | 24,863 |
def build_sub_lattice(lattice, symbol):
    """Generate a sub-lattice of the lattice based on equivalent atomic species.

    Args:
        lattice (ASE crystal class): Input lattice
        symbol (string): Symbol of species identifying sub-lattice

    Returns:
        list of lists:
            sub_lattice: Cartesian coordinates of the sub-lattice of symbol
    """
    species = lattice.get_chemical_symbols()
    positions = lattice.get_scaled_positions()
    # Keep the position of every atom whose species matches the symbol.
    return [pos for atom, pos in zip(species, positions) if atom == symbol]
def make_url(connection_str):
    """Parse an RFC 1738-style connection string (e.g.
    ``dialect://user:pass@host/db``) via :func:`_parse_rfc1738_args` and
    return the resulting URL object."""
    return _parse_rfc1738_args(connection_str)
from typing import Union
from typing import Tuple
def get_graphs_within_cutoff(structure: Union[Structure, MEGNetMolecule, Molecule],
                             cutoff: float = 5.0, numerical_tol: float = 1e-8) -> Tuple[np.ndarray]:
    """
    Get graph representations from structure within cutoff

    Args:
        structure: (pymatgen Structure)
        cutoff: (float) cutoff radius
        numerical_tol: (float) numerical tolerance

    Returns:
        center_indices, neighbor_indices, images, distances
        (self-pairs at zero distance are excluded)
    """
    if isinstance(structure, Structure):
        # Periodic structure: use its real lattice with PBC in all directions.
        lattice_matrix = np.ascontiguousarray(np.array(structure.lattice.matrix), dtype=float)
        pbc = np.array([1, 1, 1], dtype=int)
    elif isinstance(structure, MEGNetMolecule) or isinstance(structure, Molecule):
        # Molecule: place it in a large (1000 Å) box with no periodicity so
        # only intra-molecular neighbors are found.
        lattice_matrix = np.array([[1000.0, 0., 0.], [0., 1000., 0.], [0., 0., 1000.]], dtype=float)
        pbc = np.array([0, 0, 0], dtype=int)
    else:
        raise ValueError('structure type not supported')
    r = float(cutoff)
    cart_coords = np.ascontiguousarray(np.array(structure.cart_coords), dtype=float)
    center_indices, neighbor_indices, images, distances = \
        find_points_in_spheres(cart_coords, cart_coords, r=r, pbc=pbc,
                               lattice=lattice_matrix, tol=numerical_tol)
    # Drop each atom's pairing with itself (same index at ~zero distance).
    exclude_self = (center_indices != neighbor_indices) | (distances > numerical_tol)
    return center_indices[exclude_self], neighbor_indices[exclude_self], images[exclude_self], distances[exclude_self]
def add_cals1():
    """
    Add nutrients to daily intake for products.

    Flask view: requires a logged-in session. Looks up products matching the
    submitted "keyword" form field and renders the search results.
    """
    if 'username' in session:
        food = request.form.get("keyword")
        pr = Product(food)
        lst = pr.get_products()
        # NOTE(review): `lyst` appears to be a module-level list accumulating
        # results across requests — confirm this is intentional (it is shared
        # between users/requests).
        for i in lst:
            lyst.append(i)
        if len(lst) != 0:
            return render_template('productsearch.html', username=escape(session['username']), vars=lst)
        else:
            # No matching products found.
            return render_template("failure.html")
    else:
        # Not logged in.
        return render_template("failure.html")
def format_percent(percentage, pos):
    """
    Formats percentages for the 'x' axis of a plot.

    :param percentage: The fraction between 0.0 and 1.0
    :type percentage: float
    :param pos: The position argument (required by matplotlib, unused)
    :type pos: int
    :return: A formatted percentage string
    :rtype: str
    """
    # pylint: disable=unused-argument
    scaled = percentage * 100.
    return '{:.0f}%'.format(scaled)
import inspect
def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None,
                overrides: dict = {}):
    """
    Return a simple representation string for `obj`, e.g. ``MyClass(a=1,b=2)``.

    Values come from `overrides` first, then from the object's attributes.
    An attribute equal to its constructor default is omitted.
    If `attrs` is not None, it should be a list of attributes to include.
    """
    sig_params = inspect.signature(obj.__class__).parameters
    if attrs is None:
        attrs = list(sig_params.keys())
    shown = []
    for name in attrs:
        if name in overrides:
            value = overrides[name]
        elif hasattr(obj, name):
            value = getattr(obj, name)
        else:
            continue
        visible = True
        if name in sig_params:
            default = sig_params[name].default
            # Hide values that merely repeat the constructor default.
            visible = default is inspect._empty or value != default  # type: ignore
        if visible:
            shown.append(f"{name}={value}")
    return f"{obj.__class__.__name__}({','.join(shown)})"
import torch
def Gaussian_RadialBasis(basis_size: int, max_radius: float, min_radius=0.,
                         num_layers: int = 0, num_units: int = 0, activation_function='relu'):
    """
    Note: based on e3nn.radial.GaussianRadialModel.

    Builds a finite-element radial basis of Gaussians centered at
    `basis_size` points spaced evenly on [min_radius, max_radius], with
    sigma = 0.8 * spacing.

    :param basis_size: number of Gaussian basis functions (= reference points)
    :param max_radius: largest reference point
    :param min_radius: smallest reference point (default 0)
    :param num_layers: layers of the radial network passed through
    :param num_units: units per layer passed through
    :param activation_function: name resolved via get_scalar_non_linearity
    :return: a FiniteElement_RadialBasis instance
    """
    activation_function = get_scalar_non_linearity(activation_function)
    """exp(-x^2 /spacing)"""
    # Evenly spaced Gaussian centers; width tied to the spacing.
    spacing = (max_radius - min_radius) / (basis_size - 1)
    reference_points = torch.linspace(min_radius, max_radius, basis_size)
    sigma = 0.8 * spacing
    basis = partial(gaussian_basis_fn, sigma=sigma)
    return FiniteElement_RadialBasis(reference_points, radial_basis_fn=basis,
                                     radial_basis_type_name='φ_gauss',
                                     num_layers=num_layers, num_units=num_units,
                                     activation_function=activation_function)
def _get_column_outliers_std(column, m=3):
"""
given a pandas Series representing a column in a dataframe
returns pandas Series without the values which are further than m*std
:param column: pandas Series representing a column in a dataframe
:param m: num of std as of to remove outliers
:return: pandas Series with the values which exceeds m*std
"""
outliers = column[abs(column - np.mean(column)) > m * np.std(column)].index
return outliers | b55dd119ce36cdae7f17bb91aae4257b2dfca29e | 24,871 |
import requests
def scrape_website(url):
    """Send a GET request to ``url`` and return the Response object if the
    server answers with status code 200.

    :param url: address to fetch.
    :return: the ``requests.Response`` on HTTP 200, otherwise ``None``.
    """
    result = requests.get(url)
    # Removed leftover commented-out debugging override of this check.
    if result.status_code == 200:
        return result
    return None
def set_filters(request, query, result, static_items=None):
    """
    Sets filters in the query.

    :param request: current request; its query-string parameters become
        filter terms.
    :param query: query dict; a terms filter per field is appended in place
        to ``query['filter']['and']['filters']``.
    :param result: response dict; a "remove this filter" link is appended to
        ``result['filters']`` for each field/term pair that came from the
        query string.
    :param static_items: extra (field, term) pairs applied on top of the
        query string (default: none).
    :return: dict mapping each filtered field to its list of terms.
    """
    query_filters = query['filter']['and']['filters']
    used_filters = {}
    if static_items is None:
        static_items = []
    # Get query string items plus any static items, then extract all the fields
    qs_items = list(request.params.items())
    total_items = qs_items + static_items
    qs_fields = [item[0] for item in qs_items]
    fields = [item[0] for item in total_items]
    # Now make lists of terms indexed by field
    all_terms = {}
    for item in total_items:
        if item[0] in all_terms:
            all_terms[item[0]].append(item[1])
        else:
            all_terms[item[0]] = [item[1]]
    for field in fields:
        # A field may appear several times in `fields`; process it once.
        if field in used_filters:
            continue
        terms = all_terms[field]
        # Reserved query-string parameters that control the search itself
        # and are never treated as data filters.
        if field in ['type', 'limit', 'y.limit', 'x.limit', 'mode', 'annotation',
                     'format', 'frame', 'datastore', 'field', 'region', 'genome',
                     'sort', 'from', 'referrer']:
            continue
        # Add filter to result
        if field in qs_fields:
            for term in terms:
                # Rebuild the query string without this field/term pair so the
                # client gets a ready-made "remove filter" link.
                qs = urlencode([
                    (k.encode('utf-8'), v.encode('utf-8'))
                    for k, v in qs_items
                    if '{}={}'.format(k, v) != '{}={}'.format(field, term)
                ])
                result['filters'].append({
                    'field': field,
                    'term': term,
                    'remove': '{}?{}'.format(request.path, qs)
                })
        # searchTerm shows up in result['filters'] above but is deliberately
        # excluded from the terms filters added to the query.
        if field == 'searchTerm':
            continue
        # Add to list of active filters
        used_filters[field] = terms
        # Add filter to query
        query_filters.append(build_terms_filter(field, terms))
    return used_filters
def generate_random_initial_population(population_size, n_nodes, al):
    """
    Build an initial population of randomly coloured maps.

    Each individual is a ``World_Map`` whose nodes are coloured uniformly at
    random from {'r', 'b', 'g'}.

    :param population_size: number of individuals to create
    :type population_size: int
    :param n_nodes: number of nodes per map
    :type n_nodes: int
    :param al: adjacency list
    :type al: list of lists
    :return: random population
    :rtype: list of World_Map
    """
    population = []
    for _ in range(population_size):
        colours = np.random.choice(['r', 'b', 'g'], n_nodes, replace=True)
        population.append(World_Map("".join(colours), al))
    print('A random population of ' + str(population_size) + ' people was created')
    return population
def import_mlp_args(hyperparameters):
    """
    Returns parsed config for MultiLayerPerceptron classifier from provided settings
    *Grid-search friendly
    """
    # One row per hyperparameter: (caster applied per value, fallback string).
    spec = {
        'hidden_layer_sizes': (make_tuple, '(100,)'),  # Formatting matters!
        'activation': (str, 'relu'),
        'solver': (str, 'adam'),
        'alpha': (float, '0.0001'),
        'batch_size': (int, '200'),
        'learning_rate': (str, 'constant'),
        'learning_rate_init': (float, '0.001'),
        'max_iter': (int, '200'),
        'tol': (float, '1e-4'),
    }
    return {
        name: cast_to_typed_list(hyperparameters.get(name, fallback=default), caster)
        for name, (caster, default) in spec.items()
    }
from typing import List
from typing import Optional
import networkx
def download_map(
    location: List[str],
    node_tags: Optional[List[str]] = None,
    edge_tags: Optional[List[str]] = None,
    api_key: Optional[str] = None,
) -> networkx.DiGraph:
    """
    Download map from OSM for specific locations.

    :param location: place names / query strings to look up.
    :param node_tags: OSM node tags to keep -- presumably forwarded
        unchanged to the fetcher; TODO confirm against custom_graph_from_x.
    :param edge_tags: OSM edge tags to keep (same caveat as node_tags).
    :param api_key: optional API key forwarded to the fetcher.
    :return: directed graph of the downloaded map.
    """
    logger.info(f"Download map for {location}")
    # Thin wrapper: all real work happens in custom_graph_from_x.
    return custom_graph_from_x(
        location, node_tags=node_tags, edge_tags=edge_tags, api_key=api_key
    )
import torch
def getLayers(model):
    """
    Collect every leaf module of ``model``.

    A leaf is a module that has no child modules of its own; leaves are
    returned in depth-first traversal order. The root itself is never
    included, even when it has no children.

    :param model: root ``torch.nn.Module`` to inspect
    :return: list of leaf modules
    """
    leaves = []

    def _walk(module):
        # Depth-first over the immediate children; childless ones are leaves.
        for _, child in module.named_children():
            if len(list(child.named_children())) == 0:
                leaves.append(child)
            elif isinstance(child, torch.nn.Module):
                _walk(child)

    _walk(model)
    return leaves
def get_config_pdf_version(config_version: str, max_input_version: str) -> str:
    """
    From the PDF version as set in the configuration and the maximum version of all input files, checks for
    the best PDF output version. Logs a warning, if the version set in the configuration is lower than any of the
    input files.
    >>> get_config_pdf_version('auto', '1.6')
    '1.6'
    >>> get_config_pdf_version('1.3', '1.5')
    '1.3'
    >>> get_config_pdf_version('1.x', '1.5')
    Traceback (most recent call last):
    ...
    ValueError: ('Invalid PDF version in configuration', '1.x')
    :param config_version: Version string from the configuration. Set to ``auto`` to just use ``max_input_version``.
    :param max_input_version: Maximum version from all input files.
    :return: ``config_version``, unless set to ``auto``, then ``max_input_version``. However, the automatic version
        setting will never be lower than ``1.3``.
    :raises ValueError: If the configuration-set version is an invalid pattern.
    """
    if config_version == 'auto':
        # Automatic mode: follow the inputs, but never go below PDF 1.3.
        # String comparison is sufficient for single-digit minor versions.
        return max(max_input_version, '1.3')
    if not PDF_VERSION_PATTERN.fullmatch(config_version):
        raise ValueError("Invalid PDF version in configuration", config_version)
    if max_input_version > config_version:
        # An explicit downgrade is honoured but warned about.
        log.warning("PDF version specified in config (%s) is lower than at least one of the input documents (%s). "
                    "The resulting PDF may not be displayed correctly in all viewers.",
                    config_version, max_input_version)
    return config_version
def threshold_otsu(hist):
    """Return threshold value based on Otsu's method.

    hist : 2-tuple of arrays
        Bin counts and the corresponding bin edges (one entry longer than
        the counts, as returned by ``numpy.histogram``).

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    References
    ----------
    .. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method
    """
    counts, edges = hist
    centers = edges[:-1]
    # Cumulative class probabilities for every candidate split point.
    w_bg = np.cumsum(counts)
    w_fg = np.cumsum(counts[::-1])[::-1]
    # Cumulative class means for every candidate split point.
    mu_bg = np.cumsum(counts * centers) / w_bg
    mu_fg = (np.cumsum((counts * centers)[::-1]) / w_fg[::-1])[::-1]
    # Inter-class variance; ends are clipped so that background values at
    # threshold t pair with foreground values strictly above t.
    between_var = w_bg[:-1] * w_fg[1:] * (mu_bg[:-1] - mu_fg[1:]) ** 2
    if len(between_var) == 0:
        # Degenerate single-bin histogram.
        return 0
    return centers[np.nanargmax(between_var)]
def filter_options(v):
    """Disable option v"""
    # One dropdown option per dataframe column; the column equal to v
    # is marked disabled so it cannot be selected twice.
    options = []
    for col in dataframe().columns:
        options.append({"label": col, "value": col, "disabled": col == v})
    return options
def scatter(x):
    """
    Scatter matrix x x^t of the vector ``x``.
    """
    row = np.atleast_2d(x)          # shape (1, n) for 1-D input
    result = np.dot(row.T, row)     # outer product, shape (n, n)
    # Sanity check: the result must be square with side len(x).
    assert np.array_equal(np.shape(result), [len(x), len(x)])
    return result
def get_owned_object_or_40x(klass, owner, include_staff=False,
                            include_superuser=True, *args, **kwargs):
    """
    Returns an object if it can be found (using get_object_or_404).
    If the object is not owned by the supplied owner a 403 will be raised.

    :param klass: model class / queryset passed to ``get_object_or_404``.
    :param owner: candidate owner the object is checked against.
    :param include_staff: treat staff users as owners when True.
    :param include_superuser: treat superusers as owners when True.
    :raises Http404: when no matching object exists (via get_object_or_404).
    :raises PermissionDenied: when the object is not owned by ``owner``.
    """
    obj = get_object_or_404(klass, *args, **kwargs)
    # Ownership semantics live on the model's is_not_owned_by(); the two
    # flags are forwarded to that check unchanged.
    if obj.is_not_owned_by(owner, include_staff, include_superuser):
        raise PermissionDenied()
    return obj
def ampMeritFunction2(voltages,**kwargs):
    """Simple merit function calculator.
    voltages is 1D array of weights for the influence functions;
    kwargs['inp'] holds (distortion map, influence function matrix).
    Computes mean((ifuncs @ voltages - distortion)**2) and returns it
    together with an empty list and a zero status flag.
    """
    distortion = kwargs['inp'][0]
    ifuncs = kwargs['inp'][1]
    residual = np.dot(ifuncs, voltages) - distortion
    merit = np.mean(residual ** 2)
    return merit, [], 0
def get_transpose_graph(graph):
    """Return the transpose graph: every edge of ``graph`` reversed.

    ``graph`` maps each node to the set of its successors; the result maps
    each node to the set of its predecessors.
    """
    transpose = {node: set() for node in graph}
    for source, targets in graph.items():
        for target in targets:
            transpose[target].add(source)
    return transpose
import os
def _game_data_path(game_id):
    """
    Build the on-disk path of the data file for a given game.

    This fully trusts game_id, and is not safe on unsanitised input.
    """
    filename = f"{game_id}{_EXTENSION}"
    return os.path.join(_DATA_STORES, filename)
def varimax(x, iteration=14):
    """Varimax rotation of a factor-loading matrix.

    See http://www.real-statistics.com/linear-algebra-matrix-topics/varimax/

    :param x: np.array of shape (m_features, c_factors); rows are normalised
        before rotating and the matrix is modified in place thereafter.
    :param iteration: number of full sweeps over all factor pairs.
    :return: the rotated loading matrix.
    """
    # TODO: set more intelligent angle evaluator
    def _calculate_rotation_angle(x, y):
        # Closed-form optimal planar rotation angle for one pair of factor
        # columns (standard varimax solution).
        u = np.square(x) - np.square(y)
        v = 2 * x * y
        A = np.sum(u)
        B = np.sum(v)
        C = np.sum(np.square(u) - np.square(v))
        D = np.sum(u * v)
        X = D - (2 * A * B) / len(x)
        Y = C - (A ** 2 - B ** 2) / len(x)
        return np.arctan(X / Y) / 4
    x = _normalize_numpy(x, axis=1)
    for _ in range(iteration):
        # Sweep over every unordered pair of factor columns.
        for factorLoad1 in range(x.shape[1]):
            for factorLoad2 in range(factorLoad1 + 1, x.shape[1]):
                # NOTE(review): the result of the next expression is discarded
                # -- looks like leftover debugging; confirm before removing.
                np.sum(np.square(x[:, factorLoad1]) - np.square(x[:, factorLoad2]))
                angle = _calculate_rotation_angle(x[:, factorLoad1], x[:, factorLoad2])
                rotationMatrix = np.array([[np.cos(angle), -np.sin(angle)],
                                           [np.sin(angle), np.cos(angle)]])
                # Rotate the two columns in place by the computed angle.
                x[:, factorLoad1], x[:, factorLoad2] = np.dot(np.concatenate(([x[:, factorLoad1]],
                                                                              [x[:, factorLoad2]])).T, rotationMatrix).T
    return x
def inventory_to_kml_string(
        inventory,
        icon_url="https://maps.google.com/mapfiles/kml/shapes/triangle.png",
        icon_size=1.5, label_size=1.0, cmap="Paired", encoding="UTF-8",
        timespans=True, strip_far_future_end_times=True):
    """
    Convert an :class:`~obspy.core.inventory.inventory.Inventory` to a KML
    string representation.
    :type inventory: :class:`~obspy.core.inventory.inventory.Inventory`
    :param inventory: Input station metadata.
    :type icon_url: str
    :param icon_url: Internet URL of icon to use for station (e.g. PNG image).
    :type icon_size: float
    :param icon_size: Icon size.
    :type label_size: float
    :param label_size: Label size.
    :type cmap: str
    :param cmap: Matplotlib colormap name; one colour per network.
    :type encoding: str
    :param encoding: Encoding used for XML string.
    :type timespans: bool
    :param timespans: Whether to add timespan information to the single station
        elements in the KML or not. If timespans are used, the displayed
        information in e.g. Google Earth will represent a snapshot in time,
        such that using the time slider different states of the inventory in
        time can be visualized. If timespans are not used, any station active
        at any point in time is always shown.
    :type strip_far_future_end_times: bool
    :param strip_far_future_end_times: Leave out likely fictitious end times of
        stations (more than twenty years after current time). Far future end
        times may produce time sliders with bad overall time span in third
        party applications viewing the KML file.
    :rtype: byte string
    :return: Encoded byte string containing KML information of the station
        metadata.
    """
    twenty_years_from_now = UTCDateTime() + 3600 * 24 * 365 * 20
    # construct the KML file
    kml = Element("kml")
    kml.set("xmlns", "http://www.opengis.net/kml/2.2")
    document = SubElement(kml, "Document")
    SubElement(document, "name").text = "Inventory"
    # style definition: one shared icon/label style per network, keyed by
    # index so the per-station placemarks can reference it via styleUrl.
    cmap = get_cmap(name=cmap, lut=len(inventory.networks))
    for i in range(len(inventory.networks)):
        color = _rgba_tuple_to_kml_color_code(cmap(i))
        style = SubElement(document, "Style")
        style.set("id", "station_%i" % i)
        iconstyle = SubElement(style, "IconStyle")
        SubElement(iconstyle, "color").text = color
        SubElement(iconstyle, "scale").text = str(icon_size)
        icon = SubElement(iconstyle, "Icon")
        SubElement(icon, "href").text = icon_url
        hotspot = SubElement(iconstyle, "hotSpot")
        hotspot.set("x", "0.5")
        hotspot.set("y", "0.5")
        hotspot.set("xunits", "fraction")
        hotspot.set("yunits", "fraction")
        labelstyle = SubElement(style, "LabelStyle")
        SubElement(labelstyle, "color").text = color
        SubElement(labelstyle, "scale").text = str(label_size)
    for i, net in enumerate(inventory):
        folder = SubElement(document, "Folder")
        SubElement(folder, "name").text = str(net.code)
        SubElement(folder, "open").text = "1"
        SubElement(folder, "description").text = str(net)
        style = SubElement(folder, "Style")
        liststyle = SubElement(style, "ListStyle")
        SubElement(liststyle, "listItemType").text = "check"
        SubElement(liststyle, "bgColor").text = "00ffff"
        SubElement(liststyle, "maxSnippetLines").text = "5"
        # add one marker per station code
        for sta in net:
            placemark = SubElement(folder, "Placemark")
            SubElement(placemark, "name").text = ".".join((net.code, sta.code))
            SubElement(placemark, "styleUrl").text = "#station_%i" % i
            # NOTE(review): 'color' here is left over from the style loop
            # above (always the LAST network's colour), and <color> is not a
            # standard Placemark child in KML 2.2 -- confirm whether this was
            # meant to use this network's colour or be removed.
            SubElement(placemark, "color").text = color
            if sta.longitude is not None and sta.latitude is not None:
                point = SubElement(placemark, "Point")
                SubElement(point, "coordinates").text = "%.6f,%.6f,0" % \
                    (sta.longitude, sta.latitude)
            SubElement(placemark, "description").text = str(sta)
            if timespans:
                start = sta.start_date
                end = sta.end_date
                if start is not None or end is not None:
                    timespan = SubElement(placemark, "TimeSpan")
                    if start is not None:
                        SubElement(timespan, "begin").text = str(start)
                    if end is not None:
                        if not strip_far_future_end_times or \
                                end < twenty_years_from_now:
                            SubElement(timespan, "end").text = str(end)
        if timespans:
            start = net.start_date
            end = net.end_date
            if start is not None or end is not None:
                timespan = SubElement(folder, "TimeSpan")
                if start is not None:
                    SubElement(timespan, "begin").text = str(start)
                if end is not None:
                    if not strip_far_future_end_times or \
                            end < twenty_years_from_now:
                        SubElement(timespan, "end").text = str(end)
    # generate and return KML string
    return tostring(kml, pretty_print=True, xml_declaration=True,
                    encoding=encoding)
import torch
def get_feature_embedding(config, data_loader, topk):
    """Iterate through all items in the data loader and maintain a list
    of top k highest entropy items and their embeddings
    topk - the max number of samples to keep.  If None, don't bother with
      entropy, and just return embeddings for items in the data loader.
    Return the embeddings (topk_points x feature_dimension) and the indexes of
    each embedding in the original data loader.
    - Only 1 forward pass to get entropy and feature embedding
    - Done in a streaming fashion to be ram conscious

    NOTE(review): the entropy formula below is the binary-classification
    form, so this assumes config.model outputs probabilities in (0, 1) --
    confirm against the model definition.
    """
    config.model.eval()
    # The forward hook appends each batch's embedding to this list.
    _batched_embeddings = []
    with torch.no_grad(), register_embedding_hook(
            config.get_feature_embedding_layer(), _batched_embeddings):
        entropy = torch.tensor([]).to(config.device)
        embeddings = torch.tensor([]).to(config.device)
        loader_idxs = torch.tensor([], dtype=torch.long).to(config.device)
        N = 0
        for X, y in data_loader:
            # get entropy and embeddings for this batch
            X, y = X.to(config.device), y.to(config.device)
            yhat = config.model(X)
            assert torch.isnan(yhat).sum() == 0
            embeddings = torch.cat([embeddings, _batched_embeddings.pop()])
            assert len(_batched_embeddings) == 0  # sanity check forward hook
            loader_idxs = torch.cat([
                loader_idxs,
                torch.arange(N, N+X.shape[0], device=config.device)])
            # select only top k values
            if topk is not None:
                # Binary entropy: -p*log2(p) - (1-p)*log2(1-p).
                _entropy = -yhat*torch.log2(yhat) - (1-yhat)*torch.log2(1-yhat)
                # Work around when yhat == 1 and entropy is nan instead of 0
                _m = torch.isnan(_entropy)
                _entropy[_m] = 0
                # check for other unexplained nan bugs
                assert ((yhat[_m] == 1) | (yhat[_m] == 0)).all()
                entropy = torch.cat([entropy, _entropy])
                assert torch.isnan(entropy).sum() == 0
                assert len(entropy) == len(embeddings)
                assert len(entropy) == len(loader_idxs)
                # Keep only the topk highest-entropy samples seen so far.
                if len(entropy) > topk:
                    entropy2, idxs = torch.topk(entropy, topk, dim=0)
                    idxs = idxs.cpu().numpy().ravel()
                    assert torch.isnan(entropy2).sum() == 0
                    assert max(idxs) < len(entropy)
                    assert len(idxs) == len(entropy2)
                    assert len(idxs) == topk
                    embeddings = embeddings[idxs]
                    loader_idxs = loader_idxs[idxs]
                    entropy = entropy2
            N += X.shape[0]
    embeddings = embeddings.reshape(embeddings.shape[0], -1)
    return embeddings, loader_idxs
def extract_yelp_data(term, categories, price, location,
                      limit, sort_by, attributes, yelp_api_key=yelp):
    """
    Queries the Yelp search API and returns the matching businesses' zip
    code, name, address, phone, price and coordinates as a pandas dataframe.
    Businesses with any missing field are skipped.

    Inputs:
        - term: a string of search terms input by the user
        - categories: Yelp category filter string
        - price: Yelp price-level filter (e.g. "1,2")
        - location: a string of the location to search around
        - limit: an integer of maximum number of Yelp results that
                 will be returned from the query
        - sort_by: string representing a user's sorting preference
                   (options are: distance, best_match, rating,
                   review_count)
        - attributes: extra Yelp business attributes to filter on
        - yelp_api_key: a string of the Yelp API Key (module default)
    Outputs:
        - yelp_results: a pandas dataframe containing the zip code,
                        name, address, phone, price and coordinates of each
                        potential result, or None when the query returns
                        nothing.
    """
    yelp_api = YelpAPI(yelp_api_key)
    search_results = yelp_api.search_query(term=term,
                                           categories=categories,
                                           price=price,
                                           location=location,
                                           limit=limit,
                                           sort_by=sort_by,
                                           attributes=attributes)
    # If Yelp query returns nothing, return None
    if not search_results:
        return None
    # Initialize lists for each planned column in Yelp results dataframe;
    # these are characteristics of each business that get returned to user
    addresses = []
    names = []
    zip_code = []
    latitude = []
    longitude = []
    phone = []
    price = []
    # obtain business information
    businesses = search_results['businesses']
    for i in businesses:
        # In case a Yelp business is missing a field:
        try:
            a_address = i['location']['display_address'][0]
            a_name = i['name']
            a_zip = i['location']['zip_code']
            a_latitude = i['coordinates']['latitude']
            a_longitude = i['coordinates']['longitude']
            a_phone = i['phone']
            a_price = i['price']
            # Only keep businesses where every field is populated.
            if all([a_address != "", a_name != "", a_zip != "",
                    a_latitude != "", a_longitude != "",
                    a_phone != "", a_price != ""]):
                addresses.append(a_address)
                names.append(a_name)
                zip_code.append(a_zip)
                latitude.append(a_latitude)
                longitude.append(a_longitude)
                phone.append(a_phone)
                price.append(a_price)
        except KeyError:
            print("Key Error, some missing field from the Yelp return!")
    # cast Yelp results lists into pandas dataframe
    yelp_results = pd.DataFrame()
    yelp_results['zip_code'] = zip_code
    yelp_results['name'] = names
    yelp_results['addr'] = addresses
    yelp_results['phone'] = phone
    yelp_results['price'] = price
    yelp_results['latitude'] = latitude
    yelp_results['longitude'] = longitude
    # change zip code column to appropriate data type
    yelp_results['zip_code'] = pd.to_numeric(yelp_results['zip_code'])
    return yelp_results
def collect_photo_info(api_key, tag, max_count):
"""Collects some interesting info about some photos from Flickr.com for a given tag """
photo_collection = []
url = "http://api.flickr.com/services/rest/?method=flickr.photos.search&tags=%s&format=json&nojsoncallback=1&api_key=%s" %(tag, api_key)
resp = requests.get(url)
results = resp.json()
count = 0
for p in results['photos']['photo']:
if count >= max_count:
return photo_collection
print 'Processing photo: "%s"' % p['title']
photo = {}
url = "http://api.flickr.com/services/rest/?method=flickr.photos.getInfo&photo_id=" + p['id'] + "&format=json&nojsoncallback=1&api_key=" + api_key
info = requests.get(url).json()
photo["flickrid"] = p['id']
photo["title"] = info['photo']['title']['_content']
photo["description"] = info['photo']['description']['_content']
photo["page_url"] = info['photo']['urls']['url'][0]['_content']
photo["farm"] = info['photo']['farm']
photo["server"] = info['photo']['server']
photo["secret"] = info['photo']['secret']
# comments
numcomments = int(info['photo']['comments']['_content'])
if numcomments:
#print " Now reading comments (%d)..." % numcomments
url = "http://api.flickr.com/services/rest/?method=flickr.photos.comments.getList&photo_id=" + p['id'] + "&format=json&nojsoncallback=1&api_key=" + api_key
comments = requests.get(url).json()
photo["comment"] = []
for c in comments['comments']['comment']:
comment = {}
comment["body"] = c['_content']
comment["authorid"] = c['author']
comment["authorname"] = c['authorname']
photo["comment"].append(comment)
photo_collection.append(photo)
count = count + 1
return photo_collection | 26e9525639da658f9c9920b5356dd9af4753a1c5 | 24,890 |
def yolo_eval(yolo_outputs, image_shape=(720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """
    Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.
    Arguments:
    yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
                    box_confidence: tensor of shape (None, 19, 19, 5, 1)
                    box_xy: tensor of shape (None, 19, 19, 5, 2)
                    box_wh: tensor of shape (None, 19, 19, 5, 2)
                    box_class_probs: tensor of shape (None, 19, 19, 5, 80)
    image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype)
    max_boxes -- integer, maximum number of predicted boxes you'd like
    score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
    Returns:
    scores -- tensor of shape (None, ), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box
    """
    # Retrieve outputs of the YOLO model.
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    # Convert (centre, size) boxes to corner coordinates for the filters below.
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    # Drop boxes whose best class score is below score_threshold.
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=score_threshold)
    # Scale boxes back to original image shape.
    boxes = scale_boxes(boxes, image_shape)
    # Non-max suppression keeps at most max_boxes non-overlapping boxes.
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes=max_boxes,iou_threshold=iou_threshold)
    return scores, boxes, classes
def task(name, required=None):
    """
    A decorator for creating new tasks
    Args:
        name (str): Name of the task
        required (list): A list of required message keys
                         that the task expects to be present
    Returns:
        callable: decorator that wraps the task function (converting any
        exception into an error-dict result) and registers it in the
        global task ``registry``.
    """
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            logger.debug('Executing task %s', name)
            try:
                result = fn(*args, **kwargs)
            except Exception as e:
                # Failures never propagate: they become an error payload.
                # NOTE(review): 'e' is unused -- format_exc() reads the
                # active exception directly.
                tb = format_exc()
                result = {
                    'success': 1,
                    'msg': 'Task {} failed'.format(name),
                    'traceback': tb
                }
                logger.warning('Task %s failed: %s', name, tb)
            finally:
                # NOTE(review): returning from 'finally' would also swallow
                # BaseExceptions (e.g. KeyboardInterrupt) raised in fn, and
                # 'result' would be unbound in that case -- confirm intent.
                logger.debug('Returning result from task %s: %s', name, result)
                return result
        t = Task(name=name, function=wrapper, required=required)
        registry.register(t)
        return wrapper
    return decorator
import torch
def heterograph(g, max_level=4):
    """ Constructing hypergraph from homograph.
    Builds a DGL heterograph whose node types n1..n{max_level} are the paths
    (subgraphs) of the corresponding length in ``g``, linked by 'has'/'in'
    relations, plus a single global 'g' node connected to everything.
    Parameters
    ----------
    g : `dgl.DGLGraph`
        Input graph.
    max_level : `int`
        (Default value = 4)
        Highest level of hypernodes.
    Returns
    -------
    hg : `dgl.DGLHeteroGraph`
        Output graph.
    """
    # ==============
    # initialization
    # ==============
    # initialize hypergraph as a dictionary
    hg = {}
    # ========
    # indexing
    # ========
    # get adjacency matrix
    a = g.adjacency_matrix()
    # get indices
    idxs = get_indices_from_adjacency_matrix(a)
    # make them all numpy
    idxs = {key: value.numpy() for key, value in idxs.items()}
    # also include n1
    idxs["n1"] = np.arange(g.number_of_nodes())[:, None]
    # build a mapping between indices and the ordering
    # (tuple of node ids -> ordinal position of that subgraph);
    # only levels 1..max_level-1 are needed as lookup keys below.
    idxs_to_ordering = {}
    for term in ["n%s" % level for level in range(1, max_level)]:
        idxs_to_ordering[term] = {
            tuple(subgraph_idxs): ordering
            for (ordering, subgraph_idxs) in enumerate(list(idxs[term]))
        }
    # NOTE:
    # here we define all the possible
    # 'has' and 'in' relationships.
    # TODO:
    # we'll test later to see if this adds too much overhead
    for small_idx in range(1, max_level+1): # child
        for big_idx in range(small_idx + 1, max_level+1): # parent
            for pos_idx in range(big_idx - small_idx + 1): # position
                # `in` relationship
                hg[ # (source, relationship, destination)
                    (
                        "n%s" % small_idx,
                        "n%s_as_%s_in_n%s" % (small_idx, pos_idx, big_idx),
                        "n%s" % big_idx,
                    )
                ] = np.stack( # use `np.array` here but convert to list later
                    [
                        np.array(
                            [
                                idxs_to_ordering["n%s" % small_idx][tuple(x)]
                                for x in idxs["n%s" % big_idx][
                                    :, pos_idx : pos_idx + small_idx
                                ]
                            ]
                        ),
                        np.arange(idxs["n%s" % big_idx].shape[0]),
                    ],
                    axis=1,
                )
                # define the same for `has` relationship
                hg[
                    (
                        "n%s" % big_idx,
                        "n%s_has_%s_n%s" % (big_idx, pos_idx, small_idx),
                        "n%s" % small_idx,
                    )
                ] = np.stack(
                    [
                        np.arange(idxs["n%s" % big_idx].shape[0]),
                        np.array(
                            [
                                idxs_to_ordering["n%s" % small_idx][tuple(x)]
                                for x in idxs["n%s" % big_idx][
                                    :, pos_idx : pos_idx + small_idx
                                ]
                            ]
                        ),
                    ],
                    axis=1,
                )
    # Connect every hypernode of every level to the single global 'g' node.
    for term in ['n%s' % idx for idx in range(1, max_level+1)]:
        hg[
            (
                term,
                "%s_in_g" % term,
                "g",
            )] = np.stack(
                [
                    np.arange(len(idxs[term])),
                    np.zeros(len(idxs[term]))
                ],
                axis=1,
            )
        hg[
            (
                "g",
                "g_has_%s" % term,
                term
            )] = np.stack(
                [
                    np.zeros(len(idxs[term])),
                    np.arange(len(idxs[term])),
                ],
                axis=1,
            )
    # convert all to python `List`
    hg = dgl.heterograph({key: list(value) for key, value in hg.items()})
    # include indices in the nodes themselves
    for term in ["n%s" % level for level in range(1, max_level+1)]:
        hg.nodes[term].data["idxs"] = torch.tensor(idxs[term])
        # A subgraph whose first and last node coincide is a ring.
        hg.nodes[term].data["is_ring"] = torch.eq(
            hg.nodes[term].data["idxs"][:, 0],
            hg.nodes[term].data["idxs"][:, -1],
        )[:, None]
    # Copy the original node features onto the level-1 hypernodes.
    for key in g.ndata.keys():
        hg.nodes['n1'].data[key] = g.ndata[key]
    return hg
import re
def check_date_mention(tweet):
    """Check the tweet to see if there is a valid date mention for the
    three dates of pyconopenspaces: 5/11, 5/12, 5/13. Quick fix to override
    SUTime defaulting to today's date and missing numeric info about event's date

    :param tweet: tweet text.
    :return: single-element list with the date token when exactly one valid
        date is mentioned, otherwise False.
    """
    # Raw string avoids the needless "\/" escape; "5/\d{2}" matches the same
    # tokens as the original "([5]{1}\/\d{2})" (the "{1}" was redundant).
    date_pat = re.compile(r"5/\d{2}")
    valid_dates = {"5/11", "5/12", "5/13"}
    dates = [tok for tok in tweet.split() if date_pat.match(tok) and tok in valid_dates]
    return dates if len(dates) == 1 else False
import argparse
def parse(args):
    """[--starved <int>] [--control <int>] [--other <int>]"""
    parser = argparse.ArgumentParser()
    # (flag, default) pairs; registration order matches the usage string
    # expectations: control, other, starved.
    for flag, default in (('--control', 2), ('--other', 1), ('--starved', 0)):
        parser.add_argument(flag, metavar='level', type=int, default=default)
    return parser.parse_args(args)
def make_nointer_beta():
    """Make two random non-intersecting triangles in R^3 that pass the beta test.

    Triangle B spans a plane; triangle A has two corners straddling that
    plane above/below a point beyond B, and a third corner further out in
    the plane, so the triangles cannot intersect.  The pair is returned in
    random order.
    """
    # Corners of triangle B.
    v1, v2, v3 = np.random.random(3), np.random.random(3), np.random.random(3)
    # Two edges of B and the unit normal of its plane.
    e1 = v2 - v1
    e2 = v3 - v1
    normal = np.cross(e1, e2)
    normal /= np.linalg.norm(normal)
    # A point beyond B inside the cone of its edges.
    anchor = v1 + (0.5 + 0.5 * np.random.random()) * e1 + (0.5 + 0.5 * np.random.random()) * e2
    # Two corners of A straddle B's plane; the third lies further out in it.
    u1 = anchor + np.random.random() * normal
    u2 = anchor - np.random.random() * normal
    u3 = v1 + (1.5 + 0.5 * np.random.random()) * e1 + (1.5 + 0.5 * np.random.random()) * e2
    tri_a, tri_b = np.array([u1, u2, u3]), np.array([v1, v2, v3])
    # More fuzzing.
    if np.random.randint(2) == 1:
        tri_a, tri_b = tri_b, tri_a
    return tri_a, tri_b
def get_all_with_given_response(rdd, response='404'):
    """
    Return a rdd only with those requests
    that received the response code entered.
    Default set to '404'.

    The status code is assumed to be the second-to-last whitespace-separated
    token of each log line; malformed lines are filtered out.

    :param rdd: RDD of raw log-line strings.
    :param response: status code to keep, as a string.
    return type: pyspark.rdd.PipelinedRDD
    """
    def status_iterator(ln):
        # The previous bare `except: pass` silently returned None; narrowed
        # to the concrete failure modes (too few tokens / non-string input)
        # so real errors are no longer swallowed.
        try:
            return ln.split(' ')[-2] == response
        except (IndexError, AttributeError):
            return False
    return rdd.filter(status_iterator)
from typing import Optional
async def get_neighbourhood(postcode_like: PostCodeLike) -> Optional[Neighbourhood]:
    """
    Gets a police neighbourhood from the database.
    Acts as a middleware between us and the API, caching results.
    :param postcode_like: The UK postcode to look up.
    :return: The Neighbourhood or None if the postcode does not exist.
    :raises CachingError: If the needed neighbourhood is not in cache, and the fetch isn't responding.
    todo save locations/links
    """
    try:
        postcode = await get_postcode(postcode_like)
    except CachingError as e:
        raise e
    else:
        # Cache hit: the postcode already carries its neighbourhood.
        if postcode is None:
            return None
        elif postcode.neighbourhood is not None:
            return postcode.neighbourhood
    # Cache miss: fetch from the API by coordinates.
    try:
        data = await fetch_neighbourhood(postcode.lat, postcode.long)
    except ApiError as e:
        raise CachingError(f"Neighbourhood not in cache, and could not reach API: {e.status}")
    if data is not None:
        neighbourhood = Neighbourhood.from_dict(data)
        locations = [Location.from_dict(neighbourhood, postcode, location) for location in data["locations"]]
        links = [Link.from_dict(neighbourhood, link) for link in data["links"]]
        # Persist neighbourhood, postcode link, locations and links in one
        # transaction so the cache never ends up half-written.
        with Neighbourhood._meta.database.atomic():
            neighbourhood.save()
            postcode.neighbourhood = neighbourhood
            postcode.save()
            for location in locations:
                location.save()
            for link in links:
                link.save()
    else:
        neighbourhood = None
    return neighbourhood
def get_refl_weight(value, source_node):
    """Returns the reflection weight for Redshift Material.

    The weight is 1.0 when the reflection colour has any intensity or a
    reflection texture map is assigned, otherwise 0.0.

    :param value: reflection colour (exposes ``GetIntensity()``)
    :param source_node: material node holding the reflection texmap slot
    :return: 1.0 or 0.0
    """
    texmap = source_node.ParameterBlock.texmap_reflection.Value
    map_name = None
    try:
        map_name = texmap.GetName()
    except RuntimeError:
        # No map assigned: GetName() raises instead of returning None.
        pass
    if value.GetIntensity() > 0.0 or map_name is not None:
        return 1.0
    return 0.0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.