content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def add_gaussian_noise(images: list, var: list, random_var: float=None, gauss_noise: list=None):
    """
    Add gaussian noise to input images.

    If `random_var` and `gauss_noise` are supplied they are reused (so the same
    transform can be replayed on another image set); otherwise fresh values are
    drawn and returned for later reuse.

    :param images: list of H x W x C image arrays.
    :param var: (low, high) range the variance multiplier is drawn from when
        `random_var` is None.
    :param random_var: optional variance multiplier to reuse.
    :param gauss_noise: optional list of per-image noise arrays to reuse.
    :return: (noisy images, variance multiplier used, list of noise arrays used)
    """
    if random_var is None:
        random_var = np.random.uniform(var[0], var[1])
    noisy_images = []
    noise_used = []
    for idx, image in enumerate(images):
        rows, cols, channels = image.shape
        # Reuse provided noise only when its element count matches this image.
        reusable = (
            gauss_noise is not None
            and rows * cols * channels
            == gauss_noise[idx].shape[0] * gauss_noise[idx].shape[1] * gauss_noise[idx].shape[2]
        )
        if reusable:
            noise = gauss_noise[idx]
        else:
            # Sigma is scaled by 127.5 so random_var is relative to half the 8-bit range.
            noise = np.random.normal(0, random_var * 127.5, (rows, cols, channels))
        noise_used.append(noise)
        noise = noise.reshape(rows, cols, channels)
        noisy_images.append(np.clip(image + noise, 0., 255.))
    return noisy_images, random_var, noise_used
def get_sso_backend():
    """
    Return a SingleSignOnBackend instance built from the configured
    ``auth.sso_backend`` setting.
    """
    backend_name = cfg.CONF.auth.sso_backend
    return get_backend_instance(backend_name)
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
    """
    Convenience layer chaining convolution -> ReLU -> max pool.

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer
    - pool_param: Parameters for the pooling layer

    Returns a tuple of:
    - out: Output from the pooling layer
    - cache: Object to give to the backward pass
    """
    conv_out, conv_cache = conv_forward_fast(x, w, b, conv_param)
    relu_out, relu_cache = relu_forward(conv_out)
    out, pool_cache = max_pool_forward_fast(relu_out, pool_param)
    return out, (conv_cache, relu_cache, pool_cache)
def ConnectWithReader(readerName, mode):
    """Establish a user-scope smart-card context and connect to the named reader.

    :param readerName: name of the card reader to connect to.
    :param mode: sharing mode passed through to SCardConnect.
    :return: (hresult, hcontext, hcard) from the connect attempt.
    :raises EstablishContextException: if the context cannot be established.
    """
    hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
    if hresult != SCARD_S_SUCCESS:
        raise EstablishContextException(hresult)
    # Protocol negotiation result is not needed by callers, so it is dropped.
    hresult, hcard, _active_protocol = SCardConnect(
        hcontext, readerName, mode, SCARD_PROTOCOL_ANY)
    return hresult, hcontext, hcard
def interpolate(points: type_alias.TensorLike,
                weights: type_alias.TensorLike,
                indices: type_alias.TensorLike,
                normalize: bool = True,
                allow_negative_weights: bool = False,
                name: str = "weighted_interpolate") -> type_alias.TensorLike:
    """Weighted interpolation for M-D point sets.
    Given an M-D point set, this function can be used to generate a new point set
    that is formed by interpolating a subset of points in the set.
    Note:
    In the following, A1 to An, and B1 to Bk are optional batch dimensions.
    Args:
    points: A tensor with shape `[B1, ..., Bk, M]` and rank R > 1, where M is
        the dimensionality of the points.
    weights: A tensor with shape `[A1, ..., An, P]`, where P is the number of
        points to interpolate for each output point.
    indices: A tensor of dtype tf.int32 and shape `[A1, ..., An, P, R-1]`, which
        contains the point indices to be used for each output point. The R-1
        dimensional axis gives the slice index of a single point in `points`. The
        first n+1 dimensions of weights and indices must match, or be broadcast
        compatible.
    normalize: A `bool` describing whether or not to normalize the weights on
        the last axis.
    allow_negative_weights: A `bool` describing whether or not negative weights
        are allowed.
    name: A name for this op. Defaults to "weighted_interpolate".
    Returns:
    A tensor of shape `[A1, ..., An, M]` storing the interpolated M-D
    points. The first n dimensions will be the same as weights and indices.
    """
    with tf.name_scope(name):
        points = tf.convert_to_tensor(value=points)
        weights = tf.convert_to_tensor(value=weights)
        indices = tf.convert_to_tensor(value=indices)
        # Static shape validation: the last axis of `indices` must be a full
        # slice index into `points` (length R-1), and weights/indices must agree
        # on P and be broadcast-compatible over batch dimensions.
        shape.check_static(
            tensor=points, tensor_name="points", has_rank_greater_than=1)
        shape.check_static(
            tensor=indices,
            tensor_name="indices",
            has_rank_greater_than=1,
            has_dim_equals=(-1, points.shape.ndims - 1))
        shape.compare_dimensions(
            tensors=(weights, indices),
            axes=(-1, -2),
            tensor_names=("weights", "indices"))
        shape.compare_batch_dimensions(
            tensors=(weights, indices),
            last_axes=(-2, -3),
            tensor_names=("weights", "indices"),
            broadcast_compatible=True)
        if not allow_negative_weights:
            # open_bound=False: zero weights are permitted.
            weights = asserts.assert_all_above(weights, 0.0, open_bound=False)
        if normalize:
            # Normalize weights to sum to 1 along the last axis; a zero sum is
            # rejected because the division below would be undefined.
            sums = tf.reduce_sum(input_tensor=weights, axis=-1, keepdims=True)
            sums = asserts.assert_nonzero_norm(sums)
            weights = safe_ops.safe_signed_div(weights, sums)
        # Gather the P candidate points per output point, then take the
        # weighted sum over the P axis.
        point_lists = tf.gather_nd(points, indices)
        return vector.dot(
            point_lists, tf.expand_dims(weights, axis=-1), axis=-2, keepdims=False)
def main(
        argv,
        install_path=INSTALL_PATH, service_class=WindowsService,
        namespace='winservice.WindowsService'):
    """Method to handle direct server invocation.

    Delegates command-line handling to win32serviceutil using the fully
    qualified service class path built from install_path + namespace.
    """
    qualified_name = '%s%s' % (install_path, namespace)
    win32serviceutil.HandleCommandLine(service_class, qualified_name, argv=argv)
def calcADPs(atom):
    """Calculate anisotropic displacement parameters (ADPs) from
    anisotropic temperature factors (ATFs).

    *atom* must have ATF values set for ADP calculation.  ADPs are returned
    as a tuple, i.e. (eigenvalues, eigenvectors), ordered from largest to
    smallest eigenvalue.

    :raises TypeError: if *atom* is not an Atom instance.
    :raises ValueError: if *atom* has no anisotropic temperature factors."""
    linalg = importLA()
    if not isinstance(atom, Atom):
        # BUG FIX: '{0:s}'.format(type(atom)) raised TypeError itself, because
        # type objects do not support the 's' format spec; format the class
        # name instead so the intended error message is actually produced.
        raise TypeError('atom must be of type Atom, not {0}'
                        .format(type(atom).__name__))
    anisou = atom.getAnisou()
    if anisou is None:
        raise ValueError('atom does not have anisotropic temperature factors')
    # Build the symmetric 3x3 ADP tensor from the packed 6-element ATF vector,
    # assumed ordered (U11, U22, U33, U12, U13, U23) — standard PDB ANISOU
    # ordering; confirm against the Atom.getAnisou contract.
    element = zeros((3, 3))
    element[0, 0] = anisou[0]
    element[1, 1] = anisou[1]
    element[2, 2] = anisou[2]
    element[0, 1] = element[1, 0] = anisou[3]
    element[0, 2] = element[2, 0] = anisou[4]
    element[1, 2] = element[2, 1] = anisou[5]
    vals, vecs = linalg.eigh(element)
    # eigh returns eigenvalues in ascending order; reverse to descending.
    return vals[[2, 1, 0]], vecs[:, [2, 1, 0]]
def _create_groups(properties):
    """Create a tree of groups from a list of properties.

    Returns:
        Group: The root group of the tree. The name of the group is set to None.
    """
    # Stage 1: build a nested dictionary describing the tree.  Each dict is one
    # group; the None key holds the fields stored directly on that group, and
    # every other key maps a subgroup name to its own dict.  For example:
    #   {
    #       None: [field1, field2, ...],
    #       'groupA': {None: [field3]},
    #       'groupB': {None: [], 'groupC': {None: [field4]}},
    #   }
    # Stage 2: recursively convert that dictionary into a tree of Groups.
    # TODO(shend): Skip the first step by changing Group attributes into methods.
    root = {None: []}
    for property_ in properties:
        node = root
        if property_['field_group']:
            # Walk (creating as needed) the path 'a->b->c' down the tree.
            for group_name in property_['field_group'].split('->'):
                node = node.setdefault(group_name, {None: []})
        node[None].extend(_create_fields(property_))

    def _to_group(name, node):
        direct_fields = node.pop(None)
        children = [_to_group(child_name, child_node)
                    for child_name, child_node in node.items()]
        return Group(name, children, _reorder_fields(direct_fields))

    return _to_group(None, root)
def activate_move_mode(layer):
    """Activate move tool by switching *layer* into MOVE mode."""
    layer.mode = Mode.MOVE
def bitset(array, bits):
    """
    Determine whether any of the given bits are set in each array element.

    Input Parameters
    ----------------
    array : array
        A numpy array to search.
    bits : list or array
        A list or numpy array of bit positions to test.  The "first" bit is
        denoted as zero, the "second" bit as 1, etc.

    Returns
    --------
    array
        An array of the same size as `array`; an element is 1 if any of the
        requested bits are set in the corresponding element of `array`,
        otherwise 0.

    Procedure
    ---------
    Uses the Gumley IDL ishft technique: shift right by the bit position and
    mask the lowest bit, OR-ing the results over all requested bits.

    Example
    --------
    >>> bitset(np.array([3,4,1]),[0])
    array([1, 0, 1])

    Modification History
    --------------------
    2022-03-09 - Written by M. Cushing, University of Toledo.
                 Based on the mc_bitset.pro IDL program.
    """
    # Start with no bits flagged.
    mask = np.zeros_like(array, dtype=np.int8)
    for bit in bits:
        # (array >> bit) & 1 isolates the requested bit for every element.
        mask = mask | ((array >> bit) & 1)
    return mask
def test1():
    """ Test FancyHelloWorld variants 1-5.

    Each construction attempt either raises AttributeError (which is printed)
    or succeeds (and the created instance is printed).
    """
    pre = __name__ + "test1 :"
    print(pre, "test1")
    print(pre, "Let's test FancyHelloWorlds")
    print(pre, "FancyHelloWorld")
    try:
        hw = FancyHelloWorld(address="Helsinki", age=40)
    except AttributeError as e:
        print(e)
    else:
        print(hw)
    try:
        hw = FancyHelloWorld(person="Sampsa", address="Helsinki", age="40")
    except AttributeError as e:
        print(e)
    else:
        print(hw)
    try:
        hw = FancyHelloWorld(person="Sampsa")
    except AttributeError as e:
        print(e)
    else:
        print(hw)
    print(pre, "FancyHelloWorld2")
    # BUG FIX: the success branches below previously printed the stale `hw`
    # instead of the instance that was just created (hw2/hw3/hw4/hw5).
    try:
        hw2 = FancyHelloWorld2(address="Helsinki", age=40)
    except AttributeError as e:
        print(e)
    else:
        print(hw2)
    try:
        hw2 = FancyHelloWorld2(person="Sampsa", address="Helsinki", age="40")
    except AttributeError as e:
        print(e)
    else:
        print(hw2)
    try:
        hw2 = FancyHelloWorld2(person="Sampsa", address="Helsinki", age=40)
    except AttributeError as e:
        print(e)
    else:
        print(hw2)
    print(pre, "FancyHelloWorld3")
    try:
        hw3 = FancyHelloWorld3(person="Sampsa", subgreeter="x")
    except AttributeError as e:
        print(e)
    else:
        print(hw3)
    try:
        hw3 = FancyHelloWorld3(person="Sampsa", subgreeter=hw)
    except AttributeError as e:
        print(e)
    else:
        print(hw3)
    print(pre, "FancyHelloWorld4")

    class NameSpace:  # test object
        def __init__(self):
            self.person = "sampsa"
            self.age = "40"  # wrong attribute

    try:
        hw4 = FancyHelloWorld4(person="me", vague=NameSpace())
    except AttributeError as e:
        print(e)
    else:
        print(hw4)

    class NameSpace:  # test object
        def __init__(self):
            self.person = "sampsa"  # correct attributes
            self.age = 40

    try:
        hw4 = FancyHelloWorld4(person="me", vague=NameSpace())
    except AttributeError as e:
        print(e)
    else:
        print(hw4)
    print(pre, "FancyHelloWorld5")
    try:
        hw5 = FancyHelloWorld5(person="me", vague=None)
    except AttributeError as e:
        print(e)
    else:
        print(hw5)
def build_classifier_pipeline(
    input_files,
    output_files,
    config,
    use_fake_tables = False,
    converter_impl = ConverterImplType.PYTHON,
):
    """Pipeline for converting finetuning examples.

    Returns a Beam pipeline function that, for every (input_file, output_file)
    pair, reads interactions, optionally replaces tables with fakes, converts
    each interaction to classifier tf.train.Examples and writes them out.

    Raises:
        ValueError: if input_files and output_files differ in length.
    """
    if len(output_files) != len(input_files):
        raise ValueError(f'Size mismatch: {output_files} {input_files}')

    def _pipeline(root):
        """Pipeline."""
        for (input_file, output_file) in zip(input_files, output_files):
            # The base name is folded into every stage label so Beam stage
            # names stay unique across input files.
            name = os.path.basename(input_file)
            interactions = read_interactions(root, input_file, name)
            if use_fake_tables:
                interactions = (
                    interactions
                    | f'InsertFakeTable_{name}' >> beam.Map(insert_fake_table_fn))
            examples = (
                interactions
                | f'CheckTableId_{name}' >> beam.FlatMap(
                    pretrain_utils.check_table_id_fn)
                | f'AddNumericValues_{name}' >> beam.Map(
                    pretrain_utils.add_numeric_values_fn)
                | f'ToClassifierTensorflowExample_{name}' >> beam.ParDo(
                    ToClassifierTensorflowExample(
                        config,
                        name,
                        convert_impl_value=converter_impl.value,
                    )))
            pretrain_utils.write_proto_outputs(output_file, f'WriteExamples_{name}',
                                               examples, tf.train.Example)
    return _pipeline
def sample_gene_matrix(request, variant_annotation_version, samples, gene_list,
                       gene_count_type, highlight_gene_symbols=None):
    """Render an HTML matrix of per-sample gene value counts.

    Rows are phenotype headers (Age/HPO/OMIM) followed by gene labels
    (highlighted genes first); columns are samples.  Cell colors come from the
    gene value counts, cell text from counts and patient phenotype data.

    highlight_gene_symbols - put these genes 1st
    """
    # 19/07/18 - Plotly can't display a categorical color map. See: https://github.com/plotly/plotly.js/issues/1747
    # So just doing as HTML table
    if gene_list:
        genes = gene_list.get_genes(variant_annotation_version.gene_annotation_release)
        gene_symbols = set(gene_list.get_gene_names())
    else:
        # This was originally designed around a gene list, but now we need to support no gene list (only when uses
        # variant classifications)
        genes = []
        gene_symbols = []
        qs = gene_count_type.get_variant_queryset(variant_annotation_version)
        GS_PATH = "variantannotation__transcript_version__gene_version__gene_symbol"
        qs = qs.filter(**{GS_PATH + "__isnull": False})
        for gene, gene_symbol in qs.values_list("variantannotation__gene", GS_PATH).distinct():
            genes.append(gene)
            gene_symbols.append(gene_symbol)
    gene_values = list(gene_count_type.genevalue_set.all().order_by("id"))
    # Defaults used for cells with no data; overridden by the designated
    # "empty" gene value's color when exactly one is configured.
    default_color = "#d9d9d9"
    default_text = ""
    empty_gene_value = list(filter(lambda x: x.use_as_empty_value, gene_values))
    if len(empty_gene_value) == 1:
        default_color = empty_gene_value[0].rgb
    phenotypes = ["Age", "HPO", "OMIM"]
    highlight_gene_labels = []
    other_gene_labels = []
    gene_links_lookup = OrderedDict()
    # Build one label (link or span) per gene symbol; highlighted genes are
    # collected separately so they sort to the top of the matrix.
    for gene_symbol in sorted(gene_symbols):
        gene_classes_list = ["gene-label", gene_symbol]
        highlight = highlight_gene_symbols and gene_symbol in highlight_gene_symbols
        if highlight:
            gene_classes_list.append("highlight-gene")
        gene_classes = ' '.join(gene_classes_list)
        if request.user.is_authenticated:  # Only display links to logged in users
            url = reverse('view_gene_symbol', kwargs={"gene_symbol": gene_symbol})
            gene_symbol_text = f'<a class="{gene_classes}" href="{url}">{gene_symbol}</a>'
        else:
            gene_symbol_text = f"<span class='{gene_classes}'>{gene_symbol}</span>"
        if highlight:
            highlight_gene_labels.append(gene_symbol_text)
        else:
            other_gene_labels.append(gene_symbol_text)
        gene_links_lookup[gene_symbol] = gene_symbol_text
    matrix_rows = phenotypes + highlight_gene_labels + other_gene_labels
    # color_df holds per-cell background colors, text_df per-cell display text;
    # both are indexed by the rendered row labels.
    color_df = pd.DataFrame(index=matrix_rows, dtype='O')
    text_df = pd.DataFrame(index=matrix_rows)
    sample_names = []
    used_sample_names = set()
    for i, sample in enumerate(samples):
        try:
            can_access = False
            if request.user.is_authenticated:  # Only display links to logged in users
                try:
                    Sample.get_for_user(request.user, sample.pk)  # Throws exception
                    can_access = True
                except (Sample.DoesNotExist, PermissionDenied):
                    pass
            source = SampleAnnotationVersionVariantSource.objects.get(sample=sample,
                                                                      variant_annotation_version=variant_annotation_version)
            gvcc = GeneValueCountCollection.objects.get(source=source,
                                                        gene_count_type=gene_count_type)
            gvc_qs = gvcc.genevaluecount_set.filter(gene__in=genes)
            sample_code = "%03d" % i
            # Accessible samples get a named link (de-duplicated with the index
            # code); inaccessible ones get an anonymous "SNNN" label.
            if can_access:
                view_sample_url = reverse('view_sample', kwargs={'sample_id': sample.pk})
                sample_link = f'<a href="{view_sample_url}">{sample.name}</a>'
                if sample_link in used_sample_names:
                    uniq_sample_name = sample.name + "_" + sample_code
                    sample_link = f'<a href="{view_sample_url}">{uniq_sample_name}</a>'
                sample_name = sample_link
            else:
                sample_name = "S" + sample_code
            sample_names.append(sample_name)
            used_sample_names.add(sample_name)
            # Initialize the sample's column with defaults; phenotype header
            # rows get a white background.
            color_df[sample_name] = default_color
            color_df.loc["Age", sample_name] = '#FFFFFF'
            color_df.loc["HPO", sample_name] = '#FFFFFF'
            color_df.loc["OMIM", sample_name] = '#FFFFFF'
            text_df[sample_name] = default_text
            if sample.patient:
                try:
                    # Check you have Patient permissions
                    patient = Patient.get_for_user(request.user, sample.patient.pk)

                    def format_ontology(ontology_term):
                        return f"<div title='{ontology_term}'>{ontology_term.name}</div>"
                    hpo, omim = OntologyTerm.split_hpo_and_omim(patient.get_ontology_term_ids())
                    hpo_text = " ".join(map(format_ontology, hpo))
                    omim_text = " ".join(map(format_ontology, omim))
                    try:
                        # NOTE(review): bare except presumably guards against a
                        # missing specimen relation — confirm intent.
                        age = sample.specimen.age_at_collection_date
                    except:
                        age = None
                    text_df.loc["Age", sample_name] = age or ''
                    text_df.loc["HPO", sample_name] = hpo_text
                    text_df.loc["OMIM", sample_name] = omim_text
                except PermissionDenied:
                    pass
                except Patient.DoesNotExist:
                    pass
            # Fill in per-gene colors (and counts where the value asks to
            # display them) for this sample.
            FIELDS = ["gene__geneversion__gene_symbol", "value__rgb", "value__show_counts", "count"]
            for gene_symbol, rgb, show_counts, count in gvc_qs.values_list(*FIELDS):
                gene_link = gene_links_lookup[gene_symbol]
                color_df.loc[gene_link, sample_name] = rgb
                if show_counts:
                    text_df.loc[gene_link, sample_name] = count
        except (SampleAnnotationVersionVariantSource.DoesNotExist, GeneValueCountCollection.DoesNotExist):
            # Samples without counts for this annotation version are skipped.
            pass

    def set_style(s):
        # Style one text_df column: foreground is the inverted background color
        # so text stays readable on any cell color.
        color_series = color_df[s.name]
        styles = []
        for color in color_series:
            styles.append(f"color: {rgb_invert(color)}; background-color: {color};")
        return styles
    style = text_df.style.apply(set_style)
    style = style.set_table_attributes('class="sample-gene-matrix"')
    text_table_html = style.render()
    context = {"text_table_html": text_table_html,
               "gene_values": gene_values}
    return render(request, 'snpdb/patients/cohort_gene_counts_matrix.html', context)
def dcm2json(ctx, inpath, outpath):
    """Convert a DICOM file or directory of files at INPATH into
    dictionaries and save result in json format at OUTPATH.

    For a file input, OUTPATH may be a *.json/*.txt file or a directory; for a
    directory input, OUTPATH must be a directory (output files are written flat
    into it, named after each input file).

    :raises ValueError: if OUTPATH is not suitable for the given INPATH.
    """
    click.echo(click.style('Covert DICOM to JSON', underline=True, bold=True))

    def convert_file(infile: Path, outfile: Path):
        # Read the DICOM dataset; writing the tag dict out is still disabled
        # (see commented call), matching previous behavior.
        dixel = DcmDir().get(infile)
        # TextFileHandler().put(dixel.tags, outfile)

    _inpath = Path(inpath)
    _outpath = Path(outpath)
    if _inpath.is_file():
        fni = _inpath
        if _outpath.suffix in [".json", ".txt"]:
            # outpath is a file
            fno = _outpath
        elif _outpath.is_dir():
            # outpath is a dir: reuse the input file's stem
            fno = _outpath / _inpath.with_suffix(".json").name
        else:
            raise ValueError("Output path must be *.json/*.txt or a directory")
        convert_file(fni, fno)
    elif _inpath.is_dir():
        if not _outpath.is_dir():
            raise ValueError("Output path must be a directory for a directory input path")
        # BUG FIX: Path.glob() already yields paths anchored at _inpath, so the
        # previous `_inpath / fn` duplicated the directory prefix; likewise the
        # output name must use only the file name, not the whole input path.
        for fni in _inpath.glob("*.dcm"):
            fno = _outpath / fni.with_suffix(".json").name
            convert_file(fni, fno)
def validate_yaml_online(data, schema_uri=None):
    """
    Validates the given data structure against an online
    schema definition provided by schema_uri.
    If schema_uri is not given, we try to get it from
    the 'descriptor_schema' field in 'data'.
    Falls back to a local NAPD schema file when the download fails.
    Returns: True/False
    """
    if schema_uri is None:
        # try to get schema_uri from data
        schema_uri = data.get("descriptor_schema", None)
        if schema_uri is None:
            LOG.error("Cannot find URI pointing to schema.")
            return False
    path = None  # local fallback path, kept for error reporting
    schema = None
    try:
        # try to download schema
        r = requests.get(schema_uri, timeout=3)
        # try to parse schema
        # BUG FIX: use safe_load — yaml.load without an explicit Loader is
        # deprecated and unsafe on untrusted remote input.
        schema = yaml.safe_load(r.text)
    except Exception as e:
        # was BaseException: that also swallowed KeyboardInterrupt/SystemExit
        LOG.warning("Couldn't fetch schema from '{}': {}".format(
            schema_uri, e))
        # ok, no internet? lets try to use a local NAPD schema
        try:
            path = os.path.join(
                os.path.expanduser("~"),
                ".tng-schema/package-specification/napd-schema.yml")
            LOG.info("Using local schema: {}".format(path))
            with open(path, "r") as f:
                schema = yaml.safe_load(f)
        except Exception as e:
            LOG.error("Get schema from '{}' or '{}': {}".format(
                schema_uri, path, e))
            return False
    try:
        if schema is None:
            # was: raise BaseException(...) with a typo'd message ("offile")
            raise ValueError("No schema found online and offline")
        # validate data against schema
        validate(data, schema)
    except Exception as e:
        LOG.error("Couldn't validate against schema from '{}': {}".format(
            schema_uri, e))
        return False
    return True
def normal218(startt,endt,money2,first,second,third,forth,fifth,sixth,seventh,zz1,zz2,bb1,bb2,bb3,aa1,aa2):
    """
    Append one synthetic non-fraudulent 'transfer' transaction to the running
    column lists and return them.

    Source id is drawn from zz2 and destination id from bb2; the amount is a
    random value in [50000, money2); the timestamp is a random date between
    startt and endt.  The fraud label is "0" and the fraud type "none".
    """
    # source and destination id generation
    payer_id = random.choice(zz2)
    payee_id = random.choice(bb2)
    # type of banking work, label of fraud and type of fraud
    first.append("transfer")
    second.append(payer_id)
    third.append(payee_id)
    sixth.append("0")
    seventh.append("none")
    # amount of money generation
    forth.append(random.randrange(50000, money2))
    # date and time generated randomly between the two dates
    fifth.append(randomDate(startt, endt, random.random()))
    return (first, second, third, forth, fifth, sixth, seventh)
def get_fast_loaders(dataset, batch_size, test, device, data_path=None, train_transform=None, validation_transform=None,
                     train_percentage=0.85, workers=4):
    """Return :class:`FastLoader` for training and validation, outfitted with a random sampler.
    If set to run on the test set, :param:`train_percentage` will be ignored and set to 1.
    The transforms should only include operations on PIL images and should not convert the images to a tensor, nor
    handle normalization of the tensors. This is handled at runtime by the fast loaders.
    If you are not looking for high-performance, prefer :func:`get_loaders`.
    :param dataset: name of the dataset, (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, ImageNette, ImageWoof, ImageNet)
        are available.
    :type dataset: str
    :param batch_size: batch size for training and validation.
    :type batch_size: int
    :param test: run validation on the test set.
    :type test: bool
    :param data_path: path to folder containing dataset.
    :type data_path: str
    :param train_transform: PyTorch transform to apply to images for training.
    :type train_transform: torchvision.transforms.Compose
    :param validation_transform: PyTorch transform to apply to images for validation.
    :type validation_transform: torchvision.transforms.Compose
    :param train_percentage: percentage of the data in the training set.
    :type train_percentage: float
    :param workers: number of subprocesses to use for data loading. Use 0 for loading in the main process.
    :type workers: int
    :return: training and validation fast data loaders.
    :rtype: (FastLoader, FastLoader)
    """
    # Check if any parameters has been set to its default value, and if so, setup the defaults.
    data_path, train_transform, validation_transform = _setup_defaults(dataset, data_path, train_transform,
                                                                      validation_transform, fast=True)
    # Get all of the training data available.
    train_data = _get_train_data(dataset, data_path, train_transform)
    log.log("Training data succesfully fetched!", LOGTAG, log.Level.DEBUG)
    if not test:
        # Perform a train/validation split on the training data available:
        # For performance reasons, the train/validation split will always be the same.
        # TODO: Implement random train/validation split with fast loading and distributed training.
        log.log("Running in standard training/validation mode.", LOGTAG, log.Level.INFO)
        dataset_size = len(train_data)
        split_index = int(dataset_size * train_percentage)
        log.log("{0}:{1}".format(dataset_size, split_index), LOGTAG, log.Level.HIGHLIGHT)
        # NOTE(review): this slicing assumes `train_data` supports slice
        # indexing (lists/arrays do; torchvision Dataset objects generally
        # do not) — confirm for every dataset _get_train_data can return.
        validation_data = train_data[split_index:]
        train_data = train_data[:split_index]
        log.log("Validation data succesfully fetched!", LOGTAG, log.Level.DEBUG)
    else:
        # Fetch the test data:
        log.log("Running in <b>test</b> mode. All training data available will be used, and "
                "validation will be done on the test set. Are you really ready to publish?", LOGTAG, log.Level.WARNING)
        validation_data = _get_test_data(dataset, data_path, validation_transform)
        log.log("Test data succesfully fetched!", LOGTAG, log.Level.DEBUG)
    if distributed.is_initialized():
        # If running in distributed mode, use a DistributedSampler:
        log.log("Running in <b>distributed</b> mode. This hasn't been thoroughly tested, beware!",
                LOGTAG, log.Level.WARNING)
        train_sampler = data_utils.distributed.DistributedSampler(train_data)
    else:
        # Otherwise, default to a RandomSampler:
        train_sampler = data_utils.RandomSampler(train_data)
    # Build the train and validation loaders, using pinned memory and a custom collate function to build the batches.
    train_loader = data_utils.DataLoader(train_data, batch_size=batch_size, num_workers=workers, pin_memory=True,
                                         sampler=train_sampler, collate_fn=_fast_collate, drop_last=True)
    log.log("Train loader succesfully created!", LOGTAG, log.Level.DEBUG)
    validation_loader = data_utils.DataLoader(validation_data, batch_size=batch_size, num_workers=workers,
                                              pin_memory=True, collate_fn=_fast_collate)
    log.log("Validation loader succesfully created!", LOGTAG, log.Level.DEBUG)
    # Wrap the PyTorch loaders in the custom FastLoader class and feed it the normalization parameters associated
    # with the dataset.
    return FastLoader(train_loader, device, *NORMALIZATION[DATASETS[dataset]]), \
           FastLoader(validation_loader, device, *NORMALIZATION[DATASETS[dataset]])
def find_paths(root: TreeNode, required_sum: int) -> List[List[int]]:
    """
    Collect every root-to-leaf path whose node values sum to 'S'.

    Time Complexity: O(N^2)
    Space Complexity: O(N)

    Parameters
    ----------
    root : TreeNode
        Input binary tree.
    required_sum : int
        Input number 'S'.

    Returns
    -------
    all_paths : List[List[int]]
        All paths from root-to-leaf such that the sum of all the node values of each path equals 'S'.
    """
    all_paths = []

    def _walk(node: TreeNode, remaining: int, trail: List[int]):
        if not node:
            return
        trail.append(node.val)
        # A path is recorded only at a leaf whose value consumes the
        # remaining sum exactly.
        if node.val == remaining and not node.left and not node.right:
            all_paths.append(trail.copy())
        else:
            _walk(node.left, remaining - node.val, trail)
            _walk(node.right, remaining - node.val, trail)
        # Backtrack so sibling branches see the path up to the parent.
        trail.pop()

    _walk(root, required_sum, [])
    return all_paths
def test_prepareHostConfig(settings, detectSystemDevices):
    """
    Test paradrop.core.config.hostconfig.prepareHostConfig

    Writes a minimal YAML host config to a temp file and checks that
    prepareHostConfig loads it.
    """
    from paradrop.core.config.hostconfig import prepareHostConfig

    devices = {
        'wan': [{'name': 'eth0'}],
        'lan': list(),
        'wifi': list()
    }
    detectSystemDevices.return_value = devices

    # BUG FIX: NamedTemporaryFile defaults to binary mode, so writing a str
    # raised TypeError; open the file in text mode instead.
    source = tempfile.NamedTemporaryFile(mode="w", delete=True)
    source.write("{test: value}")
    source.flush()

    settings.HOST_CONFIG_FILE = source.name
    settings.DEFAULT_LAN_ADDRESS = "1.2.3.4"
    settings.DEFAULT_LAN_NETWORK = "1.0.0.0/24"

    config = prepareHostConfig()
    assert config['test'] == 'value'
def macro_bank_switzerland_interest_rate():
    """
    瑞士央行利率决议报告, 数据区间从20080313-至今
    https://datacenter.jin10.com/reportType/dc_switzerland_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v=1578582240
    :return: 瑞士央行利率决议报告-今值(%)
    :rtype: pandas.Series
    """
    t = time.time()
    # BUG FIX: the cache-buster previously interpolated a *tuple* of two
    # timestamps into the URL (producing "('...', '...')"); send a single
    # millisecond timestamp instead.
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v={int(round(t * 1000))}"
    )
    # The .js payload wraps a JSON object; slice out the braces before parsing.
    json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["瑞士央行利率决议报告"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["今值(%)"]
    # Fetch the most recent values from the datacenter API and append them to
    # the historical series.
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "ec",
        "attr_id": "25",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    # BUG FIX: Series.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported equivalent.
    temp_df = pd.concat([temp_df, temp_se])
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "switzerland_interest_rate"
    temp_df = temp_df.astype("float")
    return temp_df
def create_dataframe(message):
    """Create a Pandas DataFrame (plus dropdown options) from CSV.

    :param message: CSV source accepted by ``pd.read_csv`` (path or buffer);
        an empty string means "no data".
    :return: tuple ``(df, dropdowns)`` where ``dropdowns`` is a list of
        ``{"label", "value"}`` dicts, one per column.  When there is no data,
        an empty DataFrame and an empty list are returned.
    """
    dropdowns = []
    df = pd.DataFrame()
    if message != "":
        df = pd.read_csv(message)
        df = df.sample(n=50, random_state=2)  # reducing Data Load Running on Heroku Free !!!!
    if len(df) == 0:
        # BUG FIX: this branch previously returned a bare DataFrame while the
        # normal path returns (df, dropdowns); keep the return shape uniform.
        return pd.DataFrame(), []
    df.insert(0, "Index", df.index)
    for column in df.columns:
        dropdowns.append({"label": column, "value": column})
    return df, dropdowns
def test_str_special():
    """Test type of __str__ method results."""
    # NOTE(review): relies on a module-level object `m` and nose-style
    # assert_true defined elsewhere in the file; str() must yield a native str.
    s = str(m)
    assert_true(type(s) is str)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the HomematicIP Cloud weather sensor.

    Intentionally a no-op — presumably entities are created through the
    config-entry flow instead of legacy platform setup; confirm upstream.
    """
    pass
def load_model(path):
    """
    Load a model from file, dispatching on the class name found on the
    file's first line.

    :param path: path to the model file
    :return: a SersicModel, ExponentialDiskModel or DeprojectionModel instance
    :raises ValueError: if the first line names no known model type
    """
    # Peek at the header line to decide which class should parse the file
    with open(path, 'r') as model_file:
        header = model_file.readline()
    # Checked in the same order as before; marker strings are disjoint.
    for marker, model_class in (("SersicModel", SersicModel),
                                ("ExponentialDiskModel", ExponentialDiskModel),
                                ("DeprojectionModel", DeprojectionModel)):
        if marker in header:
            return model_class.from_file(path)
    raise ValueError("Unrecognized model file")
def add_metaclass(metaclass):
    """
    Class decorator for creating a class with a metaclass.
    Borrowed from `six` module.

    The decorated class is re-created through `metaclass`, with slot-backed
    attributes and the implicit __dict__/__weakref__ descriptors stripped from
    the class body so they are regenerated correctly.
    """
    @functools.wraps(metaclass)
    def wrapper(cls):
        body = dict(cls.__dict__)
        declared_slots = body.get('__slots__')
        if declared_slots is not None:
            # A bare string means a single slot name.
            slot_names = [declared_slots] if isinstance(declared_slots, str) else declared_slots
            for slot_name in slot_names:
                body.pop(slot_name)
        # These descriptors are created automatically by type(); carrying the
        # old ones over would be incorrect.
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def setup_logging():
    """Configure the logger: INFO level, no propagation, stdout handler."""
    log.propagate = False
    log.setLevel(logging.INFO)
    # Emit "[LEVEL] message" lines on stdout
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.INFO)
    stdout_handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
    log.addHandler(stdout_handler)
def iter_repl(seq, subseq, repl):
    """
    Replace sub-list `subseq` in `seq` with `repl`, yielding elements lazily.

    Matches are non-overlapping and consumed left to right.  An empty
    `subseq` yields `seq` unchanged (the previous implementation looped
    forever in that case).

    :param seq: iterable to scan.
    :param subseq: iterable giving the sub-sequence to replace.
    :param repl: iterable yielded in place of each match.
    """
    seq, subseq, repl = list(seq), list(subseq), list(repl)
    subseq_len = len(subseq)
    if subseq_len == 0:
        # BUG FIX: an empty subseq previously matched at position 0 forever.
        yield from seq
        return
    # Index-based scan: O(n*m) comparisons, no O(n) list copies per step.
    i = 0
    n = len(seq)
    while i < n:
        if seq[i:i + subseq_len] == subseq:
            yield from repl
            i += subseq_len
        else:
            yield seq[i]
            i += 1
def test_checkpoint_horovod(tmpdir, ray_start_4_cpus):
    """Tests if Tune checkpointing works with HorovodRayAccelerator."""
    # Two workers on a single host, CPU only; the shared checkpoint_test
    # helper drives the actual train/checkpoint/restore cycle.
    accelerator = HorovodRayAccelerator(
        num_hosts=1, num_slots=2, use_gpu=False)
    checkpoint_test(tmpdir, accelerator)
def max_pool_nd_inverse(layer, relevance_in : torch.Tensor, indices : torch.Tensor = None,
                        max : bool = False) -> torch.Tensor :
    """
    Inversion of an N-D max pooling layer (relevance redistribution).
    (BUG FIX: the docstring previously claimed this inverts LogSoftmax.)

    Arguments
    ---------
    layer :
        The pooling layer being inverted; expected to carry `indices`,
        `out_shape` and `in_shape` recorded during the forward pass.
    relevance_in : torch.Tensor
        Input relevance
    indices : torch.Tensor
        Maximum feature indexes obtained when max pooling; defaults to
        `layer.indices` when omitted
    max : bool
        Implement winner takes all scheme in relevance re-distribution

    Returns
    -------
    torch.Tensor
        Output relevance
    """
    if indices is None :
        indices = layer.indices
    out_shape = layer.out_shape
    bs = relevance_in.size(0)
    # Reshape each sample's relevance to the pooled output shape, then tile
    # the pooling indices across the batch.
    relevance_in = torch.cat([r.view(out_shape) for r in relevance_in ], dim=0)
    indices = torch.cat([indices] * bs, dim=0)
    # BUG FIX: winner_takes_all previously received `layer.indices`, ignoring
    # both a caller-supplied `indices` argument and the batch tiling above.
    return ( winner_takes_all(relevance_in, layer.in_shape, indices)
             if max else relevance_in )
def write_done():
    """
    Tells the user that we're done with the previous action.
    Does not print to the logger.
    """
    if LOG_ONLY:
        return
    global _LEVEL, _LAST_CLOSED
    _LEVEL -= 1
    # If there have been actions in between, indent to the current depth
    if _LAST_CLOSED > _LEVEL:
        sys.stdout.write("\t" * _LEVEL)
    sys.stdout.write("done!\n")
    _LAST_CLOSED = _LEVEL
    sys.stdout.flush()
def aroon_up(close, n=25, fillna=False):
    """Aroon Indicator (AI)

    Identify when trends are likely to change direction (uptrend).
    Aroon Up - ((N - Days Since N-day High) / N) x 100
    https://www.investopedia.com/terms/a/aroon.asp

    Args:
        close(pandas.Series): dataset 'Close' column.
        n(int): n period.
        fillna(bool): if True, fill nan values.

    Returns:
        pandas.Series: New feature generated.
    """
    indicator = AroonIndicator(close=close, n=n, fillna=fillna)
    return indicator.aroon_up()
def exists():
    """real_abs.c exists"""
    # check50 check: the submission must contain this source file.
    check50.exists("real_abs.c")
def hash_file(filename):
    """
    Computes hash value of file contents, to simplify pytest assert statements
    for complex test cases that output files. For cross-platform compatibility,
    make sure files are read/written in binary, and use unix-style line
    endings, otherwise hashes will not match despite content being same in
    ASCII.

    Args:
        filename: path of the file to hash.
    Returns:
        SHA-512 hex digest string of the file contents.
    Raises:
        FileNotFoundError: if `filename` does not name an existing file.
        (Was a bare Exception; FileNotFoundError is more precise and is still
        an Exception subclass, so existing handlers keep working.)
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError("File not found for hash operation")
    digest = hashlib.sha512()
    # Read in binary chunks so very large files are not loaded whole.
    with open(filename, "rb") as file:
        for chunk in iter(lambda: file.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
    """
    Computes the forward pass for spatial batch normalization.

    Inputs:
    - x: Input data of shape (N, C, H, W)
    - gamma: Scale parameter, of shape (C,)
    - beta: Shift parameter, of shape (C,)
    - bn_param: Dictionary with the following keys:
      - mode: 'train' or 'test'; required
      - eps: Constant for numeric stability
      - momentum: Constant for running mean / variance. momentum=0 means that
        old information is discarded completely at every time step, while
        momentum=1 means that new information is never incorporated. The
        default of momentum=0.9 should work well in most situations.
      - running_mean: Array of shape (D,) giving running mean of features
      - running_var Array of shape (D,) giving running variance of features

    Returns a tuple of:
    - out: Output data, of shape (N, C, H, W)
    - cache: list of vanilla-batchnorm caches, one per channel
    """
    N, C, H, W = x.shape
    out = np.zeros((N, C, H, W))
    cache = []
    # Each channel's (N, H*W) slice is just a vanilla batchnorm problem:
    # normalize per channel, then restore the spatial shape.
    for channel in range(C):
        flat = x[:, channel, :, :].reshape(N, H * W)
        normed, channel_cache = batchnorm_forward(
            flat, gamma[channel], beta[channel], bn_param)
        out[:, channel, :, :] = normed.reshape(N, H, W)
        cache.append(channel_cache)
    return out, cache
def load_sheet_2(workbook, db_name, collection_name, mongo=False, startrow=32, startcol='M', lastcol='Y'):
    """Load sheet index 2 of an Excel workbook and store it in a database.

    Reads columns ``startcol``:``lastcol`` with the header at ``startrow``,
    renames the columns to a fixed contaminant-risk schema, then loads the
    frame into MongoDB (when ``mongo`` is True) or SQLite.

    NOTE(review): ``sheetname`` and ``parse_cols`` are pre-0.21 pandas
    spellings (now ``sheet_name`` / ``usecols``) -- this assumes an old
    pandas pin; confirm before upgrading the dependency.
    """
    columns = ['parameter', 'cas', 'contaminant', 'cancer_risk_residential', 'cancer_risk_ci', 'cancer_risk_workers', 'hard_quotient', 'metal', 'volatile', 'persistent', 'modeled_koc', 'code', 'notes']
    df = pandas.read_excel(
        workbook,
        sheetname=2,
        header=startrow-1,
        parse_cols="{}:{}".format(startcol,lastcol),
    )
    df.columns = columns
    logging.debug(df.head())
    if mongo:
        mongo_load(df, db_name, collection_name)
    else:
        sqlite_load(df, db_name, collection_name, columns)
def triangulate(points):
    """Triangulate the polygon outlined by ``points`` for operation and
    visualization.

    Args:
        points: sequence of 2D vertex coordinates of a closed boundary.
    Returns:
        The triangulated pymesh mesh.
    """
    num_points = len(points)
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement and produces the same dtype.
    indices = np.arange(num_points, dtype=int)
    # Segment i connects vertex i to vertex i+1, wrapping around to close
    # the boundary loop.
    segments = np.vstack((indices, np.roll(indices, -1))).T
    tri = pymesh.triangle()
    tri.points = np.array(points)
    tri.segments = segments
    tri.verbosity = 0
    tri.run()
    return tri.mesh
def immutable():
    """ Get group 1. """
    # Fixed whitelist of allowed keys; wrapped in ImmutableDict so callers
    # cannot mutate it after creation.
    allowed_values = {'NumberOfPenguins', 'NumberOfSharks'}
    return ImmutableDict(allowed_values)
def traceroute_udp(addr, hops, q, timeout, ttl, wait, port): # {{{1
    """Generator tracing the route to ``addr`` using UDP probes.

    Opens a raw UDP socket, delegates to the generic traceroute driver with
    the UDP probe callbacks, and guarantees the socket is closed even if
    iteration is abandoned early.
    """
    sock = S.socket(S.AF_INET, S.SOCK_RAW, S.IPPROTO_UDP)
    try:
        yield from traceroute(send_probe_udp, recv_probe_udp, [sock], addr,
                              hops, q, timeout, ttl, wait, port)
    finally:
        sock.close()
    # }}}1
def parse(meta, recipe, args):
    """Parse metadata and yield the document header, content, and footer
    chunks, to be written into a file consecutively.

    Args:
        meta: document metadata mapping (uses 'title' and 'authors').
        recipe: object with header/content/appendix/bib attributes.
        args: parsed CLI options (cls, cname, anonymous, opts, include).
    Yields:
        str chunks of the rendered document, in output order.
    """
    clsmodule = cls_module(args.cls)
    ## HEADER
    yield COMMENT.format(
        now = datetime.now().strftime("%d-%m-%Y %H:%M:%S"),
        title = meta['title'].upper(),
        authors = authors_list(meta['authors'], short = True))
    yield clsmodule.header(
        meta,
        cname=args.cname,
        anonymous=args.anonymous,
        classoptions=args.opts,
        include=recipe.header)
    ## BODY
    # Either inline file contents (--include) or emit \input commands.
    _include = include if args.include else \
        lambda fn, end: render_command('input', fn) + end
    for filename in recipe.content:
        yield _include(filename, '\n')
    if recipe.appendix:
        # BUG FIX: the original did `body += '\n\\appendix\n\n'` but `body`
        # was never defined, so any recipe with an appendix raised
        # NameError. Yield the chunk like every other piece of output.
        yield '\n\\appendix\n\n'
        for filename in recipe.appendix:
            yield include(filename, '\n')
    ## FOOTER
    yield clsmodule.footer(meta, recipe.bib)
    return
def test_plot_colors_sizes_proj(data, region):
    """
    Plot the data using z as sizes and colors with a projection.
    """
    fig = Figure()
    fig.coast(region=region, projection="M15c", frame="af", water="skyblue")
    # Column 2 (z) drives both the symbol color and the symbol size.
    x_vals, y_vals, z_vals = data[:, 0], data[:, 1], data[:, 2]
    fig.plot(
        x=x_vals,
        y=y_vals,
        color=z_vals,
        size=0.5 * z_vals,
        style="cc",
        cmap="copper",
    )
    return fig
def test_get_buffer_mode():
    """
    Ensures that strings passed to get_buffer_mode are properly handled.
    """
    # (input, expected) pairs: None/empty defaults, valid values, and a
    # case-insensitivity check.
    cases = [
        ('', BufferMode.REJECTONCONTENTS),
        (None, BufferMode.REJECTONCONTENTS),
        ('rejectoncontents', BufferMode.REJECTONCONTENTS),
        ('append', BufferMode.APPEND),
        ('replace', BufferMode.REPLACE),
        ('ReJEcTOnConTenTs', BufferMode.REJECTONCONTENTS),
    ]
    for value, expected in cases:
        assert ConfigdocsHelper.get_buffer_mode(value) == expected
    # Unrecognized values map to None.
    assert ConfigdocsHelper.get_buffer_mode('hippopotomus') is None
def predict_to_score(predicts, num_class):
    """
    Collapse per-class probabilities into a single weighted score per row.

    Checked: the last column is for class 0.
    ===
    Example: score=1.2, num_class=3 (for 0-2)
    (0.8, 0.2, 0.0) * (1, 2, 0)

    :param predicts: array of shape (N, num_class); column i-1 holds class i.
    :param num_class: number of classes.
    :return: weighted score for each row.
    """
    scores = 0.
    # Class index i weights column i-1 (so i=0 pairs with the last column
    # and contributes nothing).
    for class_idx in range(num_class):
        scores += class_idx * predicts[:, class_idx - 1]
    return scores
def quasi_diagonalize(link):
    """Sort clustered assets by distance (quasi-diagonalization).

    Walks a scipy-style linkage matrix top-down, repeatedly replacing each
    cluster id by its two children until only original item indices remain.

    :param link: linkage matrix of shape (n-1, 4).
    :return: list of original item indices in quasi-diagonal order.
    """
    link = link.astype(int)
    sort_idx = pd.Series([link[-1, 0], link[-1, 1]])
    num_items = link[-1, 3]  # number of original items
    # Ids >= num_items are clusters that still need expanding.
    while sort_idx.max() >= num_items:
        sort_idx.index = list(range(0, sort_idx.shape[0] * 2, 2))  # make space
        df0 = sort_idx[sort_idx >= num_items]  # find clusters
        i = df0.index
        j = df0.values - num_items
        sort_idx[i] = link[j, 0]  # item 1
        df0 = pd.Series(link[j, 1], index=i + 1)
        # FIX: Series.append was removed in pandas 2.0; pd.concat is the
        # drop-in replacement (item 2).
        sort_idx = pd.concat([sort_idx, df0])
        sort_idx = sort_idx.sort_index()  # re-sort
        sort_idx.index = list(range(sort_idx.shape[0]))  # re-index
    return sort_idx.tolist()
def dataset_files(rootdir, pattern):
    """Return every file under ``rootdir`` whose name matches ``pattern``
    (fnmatch-style glob), walking subdirectories recursively."""
    return [
        os.path.join(parent, name)
        for parent, _dirs, names in os.walk(rootdir)
        for name in fnmatch.filter(names, pattern)
    ]
def keyword(variable):
    """
    Verify that the field name is not a backend-reserved SQL/NOSQL keyword
    and is a valid, non-reserved Python identifier.

    :param variable: String field name to validate.
    :return: Confirmation message when the name is acceptable.
    :raises SyntaxError: when the name is reserved or invalid.
    """
    upper_name = variable.upper()
    for backend, reserved in ADAPTERS.items():
        if upper_name in reserved:
            msg = (
                f'Variable "{variable}" is a "{backend.upper()}" '
                f"reserved SQL/NOSQL keyword"
            )
            raise SyntaxError(msg)
    if not VALID_TABLE_FIELD.match(variable) or PYTHON_KEYWORDS.match(variable):
        raise SyntaxError(f"Field: invalid field name: {variable}")
    return f"{variable} isn't a known keyword"
def cli():
    """Historical commandline for managing historical functions."""
    # Intentionally empty: acts as the root command group; subcommands are
    # registered on it elsewhere.
    pass
def gcp_api_main(request):
    """Responds to any HTTP request.
    Args:
        request (flask.Request): HTTP request object.
    Returns:
        The response text or any set of values that can be turned into a
        Response object using
        `make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
    """
    logging.basicConfig(level=logging.INFO)
    try:
        request_json = request.get_json()
        # Echo back a 'message' passed via query string or JSON body.
        if request.args and 'message' in request.args:
            return request.args.get('message')
        elif request_json and 'message' in request_json:
            return request_json['message']
        elif request_json and 'stock_data' in request_json and 'name' in request_json:
            # NOTE(review): the guard checks 'stock_data'/'name' but the body
            # reads 'model_input'; a request passing the guard without a
            # 'model_input' key raises KeyError. Confirm the intended schema.
            logging.info('run_fb_prophet')
            # NaN is not valid JSON; replace with a placeholder string.
            return json.dumps(
                FBProphetThread.run_fb_prophet(
                    json.dumps(request_json['model_input']))).replace('NaN', '"-"')
        else:
            return f'Hello World!'
    except Exception as ex:
        # Return the error text (HTTP 200) rather than propagate a 500.
        err_msg = 'Generated an exception: {ex}'.format(ex=ex)
        logging.error(err_msg)
        return err_msg
def box_area_3d(boxes: Tensor) -> Tensor:
    """
    Computes the volume of a set of 3D bounding boxes specified by their
    (x1, y1, x2, y2, z1, z2) coordinates.

    Arguments:
        boxes (Union[Tensor, ndarray]): boxes for which the volume will be
            computed, in (x1, y1, x2, y2, z1, z2) format. [N, 6]
    Returns:
        area (Union[Tensor, ndarray]): volume for each box [N]
    """
    width = boxes[:, 2] - boxes[:, 0]
    height = boxes[:, 3] - boxes[:, 1]
    depth = boxes[:, 5] - boxes[:, 4]
    return width * height * depth
def set_device_parameters(request):
    """Set up the class."""
    def fin():
        # Close the device connection opened by the test class when the
        # fixture is torn down.
        request.cls.device.close()
    request.addfinalizer(fin)
    # Attach the FTOS driver classes and vendor tag, then delegate the
    # shared setup to the parent conftest helper.
    request.cls.driver = ftos.FTOSDriver
    request.cls.patched_driver = PatchedFTOSDriver
    request.cls.vendor = 'ftos'
    parent_conftest.set_device_parameters(request)
def mock_engine(dialect_name=None):
    """Provides a mocking engine based on the current testing.db.

    This is normally used to test DDL generation flow as emitted
    by an Engine.

    It should not be used in other cases, as assert_compile() and
    assert_sql_execution() are much better choices with fewer
    moving parts.
    """
    from sqlalchemy import create_mock_engine

    dialect_name = dialect_name or config.db.name
    captured = []

    def executor(sql, *a, **kw):
        # Record each statement instead of executing it.
        captured.append(sql)

    def assert_sql(stmts):
        normalized = [re.sub(r"[\n\t]", "", str(s)) for s in captured]
        assert normalized == stmts, normalized

    def print_sql():
        dialect = engine.dialect
        return "\n".join(str(s.compile(dialect=dialect)) for s in engine.mock)

    engine = create_mock_engine(dialect_name + "://", executor)
    assert not hasattr(engine, "mock")
    engine.mock = captured
    engine.assert_sql = assert_sql
    engine.print_sql = print_sql
    return engine
def get_feedback_thread_reply_info_by_reply_to_id(reply_to_id):
    """Gets the domain object corresponding to the model which is fetched by
    reply-to-id field.

    Args:
        reply_to_id: str. The reply_to_id to search for.

    Returns:
        FeedbackThreadReplyInfo or None. The corresponding domain object.
    """
    model = email_models.GeneralFeedbackEmailReplyToIdModel.get_by_reply_to_id(
        reply_to_id)
    # Propagate "no match" as None instead of converting an absent model.
    return (
        get_feedback_thread_reply_info_from_model(model)
        if model is not None else None)
def _cross_correlations(n_states):
"""Returns list of crosscorrelations
Args:
n_states: number of local states
Returns:
list of tuples for crosscorrelations
>>> l = _cross_correlations(np.arange(3))
>>> assert l == [(0, 1), (0, 2), (1, 2)]
"""
l = n_states
cross_corr = [[(l[i], l[j]) for j in l[1:][i:]] for i in l[:-1]]
return [item for sublist in cross_corr for item in sublist] | 30,051 |
def select_interface(worker):
    """
    It gets a worker interface channel to do something.

    Prompts the user (Python 2 ``raw_input``) to pick one of the worker's
    interfaces by number. Returns the 1-based interface index, or -1 when
    the worker has no interfaces or the user enters an empty answer.
    """
    interfaces = worker.interfaces_list()
    if len(interfaces) == 0:
        print ' Error. Worker without interface known.'
        return -1
    elif len(interfaces) == 1:
        # Only one choice: select it without prompting.
        return 1
    option = raw_input(' Select interface -> ')
    if option == '':
        return -1
    # Re-prompt until a number within [1, len(interfaces)] is entered.
    while not option.isdigit() or int(option) < 1 or int(option) > len(interfaces):
        print ' Error. None worker interface was selected.'
        option = raw_input(' Select interface -> ')
        if option == '':
            return -1
    return int(option)
def save_trained_model(
    model_name: str, theModel: keras.models.Sequential, trainHistory: cbacks.History
) -> None:
    """Saves a tensorflow model and its training history to local file storage."""
    model_dir = f"{CONFIG['Modelling']['ModelDir']}/{model_name}"
    # Pickle the raw history dict alongside the model for later inspection.
    with open(f"{model_dir}/trainHistoryDict.p", "wb") as fp:
        pickle.dump(trainHistory.history, fp)
    theModel.save(model_dir)
def get_middle(arr):
    """Return the value at (or an average around) the centre of a 2D array.

    NOTE(review): the parity tests below operate on shape/2.0, not on the
    shape itself -- e.g. a (3, 3) array has n_val = 1.5 and falls through
    to the final branch, averaging a 2x1 column instead of returning the
    exact centre cell. Behaviour preserved as-is; confirm whether intended.
    """
    n_val = np.array(arr.shape) / 2.0
    # FIX: np.int0 was removed in NumPy 2.0; np.intp is the same
    # platform-int alias it pointed to.
    n_int = n_val.astype(np.intp)
    # print(n_int)
    if n_val[0] % 2 == 1 and n_val[1] % 2 == 1:
        return arr[n_int[0], n_int[1]]
    if n_val[0] % 2 == 0 and n_val[1] % 2 == 0:
        return np.average(arr[n_int[0]:n_int[0] + 2, n_int[1]:n_int[1] + 2])
    if n_val[0] % 2 == 1 and n_val[1] % 2 == 0:
        return np.average(arr[n_int[0], n_int[1]:n_int[1]+2])
    return np.average(arr[n_int[0]:n_int[0]+2, n_int[1]])
def annotate_segmentation(image, segmentation):
    """Return annotated segmentation."""
    annotation = AnnotatedImage.from_grayscale(image)
    # Outline every segmented region with its own colour.
    for i in segmentation.identifiers:
        region = segmentation.region_by_identifier(i)
        color = pretty_color()
        annotation.mask_region(region.border.dilate(), color)
    # Draw a green vertical line through each region's centroid column,
    # spanning the region's bounding box rows.
    props = skimage.measure.regionprops(segmentation)
    for p in props:
        try:
            minr, minc, maxr, maxc = p.bbox
            cval = int(p.centroid[1])
            line = skimage.draw.line(minr, cval, maxr, cval)
            annotation.mask_region(line, (0, 255, 0))
        except IndexError:
            # Don't draw line if it falls outside of the image.
            pass
    return annotation
def get_champ_data(champ: str, tier: int, rank: int):
    """
    Gives Champ Information by their champname, tier, and rank.

    Raises HTTPException 404 when the champ cannot be found
    (FileNotFoundError) and 400 on a bad lookup (KeyError); any other
    exception propagates unchanged.
    """
    champ_info = NewChampsDB()
    try:
        champ_info.get_data(champ, tier, rank)
        champs_dict = {
            "name": f"{champ_info.name}",
            "released": champ_info.released,
            "class": champ_info.class_type,
            "tier": champ_info.tier,
            "rank": champ_info.rank,
            "prestige": champ_info.prestige,
            "hp": champ_info.hp,
            "attack": champ_info.attack,
            "crit_rate": champ_info.crit_rate,
            "crit_dmge": champ_info.crit_dmge,
            "armor": champ_info.armor,
            "block_prof": champ_info.block_prof,
            "energy_resist": champ_info.energy_resist,
            "physical_resist": champ_info.physical_resist,
            "crit_resist": champ_info.crit_resist,
            "sig_info": champ_info.sig_info,
            # BUG FIX: "abilities" appeared twice in this literal; the
            # duplicate key silently overwrote the first and is removed.
            "abilities": champ_info.abilities,
            "challenger_rating": champ_info.challenger_rating,
            "find": champ_info.find,
            "tags": champ_info.tags,
            "contact": champ_info.contact,
            "url_page": f"{champ_info.url_page}",
            "img_portrait": f"{champ_info.img_portrait}",
            "champid": f"{champ_info.champid}",
        }
        champs_dict.update({"status": 200, "detail": "Successful"})
        return champs_dict
    except Exception as e:
        if isinstance(e, FileNotFoundError):
            raise HTTPException(status_code=404, detail="404: " + champ_info.error)
        elif isinstance(e, KeyError):
            raise HTTPException(status_code=400, detail="400: " + champ_info.error)
        else:
            raise e
def create_jobs(source, valid_affinities, externally_blocked=False):
    """
    Create jobs for Source `source`, using the an architecture matching
    `valid_affinities` for any arch "all" jobs.
    """
    # Locate the pseudo-architecture "all" in the suite.
    aall = None
    for arch in source.group_suite.arches:
        if arch.name == "all":
            aall = arch
            break
    else:
        raise ValueError("Can't find arch:all in the suite arches.")
    # Sources building arch-dependent packages should build any
    # arch-independent packages on an architecture it is building
    # arch-dependent packages on.
    valid_arches = [x for x in source.arches if x.name != "all"] or \
        [x for x in source.group_suite.arches if x.name != "all"]
    affinity = get_preferred_affinity(
        debile.master.core.config["affinity_preference"],
        valid_affinities.split(),
        valid_arches
    )
    # Source-level checks run once, on the affinity architecture.
    for check in source.group_suite.get_source_checks():
        j = Job(name="%s [%s]" % (check.name, "source"),
                check=check, arch=aall, affinity=affinity,
                source=source, binary=None,
                externally_blocked=externally_blocked,
                builder=None, assigned_at=None,
                finished_at=None, failed=None)
        source.jobs.append(j)
    # One build job per architecture; kept so binary checks can depend on
    # their arch's build below.
    builds = {}
    for check in source.group_suite.get_build_checks():
        for arch in source.arches:
            jobaffinity = affinity if arch == aall else arch
            j = Job(name="%s [%s]" % (check.name, arch.name),
                    check=check, arch=arch, affinity=jobaffinity,
                    source=source, binary=None,
                    externally_blocked=externally_blocked,
                    builder=None, assigned_at=None,
                    finished_at=None, failed=None)
            builds[arch] = j
            source.jobs.append(j)
    # Binary checks block on the arch's build job, plus the arch:all build
    # when it exists and differs.
    for check in source.group_suite.get_binary_checks():
        for arch in source.arches:
            jobaffinity = affinity if arch == aall else arch
            deps = []
            deps.append(builds[arch])
            if aall in builds and aall != arch:
                deps.append(builds[aall])
            j = Job(name="%s [%s]" % (check.name, arch.name),
                    check=check, arch=arch, affinity=jobaffinity,
                    source=source, binary=None,
                    externally_blocked=externally_blocked,
                    builder=None, assigned_at=None,
                    finished_at=None, failed=None)
            source.jobs.append(j)
            jds = [JobDependencies(blocked_job=j, blocking_job=x)
                   for x in deps]
            for dep in jds:
                # NOTE(review): "depedencies" looks misspelled, but it may
                # match the ORM relationship's actual attribute name --
                # verify against the Job model before renaming.
                j.depedencies.append(dep)
def handler(event, context):
    """
    Handler method for insert resource function.

    Validates the incoming event, lazily establishes the DynamoDB
    connection (cached in module-level ``_dynamodb``), and delegates to
    RequestHandler. Returns a BAD_REQUEST response for malformed events and
    INTERNAL_SERVER_ERROR when the database or handler setup fails.
    """
    # Reject missing or malformed events up front. The original had a
    # redundant separate `event is None` check; the short-circuit below
    # covers it.
    if event is None or Constants.event_body() not in event or Constants.event_http_method() not in event:
        return response(http.HTTPStatus.BAD_REQUEST, Constants.error_insufficient_parameters())
    # FIX: the original used `len(...) is 0`; identity comparison with an
    # int literal is implementation-dependent -- use == instead.
    if event[Constants.event_body()] is None or len(event[Constants.event_body()]) == 0:
        return response(http.HTTPStatus.BAD_REQUEST, Constants.error_insufficient_parameters())
    global _dynamodb
    if _dynamodb is None:
        try:
            ddb = DynamoDB()
            _dynamodb = ddb.connect(os.environ[Constants.env_var_region()])
        except Exception as e:
            return response(http.HTTPStatus.INTERNAL_SERVER_ERROR, str(e))
    try:
        request_handler = RequestHandler(_dynamodb)
    except Exception as e:
        return response(http.HTTPStatus.INTERNAL_SERVER_ERROR, str(e))
    return request_handler.handler(event, context)
def _topic_scores(run_scores):
    """
    Helping generator yielding the per-topic scores for each measure.

    @param run_scores: The run scores of the previously evaluated run.
    @return: Generator of (measure, [per-topic scores]) pairs for every
             trec_eval measure not listed in ``exclude``.
    """
    # All topics share the same measure set; read it off the first entry.
    first_topic_scores = next(iter(run_scores.values()))
    topics = run_scores.keys()
    for measure in first_topic_scores.keys():
        if measure in exclude:
            continue
        yield measure, [run_scores.get(topic).get(measure) for topic in topics]
def doctest_ObjectWriter_get_state_sub_doc_object_with_no_pobj():
    # Collected by the suite's doctest runner: the docstring below IS the
    # test, and its example output is compared verbatim -- edit with care.
    """ObjectWriter: get_state(): Called with a sub-document object and no pobj
    While this should not really happen, we want to make sure we are properly
    protected against it. Usually, the writer sets the jar of the parent
    object equal to its jar. But it cannot do so, if `pobj` or `pobj._p_jar`
    is `None`.
    >>> writer = serialize.ObjectWriter(dm)
    >>> t2 = Tier2()
    >>> writer.get_state(t2)
    {'_py_persistent_type': 'pjpersist.tests.test_serialize.Tier2'}
    >>> t2._p_jar is None
    True
    >>> t2._p_pj_doc_object is None
    True
    Let's now pass in a `pobj` without a jar:
    >>> top = Top()
    >>> writer.get_state(t2, top)
    {'_py_persistent_type': 'pjpersist.tests.test_serialize.Tier2'}
    >>> t2._p_jar is None
    True
    >>> t2._p_pj_doc_object is top
    True
    """
def serving_input_receiver_fn():
    """This is used to define inputs to serve the model.

    Returns:
      A ServingInputReciever object.
    """
    # TF1-style placeholder: a batch of raw CSV row strings.
    csv_row = tf.placeholder(shape=[None], dtype=tf.string)
    # Parse features only; no target column is present at serving time.
    features, _ = _make_input_parser(with_target=False)(csv_row)
    return tf.estimator.export.ServingInputReceiver(features,
                                                    {'csv_row': csv_row})
def offsetTimer():
    """
    'Start' a timer: return a zero-argument function reporting the seconds
    (monotonic clock) elapsed since offsetTimer() was called.
    """
    started = time.monotonic()
    return lambda: time.monotonic() - started
def _get_sender(pusher_email):
"""Returns "From" address based on env config and default from."""
use_author = 'GITHUB_COMMIT_EMAILER_SEND_FROM_AUTHOR' in os.environ
if use_author:
sender = pusher_email
else:
sender = os.environ.get('GITHUB_COMMIT_EMAILER_SENDER')
return sender | 30,063 |
def test_report_a_tag_anlger_report_create_report_link_not_authenticated(
    client, db_setup
):
    """If an unauthorized user accessess the view the angler reports through the report-a-tag url, they
    should NOT see the link to create reports
    """
    angler = JoePublic.objects.get(first_name="Homer")
    response = client.get(
        reverse("tfat:report_a_tag_angler_reports", kwargs={"angler_id": angler.id})
    )
    page = str(response.content)
    # Neither the plain create-report url nor the report-a-tag variant may
    # be rendered for an anonymous user.
    for view_name in ("tfat:create_report", "tfat:report_a_tag_create_report"):
        assert reverse(view_name, kwargs={"angler_id": angler.id}) not in page
def main():
    """Main execution when called as a script."""
    warnings.filterwarnings('ignore')
    # AiiDA profile and pseudopotential family used for every submission.
    profile = 'asc'
    pseudo_family_id = 'SSSP/1.1/PBE/efficiency'
    load_profile(profile)
    # Submit PwBase workchains for small MP structures (1-3 sites), keeping
    # at most two running at once.
    controller = PwBaseSubmissionController(
        pw_code_id='pw-6.7MaX_conda',
        structure_group_id='structures/mp/2018_10_18',
        structure_filters={
            'attributes.sites': {
                'longer': 0,
                'shorter': 3
            },
        },
        pseudo_family_id=pseudo_family_id,
        group_label='tests/pw_base',
        max_concurrent=2)
    print('Max concurrent :', controller.max_concurrent)
    print('Active slots   :', controller.num_active_slots)
    print('Available slots:', controller.num_available_slots)
    print('Already run    :', controller.num_already_run)
    print('Still to run   :', controller.num_to_run)
    print()
    print('Submitting...')
    # Fill the free slots with a new batch of submissions.
    run_processes = controller.submit_new_batch(dry_run=False)
    for run_process_extras, run_process in run_processes.items():
        print(f'{run_process_extras} --> <{run_process}>')
    print('Done.')
def find_resolution(func: Callable = None) -> Callable:
    """Decorator that injects the chart's image resolution into the call.

    Fills ``width``/``height`` from ``self.resolution`` when absent, then
    lets an explicit ``resolution=`` pair override them, with ``size=``
    taking final precedence.
    """
    @functools.wraps(func)
    def wrapper(self: MultiTraceChart, *args, **kwargs):
        kwargs.setdefault('width', self.resolution[0])
        kwargs.setdefault('height', self.resolution[1])
        if 'resolution' in kwargs:
            res = kwargs.pop('resolution')
            kwargs['width'], kwargs['height'] = res[0], res[1]
        if 'size' in kwargs:
            size = kwargs.pop('size')
            kwargs['width'], kwargs['height'] = size[0], size[1]
        return func(self, *args, **kwargs)
    return wrapper
def transform_spikes_to_isi(self, spikes, time_epoch, last_event_is_spike=False):
    """Convert spike times to data array, which is a suitable format for optimization.

    Parameters
    ----------
    spikes : numpy array (num_neuron,N), dtype=np.ndarray
        A sequence of spike times for each neuron on each trial. Each entry is 1D array of floats.
    time_epoch : list of tuples
        List of N tuples, where N is the number of trials. Each tuple consists of the trial's start time and end time in seconds.
        Note that the end time should be an actual end time, but not the timeout in the case of last_event_is_spike is True.
    last_event_is_spike : bool
        If true, trial termination time will not be recorded. Otherwise, trial termination time will be recorded.

    Returns
    -------
    data : numpy array (N,2),dtype=np.ndarray.
        Spike data packed as numpy array of the size (N,2), where each elements is a 1D array of floats.
        N is the number of trials, and for each trial the first column contains the interspike intervals (ISIs),
        and the second column contains the corresponding neuronal indices (-1 marks the trial-end event).
    """
    num_neuron, num_trial = spikes.shape
    # initialize data array
    data = np.empty((num_trial, 2), dtype=np.ndarray)
    # indices of neurons that spiked
    spike_ind = np.empty(num_neuron, dtype=np.ndarray)
    # transform spikes to interspike intervals format
    for iTrial in range(num_trial):
        for iCell in range(num_neuron):
            # FIX: np.int was removed in NumPy 1.24; the builtin int is the
            # documented, equivalent replacement.
            spike_ind[iCell] = iCell * np.ones(len(spikes[iCell, iTrial]), dtype=int)
        all_spikes = np.concatenate(spikes[:, iTrial], axis=0)
        all_spike_ind = np.concatenate(spike_ind[:], axis=0)
        # create data array (one extra slot for the trial-end event)
        data[iTrial, 0] = np.zeros(len(all_spikes) + (not last_event_is_spike))
        if all_spikes.shape[0] == 0:
            data[iTrial, 1] = np.zeros(0)
            # If no spikes emitted, set to trial beginning time
            last_spike_time = time_epoch[iTrial][0]
        else:
            # sort spike times and neuron index arrays
            ind_sort = np.argsort(all_spikes)
            all_spikes = all_spikes[ind_sort]
            all_spike_ind = all_spike_ind[ind_sort]
            data[iTrial, 0][1:len(all_spikes)] = all_spikes[1:] - all_spikes[:-1]
            data[iTrial, 0][0] = all_spikes[0] - time_epoch[iTrial][0]  # handle the first ISI
            last_spike_time = all_spikes[-1]
        if not last_event_is_spike:
            data[iTrial, 0][-1] = time_epoch[iTrial][1] - last_spike_time
        # assign indicies of neurons which fired, -1 to absorption event
        data[iTrial, 1] = all_spike_ind if last_event_is_spike else np.concatenate((all_spike_ind, [-1]))
    return data
def api_github_v2(user_profile, event, payload, branches, default_stream, commit_stream, issue_stream, topic_focus = None):
    """
    processes github payload with version 2 field specification
    `payload` comes in unmodified from github
    `default_stream` is set to what `stream` is in v1 above
    `commit_stream` and `issue_stream` fall back to `default_stream` if they are empty
    This and allowing alternative endpoints is what distinguishes v1 from v2 of the github configuration

    Returns a (target_stream, subject, content) triple for the message to
    send. NOTE(review): `branches` is unused in this body -- confirm
    whether filtering by branch was intended here.
    """
    if not commit_stream:
        commit_stream = default_stream
    if not issue_stream:
        issue_stream = default_stream
    target_stream = commit_stream
    repository = payload['repository']
    # Default the topic to the repository name.
    if not topic_focus:
        topic_focus = repository['name']
    # Event Handlers
    if event == 'pull_request':
        pull_req = payload['pull_request']
        subject = github_generic_subject('pull request', topic_focus, pull_req)
        content = github_generic_content('pull request', payload, pull_req)
    elif event == 'issues':
        # in v1, we assume that this stream exists since it is
        # deprecated and the few realms that use it already have the
        # stream
        target_stream = issue_stream
        issue = payload['issue']
        subject = github_generic_subject('issue', topic_focus, issue)
        content = github_generic_content('issue', payload, issue)
    elif event == 'issue_comment':
        # Comments on both issues and pull requests come in as issue_comment events
        issue = payload['issue']
        if 'pull_request' not in issue or issue['pull_request']['diff_url'] is None:
            # It's an issues comment
            target_stream = issue_stream
            noun = 'issue'
        else:
            # It's a pull request comment
            noun = 'pull request'
        subject = github_generic_subject(noun, topic_focus, issue)
        comment = payload['comment']
        content = ("%s [commented](%s) on [%s %d](%s)\n\n~~~ quote\n%s\n~~~"
                   % (comment['user']['login'],
                      comment['html_url'],
                      noun,
                      issue['number'],
                      issue['html_url'],
                      comment['body']))
    elif event == 'push':
        # Delegate commit-list formatting to the shared gitlog builder.
        subject, content = build_message_from_gitlog(user_profile, topic_focus,
                                                     payload['ref'], payload['commits'],
                                                     payload['before'], payload['after'],
                                                     payload['compare'],
                                                     payload['pusher']['name'],
                                                     forced=payload['forced'],
                                                     created=payload['created'])
    elif event == 'commit_comment':
        comment = payload['comment']
        subject = "%s: commit %s" % (topic_focus, comment['commit_id'])
        content = ("%s [commented](%s)"
                   % (comment['user']['login'],
                      comment['html_url']))
        # Inline (line-level) comments include the file path and line.
        if comment['line'] is not None:
            content += " on `%s`, line %d" % (comment['path'], comment['line'])
        content += "\n\n~~~ quote\n%s\n~~~" % (comment['body'],)
    return (target_stream, subject, content)
def concatenate(
    iterable: Iterable[Results],
    callback: Optional[Callable] = None,
    modes: Iterable[str] = ("val", "test"),
    reduction: str = "none",
) -> Results:
    """Returns a concatenated Results.

    Args:
        iterable (iterable of Results): Iterable of `Results` instance.
        callback (callable, optional): Called for each `Results`. Must take
            (`index`, `output`, `target`) arguments and return a tuple
            of (`index`, `output`, `target`). (Doc fix: the code never
            passes a `mode` argument.)
        modes (iterable of str): Specify modes to concatenate.
        reduction (str, optional): Reduction. `none` or `mean`.
    """
    modes = list(modes)
    indexes: Dict[str, list] = {mode: [] for mode in modes}
    outputs: Dict[str, list] = {mode: [] for mode in modes}
    targets: Dict[str, list] = {mode: [] for mode in modes}
    # Gather index/output/target arrays per mode across all Results.
    for results in iterable:
        for mode in modes:
            if mode not in results:
                continue
            result = results[mode]
            index, output, target = result["index"], result["output"], result["target"]
            if callback:
                index, output, target = callback(index, output, target)
            indexes[mode].append(index)
            outputs[mode].append(output)
            targets[mode].append(target)
    concatenated = Results()
    for mode in modes:
        index = np.concatenate(indexes[mode])
        output = np.concatenate(outputs[mode])
        target = np.concatenate(targets[mode])
        # FIX: the original bound this container to the name `dict`,
        # shadowing the builtin; renamed for clarity and safety.
        entry = ivory.core.collections.Dict()
        concatenated[mode] = entry(index=index, output=output, target=target)
    if reduction != "none":
        concatenated = getattr(concatenated, reduction)()
    return concatenated
def parse_version(s: str) -> tuple[int, ...]:
    """poor man's version comparison"""
    return tuple(map(int, s.split('.')))
def test_match_dict_index():
    """ask dict for first item"""
    # A dict node spanning [0, 2) with a single child entry named 'e'.
    kid1=JsonNode('dict',start=1,end=2,name='e')
    node = JsonNode('dict', start=0, end=2, kids=[ kid1 ])
    # '{0}' addresses the first entry of the dict by position.
    pattern = '{0}'
    ret=list(matcher.match(node, pattern))
    assert ret == [kid1]
def replay_train(DQN, train_batch):
    """Train a DQN on a minibatch of replayed transitions.

    ``train_batch`` holds (state, action, reward, next_state, done) tuples
    sampled from the replay buffer. Q-learning targets are built per
    transition, stacked, and passed to ``DQN.update`` in one batch.

    Returns whatever ``DQN.update`` returns (typically the training loss).

    (The original carried Korean bare-string pseudo-comments; they were
    no-op statements and are translated into real comments here.)
    """
    # x_stack accumulates states; y_stack the matching Q-value targets.
    x_stack = np.empty(0).reshape(0, DQN.input_size)
    y_stack = np.empty(0).reshape(0, DQN.output_size)
    # Get stored information from the buffer.
    for state, action, reward, next_state, done in train_batch:
        Q = DQN.predict(state)
        if done:
            # Terminal transition: no bootstrapped future reward.
            Q[0, action] = reward
        else:
            # Obtain the Q' values by feeding the new state through our network.
            Q[0, action] = reward + dis * np.max(DQN.predict(next_state))
            # NOTE: the same network selects actions and provides targets,
            # so this suffers the non-stationary-targets problem (no
            # separate target network).
        y_stack = np.vstack([y_stack, Q])
        x_stack = np.vstack([x_stack, state])
    # Train our network using target and predicted Q values on each episode.
    return DQN.update(x_stack, y_stack)
def make_reverse_macro_edge_name(macro_edge_name):
    """Autogenerate a reverse macro edge name for the given macro edge name."""
    # Swap the direction prefix: inbound becomes outbound and vice versa.
    prefix_swaps = (
        (INBOUND_EDGE_FIELD_PREFIX, OUTBOUND_EDGE_FIELD_PREFIX),
        (OUTBOUND_EDGE_FIELD_PREFIX, INBOUND_EDGE_FIELD_PREFIX),
    )
    for old_prefix, new_prefix in prefix_swaps:
        if macro_edge_name.startswith(old_prefix):
            return new_prefix + macro_edge_name[len(old_prefix):]
    raise AssertionError("Unreachable condition reached: {}".format(macro_edge_name))
def unescaped_split(pattern,
                    string,
                    max_split=0,
                    remove_empty_matches=False,
                    use_regex=False):
    """
    Splits the given string by the specified pattern. The return character (\\n)
    is not a natural split pattern (if you don't specify it yourself).
    This function handles escaped split-patterns (and so splits only patterns
    that are unescaped).
    :param pattern:              A pattern that defines where to split.
    :param string:               The string to split by the defined pattern.
    :param max_split:            Defines the maximum number of splits. If 0 or
                                 less is provided, the number of splits is not
                                 limited.
    :param remove_empty_matches: Defines whether empty entries should
                                 be removed from the result.
    :param use_regex:            Specifies whether to treat the split pattern
                                 as a regex or simple string.
    :return:                     An iterator returning the split up strings.
    """
    # Delegate to the shared _split driver, using the escape-aware searcher
    # so escaped occurrences of the pattern are not treated as boundaries.
    return _split(string,
                  max_split,
                  remove_empty_matches,
                  unescaped_search_for,
                  pattern,
                  string,
                  0,
                  0,
                  use_regex)
def download_coco(args, overwrite=False):
    """download COCO dataset and Unzip to download_dir"""
    # (url, expected sha1) pairs: COCO 2017 train/val images plus the
    # trainval annotations.
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
    ]
    if not os.path.isdir(args.download_dir):
        makedirs(args.download_dir)
    for url, checksum in _DOWNLOAD_URLS:
        # `download` verifies the sha1 of an already-present file and skips
        # re-fetching unless `overwrite` is set.
        filename = download(url, path=args.download_dir, overwrite=overwrite, sha1_hash=checksum)
        # Unpack each archive in place.
        with zipfile.ZipFile(filename) as zf:
            zf.extractall(path=args.download_dir)
def citation_distance_matrix(graph):
    """Jaccard-style distance between nodes based on reachable sinks.

    :param graph: networkx graph
    :returns: distance matrix, node labels
    """
    # Sinks are nodes with no outgoing edges (terminal citations).
    sinks = [node for node, outdegree in graph.out_degree() if outdegree == 0]
    paths = {sink: nx.shortest_path_length(graph, target=sink) for sink in sinks}
    paths_df = pd.DataFrame(paths)
    # Binary node-by-sink reachability matrix.
    reach_df = 1 * ~paths_df.isnull()
    reachable = reach_df.values
    intersect = reachable.dot(reachable.T)
    ones = np.ones(reachable.shape)
    union = reachable.dot(ones.T) + ones.dot(reachable.T) - intersect
    union[union == 0] = 1  # avoid division by zero for empty rows
    dist = 1 - intersect / union
    return dist, reach_df.index
def read_into_dataframe(file: IO, filename: str = "", nrows: int = 100, max_characters: int = 50) -> pd.DataFrame:
    """Reads a file into a DataFrame.

    Infers the file encoding (via chardet's UniversalDetector) and whether a
    header row exists, then parses the full contents with pandas.

    Args:
        file (IO): file buffer (binary).
        filename (str): filename. Used to infer compression.
        nrows (int, optional): number of rows to peek. Default: 100.
        max_characters (int, optional): max characters a column name can have
            to be distinguished from a real text value.

    Returns:
        A pandas.DataFrame.
    """
    # Detect the text encoding from at most `nrows` lines.
    detector = UniversalDetector()
    for line_number, line in enumerate(file):
        detector.feed(line)
        if detector.done or line_number > nrows:
            break
    detector.close()
    encoding = detector.result.get("encoding")
    compression = infer_compression(filename, "infer")
    file.seek(0, SEEK_SET)
    contents = file.read()

    # First pass: parse a small sample assuming a header row, so the inferred
    # column names can be inspected. (Do not shadow the `file` parameter.)
    with BytesIO(contents) as buffer:
        peek_df = pd.read_csv(
            buffer,
            encoding=encoding,
            compression=compression,
            sep=None,
            engine="python",
            header="infer",
            nrows=nrows,
        )
    columns = list(peek_df.columns)

    # Heuristic 1: header cells should all be short strings
    # (real text values tend to be long).
    looks_like_header = all(
        isinstance(name, str) and len(name) < max_characters for name in columns
    )
    # Heuristic 2: no header cell should be parseable as a number.
    for name in columns:
        try:
            float(name)
        except (TypeError, ValueError):
            # Not numeric -- consistent with being a header label.
            continue
        looks_like_header = False
        break

    header = "infer" if looks_like_header else None
    # Without a header, pandas synthesizes column names from this prefix.
    prefix = None if header else "col"
    # Second pass: parse the whole file with the decided header policy.
    with BytesIO(contents) as buffer:
        df = pd.read_csv(
            buffer,
            encoding=encoding,
            compression=compression,
            sep=None,
            engine="python",
            header=header,
            prefix=prefix,
        )
    return df
def load_generator(config: dict):
    """
    Create the generator and load its weights using the function `load_weights`.

    Args:
        config (dict): Dictionary with the configurations.

    Returns:
        BigGAN.Generator: The generator (the EMA copy when both ``ema`` and
        ``use_ema`` are set, otherwise the plain generator).
    """
    # GPU
    device = "cuda"
    torch.backends.cudnn.benchmark = True
    # TODO: how to handle seed?
    # Seed RNG
    utils.seed_rng(config["seed"])
    # Import the model
    model_name = "BigGAN"  # ! Code rewrite only supports BigGAN
    model = __import__(model_name)
    # Create generator and load it to the GPU
    G = model.Generator(**config).to(device)
    # If using EMA, prepare a second generator that holds the exponential
    # moving average of G's weights.
    if config["ema"]:
        G_ema = model.Generator(**{**config, "skip_init": True, "no_optim": True}).to(
            device
        )
        utils.ema(G, G_ema, config["ema_decay"], config["ema_start"])
    else:
        G_ema = None
    # Load pre-trained weights; fall back to loading only G when the
    # checkpoint lacks EMA weights.
    try:
        load_weights(G, config, G_ema=G_ema if config["ema"] else None)
    except Exception:  # narrowed from bare `except:` so KeyboardInterrupt propagates
        load_weights(G, config, G_ema=None)
        if G_ema is not None:
            # Mirror the freshly loaded weights into the EMA copy.
            # (The original called this unconditionally and crashed when
            # config["ema"] was False and G_ema was None.)
            G_ema.load_state_dict(G.state_dict())
    # Switch to eval mode
    G.eval()
    if config["ema"]:
        G_ema.eval()
    return G_ema if config["ema"] and config["use_ema"] else G
def Navigatev0_action_to_tensor(act: OrderedDict, task=1):
    """
    Creates the following (batch_size, seq_len, 11) action tensor from Navigatev0 actions:
    0. cam left
    1. cam right
    2. cam up
    3. cam down
    4. place + jump
    5. place
    6. forward + attack
    7. attack
    8. forward + jump
    9. jump
    10. forward

    :param act: batched action dict with at least "camera", "place", "jump",
        "attack", "forward" keys, each shaped (batch_size, seq_len[, ...]).
    :param task: key into the module-level TASK_ACTIONS table selecting which
        action indices are enabled for this task.
    :return: one-hot torch tensor of shape (batch_size, seq_len, 11).

    NOTE: the elif cascade is priority-ordered — camera beats place beats
    attack beats jump; "forward" (index 10) is the fallback action.
    """
    batch_size, seq_len = act["jump"].shape
    PLACE_OPTIONS = {"none": 0, "dirt": 1}
    # ONE_HOT = {0: np.array([1, 0]), 1: np.array([0, 1])}
    out = torch.zeros((batch_size,seq_len,11))
    for b in range(batch_size):
        for s in range(seq_len):
            c = act["camera"]
            # We don't need to check if 0, 1, and 10 are in task actions
            # since they always will be
            task_acts = TASK_ACTIONS[task]
            # Set camera left (threshold 10 — presumably degrees; the dominant
            # camera axis wins ties via the abs() comparison)
            if c[b,s][0] < -10 and abs(c[b,s][0]) >= abs(c[b,s][1]):
                out[b,s][0] = 1
            # Set camera right
            elif c[b,s][0] > 10 and abs(c[b,s][0]) >= abs(c[b,s][1]):
                out[b,s][1] = 1
            # Set camera up
            elif 2 in task_acts and c[b,s][1] < -10 and abs(c[b,s][1]) >= abs(c[b,s][0]):
                out[b,s][2] = 1
            # Set camera down
            elif 3 in task_acts and c[b,s][1] > 10 and abs(c[b,s][1]) >= abs(c[b,s][0]):
                out[b,s][3] = 1
            # Place (optionally combined with jump)
            elif PLACE_OPTIONS[act["place"][b,s]] == 1:
                if 4 in task_acts and act["jump"][b,s] == 1:
                    out[b,s][4] = 1
                elif 5 in task_acts:
                    out[b,s][5] = 1
            # Attack (optionally combined with forward)
            elif act["attack"][b,s] == 1:
                if 6 in task_acts and act["forward"][b,s] == 1:
                    out[b,s][6] = 1
                elif 7 in task_acts:
                    out[b,s][7] = 1
            # Jump (optionally combined with forward)
            elif act["jump"][b,s] == 1:
                if 8 in task_acts and act["forward"][b,s] == 1:
                    out[b,s][8] = 1
                elif 9 in task_acts:
                    out[b,s][9] = 1
            # Default: plain forward
            else:
                out[b,s][10] = 1
    return out
def asfarray(a, dtype=mstype.float32):
    """
    Similar to asarray, converts the input to a float tensor.
    If non-float dtype is defined, this function will return a float32 tensor instead.
    Args:
        a (Union[int, float, bool, list, tuple, numpy.ndarray]): Input data, in
            any form that can be converted to a `Tensor`. This includes lists, lists of
            tuples, tuples, tuples of tuples, tuples of lists and numpy.ndarray.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
            be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
            of the new tensor will be inferred from `a`. Default is :class:`mindspore.float32`.
    Returns:
        Tensor, generated tensor with the specified float dtype.
    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If input `a` has different sizes at different dimensions.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.asfarray([1,2,3]))
        [1. 2. 3.]
    """
    _check_input_for_asarray(a)
    # A dtype of None delegates inference entirely to asarray.
    if dtype is None:
        return asarray(a)
    dtype = _check_dtype(dtype)
    # Coerce any non-float dtype to float32, per the documented contract.
    if dtype not in (mstype.float16, mstype.float32, mstype.float64):
        dtype = mstype.float32
    if isinstance(a, (list, tuple)):
        # Convert all tuple/nested tuples to lists
        a = _deep_list(a)
        # Convert all tensor sub-elements to numpy arrays
        a = _deep_tensor_to_nparray(a)
        a = onp.asarray(a)
        # Ragged nested sequences produce object arrays, which Tensor cannot hold.
        if a.dtype is onp.dtype('object'):
            raise TypeError(f"For Tensor conversion, the input_data is {a} that contains unsupported element.")
    # Zero-copy conversion from numpy before applying the final dtype.
    if isinstance(a, onp.ndarray):
        a = Tensor.from_numpy(a)
    return Tensor(a, dtype)
def test_fail_when_no_template(
    testdata_dir: pathlib.Path, tmp_trestle_dir: pathlib.Path, monkeypatch: MonkeyPatch
) -> None:
    """Test that validation fails when template does not exists."""
    template_folder = tmp_trestle_dir / '.trestle/author/test_task/'
    instance_source = testdata_dir / 'author/0.0.1/test_1_md_format/correct_instance.md'
    instance_dest = tmp_trestle_dir / 'test_task/correct_instance.md'
    # Create the task folders but deliberately place no template file.
    template_folder.mkdir(exist_ok=True, parents=True)
    (tmp_trestle_dir / 'test_task').mkdir(parents=True)
    instance_dest.touch()
    shutil.copyfile(instance_source, instance_dest)
    # Run the validate command; a missing template must yield return code 1.
    cli_args = 'trestle author docs validate -tn test_task'.split()
    monkeypatch.setattr(sys, 'argv', cli_args)
    assert trestle.cli.Trestle().run() == 1
def _verify_option(value: Optional[str], value_proc: Callable) -> Optional[str]:
"""Verifies that input value via click.option matches the expected value.
This sets ``value`` to ``None`` if it is invalid so the rest of the prompt
can flow smoothly.
Args:
value (Optional[str]): Input value.
value_proc (Callable): A function to check the validity of ``value``.
Returns:
(Optional[str]): ``value`` if it is a valid value. ``None`` if it is
not.
Raises:
click.exceptions.UsageError: When ``value`` is invalid.
"""
if value is None:
return value
try:
value = value_proc(value)
except click.exceptions.UsageError as error:
click.echo(f"Error: {error.message}", err=True)
value = None
return value | 30,082 |
def test_external_server():
    """ Test starting up an external server and accessing with a client with start_server=False """
    corenlp_home = os.getenv('CORENLP_HOME')
    start_cmd = f'java -Xmx5g -cp "{corenlp_home}/*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9001 ' \
                f'-timeout 60000 -server_id stanza_external_server -serverProperties {SERVER_TEST_PROPS}'
    # Launch CoreNLP as an external process, then attach a client to it
    # without letting the client manage the server lifecycle.
    server_proc = subprocess.Popen(shlex.split(start_cmd))
    with corenlp.CoreNLPClient(start_server=False, endpoint="http://localhost:9001") as client:
        ann = client.annotate(TEXT, annotators='tokenize,ssplit,pos', output_format='text')
    assert server_proc
    # Shut the external server down and verify the annotation result.
    server_proc.terminate()
    server_proc.wait(5)
    assert ann.strip() == EN_GOLD
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
    """Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
    e.g.
    x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    y = x * x
    jacobian = batch_jacobian(y, x)
    # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
    Args:
      output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should
        only depend on `inp[i,...]`.
      inp: A tensor with shape [b, x1, ..., x_m]
      use_pfor: If true, uses pfor for computing the Jacobian. Else uses a tf.while_loop.
        NOTE(review): the pfor branch below is hard-disabled (`if False:`), so this
        flag currently has no effect — confirm whether that is intentional.
      parallel_iterations: A knob to control how many iterations and dispatched in
        parallel. This knob can be used to control the total memory usage.
    Returns:
      A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
      is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
      per-example jacobians.
    Raises:
      ValueError: if first dimension of `output` and `inp` do not match.
    (NL) This function is taken from the following (and minimally modified to be used):
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/parallel_for/gradients.py#L81
    """
    output_shape = output.shape
    if not output_shape[0].is_compatible_with(inp.shape[0]):
        raise ValueError("Need first dimension of output shape (%s) and inp shape "
                         "(%s) to match." % (output.shape, inp.shape))
    # Resolve the batch size and per-example output size statically when
    # possible, otherwise fall back to dynamic shape ops.
    if output_shape.is_fully_defined():
        batch_size = int(output_shape[0])
        output_row_size = output_shape.num_elements() // batch_size
    else:
        output_shape = tf.shape(output)
        batch_size = output_shape[0]
        output_row_size = tf.size(output) // batch_size
    inp_shape = tf.shape(inp)
    # Flatten output to 2-D.
    with tf.control_dependencies([tf.assert_equal(batch_size, inp_shape[0])]):
        output = tf.reshape(output, [batch_size, output_row_size])
    def loop_fn(i):
        # Gradient of the i-th flattened output component w.r.t. the full input.
        y = tf.gather(output, i, axis=1)
        return tf.gradients(y, inp)[0]
    #if use_pfor:
    if False:
        # Dead branch: pfor path deliberately disabled (see NOTE in docstring).
        pfor_output = tf.pfor(loop_fn, output_row_size,
                              parallel_iterations=parallel_iterations)
    else:
        pfor_output = for_loop(
            loop_fn, output.dtype,
            output_row_size,
            parallel_iterations=parallel_iterations)
    if pfor_output is None:
        return None
    # Re-shape per-component gradients back to stacked per-example jacobians.
    pfor_output = tf.reshape(pfor_output, [output_row_size, batch_size, -1])
    output = tf.transpose(pfor_output, [1, 0, 2])
    new_shape = tf.concat([output_shape, inp_shape[1:]], axis=0)
    return tf.reshape(output, new_shape)
def test_phono3py_ltc_lbte(
    fixture_sandbox,
    fixture_code,
    generate_calc_job,
    generate_inputs,
    generate_settings,
    generate_fc3_filedata,
    generate_fc2_filedata,
):
    """Test a phonopy calculation."""
    entry_point_calc_job = "phonoxpy.phono3py"
    # Assemble inputs: LBTE-enabled settings, the code, and both force constants.
    inputs = generate_inputs(
        metadata={"options": {"resources": {"tot_num_mpiprocs": 1}}}
    )
    inputs["settings"] = generate_settings(
        mesh=50,
        phonon_supercell_matrix=[2, 2, 2],
        ts=[300, 400, 500],
        lbte=True,
    )
    inputs["code"] = fixture_code(entry_point_calc_job)
    inputs["fc2"] = generate_fc2_filedata()
    inputs["fc3"] = generate_fc3_filedata()
    # The settings parser must keep exactly the mesh/lbte/ts keys.
    ph_settings = {}
    _setup_phono3py_calculation_keyset5(ph_settings, inputs["settings"])
    assert set(ph_settings) == {"mesh", "lbte", "ts"}
    calc_info = generate_calc_job(fixture_sandbox, entry_point_calc_job, inputs)
    assert set(calc_info.retrieve_list) == {"phono3py.yaml", "kappa-*.hdf5"}
    expected_cmdline = {
        "-c",
        "phono3py_params.yaml.xz",
        "--fc2",
        "--fc3",
        "--mesh",
        "50.0",
        "--lbte",
        "--ts",
        "300",
        "400",
        "500",
    }
    assert set(calc_info.codes_info[0].cmdline_params) == expected_cmdline
def declare(baseFamily=None, baseDefault=0, derivedFamily=None, derivedDefault=""):
    """
    Declare a pair of components

    :param baseFamily: pyre family name for the base component
    :param baseDefault: default value of the base component's ``b`` property
    :param derivedFamily: pyre family name for the derived component
    :param derivedDefault: default value of the derived component's ``d`` property
    :return: the (base, derived) class pair
    """
    # the declaration
    class base(pyre.component, family=baseFamily):
        """a component"""
        # an integer property with a configurable default
        b = pyre.properties.int(default=baseDefault)
    class derived(base, family=derivedFamily):
        """a derived component"""
        # a string property contributed by the subclass
        d = pyre.properties.str(default=derivedDefault)
    # return the pair to the caller
    return base, derived
def parse_iso8601(dtstring: str) -> datetime:
    """naive parser for ISO8061 datetime strings,

    Parameters
    ----------
    dtstring
        the datetime as string in one of two formats:

        * ``2017-11-20T07:16:29+0000`` (parsed as a timezone-aware datetime)
        * ``2017-11-20T07:16:29Z`` (parsed as a naive datetime)
    """
    # Dispatch on the trailing 'Z' rather than on string length: this keeps
    # working even when the numeric-offset form is not exactly 24 characters
    # (e.g. offsets written as "+00:00").
    fmt = '%Y-%m-%dT%H:%M:%SZ' if dtstring.endswith('Z') else '%Y-%m-%dT%H:%M:%S%z'
    return datetime.strptime(dtstring, fmt)
def CalcCurvature(vertices,faces):
    """
    CalcCurvature recives a list of vertices and faces
    and the normal at each vertex and calculates the second fundamental
    matrix and the curvature by least squares, by inverting the 3x3 Normal matrix
    INPUT:
    vertices -nX3 array of vertices
    faces -mX3 array of faces
    VertexNormals - nX3 matrix (n=number of vertices) containing the normal at each vertex
    FaceNormals - mX3 matrix (m = number of faces) containing the normal of each face
    OUTPUT:
    FaceSFM - a list of 2x2 np arrays of (m = number of faces) second fundamental tensor at the faces
    VertexSFM - a list of 2x2 np arrays (n = number of vertices) second fundamental tensor at the vertices
    Other Parameters
    wfp : mx3 array of vertex voronoi cell area/Mixed area weights as given in Meyer 2002
    up,vp : local coordinate system at each vertex
    e0,e1,e2 : edge vectors
    """
    #list of 2x2 arrays for each vertex
    VertexSFM = [np.zeros([2,2]) for i in vertices]
    up = np.zeros(vertices.shape)
    # Edge vectors of every face, oriented so e_k is opposite vertex k.
    e0=vertices[faces[:,2]]-vertices[faces[:,1]]
    e1=vertices[faces[:,0]]-vertices[faces[:,2]]
    e2=vertices[faces[:,1]]-vertices[faces[:,0]]
    e0_norm=normr(e0)
    e1_norm=normr(e1)
    e2_norm=normr(e2)
    FaceNormals=0.5*fastcross(e1,e2) #not unit length. holds the area which is needed next
    VertNormals,wfp=GetVertexNormalsExtra(vertices,faces,FaceNormals,e0,e1,e2)
    FaceNormals=normr(FaceNormals)
    #Calculate initial coordinate system
    up[faces[:,0]]=e2_norm
    up[faces[:,1]]=e0_norm
    up[faces[:,2]]=e1_norm
    #Calculate initial vertex coordinate system
    # Orthonormalize (up, vp) against the vertex normal to get a tangent basis.
    up=fastcross(up,VertNormals)
    up=normr(up)
    vp=fastcross(VertNormals,up)
    # Per-face in-plane basis: (e0_norm, B) spans the face's tangent plane.
    B=normr(fastcross(FaceNormals,e0_norm))
    nfaces=faces.shape[0]
    # Build a least square problem at each face to get the SFM at each face and solve it using the normal equation
    # Scale normalizes edge lengths so the per-face systems are well conditioned.
    scale=1.0/np.sqrt(np.sum((e0[0,:]**2+e1[0,:]**2+e2[0,:]**2)/3.0))
    AT = scale*np.array([[inner1d(e0,e0_norm), inner1d(e0,B), np.zeros(nfaces)],
                         [np.zeros(nfaces), inner1d(e0,e0_norm), inner1d(e0,B)],
                         [inner1d(e1,e0_norm), inner1d(e1,B), np.zeros(nfaces)],
                         [np.zeros(nfaces), inner1d(e1,e0_norm), inner1d(e1,B)],
                         [inner1d(e2,e0_norm), inner1d(e2,B), np.zeros(nfaces)],
                         [np.zeros(nfaces), inner1d(e2,e0_norm), inner1d(e2,B)]]).T
    A = np.transpose(AT,axes=(0,2,1)).copy()
    # Differences of vertex normals along each edge (the data being fitted).
    dn0=VertNormals[faces[:,2]]-VertNormals[faces[:,1]]
    dn1=VertNormals[faces[:,0]]-VertNormals[faces[:,2]]
    dn2=VertNormals[faces[:,1]]-VertNormals[faces[:,0]]
    b= scale*np.array([inner1d(dn0,e0_norm),
                       inner1d(dn0,B ),
                       inner1d(dn1,e0_norm),
                       inner1d(dn1,B ),
                       inner1d(dn2,e0_norm),
                       inner1d(dn2,B )]).T[:,:,np.newaxis]
    # Per-face least-squares solve via pseudo-inverse (rcond=-1: use the
    # legacy machine-epsilon cutoff of numpy's pinv).
    X1=np.array([np.linalg.pinv(a,-1) for a in A])
    X = np.matmul(X1,b)
    #now calculate curvature per vertex as weighted sum of the face curvature
    for i,f in enumerate(faces):
        for j in [0,1,2]:
            # Rotate each face tensor into the vertex tangent frame before
            # accumulating with the Meyer-2002 mixed-area weights wfp.
            new_ku,new_kuv,new_kv = ProjectCurvatureTensor(e0_norm[i],B[i],FaceNormals[i],X[i][0],X[i][1],X[i][2],up[f[j]],vp[f[j]])
            VertexSFM[f[j]]+=wfp[i,j]*np.array([[new_ku,new_kuv],[new_kuv,new_kv]]).squeeze()
    return VertexSFM,VertNormals
def query_abstracts(
    q: Optional[str] = None,
    n_results: Optional[int] = None,
    index: str = "agenda-2020-1",
    fields: Optional[list] = None,
):
    """
    Query abstracts from a given Elastic index

    q: str, query
    n_results: int, number of results from
    index: str, index of ElasticSearch
    fields: list, list of fields that are included in the search
        (defaults to ["title^2", "abstract", "fullname", "institution"])
    """
    # Avoid a mutable default argument: build the default field list per call.
    if fields is None:
        fields = ["title^2", "abstract", "fullname", "institution"]
    return query(q, n_results, index, fields)
def PretrainedEmbeddingIndicesDictionary() -> typing.Dict[str, int]:
    """Read and return the embeddings indices dictionary."""
    # Unpickle the token -> index mapping from the bundled dictionary file.
    with open(INST2VEC_DICITONARY_PATH, "rb") as dictionary_file:
        indices = pickle.load(dictionary_file)
    return indices
def color_negative_red(val):
    """
    Takes a scalar and returns a string with
    the css property `'color: red'` for negative
    values, black otherwise.

    Intended for use with pandas ``Styler.applymap``.
    """
    # f-string instead of %-formatting; behavior unchanged.
    return f"color: {'red' if val < 0 else 'black'}"
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Simulate the Sure Petcare API failing during authentication.
    patch_target = (
        "homeassistant.components.surepetcare.config_flow."
        "surepy.client.SureAPIClient.get_token"
    )
    with patch(patch_target, side_effect=SurePetcareError):
        step_result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
    # The flow should re-show the form with a cannot_connect error.
    assert step_result["type"] == RESULT_TYPE_FORM
    assert step_result["errors"] == {"base": "cannot_connect"}
def globpattern(dir, pattern):
    """
    Return leaf names in the specified directory which match the pattern.

    :param dir: directory to list.
    :param pattern: glob pattern; an empty pattern matches the directory itself.
    :return: sorted list of matching leaf names that exist on disk.
    """
    if not hasglob(pattern):
        # Fast path: no wildcard characters, test the literal name directly.
        if pattern == '':
            return [''] if os.path.isdir(dir) else []
        if os.path.exists(util.normaljoin(dir, pattern)):
            return [pattern]
        return []
    leaves = os.listdir(dir) + ['.', '..']
    # "hidden" filenames are a bit special: only match them when the pattern
    # itself starts with a dot.
    if not pattern.startswith('.'):
        leaves = [leaf for leaf in leaves
                  if not leaf.startswith('.')]
    leaves = fnmatch.filter(leaves, pattern)
    # Drop entries that vanished between listdir and now (renamed `l` -> `leaf`
    # to fix the ambiguous single-letter name, E741).
    leaves = [leaf for leaf in leaves if os.path.exists(util.normaljoin(dir, leaf))]
    leaves.sort()
    return leaves
def test_basic_if_statement():
    """TEST 3.2: If statements are the basic control structures.
    The `if` should first evaluate its first argument. If this evaluates to
    true, then the second argument is evaluated and returned. Otherwise the
    third and last argument is evaluated and returned instead.
    """
    # Table-driven variant: each case is (source, expected value).
    cases = [
        ("(if #t 42 1000)", 42),
        ("(if #f 42 1000)", 1000),
        ("(if #t #t #f)", True),
    ]
    for source, expected in cases:
        assert_equals(expected, evaluate(parse(source), Environment()))
def get_student_discipline(person_id: str = None):
    """
    Returns student discipline information for a particular person.

    :param person_id: The numeric ID of the person you're interested in.
    :returns: String containing xml or an lxml element.
    """
    # Thin wrapper around the anonymous-API helper.
    response = get_anonymous('getStudentDiscipline', person_id=person_id)
    return response
def main():
    """evaluate gpt-model fine-tune on qa dataset

    Parses the CLI arguments, loads the tokenizer/model, encodes the SQuAD-style
    evaluation dataset, generates an answer per example, and dumps
    ``predictions.json`` and ``compared_answer.json`` into ``--output_dir``.
    """
    # --- Command-line arguments ---------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        type=str,
        default='gpt2',
        help='pretrained model name')
    parser.add_argument("--using_cache", type=bool, default=False)
    parser.add_argument(
        "--importance", type=float, help="LifeLong Learning need its (Lambda)")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )
    parser.add_argument("--do_eval",action="store_true")
    # parser.add_argument("--old_dataset", type=str, default="")
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--eval_batch_size', type=int, default=8)
    # parser.add_argument('--old_batch_size', type=int, default=1)
    parser.add_argument("--length", type=int, default=20)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_k", type=int, default=8)
    parser.add_argument("--top_p", type=float, default=0.9)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training \
                        steps to perform. Override num_train_epochs.")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help="Number of updates steps to accumulate before\
                        performing a backward/update pass.")
    parser.add_argument("--no_cuda",action="store_true")
    parser.add_argument("--argmax",action="store_true")
    parser.add_argument("--sample",type=int,default=1)
    args = parser.parse_args()
    # --- Device / RNG setup -------------------------------------------------
    args.device = torch.device(
        "cuda:0" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = torch.cuda.device_count()
    print(args)
    set_seed(args.seed)
    device=args.device
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
    if not args.do_eval:
        raise ValueError("At least `do_eval` must be True.")
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # --- Tokenizer / model --------------------------------------------------
    # Special tokens delimit the context / question / answer segments and
    # provide eos/pad markers for the QA prompt format.
    special_tokens = ['_context_', '_question_', '_ans_','_eos_','_pad_']
    load_dir, tokenizer, model, special_tokens_ids = load_tool(args.model_name,special_tokens,device)
    print(special_tokens_ids)
    def tokenize_and_encode(obj):
        """ Tokenize and encode a nested object """
        if isinstance(obj, str):
            # print("str ",obj)
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, set):
            return obj
        # Recurse into lists/tuples of strings.
        return list(tokenize_and_encode(o) for o in obj)
    # --- Dataset encoding ---------------------------------------------------
    logger.info("Encoding dataset...")
    eval_dataset = load_squad_dataset(
        args.eval_dataset, using_cache=args.using_cache)
    datasets = (eval_dataset, )
    encoded_datasets = tokenize_and_encode(datasets)
    # Compute the longest input that fits: truncated story + question + 5
    # special/separator tokens, capped by the model's position limit.
    max_length, q_length, a_length = longest_length(model)
    input_length = max(len(story[:max_length]) + len(question[:q_length]) + 5  \
                            for dataset in encoded_datasets for story, question, ans, _ in dataset)
    input_length = min(input_length, model.config.n_positions-2)
    # Load and encode the datasets
    # Prepare inputs tensors and dataloaders
    tensor_datasets, ID_list = pre_process_datasets(encoded_datasets, input_length,*special_tokens_ids)
    eval_data = TensorDataset(*tensor_datasets[0])
    eval_sampler = SequentialSampler(eval_data)
    eval_sampler=BatchSampler(eval_sampler,batch_size=args.eval_batch_size,drop_last=False)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,num_workers=8)
    # --- Evaluation loop ----------------------------------------------------
    if args.do_eval:
        model.eval()
        answer_dict = dict()      # question ID -> generated answer text
        compared_dict = dict()    # question ID -> (generated, ground truth)
        tqdm_bar = tqdm(eval_dataloader, desc="Evaluating")
        for step, data in enumerate(tqdm_bar):
            start_time = time.time()
            sentence, answer, ID_index = tuple(t.to(device) for t in data)
            # Strip padding tokens before generation.
            sentence = sentence[sentence != special_tokens_ids[4]].long()
            answer = answer[answer != special_tokens_ids[4]].long()
            # print(answer)
            # pdb.set_trace()
            out = sample_sequence(
                model=model,
                context=sentence,
                temperature=args.temperature,
                top_k=args.top_k,
                top_p=args.top_p,
                device=args.device,
                is_xlnet=False,
                tokenizer=tokenizer,
                argmax=args.argmax,num_samples=args.sample)
            end_time = time.time()
            # print("It costs {} seconds for generate data!!".format(end_time-start_time))
            out_ = out[:, :].tolist()
            answer_ = tokenizer.decode(answer.tolist(),clean_up_tokenization_spaces=True)
            for i in range(len(out_)):
                text = tokenizer.decode(out_[i], clean_up_tokenization_spaces=True,skip_special_tokens=True)
                answer_dict[ID_list[ID_index[0][i]]] = text
                compared_dict[ID_list[ID_index[0][i]]] = (text,answer_)
            if step % 50 == 0:
                print("step:", step)
                print("  prediction: ",text)
                print("  groundtrut: ",answer_)
        # --- Dump results ---------------------------------------------------
        with open(args.output_dir + "/predictions.json", "w") as outfile:
            json.dump(answer_dict, outfile)
        with open(args.output_dir + "/compared_answer.json","w") as outfile:
            json.dump(compared_dict, outfile)
    return
def dot(p1, p2):
    """
    Dot product of two 2-D points/vectors.

    :param p1: first point, indexable by the module-level X/Y constants
    :param p2: second point
    :return: the scalar dot product
    """
    x_term = p1[X] * p2[X]
    y_term = p1[Y] * p2[Y]
    return x_term + y_term
def logout():
    """View function which handles a logout request."""
    tf_clean_session()
    if current_user.is_authenticated:
        logout_user()
    # JSON POST requests receive an empty OK body instead of a redirect.
    wants_json = request.method == "POST" and _security._want_json(request)
    if wants_json:
        return _security._render_json({}, 200, headers=None, user=None)
    return redirect(get_post_logout_redirect())
def _lookup_configuration():
    """Lookup the configuration file.

    :return: opened configuration file
    :rtype: stream
    """
    # Try each candidate path in order; first existing file wins.
    for pth in CONFIG_PATH:
        path = os.path.abspath(os.path.expanduser(pth))
        LOGGER.debug('Checking for %s', path)
        if os.path.exists(path):
            LOGGER.info('Config file: %s', path)
            return open(path)
    # NOTE(review): this fallback returns raw bytes from pkgutil.get_data,
    # not an open stream like the branch above — callers appear to need to
    # handle both types; confirm whether this mixed return is intentional.
    return pkgutil.get_data('picdb', 'resources/config_app.yaml')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.