Dataset schema:

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

hexsha: 02daf469a9b3371648f1e038238e3d53ed5042fa | size: 18,746 | ext: py | lang: Python
max_stars: build/images/manifest.py in liexusong/fuchsia @ 81897680af92a1848a063e3c20ff3a4892ccff07, licenses ["BSD-2-Clause"], count 3, events 2021-09-02T07:21:06.000Z to 2022-03-12T03:20:10.000Z
max_issues: build/images/manifest.py in DamieFC/fuchsia @ f78a4a1326f4a4bb5834500918756173c01bab4f, licenses ["BSD-2-Clause"], count 56, events 2021-06-03T03:16:25.000Z to 2022-03-20T01:07:44.000Z
max_forks: build/images/manifest.py in DamieFC/fuchsia @ f78a4a1326f4a4bb5834500918756173c01bab4f, licenses ["BSD-2-Clause"], count 2, events 2022-02-25T12:22:49.000Z to 2022-03-12T03:20:10.000Z
content:
#!/usr/bin/env python3.8
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import namedtuple
import argparse
import os
import errno
import fnmatch
import shlex
import shutil
import sys
# TECHNICAL NOTE:
#
# This Python file serves both as a standalone script and as a utility module, to
# read and write ZBI manifests, which are simple text files where each line
# should look like:
#
# <target-path>=<source-path>
#
# Where <target-path> is a destination/installation path, e.g. within a Fuchsia
# package, and <source-path> is the location of the corresponding source content.
#
# Each manifest entry will be identified by a |manifest_entry| instance (see below),
# which records the entry's target and source paths, the input manifest it appears
# in, and an optional output 'group' index (more on this later).
#
# The core feature implemented here is to parse command-line arguments to build
# two manifest_entry lists:
#
# - A list of 'selected' entries that will be output to one of the output
# manifests specified on the command line through '--output=FILE'. Each such entry
# has a 'group' field which indicates which output file it must be written to.
#
# - A list of 'unselected' entries, either because they appeared before the
# first '--output=FILE' argument, or because they were excluded from selection
# through the use of '--exclude' and '--include' options. Each such entry
# has a 'group' value of None.
#
# Input manifest entries can be fed to the command line in two ways:
#
# * Using --manifest=FILE to read an existing ZBI manifest file.
#
# * Using --entry=TARGET=SOURCE directly, where TARGET and SOURCE are target and
# source paths for the entry. This requires at least one previous
# --entry-manifest=PATH argument, which will be used to populate the entry's
# |manifest| field.
#
# Note that each source path will be relative to the latest --cwd=DIR argument
# that appeared on the command line, with a default value of '.'.
#
# An output manifest can be specified with --output=FILE. This argument can be used
# several times to generate several output manifests.
#
# Any input entry that appears before the first --output argument goes to the
# unselected list.
#
# Input entries that appear after an --output argument are assigned to be written
# to the corresponding output manifest file by default, though the --exclude and
# --include options described below can result in such an entry being unselected
# as well.
#
# Many command line options control state during parsing that will either filter
# or transform the input entries before they are recorded into one of the two
# output lists. See their description in common_parse_args() and parse_args()
# below.
#
# Note that some scripts use this Python module to build a first version
# of the 'unselected' and 'selected' lists, and will later modify these lists
# before actually writing to the output manifest(s), for example to move
# unselected shared library dependencies to the selected list if some selected
# binaries depend on them.
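#
# Illustrative example (not part of the original note; all file names and paths
# here are hypothetical): given an input manifest 'system.manifest' whose lines
# follow the <target-path>=<source-path> format above, e.g.
#
#   bin/ls=obj/bin/ls
#   lib/libc.so=obj/lib/libc.so
#
# a sketch of an invocation of this script could be:
#
#   manifest.py --cwd=out/default --output=boot.manifest \
#       --include='bin/*' --manifest=system.manifest
#
# Because --manifest appears after --output, its entries are selected for
# boot.manifest, and the --include pattern keeps only 'bin/ls'; the
# 'lib/libc.so' entry ends up in the unselected list.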
#
# Describes a manifest entry:
# group: Either None, if the entry doesn't need to be written to any output
# manifest, or an integer index into the list of --output=FILE arguments
# otherwise.
# target: Target installation path.
# source: Source path for the entry.
# manifest: Path to input manifest this entry belongs to.
manifest_entry = namedtuple(
'manifest_entry', [
'group',
'target',
'source',
'manifest',
])
def format_manifest_entry(entry):
"""Convert a manifest_entry instance to its final text representation."""
return entry.target + '=' + entry.source
def format_manifest_file(manifest):
"""Convert a list of manifest_entry instances to a ZBI manifest file."""
return ''.join(format_manifest_entry(entry) + '\n' for entry in manifest)
def read_manifest_lines(sep, lines, title, manifest_cwd, result_cwd):
"""Convert an input manifest into a manifest_entry iteration.
Args:
sep: Separator used between target and source path (e.g. '=').
lines: Iterable of ZBI manifest input lines.
title: Path to the input manifest these lines belong to.
manifest_cwd: Current source directory assumed by input manifest.
result_cwd: Current source directory assumed by the resulting entries.
Returns:
An iterable of manifest_entry instances, where 'source' will be relative
to `result_cwd`.
"""
for line in lines:
# Remove the trailing newline.
assert line.endswith('\n'), 'Unterminated manifest line: %r' % line
line = line[:-1]
# The {group}target=source syntax is no longer supported, but just
# assert that we do not find it in input manifests anymore.
assert not line.startswith('{'), (
'{group} syntax no longer supported in manifest line: %r'% line)
# Grok target=source syntax.
[target_file, build_file] = line.split(sep, 1)
if manifest_cwd != result_cwd:
# Expand the path based on the cwd presumed in the manifest.
build_file = os.path.normpath(
os.path.join(manifest_cwd, build_file))
# Make it relative to the cwd we want to work from.
build_file = os.path.relpath(build_file, result_cwd)
yield manifest_entry(None, target_file, build_file, title)
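# Illustrative example of the rebasing performed above (hypothetical paths and
# manifest title): if a manifest written relative to 'out/default' contains the
# line 'bin/ls=obj/bin/ls' and the caller wants sources relative to 'out', then
#
#   read_manifest_lines('=', ['bin/ls=obj/bin/ls\n'], 'system.manifest',
#                       'out/default', 'out')
#
# yields a single manifest_entry with target 'bin/ls' and source
# 'default/obj/bin/ls'.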
def ingest_manifest_lines(
sep, lines, title, in_cwd, select_entries, out_cwd, output_group):
"""Convert an input manifest into two lists of selected and unselected entries.
Args:
sep: Separator used between target and source path (e.g. '=').
lines: Iterable of manifest input lines.
title: Path to the input manifest these lines belong to.
in_cwd: Current directory assumed by input manifest entries.
select_entries: A boolean indicating whether to select these manifest
entries.
out_cwd: Current directory assumed by the output.
output_group: A manifest_entry.group value that is only applied to
selected entries.
Returns:
A (selected, unselected) tuple, where each item is a list of manifest_entry
instances from the input. If |select_entries| is true, |selected| will contain
all entries from the input, with their group set to |output_group| and
|unselected| will be empty. If |select_entries| is False, then |selected| will
be empty, and |unselected| will contain all entries from the input, with their
group set to None.
"""
selected = []
unselected = []
for entry in read_manifest_lines(sep, lines, title, in_cwd, out_cwd):
if select_entries:
selected.append(entry._replace(group=output_group))
else:
unselected.append(entry._replace(group=None))
return selected, unselected
def apply_rewrites(sep, rewrites, entry):
"""Rewrite a manifest entry based on a list of rewrite rules.
The rewrite rules must be passed as a list of (pattern, line)
tuples, where |pattern| is an fnmatch pattern that will be checked
against the entry's target path.
In case of a match, the whole entry is replaced by the content of
|line|, which should typically look like "<target>=<source>" and will be
parsed through read_manifest_lines().
The {source} and {target} substitutions are supported in |line|.
Note that rewrites preserve the original entry's manifest and group values.
Args:
sep: Separator used between target and source path (e.g. '=')
rewrites: A list of (pattern, line) tuples.
entry: The entry to rewrite if necessary.
Returns:
The new entry value after potential rewrites.
"""
for pattern, line in rewrites:
if fnmatch.fnmatchcase(entry.target, pattern):
[new_entry] = read_manifest_lines(
sep, [line.format(**entry._asdict()) + '\n'], entry.manifest,
os.path.dirname(entry.manifest),
os.path.dirname(entry.manifest))
entry = new_entry._replace(group=entry.group)
return entry
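# Illustrative example of a rewrite rule (hypothetical values): with
# rewrites = [('bin/*', '{target}.stripped={source}')], an entry with target
# 'bin/ls' and source 'obj/bin/ls' is rewritten by apply_rewrites() to target
# 'bin/ls.stripped' and source 'obj/bin/ls', while its 'manifest' and 'group'
# values are preserved.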
def contents_entry(entry):
"""Replace a manifest_entry source path with its file content.
Used to implement the --content option. In a nutshell, an entry
that looks like '<target>=<source>' will be rewritten to
'<target>=<content of source file>', preserving other fields
in the entry.
"""
with open(entry.source) as file:
[line] = file.read().splitlines()
return entry._replace(source=line)
class input_action_base(argparse.Action):
"""Helper base class used to implement --manifest and --entry parsing.
This is a base class that assumes each derived class provides a
get_manifest_lines() method returning a (selected, unselected) pair
of manifest_entry lists, as returned by ingest_manifest_lines().
This maintains the state of the command-line parser, and creates and
updates the args.selected and args.unselected lists, described in the
technical note at the top of this file.
"""
def __init__(self, *args, **kwargs):
super(input_action_base, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
outputs = getattr(namespace, 'output', None)
all_selected = getattr(namespace, 'selected', None)
if all_selected is None:
all_selected = []
setattr(namespace, 'selected', all_selected)
all_unselected = getattr(namespace, 'unselected', None)
if all_unselected is None:
all_unselected = []
setattr(namespace, 'unselected', all_unselected)
# Only select manifest entries for output if at least one
# --output=FILE argument was found.
select_entries = outputs is not None
cwd = getattr(namespace, 'cwd', '')
if outputs is not None:
output_group = len(outputs) - 1
else:
output_group = None
selected, unselected = self.get_manifest_lines(
namespace, values, cwd, select_entries, namespace.output_cwd, output_group)
include = getattr(namespace, 'include', [])
exclude = getattr(namespace, 'exclude', [])
if include or exclude:
def included(entry):
def matches(file, patterns):
return any(
fnmatch.fnmatch(file, pattern) for pattern in patterns)
if matches(entry.target, exclude):
return False
if include and not matches(entry.target, include):
return False
return True
unselected += [entry for entry in selected if not included(entry)]
selected = list(filter(included, selected))
if getattr(namespace, 'contents', False):
selected = list(map(contents_entry, selected))
unselected = list(map(contents_entry, unselected))
sep = getattr(namespace, 'separator', '=')
rewrites = [
entry.split('=', 1) for entry in getattr(namespace, 'rewrite', [])
]
selected = [apply_rewrites(sep, rewrites, entry) for entry in selected]
unselected = [
apply_rewrites(sep, rewrites, entry) for entry in unselected
]
all_selected += selected
all_unselected += unselected
class input_manifest_action(input_action_base):
def __init__(self, *args, **kwargs):
super(input_manifest_action, self).__init__(*args, **kwargs)
def get_manifest_lines(self, namespace, filename, *args):
all_inputs = getattr(namespace, 'manifest', None)
if all_inputs is None:
all_inputs = []
setattr(namespace, 'manifest', all_inputs)
all_inputs.append(filename)
with open(filename, 'r') as file:
return ingest_manifest_lines(
getattr(namespace, 'separator', '='), file, file.name, *args)
class input_entry_action(input_action_base):
def __init__(self, *args, **kwargs):
super(input_entry_action, self).__init__(*args, **kwargs)
def get_manifest_lines(self, namespace, entry, *args):
return ingest_manifest_lines(
getattr(namespace, 'separator', '='), [entry + '\n'],
namespace.entry_manifest, *args)
def common_parse_args(parser):
"""Add common parsier arguments for this script and users of this module.
See technical note above to understand what these do.
"""
parser.fromfile_prefix_chars = '@'
parser.convert_arg_line_to_args = shlex.split
parser.add_argument(
'--output',
action='append',
required=True,
metavar='FILE',
help='Specify the next output manifest file.')
parser.add_argument(
'--output-cwd',
default='',
metavar='DIRECTORY',
help='Change the current source directory used when writing entries to output files.')
parser.add_argument(
'--absolute',
action='store_true',
default=False,
help='Output source file names as absolute paths.')
parser.add_argument(
'--cwd',
default='',
metavar='DIRECTORY',
help='Change the current source directory used when reading input entries.')
parser.add_argument(
'--manifest',
action=input_manifest_action,
metavar='FILE',
default=[],
help='Add all entries from input manifest file (must exist)')
parser.add_argument(
'--entry',
action=input_entry_action,
metavar='PATH=FILE',
help='Add a single entry as if from an input manifest. Requires a previous ' +
'--entry-manifest argument.')
parser.add_argument(
'--entry-manifest',
default='<command-line --entry>',
metavar='TITLE',
help='Title in lieu of manifest file name for subsequent --entry arguments.')
parser.add_argument(
'--include',
action='append',
default=[],
metavar='TARGET',
help='Only include input entries whose target path matches the fnmatch pattern ' +
'TARGET. Can be used multiple times to extend the list of patterns. These ' +
'are always applied after --exclude pattern exclusions.')
parser.add_argument(
'--reset-include',
action='store_const',
const=[],
dest='include',
help='Reset the --include pattern list to be empty.')
parser.add_argument(
'--exclude',
action='append',
default=[],
metavar='TARGET',
help='Ignore input entries whose target path matches the fnmatch pattern TARGET. ' +
'Can be used multiple times to extend the list of patterns.'),
parser.add_argument(
'--reset-exclude',
action='store_const',
const=[],
dest='exclude',
help='Reset the --exclude pattern list to be empty.')
parser.add_argument(
'--separator',
default='=',
metavar='SEP',
help='Use SEP between TARGET and SOURCE in manifest entries.')
return parser.parse_args()
def parse_args():
parser = argparse.ArgumentParser(description='Read manifest files.')
parser.add_argument(
'--copy-contentaddr',
action='store_true',
default=False,
help='Copy to content-addressed targets, not manifest.')
parser.add_argument(
'--sources',
action='store_true',
default=False,
help='Write source file per line, not manifest entry.')
parser.add_argument(
'--contents',
action='store_true',
default=False,
help='Replace each source file name with its contents.')
parser.add_argument(
'--no-contents',
action='store_false',
dest='contents',
help='Reset previous --contents')
parser.add_argument(
'--rewrite',
action='append',
default=[],
metavar='PATTERN=ENTRY',
help='Replace entries whose target matches PATTERN with ENTRY,'
' which can use {source} and {target} substitutions.'),
parser.add_argument(
'--reset-rewrite',
dest='rewrite',
action='store_const',
const=[],
help='Reset previous --rewrite.')
parser.add_argument(
'--unique',
action='store_true',
default=False,
help='Elide duplicates even with different sources.')
parser.add_argument(
'--stamp', metavar='FILE', help='Touch FILE at the end.')
args = common_parse_args(parser)
if args.copy_contentaddr:
if args.contents:
parser.error('--copy-contentaddr is incompatible with --contents')
args.unique = True
args.sources = True
return args
def main():
args = parse_args()
output_sets = [(dict() if args.unique else set()) for file in args.output]
for entry in getattr(args, 'selected', []):
assert entry.group is not None, entry
if args.absolute:
line = os.path.abspath(entry.source)
else:
line = entry.source
if not args.sources:
line = entry.target + args.separator + line
if args.unique:
output_sets[entry.group][entry.target] = line
else:
output_sets[entry.group].add(line)
for output_filename, output_set in zip(args.output, output_sets):
if args.copy_contentaddr:
created_dirs = set()
for target, source in output_set.items():
target_path = os.path.join(output_filename, target)
if os.path.exists(target_path):
continue
target_dir = os.path.dirname(target_path)
if target_dir not in created_dirs:
if not os.path.exists(target_dir):
os.makedirs(target_dir)
created_dirs.add(target_dir)
shutil.copyfile(source, target_path)
else:
with open(output_filename, 'w') as file:
file.write(
''.join(
sorted(
line + '\n' for line in (
iter(output_set.values()) if args.
unique else output_set))))
if args.stamp:
with open(args.stamp, 'w') as file:
os.utime(file.name, None)
return 0
if __name__ == '__main__':
sys.exit(main())

avg_line_length: 37.64257 | max_line_length: 94 | alphanum_fraction: 0.646165

hexsha: f37a37e1320c345b29bf10b70dcbc86bf1f0d06e | size: 645 | ext: py | lang: Python
max_stars: tests/test_traceback.py in pytask-dev/pytask @ b6769b48abda44c6261b9a7b58865f8844423c13, licenses ["MIT"], count 41, events 2020-07-24T15:19:19.000Z to 2022-03-17T17:40:57.000Z
max_issues: tests/test_traceback.py in pytask-dev/pytask @ b6769b48abda44c6261b9a7b58865f8844423c13, licenses ["MIT"], count 240, events 2020-06-26T21:37:49.000Z to 2022-03-31T08:56:56.000Z
max_forks: tests/test_traceback.py in pytask-dev/pytask @ b6769b48abda44c6261b9a7b58865f8844423c13, licenses ["MIT"], count null, events null
content:
import textwrap
import pytest
from pytask import cli
@pytest.mark.parametrize("is_hidden", [True, False])
def test_hide_traceback_from_error_report(runner, tmp_path, is_hidden):
source = f"""
def task_main():
a = "This variable should not be shown."
__tracebackhide__ = {is_hidden}
helper()
def helper():
raise Exception
"""
tmp_path.joinpath("task_main.py").write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix(), "--show-locals"])
assert result.exit_code == 1
assert ("This variable should not be shown." in result.output) is not is_hidden

avg_line_length: 23.888889 | max_line_length: 83 | alphanum_fraction: 0.67907

hexsha: e4f2c591e986caf33eef493a1165d2468486a5a5 | size: 3,463 | ext: py | lang: Python
max_stars: tests/m_test_matrixnet_applier.py in HolyBayes/rep @ 8a8d70f87e148e6fd73ff0c3a8606e6074a5c47b, licenses ["Apache-2.0"], count 726, events 2015-04-16T08:16:30.000Z to 2022-03-25T19:19:42.000Z
max_issues: tests/m_test_matrixnet_applier.py in HolyBayes/rep @ 8a8d70f87e148e6fd73ff0c3a8606e6074a5c47b, licenses ["Apache-2.0"], count 86, events 2015-04-16T23:57:01.000Z to 2021-09-26T01:03:47.000Z
max_forks: tests/m_test_matrixnet_applier.py in HolyBayes/rep @ 8a8d70f87e148e6fd73ff0c3a8606e6074a5c47b, licenses ["Apache-2.0"], count 167, events 2015-04-16T11:42:18.000Z to 2022-01-11T15:10:19.000Z
content:
"""
Here we test the correctness and speed of the formula.
"""
from __future__ import division, print_function, absolute_import
import os
import time
import numpy
from scipy.special import expit
import pandas
from six import BytesIO
from six.moves import zip
from rep.estimators._matrixnetapplier import MatrixNetApplier as NumpyClassifier
__author__ = 'Alex Rogozhnikov'
DATA_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "help_files")
def read_files(mx_filename, test_filename):
test_file = pandas.read_csv(test_filename, sep='\t')
with open(mx_filename, 'rb') as mx:
mx_content = mx.read()
return mx_content, test_file
def numpy_predict(formula_mx, data):
data = data.astype(float)
data = pandas.DataFrame(data)
mx = NumpyClassifier(BytesIO(formula_mx))
return mx.apply(data)
def stage_numpy_predict(formula_mx, data, step=1):
data = data.astype(float)
data = pandas.DataFrame(data)
mx = NumpyClassifier(BytesIO(formula_mx))
prediction = numpy.zeros(len(data))
for num, prediction_iteration in enumerate(mx.apply_separately(data)):
prediction += prediction_iteration
if num % step == 0:
yield expit(prediction)
def check_leaves(mx_filename, test_filename, n_trees=5000):
formula_mx, data = read_files(mx_filename, test_filename)
data = data.astype(float)
data = pandas.DataFrame(data)
mx = NumpyClassifier(BytesIO(formula_mx))
leaves = mx.compute_leaf_indices(data)
assert leaves.shape[0] == data.shape[0]
assert leaves.shape[1] == n_trees
print(leaves)
def test_leaves():
check_leaves(
os.path.join(DATA_PATH, 'test_formula_mx'),
os.path.join(DATA_PATH, 'data.csv'))
def check_staged_predictions(mx_filename, test_filename, n_iterations, stage_predict_function):
mx_content, test_file = read_files(mx_filename, test_filename)
predictions = pandas.read_csv(os.path.join(DATA_PATH, 'predictions.csv'))
predictions = pandas.DataFrame(predictions)
# Checking the predictions on first 100 events
for x, (key, row) in zip(stage_predict_function(mx_content, test_file[:100]), predictions.iterrows()):
assert numpy.allclose(row, x)
# Checking the number of iterations on 10 events
assert sum(1 for _ in stage_predict_function(mx_content, test_file[:10])) == n_iterations + 1
print('Check was passed')
# How the file was obtained
# def write_staged_predictions(mx_filename, test_filename):
# mx_content, test_file = read_files(mx_filename, test_filename)
# # testing on first 100 events
# test_file = test_file[:100]
#
# predictions = numpy.zeros([100, 100], dtype=float)
#
# for i, x in enumerate(stage_cython_predict(mx_content, test_file)):
# if i == 100:
# break
# predictions[i, :] = x
#
# pandas.DataFrame(predictions).to_csv('data/predictions.csv', index=False)
def compute_speed(mx_filename, test_filename, function, print_name=''):
mx_content, test_file = read_files(mx_filename, test_filename)
# just iterating over sequence
start = time.time()
for x in function(mx_content, test_file):
pass
print(print_name, time.time() - start)
def test_applier():
check_staged_predictions(
os.path.join(DATA_PATH, 'test_formula_mx'),
os.path.join(DATA_PATH, 'data.csv'),
stage_predict_function=stage_numpy_predict,
n_iterations=5000)

avg_line_length: 30.919643 | max_line_length: 106 | alphanum_fraction: 0.714409

hexsha: fe73ca589de4a0d681f9fa3d8bdee5349b32d6f9 | size: 697 | ext: py | lang: Python
max_stars: client/client/functions/commands/remote_shell.py in IchBInHanz/clientnet @ 20a75df953eee08d9bc1135a20cc662ba87efea1, licenses ["MIT"], count 1, events 2021-01-08T15:27:29.000Z to 2021-01-08T15:27:29.000Z
max_issues: client/client/functions/commands/remote_shell.py in IchBInHanz/clientnet @ 20a75df953eee08d9bc1135a20cc662ba87efea1, licenses ["MIT"], count null, events null
max_forks: client/client/functions/commands/remote_shell.py in IchBInHanz/clientnet @ 20a75df953eee08d9bc1135a20cc662ba87efea1, licenses ["MIT"], count null, events null
content:
def remoteShell(cmd):
import os
path_cmd_partitioned = cmd.partition("/workdir/")
workdir = path_cmd_partitioned[2]
# if workdir == "undifined":
# pass
# else:
# # workdir = workdir.replace("\\", "\\\\")
# os.chdir(workdir)
only_command = path_cmd_partitioned[0].replace("/cmd/", "").replace(path_cmd_partitioned[1], "").replace(path_cmd_partitioned[2], "")
if "cd " in only_command:
cd_path = only_command.replace("cd ", "")
# cd_path = cd_path.replace("\\", "\\\\")
os.chdir(cd_path)
return os.getcwd() + "__docknext__Directory changed!"
ret = os.getcwd() + "__docknext__" + only_command
return ret
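# Illustrative behaviour (hypothetical inputs, inferred from the parsing above):
# the incoming message is expected to look like "/cmd/<command>/workdir/<dir>",
# and the return value joins the current working directory and the command with
# the "__docknext__" separator, e.g.
#
#   remoteShell("/cmd/whoami/workdir/C:\\Users")
#   # -> "<current working directory>__docknext__whoami"
#   remoteShell("/cmd/cd C:\\Temp/workdir/C:\\Users")
#   # -> "C:\\Temp__docknext__Directory changed!"  (after changing directory)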

avg_line_length: 38.722222 | max_line_length: 137 | alphanum_fraction: 0.601148

hexsha: c0c2be61f906835387530a491f9378534aa55f0c | size: 2,898 | ext: py | lang: Python
max_stars: TRIPPy/plot/loadct.py in fsciortino/TRIPPy @ e2af5cc0e01dc42dbba9329e30b9dd20feca4902, licenses ["MIT"], count 2, events 2015-03-27T11:44:42.000Z to 2015-06-16T18:56:27.000Z
max_issues: TRIPPy/plot/loadct.py in fsciortino/TRIPPy @ e2af5cc0e01dc42dbba9329e30b9dd20feca4902, licenses ["MIT"], count 1, events 2019-03-12T18:16:28.000Z to 2019-04-24T23:44:23.000Z
max_forks: TRIPPy/plot/loadct.py in fsciortino/TRIPPy @ e2af5cc0e01dc42dbba9329e30b9dd20feca4902, licenses ["MIT"], count 2, events 2020-03-03T03:47:09.000Z to 2021-12-03T12:33:19.000Z
content:
import scipy
import matplotlib.colors
import os
import sys
import inspect
import matplotlib.pyplot as plt
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
gsdict = {'thermal':0,
'thermal2':1,
'thermal3':2,
'bright':3,
'copper2':4,
'dusk':5,
'earth':6,
'hicontrast':7,
'pastel':8,
'pink2':9,
'sepia':10,
'cold':11,
'RoyalGold':12,
'FCPM_001':13,
'CMR':14}
def loadct(num, r=False, mayavi=False, **kwargs):
if not mayavi:
output = scipy.genfromtxt(cmd_folder+'/idl_colors.txt',
skip_header=256*num,
skip_footer=(39-num)*256)/255.
if r:
output = output[::-1]
return matplotlib.colors.LinearSegmentedColormap.from_list('idl', output, **kwargs)
else:
output = scipy.ones((256,4),dtype=int)
output[:,0:3] = scipy.genfromtxt(cmd_folder+'/idl_colors.txt',
skip_header=256*num,
skip_footer=(39-num)*256,dtype=int)
if r:
output = output[::-1]
return output
def loadgs(num, r=False, mayavi=False, **kwargs):
if isinstance(num,str):
name = num
if name.endswith('_r'):
r = True
num = gsdict[num[:-2]]
else:
num = gsdict[num]
else:
name = 'gs'
if not mayavi:
output = scipy.genfromtxt(cmd_folder+'/gs_colors.txt',
skip_header=256*num,
skip_footer=(14-num)*256)
if r:
output = output[::-1]
return matplotlib.colors.LinearSegmentedColormap.from_list(name, output,**kwargs)
else:
output = scipy.ones((256,4),dtype=int)
output[:,0:3] = scipy.genfromtxt(cmd_folder+'/gs_colors.txt',
skip_header=256*num,
skip_footer=(14-num)*256,dtype=int)
if r:
output = output[::-1]
return output
def showct():
a=scipy.outer(scipy.arange(0,1,0.01),scipy.ones(10))
plt.figure(figsize=(10,5))
plt.subplots_adjust(top=0.8,bottom=0.05,left=0.01,right=0.99)
l=56
idx = 0
for m in range(40):
plt.subplot(1,l,idx+1)
idx += 1
plt.axis("off")
plt.imshow(a,aspect='auto',cmap=loadct(m),origin="lower")
plt.title('idl'+str(m),rotation=90,fontsize=10)
for m in range(15):
plt.subplot(1,l,idx+1)
idx += 1
plt.axis("off")
plt.imshow(a,aspect='auto',cmap=loadgs(m),origin="lower")
plt.title('gs'+str(m),rotation=90,fontsize=10)
plt.show()

avg_line_length: 29.272727 | max_line_length: 105 | alphanum_fraction: 0.506556

hexsha: c06479e4a71467c578ed9336b9fd868d899c6074 | size: 2,327 | ext: py | lang: Python
max_stars: backend/api/serializers/Notifications.py in amichard/tfrs @ ed3973016cc5c2ae48999d550a23b41a5ddad807, licenses ["Apache-2.0"], count 18, events 2017-05-10T21:55:11.000Z to 2021-03-01T16:41:32.000Z
max_issues: backend/api/serializers/Notifications.py in amichard/tfrs @ ed3973016cc5c2ae48999d550a23b41a5ddad807, licenses ["Apache-2.0"], count 1,167, events 2017-03-04T00:18:43.000Z to 2022-03-03T22:31:51.000Z
max_forks: backend/api/serializers/Notifications.py in amichard/tfrs @ ed3973016cc5c2ae48999d550a23b41a5ddad807, licenses ["Apache-2.0"], count 48, events 2017-03-09T17:19:39.000Z to 2022-02-24T16:38:17.000Z
content:
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from api.models.NotificationMessage import NotificationMessage
from .ComplianceReport import ComplianceReportMinSerializer
from .CreditTrade import CreditTradeMinSerializer
from .Document import DocumentMinSerializer
from .Organization import OrganizationMinSerializer
from .User import UserBasicSerializer
class NotificationMessageSerializer(serializers.ModelSerializer):
"""
Default Serializer for Notification Message
"""
originating_user = UserBasicSerializer(read_only=True)
user = UserBasicSerializer(read_only=True)
related_credit_trade = CreditTradeMinSerializer(read_only=True)
related_document = DocumentMinSerializer(read_only=True)
related_organization = OrganizationMinSerializer(read_only=True)
related_report = ComplianceReportMinSerializer(read_only=True)
def __init__(self, *args, **kwargs):
super(NotificationMessageSerializer, self).__init__(*args, **kwargs)
# mark all fields except is_read as read_only
for field_name in set(self.fields.keys()) - {'is_read'}:
self.fields[field_name].read_only = True
class Meta:
model = NotificationMessage
fields = '__all__'
class NotificationMessageUpdateSerializer(serializers.ModelSerializer):
"""
Update Serializer for Notification Message
"""
class Meta:
model = NotificationMessage
fields = '__all__'

avg_line_length: 36.936508 | max_line_length: 77 | alphanum_fraction: 0.760636

hexsha: 0bd451695e16901538ff5551c6b6b428cb769641 | size: 30,755 | ext: py | lang: Python
max_stars: neural_structured_learning/keras/adversarial_regularization.py in eustomaqua/neural-structured-learning @ e63a9e7ef435caaf6d70c04b6529e830bf47239d, licenses ["Apache-2.0"], count null, events null
max_issues: neural_structured_learning/keras/adversarial_regularization.py in eustomaqua/neural-structured-learning @ e63a9e7ef435caaf6d70c04b6529e830bf47239d, licenses ["Apache-2.0"], count null, events null
max_forks: neural_structured_learning/keras/adversarial_regularization.py in eustomaqua/neural-structured-learning @ e63a9e7ef435caaf6d70c04b6529e830bf47239d, licenses ["Apache-2.0"], count null, events null
content:
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Incorporates adversarial regularization into a Keras model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import types
import attr
import neural_structured_learning.configs as nsl_configs
import neural_structured_learning.lib as nsl_lib
import six
import tensorflow as tf
def adversarial_loss(features,
labels,
model,
loss_fn,
sample_weights=None,
adv_config=None,
predictions=None,
labeled_loss=None,
gradient_tape=None,
model_kwargs=None):
"""Computes the adversarial loss for `model` given `features` and `labels`.
This utility function adds adversarial perturbations to the input `features`,
runs the `model` on the perturbed features for predictions, and returns the
corresponding loss `loss_fn(labels, model(perturbed_features))`. This function
can be used in a Keras subclassed model and a custom training loop. This can
also be used freely as a helper function in eager execution mode.
The adversarial perturbation is based on the gradient of the labeled loss on
the original input features, i.e. `loss_fn(labels, model(features))`.
Therefore, this function needs to compute the model's predictions on the input
features as `model(features)`, and the labeled loss as `loss_fn(labels,
predictions)`. If predictions or labeled loss have already been computed, they
can be passed in via the `predictions` and `labeled_loss` arguments in order
to save computational resources. Note that in eager execution mode,
`gradient_tape` needs to be set accordingly when passing in `predictions` or
`labeled_loss`, so that the gradient can be computed correctly.
Example:
```python
# A linear regression model (for demonstrating the usage only)
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(2,))])
loss_fn = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
# Custom training loop. (The actual training data is omitted for clarity.)
for x, y in train_dataset:
with tf.GradientTape() as tape_w:
# A separate GradientTape is needed for watching the input.
with tf.GradientTape() as tape_x:
tape_x.watch(x)
# Regular forward pass.
labeled_loss = loss_fn(y, model(x))
# Calculates the adversarial loss. This will reuse labeled_loss and will
# consume tape_x.
adv_loss = nsl.keras.adversarial_loss(
x, y, model, loss_fn, labeled_loss=labeled_loss, gradient_tape=tape_x)
# Combines both losses. This could also be a weighted combination.
total_loss = labeled_loss + adv_loss
# Regular backward pass.
gradients = tape_w.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
```
Arguments:
features: Input features, should be a `Tensor` or a collection of `Tensor`
objects. If it is a collection, the first dimension of all `Tensor`
objects inside should be the same (i.e. batch size).
labels: Target labels.
model: A callable that takes `features` as inputs and computes `predictions`
as outputs. An example would be a `tf.keras.Model` object.
loss_fn: A callable which calculates labeled loss from `labels`,
`predictions`, and `sample_weights`. An example would be a
`tf.keras.losses.Loss` object.
sample_weights: (optional) A 1-D `Tensor` of weights for the examples, with
the same length as the first dimension of `features`.
adv_config: (optional) An `nsl.configs.AdvRegConfig` object for adversarial
regularization hyperparameters. Use `nsl.configs.make_adv_reg_config` to
construct one.
predictions: (optional) Precomputed value of `model(features)`. If set, the
value will be reused when calculating adversarial regularization. In eager
mode, the `gradient_tape` has to be set as well.
labeled_loss: (optional) Precomputed value of `loss_fn(labels,
model(features))`. If set, the value will be reused when calculating
adversarial regularization. In eager mode, the `gradient_tape` has to be
set as well.
gradient_tape: (optional) A `tf.GradientTape` object watching `features`.
model_kwargs: (optional) A dictionary of additional keyword arguments to be
passed to the `model`.
Returns:
A `Tensor` for adversarial regularization loss, i.e. labeled loss on
adversarially perturbed features.
"""
if adv_config is None:
adv_config = nsl_configs.AdvRegConfig()
if model_kwargs is not None:
model = functools.partial(model, **model_kwargs)
# Calculates labeled_loss if not provided.
if labeled_loss is None:
# Reuses the tape if provided; otherwise creates a new tape.
gradient_tape = gradient_tape or tf.GradientTape()
with gradient_tape:
gradient_tape.watch(tf.nest.flatten(features))
# Calculates prediction if not provided.
predictions = predictions if predictions is not None else model(features)
labeled_loss = loss_fn(labels, predictions, sample_weights)
adv_input, adv_sample_weights = nsl_lib.gen_adv_neighbor(
features,
labeled_loss,
config=adv_config.adv_neighbor_config,
gradient_tape=gradient_tape)
adv_output = model(adv_input)
if sample_weights is not None:
adv_sample_weights = tf.math.multiply(sample_weights, adv_sample_weights)
adv_loss = loss_fn(labels, adv_output, adv_sample_weights)
return adv_loss
class _LossWrapper(tf.keras.losses.Loss):
"""Wrapper converting a loss function into a `Loss` object.
This is to reuse logic of sample-weighted loss computation in `Loss` base
class.
Attributes:
loss_fn: Underlying loss function.
weight: Weight of this loss term in total loss. Should be applied outside
of this class, e.g. `total_loss += loss.weight * loss(y_true, y_pred)`.
batch_size_reduction: Whether to perform `SUM_OVER_BATCH_SIZE` reduction.
This field is set in lieu of having `reduction=SUM_OVER_BATCH_SIZE`,
because the latter is not supported when used with
`tf.distribute.Strategy`.
"""
def __init__(self, loss_fn, name, weight):
reduction = getattr(loss_fn, 'reduction', None)
if reduction in (None, tf.losses.Reduction.SUM_OVER_BATCH_SIZE,
tf.compat.v2.losses.Reduction.AUTO):
reduction = tf.losses.Reduction.NONE
self.batch_size_reduction = True
else:
self.batch_size_reduction = False
super(_LossWrapper, self).__init__(name=name, reduction=reduction)
self.weight = weight
if isinstance(loss_fn, tf.keras.losses.Loss) and self.batch_size_reduction:
self.loss_fn = loss_fn.__class__.from_config(loss_fn.get_config())
self.loss_fn.reduction = tf.losses.Reduction.NONE
else:
self.loss_fn = loss_fn
def call(self, y_true, y_pred):
return self.loss_fn(y_true, y_pred)
def __call__(self, *args, **kwargs):
if isinstance(self.loss_fn, tf.keras.losses.Loss):
loss_value = self.loss_fn(*args, **kwargs)
else:
loss_value = super(_LossWrapper, self).__call__(*args, **kwargs)
if self.batch_size_reduction:
size = tf.cast(tf.size(loss_value), dtype=loss_value.dtype)
loss_value = tf.math.divide_no_nan(tf.math.reduce_sum(loss_value), size)
return loss_value
def _is_sparse_categorical_loss(self):
return self.loss_fn == tf.keras.losses.sparse_categorical_crossentropy or (
isinstance(self.loss_fn, tf.keras.losses.SparseCategoricalCrossentropy))
def _is_binary_classification_loss(self):
return self.loss_fn in (
tf.keras.losses.binary_crossentropy,
tf.keras.losses.hinge, tf.keras.losses.squared_hinge) or isinstance(
self.loss_fn, (tf.keras.losses.BinaryCrossentropy,
tf.keras.losses.Hinge, tf.keras.losses.SquaredHinge))
def resolve_metric(self, metric):
"""Resolves potentially ambiguous metric name based on the loss function."""
# This method is intended for the scenario that a Keras model is compiled
# with a metric whose meaning depends on the learning task. For example,
# `'accuracy'` may refer to `tf.keras.metrics.binary_accuracy` for binary
# classification tasks, to `tf.keras.metrics.categorical_accuracy` for
# multi-class classification tasks with one-hot labels, or to
# `tf.keras.metrics.sparse_categorical_accuracy` for multi-class
# classification tasks with index labels. In such scenario the loss
# function can help deduce the desired metric function since they share the
# same input `(y_true, y_pred)`.
# The list of such metrics is defined in `get_metric_function()` in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/engine/training_utils.py
if metric not in ('accuracy', 'acc', 'crossentropy', 'ce'):
return metric
if self._is_binary_classification_loss():
prefix = 'binary_'
elif self._is_sparse_categorical_loss():
prefix = 'sparse_categorical_'
else:
prefix = 'categorical_'
suffix = 'accuracy' if metric in ('accuracy', 'acc') else 'crossentropy'
return prefix + suffix
def _prepare_loss_fns(loss, output_names):
"""Converts `loss` to a list of per-output loss functions or objects."""
# losses for multiple outputs indexed by name
if isinstance(loss, collections.Mapping):
for name in output_names:
if name not in loss:
raise ValueError(
'Loss for {} not found in `loss` dictionary.'.format(name))
return [tf.keras.losses.get(loss[name]) for name in output_names]
# loss for single output, or shared loss fn for multiple outputs
if isinstance(loss, six.string_types):
return [tf.keras.losses.get(loss) for _ in output_names]
# losses for multiple outputs indexed by position
if isinstance(loss, collections.Sequence):
if len(loss) != len(output_names):
raise ValueError('`loss` should have the same number of elements as '
'model output')
return six.moves.map(tf.keras.losses.get, loss)
# loss for single output, or shared loss fn for multiple outputs
return [tf.keras.losses.get(loss) for _ in output_names]
def _prepare_loss_weights(loss_weights, output_names):
"""Converts `loss_weights` to a list of float values."""
if loss_weights is None:
return [1.0] * len(output_names)
if isinstance(loss_weights, collections.Sequence):
if len(loss_weights) != len(output_names):
raise ValueError('`loss_weights` should have the same number of elements '
'as model output')
return list(map(float, loss_weights))
if isinstance(loss_weights, collections.Mapping):
for name in output_names:
if name not in loss_weights:
raise ValueError('Loss weight for {} not found in `loss_weights` '
'dictionary.'.format(name))
return [float(loss_weights[name]) for name in output_names]
raise TypeError('`loss_weights` must be a list or a dict, '
'got {}'.format(str(loss_weights)))
def _clone_metrics(metrics):
"""Creates a copy of the maybe-nested metric specification.
Args:
metrics: A collection of metric specifications. Supports the same set of
formats as the `metrics` argument in `tf.keras.Model.compile`.
Returns:
The same format as the `metrics` argument, with all `tf.keras.metric.Metric`
objects replaced by their copies.
"""
def clone(metric):
# A `Metric` object is stateful and can only be used in 1 model on 1 output.
# Cloning the object allows the same metric to be applied in both base and
# adversarial-regularized models, and also on multiple outputs in one model.
# The cloning logic is the same as the `clone_metric` function in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/metrics.py
if not isinstance(metric, tf.keras.metrics.Metric):
return metric
with tf.init_scope():
return metric.__class__.from_config(metric.get_config())
return tf.nest.map_structure(clone, metrics)
def _prepare_metric_fns(metrics, output_names, loss_wrappers):
"""Converts `metrics` into a list of per-output list of metrics.
Args:
metrics: List of metrics to be evaluated during training and testing. Each
metric can be specified using a string (e.g. `'accuracy'`) or a
`tf.keras.metrics.Metric` object. For multi-output model, this can also be
a dictionary like `{'output1': ['metric'], 'output2': ['metric2']}`.
See the `metrics` argument in `tf.keras.Model.compile`.
output_names: List of names of the model's output. If `metrics` is a
dictionary, the names in this list will be taken as lookup keys.
loss_wrappers: List of `_LossWrapper` objects corresponding to each output.
Returns:
A list of the same length as `output_names`, where each element is a list of
callables representing the metrics to be evaluated on the corresponding
output.
"""
if metrics is None:
return [[] for _ in output_names]
if not isinstance(metrics, (list, collections.Mapping)):
raise TypeError('`metrics` must be a list or a dict, got {}'.format(
str(metrics)))
to_list = lambda x: x if isinstance(x, list) else [x]
if isinstance(metrics, collections.Mapping):
# Converts `metrics` from a dictionary to a list of lists using the order
# specified in `output_names`.
metrics = [to_list(metrics.get(name, [])) for name in output_names]
if not any(isinstance(m, list) for m in metrics):
# Replicates `metrics` to be a list of lists if it is a plain list of
# metrics, so that all metrics can be applied to each output.
metrics = [metrics] + [_clone_metrics(metrics) for _ in output_names[1:]]
# Here `metrics` is a list of lists, and each sub-list corresponds to metrics
# to be applied on an output.
if len(metrics) != len(output_names):
raise ValueError('The number of sub-lists in `metrics` should be the '
'same as model output.')
metric_fns = []
for per_output_metrics, loss_wrapper in zip(metrics, loss_wrappers):
metric_fns.append([
tf.keras.metrics.get(loss_wrapper.resolve_metric(metric))
for metric in to_list(per_output_metrics)
])
return metric_fns
def _compute_loss_and_metrics(losses,
metrics,
labels,
outputs,
sample_weights=None):
"""Computes total loss and (loss value, loss name) pairs for metrics.
Args:
losses: List of `_LossWrapper` objects to be evaluated on corresponding
outputs. Must have the same length as `labels` and `outputs`.
metrics: List of list of (metric fn, metric name) pairs, for additional
metrics to report for each output. Must have the same length as `outputs`.
If set to `None`, no additional metrics will be reported.
labels: List of `Tensor` objects of ground truth targets. Must have the same
length as `losses` and `outputs`.
outputs: List of `Tensor` objects of predicted targets. Must have the same
length as `losses` and `labels`.
sample_weights: (optional) `Tensor` of weight for the loss of each sample.
Returns:
total_loss: Weighted sum of losses on all outputs.
metrics: List of (value, aggregation, name) tuples for metric reporting.
"""
outputs = tf.nest.flatten(outputs)
total_loss, output_metrics = [], []
if metrics is None:
metrics = [[]] * len(losses)
for (label, output, loss, per_output_metrics) in zip(labels, outputs, losses,
metrics):
loss_value = loss(label, output, sample_weights)
total_loss.append(loss.weight * loss_value)
output_metrics.append((loss_value, 'mean', loss.name))
for metric_fn, metric_name in per_output_metrics:
value = metric_fn(label, output)
# Metric objects always return an aggregated result, and shouldn't be
# aggregated again.
if isinstance(metric_fn, tf.keras.metrics.Metric):
aggregation = None
else:
aggregation = 'mean'
output_metrics.append((value, aggregation, metric_name))
return tf.add_n(total_loss), output_metrics
class AdversarialRegularization(tf.keras.Model):
"""Wrapper thats adds adversarial regularization to a given `tf.keras.Model`.
This model will reuse the layers and variables as the given `base_model`, so
training this model will also update the variables in the `base_model`. The
adversarial regularization can be configured by `adv_config`. (See
`nsl.configs.AdvRegConfig` for the hyperparameters.) The regularization term
will be added into training objective, and will be minimized during training
together with other losses specified in `compile()`.
This model expects its input to be a dictionary mapping feature names to
feature values. The dictionary should contain both input data (`x`) and target
data (`y`). The feature names of the target data should be passed to this
model's constructor in `label_keys`, so the model can distinguish between
input data and target data. If your samples are weighted, the sample weight
should also be a feature in the dictionary, and its name should be passed to
the constructor in `sample_weight_key`. When calling this model's `fit()` or
`evaluate()` method, the argument `y` should not be set because the target
data is already in the input dictionary. The dictionary format also implies
that the input has to be named, i.e. the `name` argument of `tf.keras.Input()`
should be set.
Example:
```python
# A linear regression model (for demonstrating the usage only)
base_model = tf.keras.Sequential([
tf.keras.Input(shape=(2,), name='input'),
tf.keras.layers.Dense(1),
])
# Applies the wrapper, with 0.2 as regularization weight.
adv_config = nsl.configs.make_adv_reg_config(multiplier=0.2)
adv_model = nsl.keras.AdversarialRegularization(base_model,
label_keys=['label'],
adv_config=adv_config)
# Compiles the model as usual.
adv_model.compile(optimizer='adam', loss='mean_squared_error')
# Trains the model. (The actual training data is omitted for clarity.)
# The model minimizes (mean_squared_error + 0.2 * adversarial_regularization).
adv_model.fit(x={'input': x_train, 'label': y_train}, batch_size=32)
```
It is recommended to use `tf.data.Dataset` to handle the dictionary format
requirement of the input, especially when using the `validation_data` argument
in `fit()`.
```python
train_data = tf.data.Dataset.from_tensor_slices(
{'input': x_train, 'label': y_train}).batch(batch_size)
val_data = tf.data.Dataset.from_tensor_slices(
{'input': x_val, 'label': y_val}).batch(batch_size)
val_steps = x_val.shape[0] / batch_size
adv_model.fit(train_data, validation_data=val_data,
validation_steps=val_steps, epochs=2, verbose=1)
```
"""
def __init__(self,
base_model,
label_keys=('label',),
sample_weight_key=None,
adv_config=None):
"""Constructor of `AdversarialRegularization` class.
Args:
base_model: A `tf.keras.Model` to which adversarial regularization will be
applied.
label_keys: A tuple of strings denoting which keys in the input features
(a `dict` mapping keys to tensors) represent labels. This list should be
in 1-to-1 correspondence with the outputs of the `base_model`.
sample_weight_key: A string denoting which key in the input feature (a
`dict` mapping keys to tensors) represents sample weight. If not set,
the weight is 1.0 for each input example.
adv_config: Instance of `nsl.configs.AdvRegConfig` for configuring
adversarial regularization.
"""
super(AdversarialRegularization,
self).__init__(name='AdversarialRegularization')
self.base_model = base_model
self.label_keys = label_keys
self.sample_weight_key = sample_weight_key
self.adv_config = adv_config or nsl_configs.AdvRegConfig()
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
**kwargs):
if loss:
self._compile_arg_loss = loss
self._compile_arg_loss_weights = loss_weights
self._compile_arg_metrics = metrics
self._labeled_losses = None
self._labeled_metrics = None
# Compiles base model with saved losses and metrics.
self.base_model.compile(
optimizer,
loss=self._compile_arg_loss,
metrics=_clone_metrics(self._compile_arg_metrics),
loss_weights=self._compile_arg_loss_weights,
**kwargs)
if hasattr(self.base_model, 'output_names'):
# Organizes losses after the base model is fully compiled. The output
# names from the base model is needed in case the loss (and/or
# loss_weights) is specified in a dict().
self._build_loss_and_metric_fns(self.base_model.output_names)
# Hides losses and metrics for parent class so the model won't expect
# separate label input (parameter `y`) in fit() and evaluate().
super(AdversarialRegularization, self).compile(optimizer, **kwargs)
def _make_metric_name(self, fn, label):
"""Generates a unique name, and resolves conflicts by appending a number."""
if isinstance(fn, types.FunctionType):
base_name = fn.__name__
else:
base_name = getattr(fn, 'name', fn.__class__.__name__)
if len(self.label_keys) > 1:
# If there is more than one output, disambiguate losses by the corresponding
# label name.
base_name += '_' + label
if base_name not in self._metric_name_count:
self._metric_name_count[base_name] = 1
return base_name
else:
self._metric_name_count[base_name] += 1
return '{}_{}'.format(base_name, self._metric_name_count[base_name])
def _build_loss_and_metric_fns(self, output_names):
self._metric_name_count = collections.Counter()
self._build_labeled_losses(output_names)
self._build_labeled_metrics(output_names, self._labeled_losses)
del self._metric_name_count # no longer needed
def _build_labeled_losses(self, output_names):
if self._labeled_losses:
return # Losses are already populated.
if len(output_names) != len(self.label_keys):
raise ValueError('The model has different number of outputs and labels. '
'({} vs. {})'.format(
len(output_names), len(self.label_keys)))
loss_fns = _prepare_loss_fns(self._compile_arg_loss, output_names)
loss_weights = _prepare_loss_weights(self._compile_arg_loss_weights,
output_names)
self._labeled_losses = []
for loss_fn, loss_weight, label_key in zip(loss_fns, loss_weights,
self.label_keys):
loss_name = self._make_metric_name(loss_fn, label_key)
self._labeled_losses.append(_LossWrapper(loss_fn, loss_name, loss_weight))
def _build_labeled_metrics(self, output_names, labeled_losses):
if self._labeled_metrics:
return # Metrics are already populated.
metric_fn_lists = _prepare_metric_fns(self._compile_arg_metrics,
output_names, labeled_losses)
self._labeled_metrics = []
for metric_fns, label_key in zip(metric_fn_lists, self.label_keys):
per_output_metrics = []
for metric_fn in metric_fns:
metric_name = self._make_metric_name(metric_fn, label_key)
if isinstance(metric_fn, tf.keras.metrics.Metric):
# Updates the name of the Metric object to make sure it is unique.
metric_fn._name = metric_name # pylint: disable=protected-access
per_output_metrics.append((metric_fn, metric_name))
self._labeled_metrics.append(per_output_metrics)
def _get_or_create_base_output_names(self, outputs):
num_output = len(tf.nest.flatten(outputs))
return getattr(self.base_model, 'output_names',
['output_%d' % i for i in range(1, num_output + 1)])
def _compute_total_loss(self, labels, outputs, sample_weights=None):
# `None` is passed instead of the actual metrics in order to skip computing
# metric values and updating metric states.
loss, _ = _compute_loss_and_metrics(self._labeled_losses, None, labels,
outputs, sample_weights)
return loss
def _split_inputs(self, inputs):
sample_weights = inputs.get(self.sample_weight_key, None)
# Labels shouldn't be perturbed when generating adversarial examples.
labels = [
tf.stop_gradient(inputs[label_key]) for label_key in self.label_keys
]
# Removes labels and sample weights from the input dictionary, since they
# are only used in this class and base model does not need them as inputs.
non_feature_keys = set(self.label_keys).union([self.sample_weight_key])
inputs = {
key: value
for key, value in six.iteritems(inputs)
if key not in non_feature_keys
}
return inputs, labels, sample_weights
def _forward_pass(self, inputs, labels, sample_weights, base_model_kwargs):
"""Runs the usual forward pass to compute outputs, loss, and metrics."""
with tf.GradientTape() as tape:
tape.watch(list(inputs.values()))
outputs = self.base_model(inputs, **base_model_kwargs)
# If the base_model is a subclassed model, its output_names are not
# available before its first call. If it is a dynamic subclassed model,
# its output_names are not available even after its first call, so we
# create names to match the number of outputs.
self._build_loss_and_metric_fns(
self._get_or_create_base_output_names(outputs))
labeled_loss, metrics = _compute_loss_and_metrics(self._labeled_losses,
self._labeled_metrics,
labels, outputs,
sample_weights)
return outputs, labeled_loss, metrics, tape
def call(self, inputs, **kwargs):
if any(key not in inputs for key in self.label_keys):
# This is to prevent "no loss to optimize" error when the first call to
# the model is without label input.
raise ValueError('Labels are not in the input. For predicting examples '
'without labels, please use the base model instead.')
inputs, labels, sample_weights = self._split_inputs(inputs)
outputs, labeled_loss, metrics, tape = self._forward_pass(
inputs, labels, sample_weights, kwargs)
self.add_loss(labeled_loss)
for value, aggregation, name in metrics:
self.add_metric(value, aggregation=aggregation, name=name)
# Adversarial loss.
adv_loss = adversarial_loss(
inputs,
labels,
self.base_model,
self._compute_total_loss,
sample_weights=sample_weights,
adv_config=self.adv_config,
labeled_loss=labeled_loss,
gradient_tape=tape,
model_kwargs=kwargs)
self.add_loss(self.adv_config.multiplier * adv_loss)
self.add_metric(adv_loss, name='adversarial_loss', aggregation='mean')
return outputs
def save(self, *args, **kwargs):
raise NotImplementedError(
'Saving `AdversarialRegularization` models is currently not supported. '
'Consider using `save_weights` or saving the `base_model`.')
def perturb_on_batch(self, x, **config_kwargs):
"""Perturbs the given input to generates adversarial examples.
Args:
x: Input examples to be perturbed, in a dictionary of Numpy arrays,
`Tensor`, `SparseTensor`, or `RaggedTensor` objects. The first
dimension of all tensors or arrays should be the same (i.e. batch size).
**config_kwargs: (optional) hyperparameters for generating adversarial
perturbation. Any keyword argument here will overwrite the corresponding
field in `nsl.configs.AdvNeighborConfig` specified in `__init__`.
Acceptable keys: `feature_mask`, `adv_step_size`, and `adv_grad_norm`.
Returns:
A dictionary of NumPy arrays, `SparseTensor`, or `RaggedTensor` objects of
the generated adversarial examples.
"""
x = tf.nest.map_structure(tf.convert_to_tensor, x, expand_composites=True)
inputs, labels, sample_weights = self._split_inputs(x)
_, labeled_loss, _, tape = self._forward_pass(inputs, labels,
sample_weights,
{'training': False})
config_kwargs = {k: v for k, v in config_kwargs.items() if v is not None}
config = attr.evolve(self.adv_config.adv_neighbor_config, **config_kwargs)
adv_inputs, _ = nsl_lib.gen_adv_neighbor(
inputs, labeled_loss, config=config, gradient_tape=tape)
if tf.executing_eagerly():
# Converts `Tensor` objects to NumPy arrays and keeps other objects (e.g.
# `SparseTensor`) as-is.
adv_inputs = tf.nest.map_structure(
lambda x: x.numpy() if hasattr(x, 'numpy') else x,
adv_inputs,
expand_composites=False)
else:
adv_inputs = tf.keras.backend.function([], adv_inputs)([])
# Inserts the labels and sample_weights back to the input dictionary, so
# the returned input has the same structure as the original input.
for label_key, label in zip(self.label_keys, labels):
adv_inputs[label_key] = label
if self.sample_weight_key is not None:
adv_inputs[self.sample_weight_key] = sample_weights
return adv_inputs
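# Editor's note: a minimal, hedged usage sketch for `perturb_on_batch` (not part of
# the original module). The toy feature/label names, shapes, and hyperparameter
# values below are illustrative assumptions; the wrapper is constructed through the
# public `AdversarialRegularization(base_model, label_keys=...)` signature.
if __name__ == '__main__':
  import numpy as np

  # Base model whose single input name matches the 'feature' key of the input dict.
  base = tf.keras.Sequential([
      tf.keras.Input(shape=(4,), name='feature'),
      tf.keras.layers.Dense(8, activation='relu'),
      tf.keras.layers.Dense(2),
  ])
  adv_model = AdversarialRegularization(base, label_keys=['label'])
  adv_model.compile(
      optimizer='adam',
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))

  batch = {'feature': np.random.rand(32, 4).astype('float32'),
           'label': np.random.randint(0, 2, size=(32,))}
  adv_model.fit(batch, batch_size=32, epochs=1)

  # Returns a dict with the same keys; 'feature' holds the adversarially perturbed
  # inputs, and the label entries are passed through unchanged.
  adv_batch = adv_model.perturb_on_batch(batch, adv_step_size=0.1)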
| 44.061605 | 107 | 0.695269 |
95b181e6008eeaafe572a6cacb3d0d7f8043e777 | 641 | py | Python | post/migrations/0040_auto_20191216_0455.py | abhiabhi94/blog | 0da1522361e47af3bbfba974f801277e14c7a397 | ["MIT"] | null | null | null | post/migrations/0040_auto_20191216_0455.py | abhiabhi94/blog | 0da1522361e47af3bbfba974f801277e14c7a397 | ["MIT"] | 12 | 2019-09-11T18:37:19.000Z | 2019-09-16T21:51:35.000Z | post/migrations/0040_auto_20191216_0455.py | abhiabhi94/blog | 0da1522361e47af3bbfba974f801277e14c7a397 | ["MIT"] | null | null | null |
# Generated by Django 2.2.5 on 2019-12-15 23:25
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('post', '0039_post_hits'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='_content_rendered',
),
migrations.RemoveField(
model_name='post',
name='content_markup_type',
),
migrations.AlterField(
model_name='post',
name='content',
field=ckeditor_uploader.fields.RichTextUploadingField(),
),
]
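# Editor's note: auto-generated migration; a file of this shape would typically be
# produced by `python manage.py makemigrations post` after switching the model's
# content field to RichTextUploadingField, and applied with `python manage.py migrate`.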
| 22.892857 | 68 | 0.581903 |
8a5ae3d6f7505bd18fca3c39adf8199852b9556c | 11,092 | py | Python | data_generation/forward_model/devito/devito/core/autotuning.py | LukasMosser/SNIST | c7122541db2aa597cff34a45aa0465ec1679e2b3 | ["MIT"] | 11 | 2019-07-18T11:15:12.000Z | 2021-09-14T02:20:52.000Z | data_generation/forward_model/devito/devito/core/autotuning.py | LukasMosser/SNIST | c7122541db2aa597cff34a45aa0465ec1679e2b3 | ["MIT"] | null | null | null | data_generation/forward_model/devito/devito/core/autotuning.py | LukasMosser/SNIST | c7122541db2aa597cff34a45aa0465ec1679e2b3 | ["MIT"] | 3 | 2020-02-15T14:23:29.000Z | 2021-02-04T01:51:02.000Z |
from collections import OrderedDict
from itertools import chain, combinations, product
import resource
import psutil
from devito.dle import BlockDimension, NThreads
from devito.ir import Backward, retrieve_iteration_tree
from devito.logger import perf, warning as _warning
from devito.mpi import MPI
from devito.parameters import configuration
from devito.symbolics import evaluate
from devito.tools import filter_ordered, flatten, prod
__all__ = ['autotune']
def autotune(operator, args, level, mode):
"""
Operator autotuning.
Parameters
----------
operator : Operator
Input Operator.
args : dict_like
The runtime arguments with which `operator` is run.
level : str
The autotuning aggressiveness (basic, aggressive). A more aggressive
autotuning might eventually result in higher performance, though in
some circumstances it might instead increase the actual runtime.
mode : str
The autotuning mode (preemptive, runtime). In preemptive mode, the
output runtime values supplied by the user to `operator.apply` are
replaced with shadow copies.
"""
key = [level, mode]
accepted = configuration._accepted['autotuning']
if key not in accepted:
raise ValueError("The accepted `(level, mode)` combinations are `%s`; "
"provided `%s` instead" % (accepted, key))
# Tunable objects
blockable = [i for i in operator.dimensions if isinstance(i, BlockDimension)]
nthreads = [i for i in operator.input if isinstance(i, NThreads)]
if len(nthreads + blockable) == 0:
# Nothing to tune for
return args, {}
# We get passed all the arguments, but the cfunction only requires a subset
at_args = OrderedDict([(p.name, args[p.name]) for p in operator.parameters])
# User-provided output data won't be altered in `preemptive` mode
if mode == 'preemptive':
output = [i.name for i in operator.output]
for k, v in args.items():
if k in output:
at_args[k] = v.copy()
# Disable halo exchanges as the number of autotuning steps performed on each
# rank may be different. Also, this makes the autotuning runtimes reliable
# regardless of whether the timed regions include the halo exchanges or not,
# as now the halo exchanges become a no-op.
try:
nb = []
if mode != 'runtime':
for i, _ in at_args['nb']._obj._fields_:
nb.append((i, getattr(at_args['nb']._obj, i)))
setattr(at_args['nb']._obj, i, MPI.PROC_NULL)
except KeyError:
assert not configuration['mpi']
trees = retrieve_iteration_tree(operator.body)
# Shrink the time dimension's iteration range for quick autotuning
steppers = {i for i in flatten(trees) if i.dim.is_Time}
if len(steppers) == 0:
stepper = None
timesteps = 1
elif len(steppers) == 1:
stepper = steppers.pop()
timesteps = init_time_bounds(stepper, at_args)
if timesteps is None:
return args, {}
else:
warning("cannot perform autotuning unless there is one time loop; skipping")
return args, {}
# Formula to calculate the number of parallel blocks given block shape,
# number of threads, and extent of the parallel iteration space
calculate_parblocks = make_calculate_parblocks(trees, blockable, nthreads)
    # Generate loop-blocking attempts
block_shapes = generate_block_shapes(blockable, args, level)
# Generate nthreads attempts
nthreads = generate_nthreads(nthreads, args, level)
generators = [i for i in [block_shapes, nthreads] if i]
timings = OrderedDict()
for i in product(*generators):
run = tuple(chain(*i))
mapper = OrderedDict(run)
# Can we safely autotune over the given time range?
if not check_time_bounds(stepper, at_args, args, mode):
break
# Update `at_args` to use the new tunable values
at_args = {k: mapper.get(k, v) for k, v in at_args.items()}
if heuristically_discard_run(calculate_parblocks, at_args):
continue
# Make sure we remain within stack bounds, otherwise skip run
try:
stack_footprint = operator._mem_summary['stack']
if int(evaluate(stack_footprint, **at_args)) > options['stack_limit']:
continue
except TypeError:
warning("couldn't determine stack size; skipping run %s" % str(i))
continue
except AttributeError:
assert stack_footprint == 0
# Use fresh profiling data
timer = operator._profiler.timer.reset()
at_args[operator._profiler.name] = timer
operator.cfunction(*list(at_args.values()))
elapsed = sum(getattr(timer._obj, k) for k, _ in timer._obj._fields_)
timings[run] = elapsed
log("run <%s> took %f (s) in %d timesteps" %
(','.join('%s=%s' % (k, v) for k, v in mapper.items()), elapsed, timesteps))
# Prepare for the next autotuning run
update_time_bounds(stepper, at_args, timesteps, mode)
try:
best = dict(min(timings, key=timings.get))
log("selected best: %s" % best)
except ValueError:
warning("couldn't perform any runs")
return args, {}
# Build the new argument list
args = {k: best.get(k, v) for k, v in args.items()}
# In `runtime` mode, some timesteps have been executed already, so we
# get to adjust the time range
finalize_time_bounds(stepper, at_args, args, mode)
# Reset profiling data
assert operator._profiler.name in args
args[operator._profiler.name] = operator._profiler.timer.reset()
# Reinstate MPI neighbourhood
for i, v in nb:
setattr(args['nb']._obj, i, v)
# Autotuning summary
summary = {}
summary['runs'] = len(timings)
summary['tpr'] = timesteps # tpr -> timesteps per run
summary['tuned'] = dict(best)
return args, summary
def init_time_bounds(stepper, at_args):
if stepper is None:
return
dim = stepper.dim.root
if stepper.direction is Backward:
at_args[dim.min_name] = at_args[dim.max_name] - options['squeezer']
if at_args[dim.max_name] < at_args[dim.min_name]:
warning("too few time iterations; skipping")
return False
else:
at_args[dim.max_name] = at_args[dim.min_name] + options['squeezer']
if at_args[dim.min_name] > at_args[dim.max_name]:
warning("too few time iterations; skipping")
return False
return stepper.extent(start=at_args[dim.min_name], finish=at_args[dim.max_name])
def check_time_bounds(stepper, at_args, args, mode):
if mode != 'runtime' or stepper is None:
return True
dim = stepper.dim.root
if stepper.direction is Backward:
if at_args[dim.min_name] < args[dim.min_name]:
warning("too few time iterations; stopping")
return False
else:
if at_args[dim.max_name] > args[dim.max_name]:
warning("too few time iterations; stopping")
return False
return True
def update_time_bounds(stepper, at_args, timesteps, mode):
if mode != 'runtime' or stepper is None:
return
dim = stepper.dim.root
if stepper.direction is Backward:
at_args[dim.max_name] -= timesteps
at_args[dim.min_name] -= timesteps
else:
at_args[dim.min_name] += timesteps
at_args[dim.max_name] += timesteps
def finalize_time_bounds(stepper, at_args, args, mode):
if mode != 'runtime' or stepper is None:
return
dim = stepper.dim.root
if stepper.direction is Backward:
args[dim.max_name] = at_args[dim.max_name]
args[dim.min_name] = args[dim.min_name]
else:
args[dim.min_name] = at_args[dim.min_name]
args[dim.max_name] = args[dim.max_name]
def make_calculate_parblocks(trees, blockable, nthreads):
blocks_per_threads = []
main_block_trees = [i for i in trees if set(blockable) < set(i.dimensions)]
for tree, nt in product(main_block_trees, nthreads):
block_iters = [i for i in tree if i.dim in blockable]
par_block_iters = block_iters[:block_iters[0].ncollapsed]
niterations = prod(i.extent() for i in par_block_iters)
block_size = prod(i.dim.step for i in par_block_iters)
blocks_per_threads.append((niterations / block_size) / nt)
return blocks_per_threads
def generate_block_shapes(blockable, args, level):
# Max attemptable block shape
max_bs = tuple((d.step.name, d.max_step.subs(args)) for d in blockable)
# Attempted block shapes:
# 1) Defaults (basic mode)
ret = [tuple((d.step.name, v) for d in blockable) for v in options['blocksize']]
# 2) Always try the entire iteration space (degenerate block)
ret.append(max_bs)
# 3) More attempts if auto-tuning in aggressive mode
if level == 'aggressive':
# Ramp up to larger block shapes
handle = tuple((i, options['blocksize'][-1]) for i, _ in ret[0])
for i in range(3):
new_bs = tuple((b, v*2) for b, v in handle)
ret.insert(ret.index(handle) + 1, new_bs)
handle = new_bs
handle = []
# Extended shuffling for the smaller block shapes
for bs in ret[:4]:
for i in ret:
handle.append(bs[:-1] + (i[-1],))
# Some more shuffling for all block shapes
for bs in list(ret):
ncombs = len(bs)
for i in range(ncombs):
for j in combinations(dict(bs), i+1):
handle.append(tuple((b, v*2 if b in j else v) for b, v in bs))
ret.extend(handle)
# Drop unnecessary attempts:
# 1) Block shapes exceeding the iteration space extent
ret = [i for i in ret if all(dict(i)[k] <= v for k, v in max_bs)]
# 2) Redundant block shapes
ret = filter_ordered(ret)
return ret
def generate_nthreads(nthreads, args, level):
ret = [((i.name, args[i.name]),) for i in nthreads]
# On the KNL, also try running with a different number of hyperthreads
if level == 'aggressive' and configuration['platform'] == 'knl':
ret.extend([((i.name, psutil.cpu_count()),) for i in nthreads])
ret.extend([((i.name, psutil.cpu_count() // 2),) for i in nthreads])
ret.extend([((i.name, psutil.cpu_count() // 4),) for i in nthreads])
return filter_ordered(ret)
def heuristically_discard_run(calculate_parblocks, at_args):
if configuration['develop-mode']:
return False
# Drop run if not at least one block per thread
return all(i.subs(at_args) < 1 for i in calculate_parblocks)
options = {
'squeezer': 4,
'blocksize': sorted({8, 16, 24, 32, 40, 64, 128}),
'stack_limit': resource.getrlimit(resource.RLIMIT_STACK)[0] / 4
}
"""Autotuning options."""
def log(msg):
perf("AutoTuner: %s" % msg)
def warning(msg):
_warning("AutoTuner: %s" % msg)
| 35.4377 | 88 | 0.642625 |
1f0dbe5c791974893a15733592825b3b7b69ae6a | 2,257 | py | Python | src/genie/libs/parser/iosxe/tests/ShowLispEthernetMapCachePrefix/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowLispEthernetMapCachePrefix/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowLispEthernetMapCachePrefix/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | null | null | null |
expected_output = {
"lisp_id": {
0: {
"instance_id": {
8188: {
"eid_table": "Vlan 210",
"entries": 1,
"eid_prefix": {
"0017.0100.0001/48": {
"uptime": "01:09:06",
"expiry_time": "22:50:53",
"via": "map-reply",
"map_reply_state": "complete",
"prefix_location": "local-to-site",
"source_type": "map-reply",
"last_modified": "01:09:06,",
"source_ip": "1.1.1.10",
"prefix_state": "Active",
"encap": "dynamic-EID traffic",
"rloc_set": {
"1.1.1.10": {
"uptime": "01:09:06",
"rloc_state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-",
"last_state_change": {
"time": "01:09:06",
"count": 1,
},
"last_route_reach_change": {
"time": "01:09:06",
"count": 1,
},
"last_pri_weight_change": {
"priority": "never",
"weight": "never",
},
"rloc_probe_sent": {
"time": "01:09:06",
"rtt": 1,
"rtt_unit": "ms",
},
}
},
}
},
}
}
}
}
}
| 42.584906 | 64 | 0.216216 |
d3e8f3992e46e06915015416caf7aa7c13da08d8 | 3,893 | py | Python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/virtual_network.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/mgmt/devtestlabs/models/virtual_network.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure/mgmt/devtestlabs/models/virtual_network.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetwork(Resource):
"""A virtual network.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:param allowed_subnets: The allowed subnets of the virtual network.
:type allowed_subnets: list[~azure.mgmt.devtestlabs.models.Subnet]
:param description: The description of the virtual network.
:type description: str
:param external_provider_resource_id: The Microsoft.Network resource
identifier of the virtual network.
:type external_provider_resource_id: str
:param external_subnets: The external subnet properties.
:type external_subnets:
list[~azure.mgmt.devtestlabs.models.ExternalSubnet]
:param subnet_overrides: The subnet overrides of the virtual network.
:type subnet_overrides:
list[~azure.mgmt.devtestlabs.models.SubnetOverride]
:ivar created_date: The creation date of the virtual network.
:vartype created_date: datetime
:param provisioning_state: The provisioning status of the resource.
:type provisioning_state: str
:param unique_identifier: The unique immutable identifier of a resource
(Guid).
:type unique_identifier: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'allowed_subnets': {'key': 'properties.allowedSubnets', 'type': '[Subnet]'},
'description': {'key': 'properties.description', 'type': 'str'},
'external_provider_resource_id': {'key': 'properties.externalProviderResourceId', 'type': 'str'},
'external_subnets': {'key': 'properties.externalSubnets', 'type': '[ExternalSubnet]'},
'subnet_overrides': {'key': 'properties.subnetOverrides', 'type': '[SubnetOverride]'},
'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
}
def __init__(self, location=None, tags=None, allowed_subnets=None, description=None, external_provider_resource_id=None, external_subnets=None, subnet_overrides=None, provisioning_state=None, unique_identifier=None):
super(VirtualNetwork, self).__init__(location=location, tags=tags)
self.allowed_subnets = allowed_subnets
self.description = description
self.external_provider_resource_id = external_provider_resource_id
self.external_subnets = external_subnets
self.subnet_overrides = subnet_overrides
self.created_date = None
self.provisioning_state = provisioning_state
self.unique_identifier = unique_identifier
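# Editor's note: a brief, hedged construction sketch (not part of the generated
# file). All values are placeholders; only the keyword names come from the
# __init__ signature above.
if __name__ == '__main__':
    vnet = VirtualNetwork(
        location='westus2',
        tags={'environment': 'dev'},
        description='DevTest Labs virtual network',
        external_provider_resource_id=(
            '/subscriptions/<subscription-id>/resourceGroups/<rg-name>'
            '/providers/Microsoft.Network/virtualNetworks/<vnet-name>'))
    # Server-populated, read-only attributes (id, name, type, created_date)
    # remain None until the object comes back from the service.
    print(vnet.description, vnet.created_date)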
| 45.267442 | 220 | 0.659389 |
0922191931101dab7d3f87561ff1b4dec06456a7 | 86 | py | Python | contacts/admin.py | sjy5386/subshorts | d8170ee4a66989c3e852f86aa83bab6341e3aa10 | ["MIT"] | 3 | 2022-03-08T19:02:41.000Z | 2022-03-16T23:04:37.000Z | contacts/admin.py | sjy5386/subshorts | d8170ee4a66989c3e852f86aa83bab6341e3aa10 | ["MIT"] | 5 | 2022-03-17T02:16:52.000Z | 2022-03-18T02:55:25.000Z | contacts/admin.py | sjy5386/subshorts | d8170ee4a66989c3e852f86aa83bab6341e3aa10 | ["MIT"] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(Contact)
| 14.333333 | 32 | 0.790698 |
fe151be68963f98b6606742c120582d52c35ef62 | 458 | py | Python | index.py | rednes/raspberry-webapi | cccdcd3e8736f0366d5a296dd05136b19439b63a | ["MIT"] | null | null | null | index.py | rednes/raspberry-webapi | cccdcd3e8736f0366d5a296dd05136b19439b63a | ["MIT"] | null | null | null | index.py | rednes/raspberry-webapi | cccdcd3e8736f0366d5a296dd05136b19439b63a | ["MIT"] | null | null | null |
from bottle import route, run, template
import wiringpi
import time
GPIO_DICT = {'1':4, '2':17, '3':27}
def gpio_process(led_pin):
wiringpi.wiringPiSetupGpio()
wiringpi.pinMode(led_pin, 1)
wiringpi.digitalWrite(led_pin, 1)
time.sleep(1)
wiringpi.digitalWrite(led_pin, 0)
@route('/api/<number>')
def index(number):
led_pin = GPIO_DICT[number]
gpio_process(led_pin)
run(host='localhost', port=8080, debug=True, reloader=True)
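# Editor's note: hedged usage sketch, not part of the original script. With the
# server above running on the Pi, a GET request to /api/<number> pulses the mapped
# BCM pin (GPIO_DICT keys are '1', '2' and '3') high for one second, e.g.:
#
#   import urllib.request
#   urllib.request.urlopen('http://localhost:8080/api/1')  # pulses BCM pin 4
#
# Requests with any other <number> raise a KeyError and return an HTTP 500.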
| 20.818182 | 59 | 0.70524 |
da32f3d7f7859182db955ad3b197e2ee8a8b7580 | 4,463 | py | Python | jenkins/docker_diff.py | jpbetz/test-infra | 8272c557733c4ff13eaca515cb61523bf0fc91cd | ["Apache-2.0"] | 3 | 2020-04-10T14:14:11.000Z | 2021-06-09T08:39:22.000Z | jenkins/docker_diff.py | jpbetz/test-infra | 8272c557733c4ff13eaca515cb61523bf0fc91cd | ["Apache-2.0"] | 3 | 2021-03-20T05:23:47.000Z | 2021-03-20T05:35:10.000Z | jenkins/docker_diff.py | jpbetz/test-infra | 8272c557733c4ff13eaca515cb61523bf0fc91cd | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Output the differences between two Docker images.
Usage:
python docker_diff.py [--deep=path] <image_1> <image_2>
"""
import argparse
import json
import logging
import os
import shutil
import subprocess
import tarfile
import tempfile
def call(cmd, **kwargs):
"""run call with args."""
logging.info('exec %s', ' '.join(cmd))
return subprocess.call(cmd, **kwargs)
def check_call(cmd):
"""run check_call with args."""
logging.info('exec %s', ' '.join(cmd))
return subprocess.check_call(cmd)
def dockerfile_layers(tarball):
'''Given a `docker save` tarball, return the layer metadata in order.'''
layer_by_parent = {}
for member in tarball.getmembers():
if member.name.endswith('/json'):
layer = json.load(tarball.extractfile(member))
layer_by_parent[layer.get('parent')] = layer
# assemble layers by following parent pointers
layers = []
parent = None # base image has no parent
while parent in layer_by_parent:
layer = layer_by_parent[parent]
layers.append(layer)
parent = layer['id']
return layers
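# Editor's note (illustration only, not original code): given layer metadata such as
#   {'id': 'aaa'}                       # base layer, no 'parent' key
#   {'id': 'bbb', 'parent': 'aaa'}
#   {'id': 'ccc', 'parent': 'bbb'}
# layer_by_parent maps None -> aaa, 'aaa' -> bbb, 'bbb' -> ccc, so the while-loop
# above walks the parent chain and returns [aaa, bbb, ccc], i.e. base image first.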
def is_whiteout(fname):
"""Check if whiteout."""
return fname.startswith('.wh.') or '/.wh.' in fname
def extract_layers(tarball, layers, outdir):
'''Extract docker layers to a specific directory (fake a union mount).'''
for layer in layers:
obj = tarball.extractfile('%s/layer.tar' % layer['id'])
with tarfile.open(fileobj=obj) as fp:
# Complication: .wh. files indicate deletions.
# https://github.com/docker/docker/blob/master/image/spec/v1.md
members = fp.getmembers()
members_good = [m for m in members if not is_whiteout(m.name)]
fp.extractall(outdir, members_good)
for member in members:
name = member.name
if is_whiteout(name):
path = os.path.join(outdir, name.replace('.wh.', ''))
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.exists(path):
os.unlink(path)
def docker_diff(image_a, image_b, tmpdir, deep):
"""Diff two docker images."""
# dump images for inspection
tf_a_path = '%s/a.tar' % tmpdir
tf_b_path = '%s/b.tar' % tmpdir
check_call(['docker', 'save', '-o', tf_a_path, image_a])
check_call(['docker', 'save', '-o', tf_b_path, image_b])
tf_a = tarfile.open(tf_a_path)
tf_b = tarfile.open(tf_b_path)
# find layers in order
layers_a = dockerfile_layers(tf_a)
layers_b = dockerfile_layers(tf_b)
# minor optimization: skip identical layers
common = len(os.path.commonprefix([layers_a, layers_b]))
tf_a_out = '%s/a' % tmpdir
tf_b_out = '%s/b' % tmpdir
extract_layers(tf_a, layers_a[common:], tf_a_out)
extract_layers(tf_b, layers_b[common:], tf_b_out)
# actually compare the resulting directories
# just show whether something changed (OS upgrades change a lot)
call(['diff', '-qr', 'a', 'b'], cwd=tmpdir)
if deep:
# if requested, do a more in-depth content diff as well.
call([
'diff', '-rU5',
os.path.join('a', deep),
os.path.join('b', deep)],
cwd=tmpdir)
def main():
"""Run docker_diff."""
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--deep', help='Show full differences for specific directory')
parser.add_argument('image_a')
parser.add_argument('image_b')
options = parser.parse_args()
tmpdir = tempfile.mkdtemp(prefix='docker_diff_')
try:
docker_diff(options.image_a, options.image_b, tmpdir, options.deep)
finally:
shutil.rmtree(tmpdir)
if __name__ == '__main__':
main()
| 29.556291 | 86 | 0.641273 |
bed1cf483cf714fd1e02c5e00cd930fc99b2745b | 335 | py | Python | tests/class/alias08.py | ktok07b6/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | 83 | 2015-11-30T09:59:13.000Z | 2021-08-03T09:12:28.000Z | tests/class/alias08.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | 4 | 2017-02-10T01:43:11.000Z | 2020-07-14T03:52:25.000Z | tests/class/alias08.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | 11 | 2016-11-18T14:39:15.000Z | 2021-02-23T10:05:20.000Z |
from polyphony import testbench
class D:
def get_v(self, v):
return v
class C:
def __init__(self, v):
self.v = v
def alias08(x):
s = 0
for i in range(x):
c = C(i)
s += c.v
return s
@testbench
def test():
assert 1+2+3 == alias08(4)
assert 0 == alias08(0)
test()
| 13.958333 | 31 | 0.513433 |
099744e9eec3308ec1bcb822460b081276ff394a | 7,189 | py | Python | dfirtrack/settings.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | ["Apache-2.0"] | null | null | null | dfirtrack/settings.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | ["Apache-2.0"] | null | null | null | dfirtrack/settings.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | ["Apache-2.0"] | null | null | null |
"""
Django settings for DFIRTrack project.
"""
import os
from dfirtrack.config import LOGGING_PATH
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', 'CHANGEME')
# Application definition
INSTALLED_APPS = [
'dfirtrack_main',
'dfirtrack_artifacts',
'dfirtrack_api',
'dfirtrack_config',
'rest_framework',
'django_q',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.postgres',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework.authtoken',
'martor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'dfirtrack_main.async_messages.middleware.async_messages_middleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# use database cache for async messages
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'dfirtrack_async_messages',
}
}
ROOT_URLCONF = 'dfirtrack.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dfirtrack.wsgi.application'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
# TODO: change to something like 'reverse()' to prevent redundant code
LOGIN_REDIRECT_URL = '/main_overview/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'std_formatter': {
'format': '[%(asctime)s] %(levelname)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
'handlers': {
'customlog': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': LOGGING_PATH + '/' + 'dfirtrack.log',
'formatter': 'std_formatter',
},
},
'loggers': {
'dfirtrack_artifacts': {
'handlers': ['customlog'],
'level': 'DEBUG',
'propagate': True,
},
'dfirtrack_main': {
'handlers': ['customlog'],
'level': 'DEBUG',
'propagate': True,
},
},
}
Q_CLUSTER = {
'name': 'dfirtrack',
'orm': 'default', # use database backend as message broker
'label': 'DFIRTrack Q Cluster', # label for admin page
'catch_up': False, # do not catch up postponed tasks after downtime
'max_attempts': 1, # do not retry failed task
'timeout': 1800, # timeout tasks after half an hour
'retry': 1801, # retry tasks only after timeout time (skip retry is not possible afaik)
'save_limit': 0, # save unlimited successful tasks in the database
'sync': False, # switch for synchronous execution (also done for testing via 'dfirtrack.test_settings')
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES' : [
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'dfirtrack_api.authentication.TokenAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
}
MARTOR_ENABLE_CONFIGS = {
'emoji': 'true', # to enable/disable emoji icons.
'imgur': 'false', # to enable/disable imgur/custom uploader.
'mention': 'false', # to enable/disable mention
'jquery': 'true', # to include/revoke jquery (require for admin default django)
'living': 'false', # to enable/disable live updates in preview
'spellcheck': 'false', # to enable/disable spellcheck in form textareas
'hljs': 'true', # to enable/disable hljs highlighting in preview
}
MARTOR_TOOLBAR_BUTTONS = [
'bold', 'italic', 'horizontal', 'heading', 'pre-code',
'blockquote', 'unordered-list', 'ordered-list',
'link', 'direct-mention', 'toggle-maximize', 'help'
]
MARTOR_ENABLE_LABEL = True
"""
import local settings from local_settings
use settings from this file in case of missing local settings
try statements are split to avoid conflicts in case of missing values
"""
# ALLOWED_HOSTS
try:
from .local_settings import ALLOWED_HOSTS
except ImportError: # coverage: ignore branch
# add IP or FQDN if relevant
ALLOWED_HOSTS = ['localhost']
# DATABASES
try:
from .local_settings import DATABASES
except ImportError: # coverage: ignore branch
# Database - check environment variables for context
if "CI" in os.environ:
# use PostgreSQL for GitHub action
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'github_actions',
'USER': 'dfirtrack',
'PASSWORD': 'dfirtrack',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
else:
# use SQLite3 otherwise (for local setup without dfirtrack.local_settings)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'dfirtrack.sqlite3'),
}
}
# DATA_UPLOAD_MAX_NUMBER_FIELDS
try:
from .local_settings import DATA_UPLOAD_MAX_NUMBER_FIELDS
except ImportError: # coverage: ignore branch
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10000
# DEBUG
try:
from .local_settings import DEBUG
except ImportError: # coverage: ignore branch
# change to True for debugging
DEBUG = False
# STATIC_ROOT
try:
from .local_settings import STATIC_ROOT
except ImportError: # coverage: ignore branch
STATIC_ROOT = '/var/www/html/static/'
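# Editor's note: a hedged sketch of what a matching dfirtrack/local_settings.py
# could look like; every value below is a placeholder, and only the variable names
# are taken from the imports above. It is kept commented out so it does not
# override the fallbacks defined in this file.
#
# ALLOWED_HOSTS = ['dfirtrack.example.com', 'localhost']
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql',
#         'NAME': 'dfirtrack',
#         'USER': 'dfirtrack',
#         'PASSWORD': 'change-me',
#         'HOST': '127.0.0.1',
#         'PORT': '5432',
#     }
# }
# DATA_UPLOAD_MAX_NUMBER_FIELDS = 10000
# DEBUG = False
# STATIC_ROOT = '/var/www/html/static/'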
| 28.41502 | 128 | 0.633607 |
12381dee8d04f016585fcb37eaed79c2d4fb0788 | 168 | py | Python | backend/newsletter/__init__.py | Lanseuo/newsletter | a0e9e81035ec6322426d5c9223f11a80e6dc7245 | ["MIT"] | null | null | null | backend/newsletter/__init__.py | Lanseuo/newsletter | a0e9e81035ec6322426d5c9223f11a80e6dc7245 | ["MIT"] | 3 | 2021-03-08T19:40:32.000Z | 2022-02-12T02:21:17.000Z | backend/newsletter/__init__.py | Lanseuo/newsletter | a0e9e81035ec6322426d5c9223f11a80e6dc7245 | ["MIT"] | null | null | null |
from .api import newsletter_api_blueprint
from .frontend import newsletter_frontend_blueprint
__all__ = ["newsletter_api_blueprint", "newsletter_frontend_blueprint"]
| 28 | 71 | 0.857143 |
90194f8fadd5bc68b4a63515af23ec56de53d695 | 1,803 | py | Python | google/ads/googleads/v6/enums/types/user_list_size_range.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | ["Apache-2.0"] | null | null | null | google/ads/googleads/v6/enums/types/user_list_size_range.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | ["Apache-2.0"] | null | null | null | google/ads/googleads/v6/enums/types/user_list_size_range.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.enums",
marshal="google.ads.googleads.v6",
manifest={"UserListSizeRangeEnum",},
)
class UserListSizeRangeEnum(proto.Message):
r"""Size range in terms of number of users of a UserList."""
class UserListSizeRange(proto.Enum):
r"""Enum containing possible user list size ranges."""
UNSPECIFIED = 0
UNKNOWN = 1
LESS_THAN_FIVE_HUNDRED = 2
LESS_THAN_ONE_THOUSAND = 3
ONE_THOUSAND_TO_TEN_THOUSAND = 4
TEN_THOUSAND_TO_FIFTY_THOUSAND = 5
FIFTY_THOUSAND_TO_ONE_HUNDRED_THOUSAND = 6
ONE_HUNDRED_THOUSAND_TO_THREE_HUNDRED_THOUSAND = 7
THREE_HUNDRED_THOUSAND_TO_FIVE_HUNDRED_THOUSAND = 8
FIVE_HUNDRED_THOUSAND_TO_ONE_MILLION = 9
ONE_MILLION_TO_TWO_MILLION = 10
TWO_MILLION_TO_THREE_MILLION = 11
THREE_MILLION_TO_FIVE_MILLION = 12
FIVE_MILLION_TO_TEN_MILLION = 13
TEN_MILLION_TO_TWENTY_MILLION = 14
TWENTY_MILLION_TO_THIRTY_MILLION = 15
THIRTY_MILLION_TO_FIFTY_MILLION = 16
OVER_FIFTY_MILLION = 17
__all__ = tuple(sorted(__protobuf__.manifest))
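# Editor's note: a minimal usage sketch (not generated code), assuming the standard
# proto-plus behaviour that these enums are IntEnum subclasses, so members can be
# read by name or converted to their numeric wire value.
if __name__ == '__main__':
    size_range = UserListSizeRangeEnum.UserListSizeRange.LESS_THAN_FIVE_HUNDRED
    print(size_range.name, size_range.value)  # -> LESS_THAN_FIVE_HUNDRED 2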
| 33.388889 | 74 | 0.723239 |
030dd3dd032ee2a66293b91964effb77ad0a4796 | 66,342 | py | Python | src/transform_conversions/transformations.py | jaymwong/transformation_conversions | cbd269c651aa92ccc56c90129d0c29df09bb2e46 | ["BSD-2-Clause"] | null | null | null | src/transform_conversions/transformations.py | jaymwong/transformation_conversions | cbd269c651aa92ccc56c90129d0c29df09bb2e46 | ["BSD-2-Clause"] | null | null | null | src/transform_conversions/transformations.py | jaymwong/transformation_conversions | cbd269c651aa92ccc56c90129d0c29df09bb2e46 | ["BSD-2-Clause"] | null | null | null |
# Copyright (c) 2006-2017, Christoph Gohlke
# Copyright (c) 2006-2017, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2017.02.17
Requirements
------------
* `CPython 2.7 or 3.5 <http://www.python.org>`_
* `Numpy 1.11 <http://www.numpy.org>`_
* `Transformations.c 2017.02.17 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
Other Python packages and modules for 3D transformations and quaternions:
* `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
includes most code of this module.
* `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
* `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2017.02.17'
__docformat__ = 'restructuredtext en'
__all__ = ()
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4))
True
"""
return numpy.identity(4)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
>>> v = numpy.random.random(3) - 0.5
>>> numpy.allclose(v, translation_matrix(v)[:3, 3])
True
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = translation_from_matrix(translation_matrix(v0))
>>> numpy.allclose(v0, v1)
True
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
>>> v2[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = numpy.random.random(3) - 0.5
>>> M0 = reflection_matrix(v0, v1)
>>> point, normal = reflection_from_matrix(M0)
>>> M1 = reflection_matrix(point, normal)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
w, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
>>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
>>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
True
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.diag([cosa, cosa, cosa])
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array([[ 0.0, -direction[2], direction[1]],
[ direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
Using either perspective point, projection direction, or none of both.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix([0, 0, 0], [1, 0, 0])
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
Return values are same as arguments for projection_matrix function:
point, normal, direction, perspective, and pseudo.
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, direct)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
>>> result = projection_from_matrix(P0, pseudo=False)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> result = projection_from_matrix(P0, pseudo=True)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustum.
The frustum bounds are axis-aligned along x (left, right),
y (bottom, top) and z (near, far).
Normalized device coordinates are in range [-1, 1] if coordinates are
inside the frustum.
If perspective is True the frustum is a truncated pyramid with the
perspective point at origin and direction along z axis, otherwise an
orthographic canonical view volume (a box).
Homogeneous coordinates transformed by the perspective clip matrix
need to be dehomogenized (divided by w coordinate).
>>> frustum = numpy.random.rand(6)
>>> frustum[1] += frustum[0]
>>> frustum[3] += frustum[2]
>>> frustum[5] += frustum[4]
>>> M = clip_matrix(perspective=False, *frustum)
>>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
array([-1., -1., -1., 1.])
>>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
array([ 1., 1., 1., 1.])
>>> M = clip_matrix(perspective=True, *frustum)
>>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
>>> v / v[3]
array([-1., -1., -1., 1.])
>>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
>>> v / v[3]
array([ 1., 1., -1., 1.])
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustum: near <= 0")
t = 2.0 * near
M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
[0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
[0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
[0.0, 0.0, -1.0, 0.0]]
else:
M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
[0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
[0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
[0.0, 0.0, 0.0, 1.0]]
return numpy.array(M)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
The shear plane is defined by a point and normal vector. The direction
vector must be orthogonal to the plane's normal vector.
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S = shear_matrix(angle, direct, point, normal)
>>> numpy.allclose(1, numpy.linalg.det(S))
True
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
if len(i) < 2:
        raise ValueError("no two linearly independent eigenvectors found %s" % w)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
w = vector_norm(n)
if w > lenorm:
lenorm = w
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
matrix : array_like
Non-degenerative homogeneous transformation matrix
Return tuple of:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
Raise ValueError if matrix is of wrong type or degenerative.
>>> T0 = translation_matrix([1, 2, 3])
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
>>> T1 = translation_matrix(trans)
>>> numpy.allclose(T0, T1)
True
>>> S = scale_matrix(0.123)
>>> scale, shear, angles, trans, persp = decompose_matrix(S)
>>> scale[0]
0.123
>>> R0 = euler_matrix(1, 2, 3)
>>> scale, shear, angles, trans, persp = decompose_matrix(R0)
>>> R1 = euler_matrix(*angles)
>>> numpy.allclose(R0, R1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0.0, 0.0, 0.0, 1.0
if not numpy.linalg.det(P):
raise ValueError("matrix is singular")
scale = numpy.zeros((3, ))
shear = [0.0, 0.0, 0.0]
angles = [0.0, 0.0, 0.0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0.0, 0.0, 0.0, 1.0
else:
perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
translate = M[3, :3].copy()
M[3, :3] = 0.0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
numpy.negative(scale, scale)
numpy.negative(row, row)
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
# angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
This is the inverse of the decompose_matrix function.
Sequence of transformations:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
>>> scale = numpy.random.random(3) - 0.5
>>> shear = numpy.random.random(3) - 0.5
>>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
>>> trans = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(4) - 0.5
>>> M0 = compose_matrix(scale, shear, angles, trans, persp)
>>> result = decompose_matrix(M0)
>>> M1 = compose_matrix(*result)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
Angles are expected in degrees.
The de-orthogonalization matrix is the inverse.
>>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array([
[ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0],
[-a*sinb*co, b*sina, 0.0, 0.0],
[ a*cosb, b*cosa, c, 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
"""Return affine transform matrix to register two point sets.
v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
coordinates, where ndims is the dimensionality of the coordinate space.
If shear is False, a similarity transformation matrix is returned.
If also scale is False, a rigid/Euclidean transformation matrix
is returned.
    By default the algorithm by Hartley and Zisserman [15] is used.
If usesvd is True, similarity and Euclidean transformation matrices
are calculated by minimizing the weighted sum of squared deviations
(RMSD) according to the algorithm by Kabsch [8].
Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
is used, which is slower when using this Python implementation.
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
>>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
>>> affine_matrix_from_points(v0, v1)
array([[ 0.14549, 0.00062, 675.50008],
[ 0.00048, 0.14094, 53.24971],
[ 0. , 0. , 1. ]])
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> S = scale_matrix(random.random())
>>> M = concatenate_matrices(T, R, S)
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
>>> M = affine_matrix_from_points(v0[:3], v1[:3])
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
More examples in superimposition_matrix()
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
ndims = v0.shape[0]
if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
raise ValueError("input arrays are of wrong shape or type")
# move centroids to origin
t0 = -numpy.mean(v0, axis=1)
M0 = numpy.identity(ndims+1)
M0[:ndims, ndims] = t0
v0 += t0.reshape(ndims, 1)
t1 = -numpy.mean(v1, axis=1)
M1 = numpy.identity(ndims+1)
M1[:ndims, ndims] = t1
v1 += t1.reshape(ndims, 1)
if shear:
# Affine transformation
A = numpy.concatenate((v0, v1), axis=0)
u, s, vh = numpy.linalg.svd(A.T)
vh = vh[:ndims].T
B = vh[:ndims]
C = vh[ndims:2*ndims]
t = numpy.dot(C, numpy.linalg.pinv(B))
t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
elif usesvd or ndims != 3:
# Rigid transformation via SVD of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(ndims+1)
M[:ndims, :ndims] = R
else:
# Rigid transformation matrix via quaternion
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = [[xx+yy+zz, 0.0, 0.0, 0.0],
[yz-zy, xx-yy-zz, 0.0, 0.0],
[zx-xz, xy+yx, yy-xx-zz, 0.0],
[xy-yx, zx+xz, yz+zy, zz-xx-yy]]
# quaternion: eigenvector corresponding to most positive eigenvalue
w, V = numpy.linalg.eigh(N)
q = V[:, numpy.argmax(w)]
q /= vector_norm(q) # unit quaternion
# homogeneous transformation matrix
M = quaternion_matrix(q)
if scale and not shear:
# Affine transformation; scale is ratio of RMS deviations from centroid
v0 *= v0
v1 *= v1
M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# move centroids back
M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
M /= M[ndims, ndims]
return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
"""Return matrix to transform given 3D point set into second point set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.
The parameters scale and usesvd are explained in the more general
affine_matrix_from_points function.
The returned matrix is a similarity or Euclidean transformation matrix.
This function has a fast C implementation in transformations.c.
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scale=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3))
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
return affine_matrix_from_points(v0, v1, shear=False,
scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print(axes, "failed")
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
>>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(angles, [0.123, 0, 0])
True
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i+parity-1] + 1
k = _NEXT_AXIS[i-parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
q = numpy.empty((4, ))
if repetition:
q[0] = cj*(cc - ss)
q[i] = cj*(cs + sc)
q[j] = sj*(cc + ss)
q[k] = sj*(cs - sc)
else:
q[0] = cj*cc + sj*ss
q[i] = cj*sc - sj*cs
q[j] = cj*ss + sj*cc
q[k] = cj*cs - sj*sc
if parity:
q[j] *= -1.0
return q
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, [1, 0, 0])
>>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
True
"""
q = numpy.array([0.0, axis[0], axis[1], axis[2]])
qlen = vector_norm(q)
if qlen > _EPS:
q *= math.sin(angle/2.0) / qlen
q[0] = math.cos(angle/2.0)
return q
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
n = numpy.dot(q, q)
if n < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / n)
q = numpy.outer(q, q)
return numpy.array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
"""Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
if isprecise:
q = numpy.empty((4, ))
t = numpy.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 0, 1, 2
if M[1, 1] > M[0, 0]:
i, j, k = 1, 2, 0
if M[2, 2] > M[i, i]:
i, j, k = 2, 0, 1
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q = q[[3, 0, 1, 2]]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = numpy.linalg.eigh(K)
q = V[[3, 0, 1, 2], numpy.argmax(w)]
if q[0] < 0.0:
numpy.negative(q, q)
return q
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
>>> numpy.allclose(q, [28, -44, -14, 48])
True
"""
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return numpy.array([
-x1*x0 - y1*y0 - z1*z0 + w1*w0,
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q
def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q)
def quaternion_real(quaternion):
"""Return real part of quaternion.
>>> quaternion_real([3, 0, 1, 2])
3.0
"""
return float(quaternion[0])
def quaternion_imag(quaternion):
"""Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([ 0., 1., 2.])
"""
return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
numpy.negative(q1, q1)
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def random_quaternion(rand=None):
"""Return uniform random unit quaternion.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> q = random_quaternion()
>>> numpy.allclose(1, vector_norm(q))
True
>>> q = random_quaternion(numpy.random.random(3))
>>> len(q.shape), q.shape[0]==4
(1, True)
"""
if rand is None:
rand = numpy.random.rand(3)
else:
assert len(rand) == 3
r1 = numpy.sqrt(1.0 - rand[0])
r2 = numpy.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,
numpy.cos(t1)*r1, numpy.sin(t2)*r2])
def random_rotation_matrix(rand=None):
"""Return uniform random rotation matrix.
rand: array like
Three independent random variables that are uniformly distributed
between 0 and 1 for each returned quaternion.
>>> R = random_rotation_matrix()
>>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
True
"""
return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
"""Virtual Trackball Control.
>>> ball = Arcball()
>>> ball = Arcball(initial=numpy.identity(4))
>>> ball.place([320, 320], 320)
>>> ball.down([500, 250])
>>> ball.drag([475, 275])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 3.90583455)
True
>>> ball = Arcball(initial=[1, 0, 0, 0])
>>> ball.place([320, 320], 320)
>>> ball.setaxes([1, 1, 0], [-1, 1, 0])
>>> ball.constrain = True
>>> ball.down([400, 200])
>>> ball.drag([200, 400])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 0.2055924)
True
>>> ball.next()
"""
def __init__(self, initial=None):
"""Initialize virtual trackball control.
initial : quaternion or rotation matrix
"""
self._axis = None
self._axes = None
self._radius = 1.0
self._center = [0.0, 0.0]
self._vdown = numpy.array([0.0, 0.0, 1.0])
self._constrain = False
if initial is None:
self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
else:
initial = numpy.array(initial, dtype=numpy.float64)
if initial.shape == (4, 4):
self._qdown = quaternion_from_matrix(initial)
elif initial.shape == (4, ):
initial /= vector_norm(initial)
self._qdown = initial
else:
raise ValueError("initial not a quaternion or matrix")
self._qnow = self._qpre = self._qdown
def place(self, center, radius):
"""Place Arcball, e.g. when window size changes.
center : sequence[2]
Window coordinates of trackball center.
radius : float
Radius of trackball in window coordinates.
"""
self._radius = float(radius)
self._center[0] = center[0]
self._center[1] = center[1]
def setaxes(self, *axes):
"""Set axes to constrain rotations."""
if axes is None:
self._axes = None
else:
self._axes = [unit_vector(axis) for axis in axes]
@property
def constrain(self):
"""Return state of constrain to axis mode."""
return self._constrain
@constrain.setter
def constrain(self, value):
"""Set state of constrain to axis mode."""
self._constrain = bool(value)
def down(self, point):
"""Set initial cursor window coordinates and pick constrain-axis."""
self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
self._qdown = self._qpre = self._qnow
if self._constrain and self._axes is not None:
self._axis = arcball_nearest_axis(self._vdown, self._axes)
self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
else:
self._axis = None
def drag(self, point):
"""Update current cursor window coordinates."""
vnow = arcball_map_to_sphere(point, self._center, self._radius)
if self._axis is not None:
vnow = arcball_constrain_to_axis(vnow, self._axis)
self._qpre = self._qnow
t = numpy.cross(self._vdown, vnow)
if numpy.dot(t, t) < _EPS:
self._qnow = self._qdown
else:
q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
self._qnow = quaternion_multiply(q, self._qdown)
def next(self, acceleration=0.0):
"""Continue rotation in direction of last drag."""
q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
self._qpre, self._qnow = self._qnow, q
def matrix(self):
"""Return homogeneous rotation matrix."""
return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
"""Return unit sphere coordinates from window coordinates."""
v0 = (point[0] - center[0]) / radius
v1 = (center[1] - point[1]) / radius
n = v0*v0 + v1*v1
if n > 1.0:
# position outside of sphere
n = math.sqrt(n)
return numpy.array([v0/n, v1/n, 0.0])
else:
return numpy.array([v0, v1, math.sqrt(1.0 - n)])
def arcball_constrain_to_axis(point, axis):
"""Return sphere point perpendicular to axis."""
v = numpy.array(point, dtype=numpy.float64, copy=True)
a = numpy.array(axis, dtype=numpy.float64, copy=True)
v -= a * numpy.dot(a, v) # on plane
n = vector_norm(v)
if n > _EPS:
if v[2] < 0.0:
numpy.negative(v, v)
v /= n
return v
if a[2] == 1.0:
return numpy.array([1.0, 0.0, 0.0])
return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
    """Return the axis whose arc is nearest to point."""
point = numpy.array(point, dtype=numpy.float64, copy=False)
nearest = None
mx = -1.0
for axis in axes:
t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
if t > mx:
nearest = axis
mx = t
return nearest
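# The helper below is an illustrative sketch and not part of the original
# module: it chains the three arcball helpers above to show that a window
# coordinate, once mapped to the virtual sphere and constrained to the
# nearest of two candidate axes, remains a unit vector. The window center,
# radius and candidate axes are made-up values.
def _arcball_helpers_demo():
    center, radius = [320, 320], 320
    v = arcball_map_to_sphere([400, 200], center, radius)
    axis = arcball_nearest_axis(v, ([0.0, 0.0, 1.0], [0.0, 1.0, 0.0]))
    v = arcball_constrain_to_axis(v, axis)
    return vector_norm(v)  # stays (approximately) 1.0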
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = numpy.random.random(3)
>>> n = vector_norm(v)
>>> numpy.allclose(n, numpy.linalg.norm(v))
True
>>> v = numpy.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> v = numpy.random.rand(5, 4, 3)
>>> n = numpy.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
"""
data = numpy.array(data, dtype=numpy.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(numpy.dot(data, data))
data *= data
out = numpy.atleast_1d(numpy.sum(data, axis=axis))
numpy.sqrt(out, out)
return out
else:
data *= data
numpy.sum(data, axis=axis, out=out)
numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. Euclidean norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3))
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1]))
[1.0]
"""
if out is None:
data = numpy.array(data, dtype=numpy.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(numpy.dot(data, data))
return data
else:
if out is not data:
out[:] = numpy.array(data, copy=False)
data = out
length = numpy.atleast_1d(numpy.sum(data*data, axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
data /= length
if out is None:
return data
def random_vector(size):
"""Return array of random doubles in the half-open interval [0.0, 1.0).
>>> v = random_vector(10000)
>>> numpy.all(v >= 0) and numpy.all(v < 1)
True
>>> v0 = random_vector(10)
>>> v1 = random_vector(10)
>>> numpy.any(v0 == v1)
False
"""
return numpy.random.random(size)
def vector_product(v0, v1, axis=0):
"""Return vector perpendicular to vectors.
>>> v = vector_product([2, 0, 0], [0, 3, 0])
>>> numpy.allclose(v, [0, 0, 6])
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> v = vector_product(v0, v1)
>>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> v = vector_product(v0, v1, axis=1)
>>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
True
"""
return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
"""Return angle between vectors.
If directed is False, the input vectors are interpreted as undirected axes,
i.e. the maximum angle is pi/2.
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
>>> numpy.allclose(a, math.pi)
True
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
>>> numpy.allclose(a, 0)
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> a = angle_between_vectors(v0, v1)
>>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> a = angle_between_vectors(v0, v1, axis=1)
>>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
dot = numpy.sum(v0 * v1, axis=axis)
dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
"""Return inverse of square transformation matrix.
>>> M0 = random_rotation_matrix()
>>> M1 = inverse_matrix(M0.T)
>>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
True
>>> for size in range(1, 7):
... M0 = numpy.random.rand(size, size)
... M1 = inverse_matrix(M0)
... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
"""
return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M
def is_same_transform(matrix0, matrix1):
"""Return True if two matrices perform same transformation.
>>> is_same_transform(numpy.identity(4), numpy.identity(4))
True
>>> is_same_transform(numpy.identity(4), random_rotation_matrix())
False
"""
matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
matrix0 /= matrix0[3, 3]
matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
matrix1 /= matrix1[3, 3]
return numpy.allclose(matrix0, matrix1)
def is_same_quaternion(q0, q1):
"""Return True if two quaternions are equal."""
q0 = numpy.array(q0)
q1 = numpy.array(q1)
return numpy.allclose(q0, q1) or numpy.allclose(q0, -q1)
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
    """Try to import all public attributes from module into global namespace.
Existing attributes with name clashes are renamed with prefix.
Attributes starting with underscore are ignored by default.
Return True on successful import.
"""
import warnings
from importlib import import_module
try:
if not package:
module = import_module(name)
else:
module = import_module('.' + name, package=package)
except ImportError:
if warn:
warnings.warn("failed to import module %s" % name)
else:
for attr in dir(module):
if ignore and attr.startswith(ignore):
continue
if prefix:
if attr in globals():
globals()[prefix + attr] = globals()[attr]
elif warn:
warnings.warn("no Python implementation of " + attr)
globals()[attr] = getattr(module, attr)
return True
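# Attempt to load the optional C extension module; if it is found, its
# attributes replace the pure-Python implementations defined above, while
# the Python versions stay reachable under the '_py_' prefix.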
_import_module('_transformations')
if __name__ == "__main__":
import doctest
import random # noqa: used in doctests
numpy.set_printoptions(suppress=True, precision=5)
doctest.testmod()
| 34.391913
| 79
| 0.588285
|
aee6932251ad55ca15484d6e091fa2a12acc8da9
| 2,118
|
py
|
Python
|
excut/experiments/datasets.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | 5
|
2020-11-17T19:59:49.000Z
|
2021-09-23T23:10:39.000Z
|
excut/experiments/datasets.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
excut/experiments/datasets.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
import os
datasets={}
experiment_name="expr10"
# add baseline_data
for ds in ['terroristAttack', 'imdb', 'uwcse', 'webkb', 'mutagenesis', 'hep']:
dataset_folder = os.path.join('/scratch/GW/pool0/gadelrab/ExDEC/data/baseline_data/', ds)
dataset_output_folder = os.path.join('/scratch/GW/pool0/gadelrab/ExDEC/%s/baseline_data/' % experiment_name, ds)
datasets[ds] = {'dataset_folder': dataset_folder,
'dataset_output_folder': dataset_output_folder,
'target_entities': os.path.join(dataset_folder, '%s_target_entities' % ds),
'kg': os.path.join(dataset_folder, '%s_kg' % ds),
'kg_idntifier': 'http://%s_kg.org' % ds,
'data_prefix': 'http://exp-data.org'
}
# Add yago related data
for ds in ['yago_art_3_4k']: #, 'yago_art_3_filtered_target', 'yago_art_3_4k']:
dataset_folder = '/scratch/GW/pool0/gadelrab/ExDEC/data/yago/'
dataset_output_folder = os.path.join('/scratch/GW/pool0/gadelrab/ExDEC/%s/yago/' % experiment_name, ds)
datasets[ds] = {'dataset_folder': dataset_folder,
'dataset_output_folder': dataset_output_folder,
'target_entities': os.path.join(dataset_folder, '%s.tsv' % ds),
'kg': '/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yagoFacts_3.tsv',
'kg_idntifier': 'http://yago-expr.org',
'data_prefix': 'http://exp-data.org',
'safe_url': True
}
for ds in ['grad_ungrad_course']:
dataset_folder = '/scratch/GW/pool0/gadelrab/ExDEC/data/uobm/'
dataset_output_folder = os.path.join('/scratch/GW/pool0/gadelrab/ExDEC/%s/uobm/' % experiment_name, ds)
datasets[ds] = {'dataset_folder': dataset_folder,
'dataset_output_folder': dataset_output_folder,
'target_entities': os.path.join(dataset_folder, '%s.ttl' % ds),
'kg': '/scratch/GW/pool0/gadelrab/ExDEC/data/uobm/uobm10_kg.nt',
'kg_idntifier': 'http://uobm10.org'
}
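# The block below is an illustrative sketch rather than part of the original
# module: it shows how the `datasets` registry built above might be consumed
# by an experiment script. The key 'imdb' is one of the names registered in
# the baseline loop; the printed fields are existing entries of its config.
if __name__ == '__main__':
    config = datasets['imdb']
    print('KG file:         ', config['kg'])
    print('Target entities: ', config['target_entities'])
    print('Output folder:   ', config['dataset_output_folder'])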
| 51.658537
| 116
| 0.604344
|
24e9de125225abe03ba4a2480dccdf9b0fcdb52f
| 30
|
py
|
Python
|
sample_rules/pyrete/__init__.py
|
eshandas/pyrate
|
281db3632f5fbe4a930ca205e9eb0b285c908141
|
[
"MIT"
] | 9
|
2018-07-09T17:59:39.000Z
|
2020-12-26T14:48:58.000Z
|
sample_rules/pyrete/__init__.py
|
eshandas/pyrate
|
281db3632f5fbe4a930ca205e9eb0b285c908141
|
[
"MIT"
] | 2
|
2018-07-09T17:59:27.000Z
|
2021-06-01T21:56:33.000Z
|
sample_rules/pyrete/__init__.py
|
eshandas/pyrete
|
281db3632f5fbe4a930ca205e9eb0b285c908141
|
[
"MIT"
] | null | null | null |
"""
About the Rule Engine
"""
| 7.5
| 21
| 0.6
|
4adf9b717f616d77d3f12ee9ae851022626c26d3
| 702
|
py
|
Python
|
Etap 3/Logia14/Zad4.py
|
aszokalski/Logia
|
5e29745b01623df8a2f162f143656a76056af407
|
[
"MIT"
] | null | null | null |
Etap 3/Logia14/Zad4.py
|
aszokalski/Logia
|
5e29745b01623df8a2f162f143656a76056af407
|
[
"MIT"
] | null | null | null |
Etap 3/Logia14/Zad4.py
|
aszokalski/Logia
|
5e29745b01623df8a2f162f143656a76056af407
|
[
"MIT"
] | null | null | null |
def ilesam(wyroby, samochody):
wyn = 0
for samochod in samochody:
zapakowano = 0
while zapakowano < samochod:
if len(wyroby) == 0:
return wyn + 1
if zapakowano + wyroby[0][1] > samochod:
break
maks = 0
for i in range(1, wyroby[0][0] + 1):
if zapakowano + i * wyroby[0][1] <= samochod:
maks = i
zapakowano += maks * wyroby[0][1]
wyroby[0][0] -= maks
if wyroby[0][0] == 0:
wyroby = wyroby[1:]
wyn += 1
samochody.append(samochod)
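# Illustrative call, not present in the original file: `wyroby` is a list of
# [count, unit_size] pairs and `samochody` a list of vehicle capacities; with
# the made-up input below the items fit into two vehicles, so the call
# returns 2.
def _przyklad():
    return ilesam([[3, 2], [1, 5]], [6, 6, 6])  # -> 2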
| 26
| 61
| 0.404558
|
5db262a9d7621cd71b8c5eb673d1d886db81f057
| 55,006
|
py
|
Python
|
PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/projections/polar.py
|
AbdulHoffmann/carla_carissma
|
8d382769ffa02a6c61a22c57160285505f5ff0a4
|
[
"MIT"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
venv/lib/python3.7/site-packages/matplotlib/projections/polar.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
venv/lib/python3.7/site-packages/matplotlib/projections/polar.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 64
|
2018-04-25T08:51:57.000Z
|
2022-01-29T14:13:57.000Z
|
from collections import OrderedDict
import types
import numpy as np
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
import matplotlib.markers as mmarkers
import matplotlib.patches as mpatches
import matplotlib.path as mpath
from matplotlib import rcParams
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.spines as mspines
class PolarTransform(mtransforms.Transform):
"""
    The base polar transform. This handles projection of *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True,
_apply_theta_transforms=True):
mtransforms.Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
self._apply_theta_transforms = _apply_theta_transforms
def __str__(self):
return ("{}(\n"
"{},\n"
" use_rmin={},\n"
" _apply_theta_transforms={})"
.format(type(self).__name__,
mtransforms._indent_str(self._axis),
self._use_rmin,
self._apply_theta_transforms))
def transform_non_affine(self, tr):
xy = np.empty(tr.shape, float)
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
# PolarAxes does not use the theta transforms here, but apply them for
# backwards-compatibility if not being used by it.
if self._apply_theta_transforms and self._axis is not None:
t *= self._axis.get_theta_direction()
t += self._axis.get_theta_offset()
if self._use_rmin and self._axis is not None:
r = r - self._axis.get_rorigin()
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
return xy
transform_non_affine.__doc__ = \
mtransforms.Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return mpath.Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return mpath.Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = \
mtransforms.Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin,
self._apply_theta_transforms)
inverted.__doc__ = mtransforms.Transform.inverted.__doc__
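# Illustrative sketch, not part of the original module: applying only the
# non-affine part of the polar transform to a single (theta, r) pair. With
# no axis attached, the theta offset/direction handling is skipped, so the
# result is simply (r*cos(theta), r*sin(theta)).
def _polar_transform_demo():
    tr = PolarTransform(axis=None, _apply_theta_transforms=False)
    # For theta=pi/2, r=2 the result is approximately [[0.0, 2.0]].
    return tr.transform_non_affine(np.array([[np.pi / 2, 2.0]]))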
class PolarAffine(mtransforms.Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is the y limits (for the radius limits).
The theta range is handled by the non-affine transform.
"""
mtransforms.Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def __str__(self):
return ("{}(\n"
"{},\n"
"{})"
.format(type(self).__name__,
mtransforms._indent_str(self._scale_transform),
mtransforms._indent_str(self._limits)))
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = mtransforms.Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = mtransforms.Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(mtransforms.Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True,
_apply_theta_transforms=True):
mtransforms.Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
self._apply_theta_transforms = _apply_theta_transforms
def __str__(self):
return ("{}(\n"
"{},\n"
" use_rmin={},\n"
" _apply_theta_transforms={})"
.format(type(self).__name__,
mtransforms._indent_str(self._axis),
self._use_rmin,
self._apply_theta_transforms))
def transform_non_affine(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
with np.errstate(invalid='ignore'):
# At x=y=r=0 this will raise an
# invalid value warning when doing 0/0
# Divide by zero warnings are only raised when
# the numerator is different from 0. That
# should not happen here.
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
# PolarAxes does not use the theta transforms here, but apply them for
# backwards-compatibility if not being used by it.
if self._apply_theta_transforms and self._axis is not None:
theta -= self._axis.get_theta_offset()
theta *= self._axis.get_theta_direction()
theta %= 2 * np.pi
if self._use_rmin and self._axis is not None:
r += self._axis.get_rorigin()
return np.concatenate((theta, r), 1)
transform_non_affine.__doc__ = \
mtransforms.Transform.transform_non_affine.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin,
self._apply_theta_transforms)
inverted.__doc__ = mtransforms.Transform.inverted.__doc__
class ThetaFormatter(mticker.Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
vmin, vmax = self.axis.get_view_interval()
d = np.rad2deg(abs(vmax - vmin))
digits = max(-int(np.log10(d) - 1.5), 0)
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
format_str = r"${value:0.{digits:d}f}^\circ$"
return format_str.format(value=np.rad2deg(x), digits=digits)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
format_str = "{value:0.{digits:d}f}\N{DEGREE SIGN}"
return format_str.format(value=np.rad2deg(x), digits=digits)
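# _AxisWrapper presents the radian-valued view and data intervals of the
# theta axis in degrees, so that a standard degree-based tick locator can be
# reused unchanged by ThetaLocator below.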
class _AxisWrapper(object):
def __init__(self, axis):
self._axis = axis
def get_view_interval(self):
return np.rad2deg(self._axis.get_view_interval())
def set_view_interval(self, vmin, vmax):
self._axis.set_view_interval(*np.deg2rad((vmin, vmax)))
def get_minpos(self):
return np.rad2deg(self._axis.get_minpos())
def get_data_interval(self):
return np.rad2deg(self._axis.get_data_interval())
def set_data_interval(self, vmin, vmax):
self._axis.set_data_interval(*np.deg2rad((vmin, vmax)))
def get_tick_space(self):
return self._axis.get_tick_space()
class ThetaLocator(mticker.Locator):
"""
Used to locate theta ticks.
This will work the same as the base locator except in the case that the
view spans the entire circle. In such cases, the previously used default
locations of every 45 degrees are returned.
"""
def __init__(self, base):
self.base = base
self.axis = self.base.axis = _AxisWrapper(self.base.axis)
def set_axis(self, axis):
self.axis = _AxisWrapper(axis)
self.base.set_axis(self.axis)
def __call__(self):
lim = self.axis.get_view_interval()
if _is_full_circle_deg(lim[0], lim[1]):
return np.arange(8) * 2 * np.pi / 8
else:
return np.deg2rad(self.base())
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = np.rad2deg((vmin, vmax))
return np.deg2rad(self.base.view_limits(vmin, vmax))
def zoom(self, direction):
return self.base.zoom(direction)
class ThetaTick(maxis.XTick):
"""
A theta-axis tick.
This subclass of `XTick` provides angular ticks with some small
modification to their re-positioning such that ticks are rotated based on
tick location. This results in ticks that are correctly perpendicular to
the arc spine.
When 'auto' rotation is enabled, labels are also rotated to be parallel to
the spine. The label padding is also applied here since it's not possible
to use a generic axes transform to produce tick-specific padding.
"""
def __init__(self, axes, *args, **kwargs):
self._text1_translate = mtransforms.ScaledTranslation(
0, 0,
axes.figure.dpi_scale_trans)
self._text2_translate = mtransforms.ScaledTranslation(
0, 0,
axes.figure.dpi_scale_trans)
super().__init__(axes, *args, **kwargs)
def _get_text1(self):
t = super()._get_text1()
t.set_rotation_mode('anchor')
t.set_transform(t.get_transform() + self._text1_translate)
return t
def _get_text2(self):
t = super()._get_text2()
t.set_rotation_mode('anchor')
t.set_transform(t.get_transform() + self._text2_translate)
return t
def _apply_params(self, **kw):
super()._apply_params(**kw)
# Ensure transform is correct; sometimes this gets reset.
trans = self.label1.get_transform()
if not trans.contains_branch(self._text1_translate):
self.label1.set_transform(trans + self._text1_translate)
trans = self.label2.get_transform()
if not trans.contains_branch(self._text2_translate):
self.label2.set_transform(trans + self._text2_translate)
def _update_padding(self, pad, angle):
padx = pad * np.cos(angle) / 72
pady = pad * np.sin(angle) / 72
self._text1_translate._t = (padx, pady)
self._text1_translate.invalidate()
self._text2_translate._t = (-padx, -pady)
self._text2_translate.invalidate()
def update_position(self, loc):
super().update_position(loc)
axes = self.axes
angle = loc * axes.get_theta_direction() + axes.get_theta_offset()
text_angle = np.rad2deg(angle) % 360 - 90
angle -= np.pi / 2
if self.tick1On:
marker = self.tick1line.get_marker()
if marker in (mmarkers.TICKUP, '|'):
trans = mtransforms.Affine2D().scale(1.0, 1.0).rotate(angle)
elif marker == mmarkers.TICKDOWN:
trans = mtransforms.Affine2D().scale(1.0, -1.0).rotate(angle)
else:
# Don't modify custom tick line markers.
trans = self.tick1line._marker._transform
self.tick1line._marker._transform = trans
if self.tick2On:
marker = self.tick2line.get_marker()
if marker in (mmarkers.TICKUP, '|'):
trans = mtransforms.Affine2D().scale(1.0, 1.0).rotate(angle)
elif marker == mmarkers.TICKDOWN:
trans = mtransforms.Affine2D().scale(1.0, -1.0).rotate(angle)
else:
# Don't modify custom tick line markers.
trans = self.tick2line._marker._transform
self.tick2line._marker._transform = trans
mode, user_angle = self._labelrotation
if mode == 'default':
text_angle = user_angle
else:
if text_angle > 90:
text_angle -= 180
elif text_angle < -90:
text_angle += 180
text_angle += user_angle
if self.label1On:
self.label1.set_rotation(text_angle)
if self.label2On:
self.label2.set_rotation(text_angle)
# This extra padding helps preserve the look from previous releases but
# is also needed because labels are anchored to their center.
pad = self._pad + 7
self._update_padding(pad,
self._loc * axes.get_theta_direction() +
axes.get_theta_offset())
class ThetaAxis(maxis.XAxis):
"""
A theta Axis.
This overrides certain properties of an `XAxis` to provide special-casing
for an angular axis.
"""
__name__ = 'thetaaxis'
axis_name = 'theta'
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return ThetaTick(self.axes, 0, '', major=major, **tick_kw)
def _wrap_locator_formatter(self):
self.set_major_locator(ThetaLocator(self.get_major_locator()))
self.set_major_formatter(ThetaFormatter())
self.isDefault_majloc = True
self.isDefault_majfmt = True
def cla(self):
super().cla()
self.set_ticks_position('none')
self._wrap_locator_formatter()
def _set_scale(self, value, **kwargs):
super()._set_scale(value, **kwargs)
self._wrap_locator_formatter()
def _copy_tick_props(self, src, dest):
'Copy the props from src tick to dest tick'
if src is None or dest is None:
return
super()._copy_tick_props(src, dest)
# Ensure that tick transforms are independent so that padding works.
trans = dest._get_text1_transform()[0]
dest.label1.set_transform(trans + dest._text1_translate)
trans = dest._get_text2_transform()[0]
dest.label2.set_transform(trans + dest._text2_translate)
class RadialLocator(mticker.Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
    depending on the scale of the *r*-axis).
"""
def __init__(self, base, axes=None):
self.base = base
self._axes = axes
def __call__(self):
show_all = True
# Ensure previous behaviour with full circle non-annular views.
if self._axes:
if _is_full_circle_rad(*self._axes.viewLim.intervalx):
rorigin = self._axes.get_rorigin()
if self._axes.get_rmin() <= rorigin:
show_all = False
if show_all:
return self.base()
else:
return [tick for tick in self.base() if tick > rorigin]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return mtransforms.nonsingular(min(0, vmin), vmax)
class _ThetaShift(mtransforms.ScaledTranslation):
"""
Apply a padding shift based on axes theta limits.
This is used to create padding for radial ticks.
Parameters
----------
axes : matplotlib.axes.Axes
The owning axes; used to determine limits.
pad : float
The padding to apply, in points.
    mode : str, {'min', 'max', 'rlabel'}
        Whether to shift away from the start (``'min'``) or the end (``'max'``)
        of the axes, or to use the rlabel position (``'rlabel'``).
"""
def __init__(self, axes, pad, mode):
mtransforms.ScaledTranslation.__init__(self, pad, pad,
axes.figure.dpi_scale_trans)
self.set_children(axes._realViewLim)
self.axes = axes
self.mode = mode
self.pad = pad
def __str__(self):
return ("{}(\n"
"{},\n"
"{},\n"
"{})"
.format(type(self).__name__,
mtransforms._indent_str(self.axes),
mtransforms._indent_str(self.pad),
mtransforms._indent_str(repr(self.mode))))
def get_matrix(self):
if self._invalid:
if self.mode == 'rlabel':
angle = (
np.deg2rad(self.axes.get_rlabel_position()) *
self.axes.get_theta_direction() +
self.axes.get_theta_offset()
)
else:
if self.mode == 'min':
angle = self.axes._realViewLim.xmin
elif self.mode == 'max':
angle = self.axes._realViewLim.xmax
if self.mode in ('rlabel', 'min'):
padx = np.cos(angle - np.pi / 2)
pady = np.sin(angle - np.pi / 2)
else:
padx = np.cos(angle + np.pi / 2)
pady = np.sin(angle + np.pi / 2)
self._t = (self.pad * padx / 72, self.pad * pady / 72)
return mtransforms.ScaledTranslation.get_matrix(self)
class RadialTick(maxis.YTick):
"""
A radial-axis tick.
This subclass of `YTick` provides radial ticks with some small modification
to their re-positioning such that ticks are rotated based on axes limits.
This results in ticks that are correctly perpendicular to the spine. Labels
are also rotated to be perpendicular to the spine, when 'auto' rotation is
enabled.
"""
def _get_text1(self):
t = super()._get_text1()
t.set_rotation_mode('anchor')
return t
def _get_text2(self):
t = super()._get_text2()
t.set_rotation_mode('anchor')
return t
def _determine_anchor(self, mode, angle, start):
# Note: angle is the (spine angle - 90) because it's used for the tick
# & text setup, so all numbers below are -90 from (normed) spine angle.
if mode == 'auto':
if start:
if -90 <= angle <= 90:
return 'left', 'center'
else:
return 'right', 'center'
else:
if -90 <= angle <= 90:
return 'right', 'center'
else:
return 'left', 'center'
else:
if start:
if angle < -68.5:
return 'center', 'top'
elif angle < -23.5:
return 'left', 'top'
elif angle < 22.5:
return 'left', 'center'
elif angle < 67.5:
return 'left', 'bottom'
elif angle < 112.5:
return 'center', 'bottom'
elif angle < 157.5:
return 'right', 'bottom'
elif angle < 202.5:
return 'right', 'center'
elif angle < 247.5:
return 'right', 'top'
else:
return 'center', 'top'
else:
if angle < -68.5:
return 'center', 'bottom'
elif angle < -23.5:
return 'right', 'bottom'
elif angle < 22.5:
return 'right', 'center'
elif angle < 67.5:
return 'right', 'top'
elif angle < 112.5:
return 'center', 'top'
elif angle < 157.5:
return 'left', 'top'
elif angle < 202.5:
return 'left', 'center'
elif angle < 247.5:
return 'left', 'bottom'
else:
return 'center', 'bottom'
def update_position(self, loc):
super().update_position(loc)
axes = self.axes
thetamin = axes.get_thetamin()
thetamax = axes.get_thetamax()
direction = axes.get_theta_direction()
offset_rad = axes.get_theta_offset()
offset = np.rad2deg(offset_rad)
full = _is_full_circle_deg(thetamin, thetamax)
if full:
angle = (axes.get_rlabel_position() * direction +
offset) % 360 - 90
tick_angle = 0
if angle > 90:
text_angle = angle - 180
elif angle < -90:
text_angle = angle + 180
else:
text_angle = angle
else:
angle = (thetamin * direction + offset) % 360 - 90
if direction > 0:
tick_angle = np.deg2rad(angle)
else:
tick_angle = np.deg2rad(angle + 180)
if angle > 90:
text_angle = angle - 180
elif angle < -90:
text_angle = angle + 180
else:
text_angle = angle
mode, user_angle = self._labelrotation
if mode == 'auto':
text_angle += user_angle
else:
text_angle = user_angle
if self.label1On:
if full:
ha = self.label1.get_ha()
va = self.label1.get_va()
else:
ha, va = self._determine_anchor(mode, angle, direction > 0)
self.label1.set_ha(ha)
self.label1.set_va(va)
self.label1.set_rotation(text_angle)
if self.tick1On:
marker = self.tick1line.get_marker()
if marker == mmarkers.TICKLEFT:
trans = (mtransforms.Affine2D()
.scale(1.0, 1.0)
.rotate(tick_angle))
elif marker == '_':
trans = (mtransforms.Affine2D()
.scale(1.0, 1.0)
.rotate(tick_angle + np.pi / 2))
elif marker == mmarkers.TICKRIGHT:
trans = (mtransforms.Affine2D()
.scale(-1.0, 1.0)
.rotate(tick_angle))
else:
# Don't modify custom tick line markers.
trans = self.tick1line._marker._transform
self.tick1line._marker._transform = trans
if full:
self.label2On = False
self.tick2On = False
else:
angle = (thetamax * direction + offset) % 360 - 90
if direction > 0:
tick_angle = np.deg2rad(angle)
else:
tick_angle = np.deg2rad(angle + 180)
if angle > 90:
text_angle = angle - 180
elif angle < -90:
text_angle = angle + 180
else:
text_angle = angle
mode, user_angle = self._labelrotation
if mode == 'auto':
text_angle += user_angle
else:
text_angle = user_angle
if self.label2On:
ha, va = self._determine_anchor(mode, angle, direction < 0)
self.label2.set_ha(ha)
self.label2.set_va(va)
self.label2.set_rotation(text_angle)
if self.tick2On:
marker = self.tick2line.get_marker()
if marker == mmarkers.TICKLEFT:
trans = (mtransforms.Affine2D()
.scale(1.0, 1.0)
.rotate(tick_angle))
elif marker == '_':
trans = (mtransforms.Affine2D()
.scale(1.0, 1.0)
.rotate(tick_angle + np.pi / 2))
elif marker == mmarkers.TICKRIGHT:
trans = (mtransforms.Affine2D()
.scale(-1.0, 1.0)
.rotate(tick_angle))
else:
# Don't modify custom tick line markers.
trans = self.tick2line._marker._transform
self.tick2line._marker._transform = trans
class RadialAxis(maxis.YAxis):
"""
A radial Axis.
This overrides certain properties of a `YAxis` to provide special-casing
for a radial axis.
"""
__name__ = 'radialaxis'
axis_name = 'radius'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sticky_edges.y.append(0)
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return RadialTick(self.axes, 0, '', major=major, **tick_kw)
def _wrap_locator_formatter(self):
self.set_major_locator(RadialLocator(self.get_major_locator(),
self.axes))
self.isDefault_majloc = True
def cla(self):
super().cla()
self.set_ticks_position('none')
self._wrap_locator_formatter()
def _set_scale(self, value, **kwargs):
super()._set_scale(value, **kwargs)
self._wrap_locator_formatter()
def _is_full_circle_deg(thetamin, thetamax):
"""
Determine if a wedge (in degrees) spans the full circle.
The condition is derived from :class:`~matplotlib.patches.Wedge`.
"""
return abs(abs(thetamax - thetamin) - 360.0) < 1e-12
def _is_full_circle_rad(thetamin, thetamax):
"""
Determine if a wedge (in radians) spans the full circle.
The condition is derived from :class:`~matplotlib.patches.Wedge`.
"""
return abs(abs(thetamax - thetamin) - 2 * np.pi) < 1.74e-14
class _WedgeBbox(mtransforms.Bbox):
"""
Transform (theta,r) wedge Bbox into axes bounding box.
Parameters
----------
center : (float, float)
Center of the wedge
viewLim : `~matplotlib.transforms.Bbox`
Bbox determining the boundaries of the wedge
originLim : `~matplotlib.transforms.Bbox`
Bbox determining the origin for the wedge, if different from *viewLim*
"""
def __init__(self, center, viewLim, originLim, **kwargs):
mtransforms.Bbox.__init__(self,
                                  np.array([[0.0, 0.0], [1.0, 1.0]], float),
**kwargs)
self._center = center
self._viewLim = viewLim
self._originLim = originLim
self.set_children(viewLim, originLim)
def __str__(self):
return ("{}(\n"
"{},\n"
"{},\n"
"{})"
.format(type(self).__name__,
mtransforms._indent_str(self._center),
mtransforms._indent_str(self._viewLim),
mtransforms._indent_str(self._originLim)))
def get_points(self):
if self._invalid:
points = self._viewLim.get_points().copy()
# Scale angular limits to work with Wedge.
points[:, 0] *= 180 / np.pi
if points[0, 0] > points[1, 0]:
points[:, 0] = points[::-1, 0]
# Scale radial limits based on origin radius.
points[:, 1] -= self._originLim.y0
# Scale radial limits to match axes limits.
rscale = 0.5 / points[1, 1]
points[:, 1] *= rscale
width = min(points[1, 1] - points[0, 1], 0.5)
# Generate bounding box for wedge.
wedge = mpatches.Wedge(self._center, points[1, 1],
points[0, 0], points[1, 0],
width=width)
self.update_from_path(wedge.get_path())
# Ensure equal aspect ratio.
w, h = self._points[1] - self._points[0]
if h < w:
deltah = (w - h) / 2.0
deltaw = 0.0
elif w < h:
deltah = 0.0
deltaw = (h - w) / 2.0
else:
deltah = 0.0
deltaw = 0.0
self._points += np.array([[-deltaw, -deltah], [deltaw, deltah]])
self._invalid = 0
return self._points
get_points.__doc__ = mtransforms.Bbox.get_points.__doc__
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
def __init__(self, *args,
theta_offset=0, theta_direction=1, rlabel_position=22.5,
**kwargs):
"""
Create a new Polar Axes for a polar plot.
"""
self._default_theta_offset = theta_offset
self._default_theta_direction = theta_direction
self._default_rlabel_position = np.deg2rad(rlabel_position)
super().__init__(*args, **kwargs)
self.use_sticky_edges = True
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
start = self.spines.get('start', None)
if start:
start.set_visible(False)
end = self.spines.get('end', None)
if end:
end.set_visible(False)
self.set_xlim(0.0, 2 * np.pi)
self.grid(rcParams['polaraxes.grid'])
inner = self.spines.get('inner', None)
if inner:
inner.set_visible(False)
self.set_rorigin(None)
self.set_theta_offset(self._default_theta_offset)
self.set_theta_direction(self._default_theta_direction)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = ThetaAxis(self)
self.yaxis = RadialAxis(self)
# Calling polar_axes.xaxis.cla() or polar_axes.xaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
# A view limit where the minimum radius can be locked if the user
# specifies an alternate origin.
self._originViewLim = mtransforms.LockableBbox(self.viewLim)
# Handle angular offset and direction.
self._direction = mtransforms.Affine2D() \
.scale(self._default_theta_direction, 1.0)
self._theta_offset = mtransforms.Affine2D() \
.translate(self._default_theta_offset, 0.0)
self.transShift = mtransforms.composite_transform_factory(
self._direction,
self._theta_offset)
# A view limit shifted to the correct location after accounting for
# orientation and offset.
self._realViewLim = mtransforms.TransformedBbox(self.viewLim,
self.transShift)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# Scale view limit into a bbox around the selected wedge. This may be
# smaller than the usual unit axes rectangle if not plotting the full
# circle.
self.axesLim = _WedgeBbox((0.5, 0.5),
self._realViewLim, self._originViewLim)
# Scale the wedge to fill the axes.
self.transWedge = mtransforms.BboxTransformFrom(self.axesLim)
# Scale the axes to fill the figure.
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(
self,
_apply_theta_transforms=False)
# Add dependency on rorigin.
self.transProjection.set_children(self._originViewLim)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale,
self._originViewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = (
self.transScale + self.transShift + self.transProjection +
(self.transProjectionAffine + self.transWedge + self.transAxes))
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 0.0 and r == 1.0
# at the edge of the axis circles.
self._xaxis_transform = (
mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(),
mtransforms.BboxTransformTo(self.viewLim)) +
self.transData)
# The theta labels are flipped along the radius, so that text 1 is on
# the outside by default. This should work the same as before.
flipr_transform = mtransforms.Affine2D() \
.translate(0.0, -0.5) \
.scale(1.0, -1.0) \
.translate(0.0, 0.5)
self._xaxis_text_transform = flipr_transform + self._xaxis_transform
# This is the transform for r-axis ticks. It scales the theta
# axis so the gridlines from 0.0 to 1.0, now go from thetamin to
# thetamax.
self._yaxis_transform = (
mtransforms.blended_transform_factory(
mtransforms.BboxTransformTo(self.viewLim),
mtransforms.IdentityTransform()) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = mtransforms.Affine2D() \
.translate(self._default_rlabel_position, 0.0)
self._yaxis_text_transform = mtransforms.TransformWrapper(
self._r_label_position + self.transData)
def get_xaxis_transform(self, which='grid'):
if which not in ['tick1', 'tick2', 'grid']:
raise ValueError(
"'which' must be one of 'tick1', 'tick2', or 'grid'")
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text_transform, 'center', 'center'
def get_yaxis_transform(self, which='grid'):
if which in ('tick1', 'tick2'):
return self._yaxis_text_transform
elif which == 'grid':
return self._yaxis_transform
else:
raise ValueError(
"'which' must be one of 'tick1', 'tick2', or 'grid'")
def get_yaxis_text1_transform(self, pad):
thetamin, thetamax = self._realViewLim.intervalx
if _is_full_circle_rad(thetamin, thetamax):
return self._yaxis_text_transform, 'bottom', 'left'
elif self.get_theta_direction() > 0:
halign = 'left'
pad_shift = _ThetaShift(self, pad, 'min')
else:
halign = 'right'
pad_shift = _ThetaShift(self, pad, 'max')
return self._yaxis_text_transform + pad_shift, 'center', halign
def get_yaxis_text2_transform(self, pad):
if self.get_theta_direction() > 0:
halign = 'right'
pad_shift = _ThetaShift(self, pad, 'max')
else:
halign = 'left'
pad_shift = _ThetaShift(self, pad, 'min')
return self._yaxis_text_transform + pad_shift, 'center', halign
def draw(self, *args, **kwargs):
thetamin, thetamax = np.rad2deg(self._realViewLim.intervalx)
if thetamin > thetamax:
thetamin, thetamax = thetamax, thetamin
rmin, rmax = self._realViewLim.intervaly - self.get_rorigin()
if isinstance(self.patch, mpatches.Wedge):
# Backwards-compatibility: Any subclassed Axes might override the
# patch to not be the Wedge that PolarAxes uses.
center = self.transWedge.transform_point((0.5, 0.5))
self.patch.set_center(center)
self.patch.set_theta1(thetamin)
self.patch.set_theta2(thetamax)
edge, _ = self.transWedge.transform_point((1, 0))
radius = edge - center[0]
width = min(radius * (rmax - rmin) / rmax, radius)
self.patch.set_radius(radius)
self.patch.set_width(width)
inner_width = radius - width
inner = self.spines.get('inner', None)
if inner:
inner.set_visible(inner_width != 0.0)
visible = not _is_full_circle_deg(thetamin, thetamax)
# For backwards compatibility, any subclassed Axes might override the
# spines to not include start/end that PolarAxes uses.
start = self.spines.get('start', None)
end = self.spines.get('end', None)
if start:
start.set_visible(visible)
if end:
end.set_visible(visible)
if visible:
yaxis_text_transform = self._yaxis_transform
else:
yaxis_text_transform = self._r_label_position + self.transData
if self._yaxis_text_transform != yaxis_text_transform:
self._yaxis_text_transform.set(yaxis_text_transform)
self.yaxis.reset_ticks()
self.yaxis.set_clip_path(self.patch)
Axes.draw(self, *args, **kwargs)
def _gen_axes_patch(self):
return mpatches.Wedge((0.5, 0.5), 0.5, 0.0, 360.0)
def _gen_axes_spines(self):
spines = OrderedDict([
('polar', mspines.Spine.arc_spine(self, 'top',
(0.5, 0.5), 0.5, 0.0, 360.0)),
('start', mspines.Spine.linear_spine(self, 'left')),
('end', mspines.Spine.linear_spine(self, 'right')),
('inner', mspines.Spine.arc_spine(self, 'bottom',
(0.5, 0.5), 0.0, 0.0, 360.0))
])
spines['polar'].set_transform(self.transWedge + self.transAxes)
spines['inner'].set_transform(self.transWedge + self.transAxes)
spines['start'].set_transform(self._yaxis_transform)
spines['end'].set_transform(self._yaxis_transform)
return spines
def set_thetamax(self, thetamax):
self.viewLim.x1 = np.deg2rad(thetamax)
def get_thetamax(self):
return np.rad2deg(self.viewLim.xmax)
def set_thetamin(self, thetamin):
self.viewLim.x0 = np.deg2rad(thetamin)
def get_thetamin(self):
return np.rad2deg(self.viewLim.xmin)
def set_thetalim(self, *args, **kwargs):
if 'thetamin' in kwargs:
kwargs['xmin'] = np.deg2rad(kwargs.pop('thetamin'))
if 'thetamax' in kwargs:
kwargs['xmax'] = np.deg2rad(kwargs.pop('thetamax'))
return tuple(np.rad2deg(self.set_xlim(*args, **kwargs)))
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
mtx = self._theta_offset.get_matrix()
mtx[0, 2] = offset
self._theta_offset.invalidate()
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset.get_matrix()[0, 2]
def set_theta_zero_location(self, loc, offset=0.0):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
loc : str
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
offset : float, optional
An offset in degrees to apply from the specified `loc`. **Note:**
this offset is *always* applied counter-clockwise regardless of
the direction setting.
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25}
return self.set_theta_offset(mapping[loc] + np.deg2rad(offset))
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
mtx = self._direction.get_matrix()
if direction in ('clockwise',):
mtx[0, 0] = -1
elif direction in ('counterclockwise', 'anticlockwise'):
mtx[0, 0] = 1
elif direction in (1, -1):
mtx[0, 0] = direction
else:
raise ValueError(
"direction must be 1, -1, clockwise or counterclockwise")
self._direction.invalidate()
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction.get_matrix()[0, 0]
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_rorigin(self, rorigin):
self._originViewLim.locked_y0 = rorigin
def get_rorigin(self):
return self._originViewLim.y0
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def get_rlabel_position(self):
"""
Returns
-------
float
The theta position of the radius labels in degrees.
"""
return np.rad2deg(self._r_label_position.get_matrix()[0, 2])
def set_rlabel_position(self, value):
"""Updates the theta position of the radius labels.
Parameters
----------
value : number
The angular position of the radius labels in degrees.
"""
self._r_label_position.clear().translate(np.deg2rad(value), 0.0)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator(), self))
def set_rscale(self, *args, **kwargs):
return Axes.set_yscale(self, *args, **kwargs)
def set_rticks(self, *args, **kwargs):
return Axes.set_yticks(self, *args, **kwargs)
def set_thetagrids(self, angles, labels=None, fmt=None, **kwargs):
"""
Set the theta gridlines in a polar plot.
Parameters
----------
angles : tuple with floats, degrees
The angles of the theta gridlines.
labels : tuple with strings or None
The labels to use at each theta gridline. The
`.projections.polar.ThetaFormatter` will be used if None.
fmt : str or None
Format string used in `matplotlib.ticker.FormatStrFormatter`.
For example '%f'. Note that the angle that is used is in
radians.
Returns
-------
lines, labels : list of `.lines.Line2D`, list of `.text.Text`
*lines* are the theta gridlines and *labels* are the tick labels.
Other Parameters
----------------
**kwargs
*kwargs* are optional `~.Text` properties for the labels.
See Also
--------
.PolarAxes.set_rgrids
.Axis.get_gridlines
.Axis.get_ticklabels
"""
# Make sure we take into account unitized data
angles = self.convert_yunits(angles)
angles = np.deg2rad(angles)
self.set_xticks(angles)
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(mticker.FormatStrFormatter(fmt))
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial gridlines on a polar plot.
Parameters
----------
radii : tuple with floats
The radii for the radial gridlines
labels : tuple with strings or None
The labels to use at each radial gridline. The
`matplotlib.ticker.ScalarFormatter` will be used if None.
angle : float
The angular position of the radius labels in degrees.
fmt : str or None
Format string used in `matplotlib.ticker.FormatStrFormatter`.
For example '%f'.
Returns
-------
lines, labels : list of `.lines.Line2D`, list of `.text.Text`
*lines* are the radial gridlines and *labels* are the tick labels.
Other Parameters
----------------
**kwargs
*kwargs* are optional `~.Text` properties for the labels.
See Also
--------
.PolarAxes.set_thetagrids
.Axis.get_gridlines
.Axis.get_ticklabels
"""
# Make sure we take into account unitized data
radii = self.convert_xunits(radii)
radii = np.asarray(radii)
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(mticker.FormatStrFormatter(fmt))
if angle is None:
angle = self.get_rlabel_position()
self.set_rlabel_position(angle)
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError(
"You can not set the xscale on a polar plot.")
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
if theta < 0:
theta += 2 * np.pi
theta /= np.pi
return ('\N{GREEK SMALL LETTER THETA}=%0.3f\N{GREEK SMALL LETTER PI} '
'(%0.3f\N{DEGREE SIGN}), r=%0.3f') % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
# # # Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self.get_rlabel_position())
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if angle - epsilon <= t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = types.SimpleNamespace(
rmax=self.get_rmax(),
trans=self.transData.frozen(),
trans_inverse=self.transData.inverted().frozen(),
r_label_angle=self.get_rlabel_position(),
x=x,
y=y,
mode=mode)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self.set_rlabel_position(p.r_label_angle - dt)
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# to keep things all self contained, we can put aliases to the Polar classes
# defined above. This isn't strictly necessary, but it makes some of the
# code more readable (and provides a backwards compatible Polar API)
PolarAxes.PolarTransform = PolarTransform
PolarAxes.PolarAffine = PolarAffine
PolarAxes.InvertedPolarTransform = InvertedPolarTransform
PolarAxes.ThetaFormatter = ThetaFormatter
PolarAxes.RadialLocator = RadialLocator
PolarAxes.ThetaLocator = ThetaLocator
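# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of the PolarAxes API defined above, assuming a standard
# matplotlib install where this class is registered as the 'polar' projection.
# The figure/axes names and plotted values are illustrative only.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='polar')  # returns a PolarAxes
    ax.plot(np.linspace(0, 2 * np.pi, 100), np.linspace(0, 1, 100))
    ax.set_theta_zero_location('N')          # theta = 0 at the top
    ax.set_theta_direction(-1)               # theta increases clockwise
    ax.set_thetagrids(range(0, 360, 45))     # angular gridlines every 45 deg
    ax.set_rgrids([0.25, 0.5, 0.75, 1.0], angle=22.5)
    plt.show()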
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), float)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ),
# mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print(vertices[-2:])
# print(result[-2:])
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print("interpolate", interpolate)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), float)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ),
# mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled)
# # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled)
# # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print(vertices[:6], result[:6], t0[:6], t1[:6], td[:6],
# td_scaled[:6], tkappa)
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
| 35.464861 | 79 | 0.56212 |
29d04f4862c66da64cd582746e4bb5c1395878e4 | 4,590 | py | Python |
websocket/controller/base_controller.py | shadow-share/websocket | 221f523f2973633b14d66b3890a644a0707dd18c | ["MIT"] | null | null | null |
websocket/controller/base_controller.py | shadow-share/websocket | 221f523f2973633b14d66b3890a644a0707dd18c | ["MIT"] | null | null | null |
websocket/controller/base_controller.py | shadow-share/websocket | 221f523f2973633b14d66b3890a644a0707dd18c | ["MIT"] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2017 ShadowMan
#
import abc
from websocket.ext import frame_verifier
from websocket.net import tcp_stream, ws_frame
from websocket.utils import (
logger, exceptions
)
class BaseController(object, metaclass=abc.ABCMeta):
def __init__(self, stream: tcp_stream.TCPStream, output, handler):
# socket file descriptor
self._socket_fd = stream.get_socket_fd()
# TCP stream buffer
self._tcp_stream = stream
# websocket event handler
self._handlers = handler # WebSocketHandlerProtocol
# output package method
if not callable(output):
raise TypeError('output method must be callable')
self._output = output
# opcode handler mapping
self._opcode_handlers = {
0x0: lambda f: print(f), 0x1: self._valid_message,
0x2: self._valid_message, 0x3: lambda f: print(f),
0x4: lambda f: print(f), 0x5: lambda f: print(f),
0x6: lambda f: print(f), 0x7: lambda f: print(f),
            # route ping/pong control frames to their dedicated handlers
            0x8: self._recv_close, 0x9: self._recv_ping,
            0xA: self._recv_pong, 0xB: lambda f: print(f),
0xC: lambda f: print(f), 0xD: lambda f: print(f),
0xE: lambda f: print(f), 0xF: lambda f: print(f),
}
def ready_receive(self):
frame_header = self._tcp_stream.peek_buffer(10)
try:
if len(frame_header) < 2:
return
frame_length = ws_frame.parse_frame_length(frame_header)
except Exception:
raise
if self._tcp_stream.buffer_length() < frame_length:
return
frame = ws_frame.WebSocketFrame(
self._tcp_stream.feed_buffer(frame_length))
if not frame_verifier.verify_frame(self._socket_fd, frame):
logger.error('Receive Client Frame Format Invalid {}'.format(frame))
logger.debug('Receive Client({}:{}) frame: {}'.format(
*self._socket_fd.getpeername(), frame))
self._opcode_handlers.get(frame.flag_opcode)(frame)
    # opcode = 1 (text frame) or opcode = 2 (binary frame)
    # TODO: handle continuation frames (opcode = 0)
def _valid_message(self, complete_frame: ws_frame.FrameBase):
try:
response = self._handlers.on_message(
self._before_message_handler(complete_frame.payload_data))
response = self._after_message_handler(response)
if response is True:
return
elif response is None:
logger.warning('message handler ignore from client message')
return
elif hasattr(response, 'pack'):
self._output(response)
elif hasattr(response, 'generate_frame'):
self._output(response.generate_frame)
else:
raise exceptions.InvalidResponse('invalid response')
except exceptions.InvalidResponse:
logger.error('message handler return value is invalid response')
raise
except Exception as e:
            # an error occurred in the handler and was not handled there
logger.error('Client({}:{}) Error occurs({})'.format(
*self._socket_fd.getpeername(), str(e)))
raise exceptions.ConnectClosed((1002, str(e)))
    @abc.abstractmethod
def _before_message_handler(self, payload_data):
pass
    @abc.abstractmethod
def _after_message_handler(self, response):
pass
def _recv_close(self, complete_frame):
if len(complete_frame.payload_data) >= 2:
code = complete_frame.payload_data[0:2]
reason = complete_frame.payload_data[2:]
else:
code, reason = 1000, b''
self._handlers.on_close(code, reason)
# If an endpoint receives a Close frame and did not previously send
# a Close frame, the endpoint MUST send a Close frame in response
raise exceptions.ConnectClosed((1000, ''))
def _recv_ping(self, complete_frame):
logger.debug('Client({}:{}) receive ping frame'.format(
*self._socket_fd.getpeername()))
self._output(ws_frame.generate_pong_frame(
extra_data=complete_frame.payload_data))
def _recv_pong(self, complete_frame):
logger.debug('Client({}:{}) receive pong frame({})'.format(
*self._socket_fd.getpeername(), complete_frame.payload_data))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type or exc_tb:
raise exc_val
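# --- Hedged usage sketch (not part of the original module) ---
# BaseController leaves _before_message_handler/_after_message_handler abstract,
# so a concrete controller is expected to fill them in. The subclass below is a
# minimal illustration only; the real project wires its own controllers.
class _EchoController(BaseController):
    def _before_message_handler(self, payload_data):
        # e.g. decode/validate the raw payload before the user handler sees it
        return payload_data
    def _after_message_handler(self, response):
        # e.g. encode the handler's response before it is framed and sent
        return response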
| 38.571429 | 80 | 0.621569 |
34e0abd43e40f35af538f5889c988d107d4449b9 | 10,137 | py | Python |
sdk/luminesce/models/lusid_problem_details.py | finbourne/luminesce-sdk-python-preview | 7af198cfa9c0fbd619272fb90601162fb7db0a67 | ["MIT"] | null | null | null |
sdk/luminesce/models/lusid_problem_details.py | finbourne/luminesce-sdk-python-preview | 7af198cfa9c0fbd619272fb90601162fb7db0a67 | ["MIT"] | null | null | null |
sdk/luminesce/models/lusid_problem_details.py | finbourne/luminesce-sdk-python-preview | 7af198cfa9c0fbd619272fb90601162fb7db0a67 | ["MIT"] | null | null | null |
# coding: utf-8
"""
FINBOURNE Honeycomb Web API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 1.9.129
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from luminesce.configuration import Configuration
class LusidProblemDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'name': 'str',
'error_details': 'list[dict(str, str)]',
'code': 'int',
'type': 'str',
'title': 'str',
'status': 'int',
'detail': 'str',
'instance': 'str',
'extensions': 'dict(str, object)'
}
attribute_map = {
'name': 'name',
'error_details': 'errorDetails',
'code': 'code',
'type': 'type',
'title': 'title',
'status': 'status',
'detail': 'detail',
'instance': 'instance',
'extensions': 'extensions'
}
required_map = {
'name': 'required',
'error_details': 'optional',
'code': 'required',
'type': 'optional',
'title': 'optional',
'status': 'optional',
'detail': 'optional',
'instance': 'optional',
'extensions': 'optional'
}
def __init__(self, name=None, error_details=None, code=None, type=None, title=None, status=None, detail=None, instance=None, extensions=None, local_vars_configuration=None): # noqa: E501
"""LusidProblemDetails - a model defined in OpenAPI"
:param name: (required)
:type name: str
:param error_details:
:type error_details: list[dict(str, str)]
:param code: (required)
:type code: int
:param type:
:type type: str
:param title:
:type title: str
:param status:
:type status: int
:param detail:
:type detail: str
:param instance:
:type instance: str
:param extensions:
:type extensions: dict(str, object)
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._error_details = None
self._code = None
self._type = None
self._title = None
self._status = None
self._detail = None
self._instance = None
self._extensions = None
self.discriminator = None
self.name = name
self.error_details = error_details
self.code = code
self.type = type
self.title = title
self.status = status
self.detail = detail
self.instance = instance
self.extensions = extensions
@property
def name(self):
"""Gets the name of this LusidProblemDetails. # noqa: E501
:return: The name of this LusidProblemDetails. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this LusidProblemDetails.
:param name: The name of this LusidProblemDetails. # noqa: E501
:type name: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def error_details(self):
"""Gets the error_details of this LusidProblemDetails. # noqa: E501
:return: The error_details of this LusidProblemDetails. # noqa: E501
:rtype: list[dict(str, str)]
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this LusidProblemDetails.
:param error_details: The error_details of this LusidProblemDetails. # noqa: E501
:type error_details: list[dict(str, str)]
"""
self._error_details = error_details
@property
def code(self):
"""Gets the code of this LusidProblemDetails. # noqa: E501
:return: The code of this LusidProblemDetails. # noqa: E501
:rtype: int
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this LusidProblemDetails.
:param code: The code of this LusidProblemDetails. # noqa: E501
:type code: int
"""
if self.local_vars_configuration.client_side_validation and code is None: # noqa: E501
raise ValueError("Invalid value for `code`, must not be `None`") # noqa: E501
self._code = code
@property
def type(self):
"""Gets the type of this LusidProblemDetails. # noqa: E501
:return: The type of this LusidProblemDetails. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this LusidProblemDetails.
:param type: The type of this LusidProblemDetails. # noqa: E501
:type type: str
"""
self._type = type
@property
def title(self):
"""Gets the title of this LusidProblemDetails. # noqa: E501
:return: The title of this LusidProblemDetails. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this LusidProblemDetails.
:param title: The title of this LusidProblemDetails. # noqa: E501
:type title: str
"""
self._title = title
@property
def status(self):
"""Gets the status of this LusidProblemDetails. # noqa: E501
:return: The status of this LusidProblemDetails. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this LusidProblemDetails.
:param status: The status of this LusidProblemDetails. # noqa: E501
:type status: int
"""
self._status = status
@property
def detail(self):
"""Gets the detail of this LusidProblemDetails. # noqa: E501
:return: The detail of this LusidProblemDetails. # noqa: E501
:rtype: str
"""
return self._detail
@detail.setter
def detail(self, detail):
"""Sets the detail of this LusidProblemDetails.
:param detail: The detail of this LusidProblemDetails. # noqa: E501
:type detail: str
"""
self._detail = detail
@property
def instance(self):
"""Gets the instance of this LusidProblemDetails. # noqa: E501
:return: The instance of this LusidProblemDetails. # noqa: E501
:rtype: str
"""
return self._instance
@instance.setter
def instance(self, instance):
"""Sets the instance of this LusidProblemDetails.
:param instance: The instance of this LusidProblemDetails. # noqa: E501
:type instance: str
"""
self._instance = instance
@property
def extensions(self):
"""Gets the extensions of this LusidProblemDetails. # noqa: E501
:return: The extensions of this LusidProblemDetails. # noqa: E501
:rtype: dict(str, object)
"""
return self._extensions
@extensions.setter
def extensions(self, extensions):
"""Sets the extensions of this LusidProblemDetails.
:param extensions: The extensions of this LusidProblemDetails. # noqa: E501
:type extensions: dict(str, object)
"""
self._extensions = extensions
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LusidProblemDetails):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, LusidProblemDetails):
return True
return self.to_dict() != other.to_dict()
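# --- Hedged usage sketch (not part of the generated module) ---
# `name` and `code` are the only fields marked 'required' above; the values
# here are illustrative only.
if __name__ == '__main__':
    details = LusidProblemDetails(name='ValidationError', code=400,
                                  title='Bad request', status=400)
    print(details.to_dict())                 # plain dict of all attributes
    print(details.to_dict(serialize=True))   # uses the JSON attribute names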
| 27.32345 | 191 | 0.57808 |
4086c4f3f6fa8f726d213fb7896fdd118e13557e | 1,072 | py | Python |
fitclip/accounts/migrations/0001_initial.py | zezaeoh/fitclib | 101b6d692fbf13cf05e0b65706da01143f7c278a | ["MIT"] | null | null | null |
fitclip/accounts/migrations/0001_initial.py | zezaeoh/fitclib | 101b6d692fbf13cf05e0b65706da01143f7c278a | ["MIT"] | 9 | 2020-03-15T08:26:20.000Z | 2022-03-12T00:13:43.000Z |
fitclip/accounts/migrations/0001_initial.py | zezaeoh/fitclip | 101b6d692fbf13cf05e0b65706da01143f7c278a | ["MIT"] | null | null | null |
# Generated by Django 3.0.1 on 2020-01-11 15:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('location', models.CharField(blank=True, max_length=30)),
('date_joined', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('is_organizer', models.BooleanField(default=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.733333 | 145 | 0.636194 |
fbf7fac3f7c59d4d93e2ca33008609e73e34c137 | 1,032 | py | Python |
panel/pane/__init__.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | ["BSD-3-Clause"] | 1 | 2021-07-06T21:07:45.000Z | 2021-07-06T21:07:45.000Z |
panel/pane/__init__.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | ["BSD-3-Clause"] | 2 | 2022-01-13T03:54:51.000Z | 2022-03-12T01:01:00.000Z |
panel/pane/__init__.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | ["BSD-3-Clause"] | null | null | null |
"""
The pane module contains PaneBase objects which may render any type of
object as a bokeh model so it can be embedded in a panel. The pane
objects are one of three main components in panel, the other two being
layouts and widgets. Panes may render anything, including plots, text,
images, equations, etc.
"""
from .ace import Ace # noqa
from .alert import Alert # noqa
from .base import PaneBase, Pane, panel # noqa
from .equation import LaTeX # noqa
from .deckgl import DeckGL # noqa
from .echarts import ECharts # noqa
from .holoviews import HoloViews, Interactive # noqa
from .idom import IDOM # noqa
from .ipywidget import IPyWidget # noqa
from .image import GIF, JPG, PNG, SVG # noqa
from .markup import DataFrame, HTML, JSON, Markdown, Str # noqa
from .media import Audio, Video # noqa
from .perspective import Perspective # noqa
from .plotly import Plotly # noqa
from .plot import Bokeh, Matplotlib, RGGPlot, YT # noqa
from .streamz import Streamz # noqa
from .vega import Vega # noqa
from .vtk import VTKVolume, VTK # noqa
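# --- Hedged usage sketch (not part of the original module) ---
# The `panel` helper exported above wraps an arbitrary object in the most
# appropriate pane type; explicit pane classes such as Markdown can also be
# constructed directly. The strings below are illustrative only.
if __name__ == '__main__':
    md = Markdown("# Hello")           # an explicit Markdown pane
    auto = panel("*also markdown*")    # pane type inferred by `panel`
    print(type(md).__name__, type(auto).__name__)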
| 39.692308 | 70 | 0.766473 |
0a70cee0a0557c3caad5f55c378550af25be82fc | 15,688 | py | Python |
habitat/sims/habitat_simulator.py | eric-xw/habitat-api | cb3ac6c0c8c51b48a4c79f58b6a80000476cb258 | ["MIT"] | null | null | null |
habitat/sims/habitat_simulator.py | eric-xw/habitat-api | cb3ac6c0c8c51b48a4c79f58b6a80000476cb258 | ["MIT"] | null | null | null |
habitat/sims/habitat_simulator.py | eric-xw/habitat-api | cb3ac6c0c8c51b48a4c79f58b6a80000476cb258 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Any, Optional
from enum import Enum
import habitat
import habitat_sim
import numpy as np
from gym import spaces, Space
from habitat import SensorSuite, Config
from habitat.core.logging import logger
from habitat.core.simulator import AgentState, ShortestPathPoint
from habitat.core.simulator import RGBSensor, DepthSensor, SemanticSensor
RGBSENSOR_DIMENSION = 3
def overwrite_config(config_from: Config, config_to: Any) -> None:
for attr, value in config_from.items():
if hasattr(config_to, attr.lower()):
setattr(config_to, attr.lower(), value)
def check_sim_obs(obs, sensor):
assert obs is not None, (
"Observation corresponding to {} not present in "
"simulator's observations".format(sensor.uuid)
)
class SimulatorActions(Enum):
FORWARD = 0
LEFT = 1
RIGHT = 2
STOP = 3
class HabitatSimRGBSensor(RGBSensor):
sim_sensor_type: habitat_sim.SensorType
def __init__(self, config):
self.sim_sensor_type = habitat_sim.SensorType.COLOR
super().__init__(config=config)
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=0,
high=255,
shape=(self.config.HEIGHT, self.config.WIDTH, RGBSENSOR_DIMENSION),
dtype=np.uint8,
)
def get_observation(self, sim_obs):
obs = sim_obs.get(self.uuid, None)
# remove alpha channel
obs = obs[:, :, :RGBSENSOR_DIMENSION]
check_sim_obs(obs, self)
return obs
class HabitatSimDepthSensor(DepthSensor):
sim_sensor_type: habitat_sim.SensorType
min_depth_value: float
max_depth_value: float
def __init__(self, config):
self.sim_sensor_type = habitat_sim.SensorType.DEPTH
if config.NORMALIZE_DEPTH:
self.min_depth_value = 0
self.max_depth_value = 1
else:
self.min_depth_value = config.MIN_DEPTH
self.max_depth_value = config.MAX_DEPTH
super().__init__(config=config)
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=self.min_depth_value,
high=self.max_depth_value,
shape=(self.config.HEIGHT, self.config.WIDTH, 1),
dtype=np.float32,
)
def get_observation(self, sim_obs):
obs = sim_obs.get(self.uuid, None)
check_sim_obs(obs, self)
obs = np.clip(obs, self.config.MIN_DEPTH, self.config.MAX_DEPTH)
if self.config.NORMALIZE_DEPTH:
# normalize depth observation to [0, 1]
obs = (obs - self.config.MIN_DEPTH) / self.config.MAX_DEPTH
obs = np.expand_dims(obs, axis=2) # make depth observation a 3D array
return obs
class HabitatSimSemanticSensor(SemanticSensor):
sim_sensor_type: habitat_sim.SensorType
def __init__(self, config):
self.sim_sensor_type = habitat_sim.SensorType.SEMANTIC
super().__init__(config=config)
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=np.iinfo(np.uint32).min,
high=np.iinfo(np.uint32).max,
shape=(self.config.HEIGHT, self.config.WIDTH),
dtype=np.uint32,
)
def get_observation(self, sim_obs):
obs = sim_obs.get(self.uuid, None)
check_sim_obs(obs, self)
return obs
class HabitatSim(habitat.Simulator):
"""Simulator wrapper over habitat-sim
habitat-sim repo: https://github.com/facebookresearch/habitat-sim
Args:
config: configuration for initializing the simulator.
"""
def __init__(self, config: Config) -> None:
self.config = config
agent_config = self._get_agent_config()
sim_sensors = []
for sensor_name in agent_config.SENSORS:
sensor_cfg = getattr(self.config, sensor_name)
is_valid_sensor = hasattr(
habitat.sims.habitat_simulator, sensor_cfg.TYPE # type: ignore
)
assert is_valid_sensor, "invalid sensor type {}".format(
sensor_cfg.TYPE
)
sim_sensors.append(
getattr(
habitat.sims.habitat_simulator,
sensor_cfg.TYPE, # type: ignore
)(sensor_cfg)
)
self._sensor_suite = SensorSuite(sim_sensors)
self.sim_config = self.create_sim_config(self._sensor_suite)
self._current_scene = self.sim_config.sim_cfg.scene.id
self._sim = habitat_sim.Simulator(self.sim_config)
self._action_space = spaces.Discrete(
len(self.sim_config.agents[0].action_space)
)
self._is_episode_active = False
def create_sim_config(
self, _sensor_suite: SensorSuite
) -> habitat_sim.Configuration:
sim_config = habitat_sim.SimulatorConfiguration()
sim_config.scene.id = self.config.SCENE
sim_config.gpu_device_id = self.config.HABITAT_SIM_V0.GPU_DEVICE_ID
agent_config = habitat_sim.AgentConfiguration()
overwrite_config(
config_from=self._get_agent_config(), config_to=agent_config
)
sensor_specifications = []
for sensor in _sensor_suite.sensors.values():
sim_sensor_cfg = habitat_sim.SensorSpec()
sim_sensor_cfg.uuid = sensor.uuid
sim_sensor_cfg.resolution = list(
sensor.observation_space.shape[:2]
)
sim_sensor_cfg.parameters["hfov"] = str(sensor.config.HFOV)
sim_sensor_cfg.position = sensor.config.POSITION
# TODO(maksymets): Add configure method to Sensor API to avoid
# accessing child attributes through parent interface
sim_sensor_cfg.sensor_type = sensor.sim_sensor_type # type: ignore
sensor_specifications.append(sim_sensor_cfg)
# If there is no sensors specified create a dummy sensor so simulator
# won't throw an error
if not _sensor_suite.sensors.values():
sim_sensor_cfg = habitat_sim.SensorSpec()
sim_sensor_cfg.resolution = [1, 1]
sensor_specifications.append(sim_sensor_cfg)
agent_config.sensor_specifications = sensor_specifications
agent_config.action_space = {
SimulatorActions.LEFT.value: habitat_sim.ActionSpec(
"turn_left",
habitat_sim.ActuationSpec(amount=self.config.TURN_ANGLE),
),
SimulatorActions.RIGHT.value: habitat_sim.ActionSpec(
"turn_right",
habitat_sim.ActuationSpec(amount=self.config.TURN_ANGLE),
),
SimulatorActions.FORWARD.value: habitat_sim.ActionSpec(
"move_forward",
habitat_sim.ActuationSpec(
amount=self.config.FORWARD_STEP_SIZE
),
),
SimulatorActions.STOP.value: habitat_sim.ActionSpec("stop"),
}
return habitat_sim.Configuration(sim_config, [agent_config])
@property
def sensor_suite(self) -> SensorSuite:
return self._sensor_suite
@property
def action_space(self) -> Space:
return self._action_space
@property
def is_episode_active(self) -> bool:
return self._is_episode_active
def _update_agents_state(self) -> bool:
is_updated = False
for agent_id, _ in enumerate(self.config.AGENTS):
agent_cfg = self._get_agent_config(agent_id)
if agent_cfg.IS_SET_START_STATE:
self.set_agent_state(
agent_cfg.START_POSITION,
agent_cfg.START_ROTATION,
agent_id,
)
is_updated = True
return is_updated
def reset(self):
sim_obs = self._sim.reset()
if self._update_agents_state():
sim_obs = self._sim.get_sensor_observations()
self._is_episode_active = True
return self._sensor_suite.get_observations(sim_obs)
def step(self, action):
assert self._is_episode_active, (
"episode is not active, environment not RESET or "
"STOP action called previously"
)
if action == SimulatorActions.STOP.value:
self._is_episode_active = False
sim_obs = self._sim.get_sensor_observations()
else:
sim_obs = self._sim.step(action)
observations = self._sensor_suite.get_observations(sim_obs)
return observations
def render(self, mode: str = "rgb") -> Any:
"""
Args:
mode: sensor whose observation is used for returning the frame,
eg: "rgb", "depth", "semantic"
Returns:
rendered frame according to the mode
"""
sim_obs = self._sim.get_sensor_observations()
observations = self._sensor_suite.get_observations(sim_obs)
output = observations.get(mode)
assert output is not None, "mode {} sensor is not active".format(mode)
return output
def seed(self, seed):
self._sim.seed(seed)
def reconfigure(self, config: Config) -> None:
# TODO(maksymets): Switch to Habitat-Sim more efficient caching
is_same_scene = config.SCENE == self._current_scene
self.config = config
self.sim_config = self.create_sim_config(self._sensor_suite)
if not is_same_scene:
self._current_scene = config.SCENE
self._sim.close()
del self._sim
self._sim = habitat_sim.Simulator(self.sim_config)
self._update_agents_state()
def geodesic_distance(self, position_a, position_b):
path = habitat_sim.ShortestPath()
path.requested_start = np.array(position_a, dtype=np.float32)
path.requested_end = np.array(position_b, dtype=np.float32)
self._sim.pathfinder.find_path(path)
return path.geodesic_distance
def action_space_shortest_path(
self, source: AgentState, targets: List[AgentState], agent_id: int = 0
) -> List[ShortestPathPoint]:
"""
Returns:
List of agent states and actions along the shortest path from
source to the nearest target (both included). If one of the
target(s) is identical to the source, a list containing only
one node with the identical agent state is returned. Returns
an empty list in case none of the targets are reachable from
the source. For the last item in the returned list the action
will be None.
"""
raise NotImplementedError(
"This function is no longer implemented. Please use the greedy follower instead"
)
@property
def up_vector(self):
return np.array([0.0, 1.0, 0.0])
@property
def forward_vector(self):
return -np.array([0.0, 0.0, 1.0])
def get_straight_shortest_path_points(self, position_a, position_b):
path = habitat_sim.ShortestPath()
path.requested_start = position_a
path.requested_end = position_b
self._sim.pathfinder.find_path(path)
return path.points
def sample_navigable_point(self):
return self._sim.pathfinder.get_random_navigable_point().tolist()
def is_navigable(self, point: List[float]):
return self._sim.pathfinder.is_navigable(point)
def semantic_annotations(self):
"""
Returns:
SemanticScene which is a three level hierarchy of semantic
annotations for the current scene. Specifically this method
returns a SemanticScene which contains a list of SemanticLevel's
where each SemanticLevel contains a list of SemanticRegion's where
each SemanticRegion contains a list of SemanticObject's.
SemanticScene has attributes: aabb(axis-aligned bounding box) which
has attributes aabb.center and aabb.sizes which are 3d vectors,
categories, levels, objects, regions.
SemanticLevel has attributes: id, aabb, objects and regions.
SemanticRegion has attributes: id, level, aabb, category (to get
name of category use category.name()) and objects.
SemanticObject has attributes: id, region, aabb, obb (oriented
bounding box) and category.
SemanticScene contains List[SemanticLevels]
SemanticLevel contains List[SemanticRegion]
SemanticRegion contains List[SemanticObject]
Example to loop through in a hierarchical fashion:
for level in semantic_scene.levels:
for region in level.regions:
for obj in region.objects:
"""
return self._sim.semantic_scene
def close(self):
self._sim.close()
@property
def index_stop_action(self):
return SimulatorActions.STOP.value
@property
def index_forward_action(self):
return SimulatorActions.FORWARD.value
def _get_agent_config(self, agent_id: Optional[int] = None) -> Any:
if agent_id is None:
agent_id = self.config.DEFAULT_AGENT_ID
agent_name = self.config.AGENTS[agent_id]
agent_config = getattr(self.config, agent_name)
return agent_config
def get_agent_state(self, agent_id: int = 0):
assert agent_id == 0, "No support of multi agent in {} yet.".format(
self.__class__.__name__
)
return self._sim.get_agent(agent_id).get_state()
def set_agent_state(
self,
position: List[float] = None,
rotation: List[float] = None,
agent_id: int = 0,
reset_sensors: bool = True,
) -> None:
"""Sets agent state similar to initialize_agent, but without agents
creation.
Args:
position: numpy ndarray containing 3 entries for (x, y, z).
rotation: numpy ndarray with 4 entries for (x, y, z, w) elements
of unit quaternion (versor) representing agent 3D orientation,
(https://en.wikipedia.org/wiki/Versor)
agent_id: int identification of agent from multiagent setup.
reset_sensors: bool for if sensor changes (e.g. tilt) should be
reset).
"""
agent = self._sim.get_agent(agent_id)
state = self.get_agent_state(agent_id)
state.position = position
state.rotation = rotation
# NB: The agent state also contains the sensor states in _absolute_ coordinates.
# In order to set the agent's body to a specific location and have the sensors follow,
# we must not provide any state for the sensors.
# This will cause them to follow the agent's body
state.sensor_states = dict()
agent.set_state(state, reset_sensors)
self._check_agent_position(position, agent_id)
# TODO (maksymets): Remove check after simulator became stable
def _check_agent_position(self, position, agent_id=0):
if not np.allclose(position, self.get_agent_state(agent_id).position):
logger.info("Agent state diverges from configured start position.")
def distance_to_closest_obstacle(self, position, max_search_radius=2.0):
return self._sim.pathfinder.distance_to_closest_obstacle(
position, max_search_radius
)
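# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the reset/step contract of HabitatSim: reset() must run before
# step(), and issuing STOP deactivates the episode. Building a real `config`
# needs the habitat config system, so this function stays schematic.
def _example_episode(sim: HabitatSim) -> None:
    observations = sim.reset()
    for _ in range(10):                  # a fixed number of illustrative steps
        if not sim.is_episode_active:
            break
        observations = sim.step(SimulatorActions.FORWARD.value)
    if sim.is_episode_active:
        sim.step(SimulatorActions.STOP.value)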
| 35.413093 | 94 | 0.641382 |
79bef2e075970f5a21dd700766b46c9595bf6172 | 1,498 | py | Python |
dockci/server.py | RickyCook/paas-in-a-day-dockci | 0320b4df5a697d94d19d5b4293c1d4d07c383f75 | ["0BSD"] | 1 | 2017-04-28T22:10:35.000Z | 2017-04-28T22:10:35.000Z |
dockci/server.py | RickyCook/paas-in-a-day-dockci | 0320b4df5a697d94d19d5b4293c1d4d07c383f75 | ["0BSD"] | null | null | null |
dockci/server.py | RickyCook/paas-in-a-day-dockci | 0320b4df5a697d94d19d5b4293c1d4d07c383f75 | ["0BSD"] | null | null | null |
"""
Functions for setting up and starting the DockCI application server
"""
import logging
import mimetypes
from flask import Flask
from flask_mail import Mail
from dockci.models.config import Config
from dockci.util import setup_templates
APP = Flask(__name__)
MAIL = Mail()
CONFIG = Config()
APP.config.model = CONFIG # For templates
def app_init():
"""
Pre-run app setup
"""
logger = logging.getLogger('dockci.init')
logger.info("Loading app config")
APP.secret_key = CONFIG.secret
APP.config['MAIL_SERVER'] = CONFIG.mail_server
APP.config['MAIL_PORT'] = CONFIG.mail_port
APP.config['MAIL_USE_TLS'] = CONFIG.mail_use_tls
APP.config['MAIL_USE_SSL'] = CONFIG.mail_use_ssl
APP.config['MAIL_USERNAME'] = CONFIG.mail_username
APP.config['MAIL_PASSWORD'] = CONFIG.mail_password
APP.config['MAIL_DEFAULT_SENDER'] = CONFIG.mail_default_sender
mimetypes.add_type('application/x-yaml', 'yaml')
app_init_views()
def app_init_views():
"""
Activate all DockCI views
"""
# pylint:disable=unused-variable
import dockci.views.core
import dockci.views.build
import dockci.views.job
setup_templates(APP)
def run(app_args):
"""
Setup, and run the DockCI application server, using the args given to
configure it
"""
app_init()
server_args = {
key: val
for key, val in app_args.items()
if key in ('host', 'port', 'debug')
}
APP.run(**server_args)
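# --- Hedged usage sketch (not part of the original module) ---
# `run` only forwards the 'host', 'port' and 'debug' keys to Flask; any other
# keys in app_args are ignored. The values below are illustrative only.
if __name__ == '__main__':
    run({'host': '0.0.0.0', 'port': 5000, 'debug': True})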
| 21.098592 | 73 | 0.682911 |
7fbc42cb9850d9914223c80745be6ae7a6c8bde6 | 7,596 | py | Python |
service/api/db/__init__.py | alanquillin/rpi-light-controller | a22d1bbe661e9842ac61aea38164b9faf6fa1cc8 | ["MIT"] | null | null | null |
service/api/db/__init__.py | alanquillin/rpi-light-controller | a22d1bbe661e9842ac61aea38164b9faf6fa1cc8 | ["MIT"] | null | null | null |
service/api/db/__init__.py | alanquillin/rpi-light-controller | a22d1bbe661e9842ac61aea38164b9faf6fa1cc8 | ["MIT"] | null | null | null |
import re
from contextlib import contextmanager
from functools import wraps
from urllib.parse import quote
from psycopg2.errors import ( # pylint: disable=no-name-in-module
InvalidTextRepresentation,
NotNullViolation,
UniqueViolation,
)
from sqlalchemy import create_engine, func, text
from sqlalchemy.exc import DataError, IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.properties import ColumnProperty
from lib import exceptions as local_exc
from lib import json
Base = declarative_base()
__all__ = [
"device_zones",
"devices",
"zones"
]
@contextmanager
def convert_exception(sqla, psycopg2=None, new=None, param_names=None, str_match=""):
if param_names is None:
param_names = []
try:
yield
except sqla as exc:
if str_match not in str(exc):
raise
if param_names:
args = [str(exc.params.get(param)) for param in param_names]
else:
args = [str(exc)]
if psycopg2 is None:
raise new() from exc
if isinstance(exc.orig, psycopg2):
if not param_names:
args = [str(exc.orig)]
raise new(*args) from exc
raise
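# --- Hedged usage note (not part of the original module) ---
# convert_exception re-raises a matching SQLAlchemy/psycopg2 error as a domain
# exception; QueryMethodsMixin.create below uses it exactly like this:
#
#     with convert_exception(IntegrityError, psycopg2=UniqueViolation,
#                            new=local_exc.ItemAlreadyExists, str_match="_pkey"):
#         session.commit()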
def create_session(config, **kwargs):
engine_kwargs = {
"connect_args": {"application_name": config.get("app_id", f"UNKNOWN=>({__name__})")},
"json_serializer": json.dumps,
}
password = config.get("db.password")
engine = create_engine(
(
"postgresql://"
f"{quote(config.get('db.username'))}:{quote(password)}@{quote(config.get('db.host'))}:"
f"{config.get('db.port')}/{quote(config.get('db.name'))}"
),
**engine_kwargs,
)
return sessionmaker(bind=engine, **kwargs)()
@contextmanager
def session_scope(config, **kwargs):
session = create_session(config, **kwargs)
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
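# --- Hedged usage note (not part of the original module) ---
# session_scope commits on success and rolls back on any exception, so callers
# only need the `with` block (the model instance below is hypothetical):
#
#     with session_scope(config) as session:
#         session.add(some_model_instance)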
def _get_column_value(instance, col_name):
try:
return getattr(instance, col_name)
except AttributeError:
for attr, column in inspect(instance.__class__).c.items():
if column.name == col_name:
return getattr(instance, attr)
raise AttributeError
class DictifiableMixin:
def to_dict(self, include_relationships=None):
result = {}
for name, attr in inspect(self.__class__).all_orm_descriptors.items():
if name.startswith("_"):
continue
if hasattr(attr, "property") and not isinstance(attr.property, ColumnProperty):
continue
name = getattr(attr, "name", name)
result[name] = _get_column_value(self, name)
if not include_relationships:
include_relationships = []
for rel in include_relationships:
val = getattr(self, rel)
if val is not None:
result[rel] = getattr(self, rel).to_dict()
return result
def _json_repr_(self, *args, **kwargs):
return self.to_dict(*args, **kwargs)
_MERGEABLE_FIELDS_LIST = "_mergeable_fields"
def mergeable_fields(*fields_list):
def decorator(cls):
setattr(cls, _MERGEABLE_FIELDS_LIST, fields_list)
return cls
return decorator
_ENUM_EXC_MAP = "_custom_exception_map"
def _merge_into(target, updates):
if target is None:
return updates
for k, v in updates.items():
if k in target and isinstance(v, dict) and isinstance(target[k], dict):
_merge_into(target[k], v)
elif k in target and isinstance(v, list) and isinstance(target[k], list):
target[k].extend(v)
else:
target[k] = v
return target
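# --- Added example (not part of the original module): how _merge_into()
# combines nested updates; the values are purely illustrative.
def _demo_merge_into():
    merged = _merge_into(
        {"meta": {"colour": "red"}, "tags": ["a"]},
        {"meta": {"brightness": 10}, "tags": ["b"], "name": "lamp"},
    )
    # nested dicts are merged, lists are extended, other keys are set/overwritten
    assert merged == {
        "meta": {"colour": "red", "brightness": 10},
        "tags": ["a", "b"],
        "name": "lamp",
    }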
class QueryMethodsMixin:
@classmethod
def query(cls, session, q=None, slice_start=None, slice_end=None, **kwargs):
if q is None:
q = session.query(cls).filter_by(**kwargs)
        if None not in (slice_start, slice_end):
q = q.slice(slice_start, slice_end)
try:
return q.all()
except DataError as err:
if not isinstance(err.orig, InvalidTextRepresentation):
raise
if "invalid input value for enum" not in str(err.orig):
raise
# somewhat finicky parsing of the PG error here
# expected format returned by str(err.orig):
# E psycopg2.errors.InvalidTextRepresentation: invalid input value for enum "OrderType": "invalid"
# E LINE 3: ...3-4b88-a6b0-5a01d901233f' AND orders.order_type = 'invalid' ...
# E ^
msg, desc, pointer, _ = str(err.orig).split("\n")
# get Enum name from the first line
enum_name = msg.split("for enum ")[1].split(":")[0].strip('"')
exc = getattr(cls, _ENUM_EXC_MAP).get(enum_name, local_exc.InvalidEnum)
# Use the indicator on the third line to find the column name in the second line
err_ix = pointer.index("^")
_, column_name = desc[:err_ix].split()[-2].split(".")
raise exc(err.params.get(column_name, "could not find offending value")) from err
@classmethod
def get_by_pkey(cls, session, pkey):
return session.query(cls).get(pkey)
@classmethod
def create(cls, session, autocommit=True, **kwargs):
for key in kwargs:
if not hasattr(cls, key):
raise local_exc.InvalidParameter(key)
row = cls(**kwargs)
session.add(row)
if autocommit:
try:
with convert_exception(
IntegrityError, psycopg2=NotNullViolation, new=local_exc.RequiredParameterNotFound
), convert_exception(
IntegrityError, psycopg2=UniqueViolation, new=local_exc.ItemAlreadyExists, str_match="_pkey"
):
session.commit()
except:
session.rollback()
raise
return row
@classmethod
def update_query(cls, session, filters=None, **updates):
if filters is None:
filters = {}
session.query(cls).filter_by(**filters).update(updates)
@classmethod
def update(cls, session, pkey, merge_nested=False, autocommit=True, **kwargs):
merge_fields = getattr(cls, _MERGEABLE_FIELDS_LIST, [])
row = cls.get_by_pkey(session, pkey)
for key, value in kwargs.items():
if not hasattr(cls, key):
raise local_exc.InvalidParameter(key)
if merge_nested and key in merge_fields:
current = getattr(row, key, {})
value = _merge_into(current, value)
setattr(row, key, value)
session.add(row)
if autocommit:
try:
session.commit()
except:
session.rollback()
raise
return row
@classmethod
def delete(cls, session, pkey, autocommit=True):
session.delete(cls.get_by_pkey(session, pkey))
if autocommit:
try:
session.commit()
except:
session.rollback()
raise
@classmethod
def all(cls, session):
return session.query(cls).all()
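# --- Added example (not part of the original module): a hypothetical model
# sketch showing how Base, DictifiableMixin and QueryMethodsMixin are meant to
# be combined; the real models ("devices", "zones", ...) live in the sibling
# modules listed in __all__. Guarded so nothing is registered on import.
if __name__ == "__main__":
    from sqlalchemy import Column, Integer, String

    class DemoZone(Base, DictifiableMixin, QueryMethodsMixin):
        __tablename__ = "demo_zones"
        id = Column(Integer, primary_key=True)
        name = Column(String, nullable=False)

    print(DemoZone(name="living-room").to_dict())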
# ===== markdown/extensions/toc.py (hatzel/markdown-spoilers, BSD-3-Clause) =====
"""
Table of Contents Extension for Python-Markdown
===============================================
See <https://Python-Markdown.github.io/extensions/toc>
for documentation.
Original code Copyright 2008 [Jack Miller](https://codezen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, string_type
import re
import unicodedata
def slugify(value, separator):
""" Slugify a string, to make it URL friendly. """
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = re.sub(r'[^\w\s-]', '', value.decode('ascii')).strip().lower()
return re.sub(r'[%s\s]+' % separator, separator, value)
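# --- Added example (not part of the original extension): the anchors
# slugify() produces for a couple of illustrative headings.
def _demo_slugify():
    assert slugify('Chapter 1: Intro', '-') == 'chapter-1-intro'
    assert slugify('Caf\u00e9 Menu', '-') == 'cafe-menu'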
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
def unique(id, ids):
""" Ensure id is unique in set of ids. Append '_1', '_2'... if not """
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d' % (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d' % (id, 1)
ids.add(id)
return id
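# --- Added example (not part of the original extension): how unique()
# suffixes an id that is already taken and records the result in the set.
def _demo_unique():
    ids = {'intro', 'intro_1'}
    assert unique('intro', ids) == 'intro_2'
    assert 'intro_2' in ids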
def stashedHTML2text(text, md):
""" Extract raw HTML from stash, reduce to plain text and swap with placeholder. """
def _html_sub(m):
""" Substitute raw html with plain text. """
try:
raw = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
except (IndexError, TypeError): # pragma: no cover
return m.group(0)
        # Strip out tags and entities - leaving text
return re.sub(r'(<[^>]+>)|(&[\#a-zA-Z0-9]+;)', '', raw)
return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
def nest_toc_tokens(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
    A list that starts at the wrong level is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
ordered_list = []
if len(toc_list):
# Initialize everything by processing the first entry
last = toc_list.pop(0)
last['children'] = []
levels = [last['level']]
ordered_list.append(last)
parents = []
# Walk the rest nesting the entries properly
while toc_list:
t = toc_list.pop(0)
current_level = t['level']
t['children'] = []
# Reduce depth if current level < last item's level
if current_level < levels[-1]:
# Pop last level since we know we are less than it
levels.pop()
# Pop parents and levels we are less than or equal to
to_pop = 0
for p in reversed(parents):
if current_level <= p['level']:
to_pop += 1
else: # pragma: no cover
break
if to_pop:
levels = levels[:-to_pop]
parents = parents[:-to_pop]
# Note current level as last
levels.append(current_level)
# Level is the same, so append to
# the current parent (if available)
if current_level == levels[-1]:
(parents[-1]['children'] if parents
else ordered_list).append(t)
# Current level is > last item's level,
# So make last item a parent and append current as child
else:
last['children'].append(t)
parents.append(last)
levels.append(current_level)
last = t
return ordered_list
class TocTreeprocessor(Treeprocessor):
def __init__(self, md, config):
super(TocTreeprocessor, self).__init__(md)
self.marker = config["marker"]
self.title = config["title"]
self.base_level = int(config["baselevel"]) - 1
self.slugify = config["slugify"]
self.sep = config["separator"]
self.use_anchors = parseBoolValue(config["anchorlink"])
self.use_permalinks = parseBoolValue(config["permalink"], False)
if self.use_permalinks is None:
self.use_permalinks = config["permalink"]
self.header_rgx = re.compile("[Hh][123456]")
if isinstance(config["toc_depth"], string_type) and '-' in config["toc_depth"]:
self.toc_top, self.toc_bottom = [int(x) for x in config["toc_depth"].split('-')]
else:
self.toc_top = 1
self.toc_bottom = int(config["toc_depth"])
def iterparent(self, node):
''' Iterator wrapper to get allowed parent and child all at once. '''
# We do not allow the marker inside a header as that
        # would cause an endless loop of placing a new TOC
        # inside a previously generated TOC.
for child in node:
if not self.header_rgx.match(child.tag) and child.tag not in ['pre', 'code']:
yield node, child
for p, c in self.iterparent(child):
yield p, c
def replace_marker(self, root, elem):
''' Replace marker with elem. '''
for (p, c) in self.iterparent(root):
text = ''.join(c.itertext()).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
if c.text and c.text.strip() == self.marker:
for i in range(len(p)):
if p[i] == c:
p[i] = elem
break
def set_level(self, elem):
''' Adjust header level according to base level. '''
level = int(elem.tag[-1]) + self.base_level
if level > 6:
level = 6
elem.tag = 'h%d' % level
def add_anchor(self, c, elem_id): # @ReservedAssignment
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = "toclink"
c.text = ""
for elem in c:
anchor.append(elem)
while len(c):
c.remove(c[0])
c.append(anchor)
def add_permalink(self, c, elem_id):
permalink = etree.Element("a")
permalink.text = ("%spara;" % AMP_SUBSTITUTE
if self.use_permalinks is True
else self.use_permalinks)
permalink.attrib["href"] = "#" + elem_id
permalink.attrib["class"] = "headerlink"
permalink.attrib["title"] = "Permanent link"
c.append(permalink)
def build_toc_div(self, toc_list):
""" Return a string div given a toc list. """
div = etree.Element("div")
div.attrib["class"] = "toc"
# Add title to the div
if self.title:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.title
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
build_etree_ul(toc_list, div)
if 'prettify' in self.md.treeprocessors:
self.md.treeprocessors['prettify'].run(div)
return div
def run(self, doc):
# Get a list of id attributes
used_ids = set()
for el in doc.iter():
if "id" in el.attrib:
used_ids.add(el.attrib["id"])
toc_tokens = []
for el in doc.iter():
if isinstance(el.tag, string_type) and self.header_rgx.match(el.tag):
self.set_level(el)
if int(el.tag[-1]) < self.toc_top or int(el.tag[-1]) > self.toc_bottom:
continue
text = ''.join(el.itertext()).strip()
# Do not override pre-existing ids
if "id" not in el.attrib:
innertext = stashedHTML2text(text, self.md)
el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)
toc_tokens.append({
'level': int(el.tag[-1]),
'id': el.attrib["id"],
'name': el.attrib.get('data-toc-label', text)
})
# Remove the data-toc-label attribute as it is no longer needed
if 'data-toc-label' in el.attrib:
del el.attrib['data-toc-label']
if self.use_anchors:
self.add_anchor(el, el.attrib["id"])
if self.use_permalinks:
self.add_permalink(el, el.attrib["id"])
toc_tokens = nest_toc_tokens(toc_tokens)
div = self.build_toc_div(toc_tokens)
if self.marker:
self.replace_marker(doc, div)
# serialize and attach to markdown instance.
toc = self.md.serializer(div)
for pp in self.md.postprocessors:
toc = pp.run(toc)
self.md.toc_tokens = toc_tokens
self.md.toc = toc
class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, **kwargs):
self.config = {
"marker": ['[TOC]',
'Text to find and replace with Table of Contents - '
'Set to an empty string to disable. Defaults to "[TOC]"'],
"title": ["",
"Title to insert into TOC <div> - "
"Defaults to an empty string"],
"anchorlink": [False,
"True if header should be a self link - "
"Defaults to False"],
"permalink": [0,
"True or link text if a Sphinx-style permalink should "
"be added - Defaults to False"],
"baselevel": ['1', 'Base level for headers.'],
"slugify": [slugify,
"Function to generate anchors based on header text - "
"Defaults to the headerid ext's slugify function."],
'separator': ['-', 'Word separator. Defaults to "-".'],
"toc_depth": [6,
                      'Define the range of section levels to include in '
                      'the Table of Contents. A single integer (b) defines '
                      'the bottom section level (<h1>..<hb>) only. '
                      'A string consisting of two digits separated by a hyphen '
                      'in between ("2-5") defines the top (t) and the '
                      'bottom (b) (<ht>..<hb>). Defaults to `6` (bottom).'],
}
super(TocExtension, self).__init__(**kwargs)
def extendMarkdown(self, md):
md.registerExtension(self)
self.md = md
self.reset()
tocext = self.TreeProcessorClass(md, self.getConfigs())
# Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assigned
# by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.register(tocext, 'toc', 5)
def reset(self):
self.md.toc = ''
self.md.toc_tokens = []
def makeExtension(**kwargs): # pragma: no cover
return TocExtension(**kwargs)
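# --- Added example (not part of the original extension): a minimal usage
# sketch, assuming this package is importable as `markdown`; the input text
# and option values are illustrative only.
def _demo_toc_extension():
    import markdown
    md = markdown.Markdown(extensions=[TocExtension(baselevel=2, toc_depth='2-4')])
    html = md.convert('[TOC]\n\n# Title\n\n## Section')
    # md.toc is the rendered <div class="toc">, md.toc_tokens the nested list
    # produced by nest_toc_tokens().
    return html, md.toc, md.toc_tokens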
# ===== setup.py (quecloud/rpxdock, Apache-2.0) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'rpxdock'
DESCRIPTION = 'Protein Docking Toolkit'
URL = 'https://github.com/willsheffler/rpxdock.git'
EMAIL = 'willsheffler@gmail.com'
AUTHOR = 'Will Sheffler'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
'numpy==1.16.3',
'xarray==0.11.3',
'pandas==0.24.2',
'pytest',
'pytest-sugar',
'pytest-xdist',
'tqdm',
'homog',
'cppimport',
]
# What packages are optional?
EXTRAS = {
'pyrosetta': 'pyrosetta',
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace('-', '_').replace(' ', '_')
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
'Support setup.py upload.'
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
'Prints things in bold.'
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
def get_files(*patterns):
fnames = set()
from pathlib import Path
for pattern in patterns:
for filename in Path('rpxdock').glob(pattern):
            # str.lstrip strips a character set, not a prefix; slice off the leading 'rpxdock/' instead
            fnames.add(str(filename)[len('rpxdock/'):])
return list(fnames)
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
    long_description_content_type='text/markdown',  # README.md is Markdown, not reST
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(),
# If your package is a single module, use this instead of 'packages':
# py_modules=['rpxdock'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
tests_require=['pytest'],
package_dir={'rpxdock': 'rpxdock'},
package_data=dict(rpxdock=[
'*/*.hpp',
'*/*.cpp',
'data/*.pickle',
'data/hscore/*',
'data/pdb/*',
'sampling/data/karney/*',
'rotamer/richardson.csv',
] + get_files('extern/**/*')),
include_package_data=True,
license='Apache2',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
# ===== matter/settings.py (krishnatahr/matter, Apache-2.0) =====
"""
Django settings for matter project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '11l1i$83lkz1a&6-&r+c87eee*#p&2g3gv72bcg58e9n_vq8o%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rssfeeds',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'matter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'matter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'articles',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# ===== sdk/python/pulumi_akamai/properties/property.py (pulumi/pulumi-akamai, ECL-2.0 / Apache-2.0) =====
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PropertyArgs', 'Property']
@pulumi.input_type
class PropertyArgs:
def __init__(__self__, *,
contacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
cp_code: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
hostnames: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyHostnameArgs']]]] = None,
is_secure: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
origins: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyOriginArgs']]]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
rule_format: Optional[pulumi.Input[str]] = None,
rule_warnings: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleWarningArgs']]]] = None,
rules: Optional[pulumi.Input[str]] = None,
variables: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Property resource.
:param pulumi.Input[str] contract_id: Contract ID to be assigned to the Property
:param pulumi.Input[str] group_id: Group ID to be assigned to the Property
:param pulumi.Input[str] name: Name to give to the Property (must be unique)
:param pulumi.Input[str] product_id: Product ID to be assigned to the Property
:param pulumi.Input[str] rule_format: Specify the rule format version (defaults to latest version available when created)
:param pulumi.Input[str] rules: Property Rules as JSON
"""
if contacts is not None:
warnings.warn("""The setting \"contact\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contacts is deprecated: The setting \"contact\" has been deprecated.""")
if contacts is not None:
pulumi.set(__self__, "contacts", contacts)
if contract is not None:
warnings.warn("""The setting \"contract\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contract is deprecated: The setting \"contract\" has been deprecated.""")
if contract is not None:
pulumi.set(__self__, "contract", contract)
if contract_id is not None:
pulumi.set(__self__, "contract_id", contract_id)
if cp_code is not None:
warnings.warn("""The setting \"cp_code\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""cp_code is deprecated: The setting \"cp_code\" has been deprecated.""")
if cp_code is not None:
pulumi.set(__self__, "cp_code", cp_code)
if group is not None:
warnings.warn("""The setting \"group\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""group is deprecated: The setting \"group\" has been deprecated.""")
if group is not None:
pulumi.set(__self__, "group", group)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if hostnames is not None:
pulumi.set(__self__, "hostnames", hostnames)
if is_secure is not None:
warnings.warn("""The setting \"is_secure\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""is_secure is deprecated: The setting \"is_secure\" has been deprecated.""")
if is_secure is not None:
pulumi.set(__self__, "is_secure", is_secure)
if name is not None:
pulumi.set(__self__, "name", name)
if origins is not None:
warnings.warn("""The setting \"origin\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""origins is deprecated: The setting \"origin\" has been deprecated.""")
if origins is not None:
pulumi.set(__self__, "origins", origins)
if product is not None:
warnings.warn("""The setting \"product\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""product is deprecated: The setting \"product\" has been deprecated.""")
if product is not None:
pulumi.set(__self__, "product", product)
if product_id is not None:
pulumi.set(__self__, "product_id", product_id)
if rule_format is not None:
pulumi.set(__self__, "rule_format", rule_format)
if rule_warnings is not None:
warnings.warn("""Rule warnings will not be set in state anymore""", DeprecationWarning)
pulumi.log.warn("""rule_warnings is deprecated: Rule warnings will not be set in state anymore""")
if rule_warnings is not None:
pulumi.set(__self__, "rule_warnings", rule_warnings)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if variables is not None:
warnings.warn("""The setting \"variables\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""variables is deprecated: The setting \"variables\" has been deprecated.""")
if variables is not None:
pulumi.set(__self__, "variables", variables)
@property
@pulumi.getter
def contacts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "contacts")
@contacts.setter
def contacts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "contacts", value)
@property
@pulumi.getter
def contract(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "contract")
@contract.setter
def contract(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contract", value)
@property
@pulumi.getter(name="contractId")
def contract_id(self) -> Optional[pulumi.Input[str]]:
"""
Contract ID to be assigned to the Property
"""
return pulumi.get(self, "contract_id")
@contract_id.setter
def contract_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contract_id", value)
@property
@pulumi.getter(name="cpCode")
def cp_code(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cp_code")
@cp_code.setter
def cp_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cp_code", value)
@property
@pulumi.getter
def group(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group")
@group.setter
def group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Group ID to be assigned to the Property
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def hostnames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PropertyHostnameArgs']]]]:
return pulumi.get(self, "hostnames")
@hostnames.setter
def hostnames(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyHostnameArgs']]]]):
pulumi.set(self, "hostnames", value)
@property
@pulumi.getter(name="isSecure")
def is_secure(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_secure")
@is_secure.setter
def is_secure(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_secure", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name to give to the Property (must be unique)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PropertyOriginArgs']]]]:
return pulumi.get(self, "origins")
@origins.setter
def origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyOriginArgs']]]]):
pulumi.set(self, "origins", value)
@property
@pulumi.getter
def product(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "product")
@product.setter
def product(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product", value)
@property
@pulumi.getter(name="productId")
def product_id(self) -> Optional[pulumi.Input[str]]:
"""
Product ID to be assigned to the Property
"""
return pulumi.get(self, "product_id")
@product_id.setter
def product_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product_id", value)
@property
@pulumi.getter(name="ruleFormat")
def rule_format(self) -> Optional[pulumi.Input[str]]:
"""
Specify the rule format version (defaults to latest version available when created)
"""
return pulumi.get(self, "rule_format")
@rule_format.setter
def rule_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_format", value)
@property
@pulumi.getter(name="ruleWarnings")
def rule_warnings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleWarningArgs']]]]:
return pulumi.get(self, "rule_warnings")
@rule_warnings.setter
def rule_warnings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleWarningArgs']]]]):
pulumi.set(self, "rule_warnings", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[str]]:
"""
Property Rules as JSON
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter
def variables(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "variables")
@variables.setter
def variables(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "variables", value)
@pulumi.input_type
class _PropertyState:
def __init__(__self__, *,
contacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
cp_code: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
hostnames: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyHostnameArgs']]]] = None,
is_secure: Optional[pulumi.Input[bool]] = None,
latest_version: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
origins: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyOriginArgs']]]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
production_version: Optional[pulumi.Input[int]] = None,
read_version: Optional[pulumi.Input[int]] = None,
rule_errors: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleErrorArgs']]]] = None,
rule_format: Optional[pulumi.Input[str]] = None,
rule_warnings: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleWarningArgs']]]] = None,
rules: Optional[pulumi.Input[str]] = None,
staging_version: Optional[pulumi.Input[int]] = None,
variables: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Property resources.
:param pulumi.Input[str] contract_id: Contract ID to be assigned to the Property
:param pulumi.Input[str] group_id: Group ID to be assigned to the Property
:param pulumi.Input[int] latest_version: Property's current latest version number
:param pulumi.Input[str] name: Name to give to the Property (must be unique)
:param pulumi.Input[str] product_id: Product ID to be assigned to the Property
:param pulumi.Input[int] production_version: Property's version currently activated in production (zero when not active in production)
:param pulumi.Input[int] read_version: Required property's version to be read
:param pulumi.Input[str] rule_format: Specify the rule format version (defaults to latest version available when created)
:param pulumi.Input[str] rules: Property Rules as JSON
:param pulumi.Input[int] staging_version: Property's version currently activated in staging (zero when not active in staging)
"""
if contacts is not None:
warnings.warn("""The setting \"contact\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contacts is deprecated: The setting \"contact\" has been deprecated.""")
if contacts is not None:
pulumi.set(__self__, "contacts", contacts)
if contract is not None:
warnings.warn("""The setting \"contract\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contract is deprecated: The setting \"contract\" has been deprecated.""")
if contract is not None:
pulumi.set(__self__, "contract", contract)
if contract_id is not None:
pulumi.set(__self__, "contract_id", contract_id)
if cp_code is not None:
warnings.warn("""The setting \"cp_code\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""cp_code is deprecated: The setting \"cp_code\" has been deprecated.""")
if cp_code is not None:
pulumi.set(__self__, "cp_code", cp_code)
if group is not None:
warnings.warn("""The setting \"group\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""group is deprecated: The setting \"group\" has been deprecated.""")
if group is not None:
pulumi.set(__self__, "group", group)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if hostnames is not None:
pulumi.set(__self__, "hostnames", hostnames)
if is_secure is not None:
warnings.warn("""The setting \"is_secure\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""is_secure is deprecated: The setting \"is_secure\" has been deprecated.""")
if is_secure is not None:
pulumi.set(__self__, "is_secure", is_secure)
if latest_version is not None:
pulumi.set(__self__, "latest_version", latest_version)
if name is not None:
pulumi.set(__self__, "name", name)
if origins is not None:
warnings.warn("""The setting \"origin\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""origins is deprecated: The setting \"origin\" has been deprecated.""")
if origins is not None:
pulumi.set(__self__, "origins", origins)
if product is not None:
warnings.warn("""The setting \"product\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""product is deprecated: The setting \"product\" has been deprecated.""")
if product is not None:
pulumi.set(__self__, "product", product)
if product_id is not None:
pulumi.set(__self__, "product_id", product_id)
if production_version is not None:
pulumi.set(__self__, "production_version", production_version)
if read_version is not None:
pulumi.set(__self__, "read_version", read_version)
if rule_errors is not None:
pulumi.set(__self__, "rule_errors", rule_errors)
if rule_format is not None:
pulumi.set(__self__, "rule_format", rule_format)
if rule_warnings is not None:
warnings.warn("""Rule warnings will not be set in state anymore""", DeprecationWarning)
pulumi.log.warn("""rule_warnings is deprecated: Rule warnings will not be set in state anymore""")
if rule_warnings is not None:
pulumi.set(__self__, "rule_warnings", rule_warnings)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if staging_version is not None:
pulumi.set(__self__, "staging_version", staging_version)
if variables is not None:
warnings.warn("""The setting \"variables\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""variables is deprecated: The setting \"variables\" has been deprecated.""")
if variables is not None:
pulumi.set(__self__, "variables", variables)
@property
@pulumi.getter
def contacts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "contacts")
@contacts.setter
def contacts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "contacts", value)
@property
@pulumi.getter
def contract(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "contract")
@contract.setter
def contract(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contract", value)
@property
@pulumi.getter(name="contractId")
def contract_id(self) -> Optional[pulumi.Input[str]]:
"""
Contract ID to be assigned to the Property
"""
return pulumi.get(self, "contract_id")
@contract_id.setter
def contract_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contract_id", value)
@property
@pulumi.getter(name="cpCode")
def cp_code(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cp_code")
@cp_code.setter
def cp_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cp_code", value)
@property
@pulumi.getter
def group(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group")
@group.setter
def group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Group ID to be assigned to the Property
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def hostnames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PropertyHostnameArgs']]]]:
return pulumi.get(self, "hostnames")
@hostnames.setter
def hostnames(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyHostnameArgs']]]]):
pulumi.set(self, "hostnames", value)
@property
@pulumi.getter(name="isSecure")
def is_secure(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_secure")
@is_secure.setter
def is_secure(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_secure", value)
@property
@pulumi.getter(name="latestVersion")
def latest_version(self) -> Optional[pulumi.Input[int]]:
"""
Property's current latest version number
"""
return pulumi.get(self, "latest_version")
@latest_version.setter
def latest_version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "latest_version", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name to give to the Property (must be unique)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PropertyOriginArgs']]]]:
return pulumi.get(self, "origins")
@origins.setter
def origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyOriginArgs']]]]):
pulumi.set(self, "origins", value)
@property
@pulumi.getter
def product(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "product")
@product.setter
def product(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product", value)
@property
@pulumi.getter(name="productId")
def product_id(self) -> Optional[pulumi.Input[str]]:
"""
Product ID to be assigned to the Property
"""
return pulumi.get(self, "product_id")
@product_id.setter
def product_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product_id", value)
@property
@pulumi.getter(name="productionVersion")
def production_version(self) -> Optional[pulumi.Input[int]]:
"""
Property's version currently activated in production (zero when not active in production)
"""
return pulumi.get(self, "production_version")
@production_version.setter
def production_version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "production_version", value)
@property
@pulumi.getter(name="readVersion")
def read_version(self) -> Optional[pulumi.Input[int]]:
"""
Required property's version to be read
"""
return pulumi.get(self, "read_version")
@read_version.setter
def read_version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "read_version", value)
@property
@pulumi.getter(name="ruleErrors")
def rule_errors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleErrorArgs']]]]:
return pulumi.get(self, "rule_errors")
@rule_errors.setter
def rule_errors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleErrorArgs']]]]):
pulumi.set(self, "rule_errors", value)
@property
@pulumi.getter(name="ruleFormat")
def rule_format(self) -> Optional[pulumi.Input[str]]:
"""
Specify the rule format version (defaults to latest version available when created)
"""
return pulumi.get(self, "rule_format")
@rule_format.setter
def rule_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_format", value)
@property
@pulumi.getter(name="ruleWarnings")
def rule_warnings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleWarningArgs']]]]:
return pulumi.get(self, "rule_warnings")
@rule_warnings.setter
def rule_warnings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PropertyRuleWarningArgs']]]]):
pulumi.set(self, "rule_warnings", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[str]]:
"""
Property Rules as JSON
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter(name="stagingVersion")
def staging_version(self) -> Optional[pulumi.Input[int]]:
"""
Property's version currently activated in staging (zero when not active in staging)
"""
return pulumi.get(self, "staging_version")
@staging_version.setter
def staging_version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "staging_version", value)
@property
@pulumi.getter
def variables(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "variables")
@variables.setter
def variables(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "variables", value)
warnings.warn("""akamai.properties.Property has been deprecated in favor of akamai.Property""", DeprecationWarning)
class Property(pulumi.CustomResource):
warnings.warn("""akamai.properties.Property has been deprecated in favor of akamai.Property""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
contacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
cp_code: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
hostnames: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyHostnameArgs']]]]] = None,
is_secure: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyOriginArgs']]]]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
rule_format: Optional[pulumi.Input[str]] = None,
rule_warnings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyRuleWarningArgs']]]]] = None,
rules: Optional[pulumi.Input[str]] = None,
variables: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a Property resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] contract_id: Contract ID to be assigned to the Property
:param pulumi.Input[str] group_id: Group ID to be assigned to the Property
:param pulumi.Input[str] name: Name to give to the Property (must be unique)
:param pulumi.Input[str] product_id: Product ID to be assigned to the Property
:param pulumi.Input[str] rule_format: Specify the rule format version (defaults to latest version available when created)
:param pulumi.Input[str] rules: Property Rules as JSON
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[PropertyArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Property resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param PropertyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PropertyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
contacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
cp_code: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
hostnames: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyHostnameArgs']]]]] = None,
is_secure: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyOriginArgs']]]]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
rule_format: Optional[pulumi.Input[str]] = None,
rule_warnings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyRuleWarningArgs']]]]] = None,
rules: Optional[pulumi.Input[str]] = None,
variables: Optional[pulumi.Input[str]] = None,
__props__=None):
pulumi.log.warn("""Property is deprecated: akamai.properties.Property has been deprecated in favor of akamai.Property""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PropertyArgs.__new__(PropertyArgs)
if contacts is not None and not opts.urn:
warnings.warn("""The setting \"contact\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contacts is deprecated: The setting \"contact\" has been deprecated.""")
__props__.__dict__["contacts"] = contacts
if contract is not None and not opts.urn:
warnings.warn("""The setting \"contract\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contract is deprecated: The setting \"contract\" has been deprecated.""")
__props__.__dict__["contract"] = contract
__props__.__dict__["contract_id"] = contract_id
if cp_code is not None and not opts.urn:
warnings.warn("""The setting \"cp_code\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""cp_code is deprecated: The setting \"cp_code\" has been deprecated.""")
__props__.__dict__["cp_code"] = cp_code
if group is not None and not opts.urn:
warnings.warn("""The setting \"group\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""group is deprecated: The setting \"group\" has been deprecated.""")
__props__.__dict__["group"] = group
__props__.__dict__["group_id"] = group_id
__props__.__dict__["hostnames"] = hostnames
if is_secure is not None and not opts.urn:
warnings.warn("""The setting \"is_secure\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""is_secure is deprecated: The setting \"is_secure\" has been deprecated.""")
__props__.__dict__["is_secure"] = is_secure
__props__.__dict__["name"] = name
if origins is not None and not opts.urn:
warnings.warn("""The setting \"origin\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""origins is deprecated: The setting \"origin\" has been deprecated.""")
__props__.__dict__["origins"] = origins
if product is not None and not opts.urn:
warnings.warn("""The setting \"product\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""product is deprecated: The setting \"product\" has been deprecated.""")
__props__.__dict__["product"] = product
__props__.__dict__["product_id"] = product_id
__props__.__dict__["rule_format"] = rule_format
if rule_warnings is not None and not opts.urn:
warnings.warn("""Rule warnings will not be set in state anymore""", DeprecationWarning)
pulumi.log.warn("""rule_warnings is deprecated: Rule warnings will not be set in state anymore""")
__props__.__dict__["rule_warnings"] = rule_warnings
__props__.__dict__["rules"] = rules
if variables is not None and not opts.urn:
warnings.warn("""The setting \"variables\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""variables is deprecated: The setting \"variables\" has been deprecated.""")
__props__.__dict__["variables"] = variables
__props__.__dict__["latest_version"] = None
__props__.__dict__["production_version"] = None
__props__.__dict__["read_version"] = None
__props__.__dict__["rule_errors"] = None
__props__.__dict__["staging_version"] = None
super(Property, __self__).__init__(
'akamai:properties/property:Property',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
contacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
cp_code: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
hostnames: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyHostnameArgs']]]]] = None,
is_secure: Optional[pulumi.Input[bool]] = None,
latest_version: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyOriginArgs']]]]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
production_version: Optional[pulumi.Input[int]] = None,
read_version: Optional[pulumi.Input[int]] = None,
rule_errors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyRuleErrorArgs']]]]] = None,
rule_format: Optional[pulumi.Input[str]] = None,
rule_warnings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PropertyRuleWarningArgs']]]]] = None,
rules: Optional[pulumi.Input[str]] = None,
staging_version: Optional[pulumi.Input[int]] = None,
variables: Optional[pulumi.Input[str]] = None) -> 'Property':
"""
Get an existing Property resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] contract_id: Contract ID to be assigned to the Property
:param pulumi.Input[str] group_id: Group ID to be assigned to the Property
:param pulumi.Input[int] latest_version: Property's current latest version number
:param pulumi.Input[str] name: Name to give to the Property (must be unique)
:param pulumi.Input[str] product_id: Product ID to be assigned to the Property
:param pulumi.Input[int] production_version: Property's version currently activated in production (zero when not active in production)
:param pulumi.Input[int] read_version: Required property's version to be read
:param pulumi.Input[str] rule_format: Specify the rule format version (defaults to latest version available when created)
:param pulumi.Input[str] rules: Property Rules as JSON
:param pulumi.Input[int] staging_version: Property's version currently activated in staging (zero when not active in staging)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PropertyState.__new__(_PropertyState)
__props__.__dict__["contacts"] = contacts
__props__.__dict__["contract"] = contract
__props__.__dict__["contract_id"] = contract_id
__props__.__dict__["cp_code"] = cp_code
__props__.__dict__["group"] = group
__props__.__dict__["group_id"] = group_id
__props__.__dict__["hostnames"] = hostnames
__props__.__dict__["is_secure"] = is_secure
__props__.__dict__["latest_version"] = latest_version
__props__.__dict__["name"] = name
__props__.__dict__["origins"] = origins
__props__.__dict__["product"] = product
__props__.__dict__["product_id"] = product_id
__props__.__dict__["production_version"] = production_version
__props__.__dict__["read_version"] = read_version
__props__.__dict__["rule_errors"] = rule_errors
__props__.__dict__["rule_format"] = rule_format
__props__.__dict__["rule_warnings"] = rule_warnings
__props__.__dict__["rules"] = rules
__props__.__dict__["staging_version"] = staging_version
__props__.__dict__["variables"] = variables
return Property(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def contacts(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "contacts")
@property
@pulumi.getter
def contract(self) -> pulumi.Output[str]:
return pulumi.get(self, "contract")
@property
@pulumi.getter(name="contractId")
def contract_id(self) -> pulumi.Output[str]:
"""
Contract ID to be assigned to the Property
"""
return pulumi.get(self, "contract_id")
@property
@pulumi.getter(name="cpCode")
def cp_code(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "cp_code")
@property
@pulumi.getter
def group(self) -> pulumi.Output[str]:
return pulumi.get(self, "group")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[str]:
"""
Group ID to be assigned to the Property
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def hostnames(self) -> pulumi.Output[Optional[Sequence['outputs.PropertyHostname']]]:
return pulumi.get(self, "hostnames")
@property
@pulumi.getter(name="isSecure")
def is_secure(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "is_secure")
@property
@pulumi.getter(name="latestVersion")
def latest_version(self) -> pulumi.Output[int]:
"""
Property's current latest version number
"""
return pulumi.get(self, "latest_version")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name to give to the Property (must be unique)
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def origins(self) -> pulumi.Output[Optional[Sequence['outputs.PropertyOrigin']]]:
return pulumi.get(self, "origins")
@property
@pulumi.getter
def product(self) -> pulumi.Output[str]:
return pulumi.get(self, "product")
@property
@pulumi.getter(name="productId")
def product_id(self) -> pulumi.Output[str]:
"""
Product ID to be assigned to the Property
"""
return pulumi.get(self, "product_id")
@property
@pulumi.getter(name="productionVersion")
def production_version(self) -> pulumi.Output[int]:
"""
Property's version currently activated in production (zero when not active in production)
"""
return pulumi.get(self, "production_version")
@property
@pulumi.getter(name="readVersion")
def read_version(self) -> pulumi.Output[int]:
"""
Required property's version to be read
"""
return pulumi.get(self, "read_version")
@property
@pulumi.getter(name="ruleErrors")
def rule_errors(self) -> pulumi.Output[Sequence['outputs.PropertyRuleError']]:
return pulumi.get(self, "rule_errors")
@property
@pulumi.getter(name="ruleFormat")
def rule_format(self) -> pulumi.Output[str]:
"""
Specify the rule format version (defaults to latest version available when created)
"""
return pulumi.get(self, "rule_format")
@property
@pulumi.getter(name="ruleWarnings")
def rule_warnings(self) -> pulumi.Output[Sequence['outputs.PropertyRuleWarning']]:
return pulumi.get(self, "rule_warnings")
@property
@pulumi.getter
def rules(self) -> pulumi.Output[str]:
"""
Property Rules as JSON
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter(name="stagingVersion")
def staging_version(self) -> pulumi.Output[int]:
"""
Property's version currently activated in staging (zero when not active in staging)
"""
return pulumi.get(self, "staging_version")
@property
@pulumi.getter
def variables(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "variables")
| 44.909766 | 142 | 0.638412 |
65533cc1fb2793a15015e807e0136e6700c51977 | 13,765 | py | Python | auto_editor/formats/premiere.py | jjandnn/auto-editor | 15359899eb5d7eff3a18ff82a545964e16ab5473 | ["Unlicense"] | null | null | null | auto_editor/formats/premiere.py | jjandnn/auto-editor | 15359899eb5d7eff3a18ff82a545964e16ab5473 | ["Unlicense"] | null | null | null | auto_editor/formats/premiere.py | jjandnn/auto-editor | 15359899eb5d7eff3a18ff82a545964e16ab5473 | ["Unlicense"] | null | null | null |
import os.path
from os.path import abspath
from platform import system
from shutil import move
from urllib.parse import quote
from auto_editor.timeline import Timeline
from .utils import indent, safe_mkdir
"""
Premiere Pro uses the Final Cut Pro 7 XML Interchange Format
See docs here:
https://developer.apple.com/library/archive/documentation/AppleApplications/Reference/FinalCutPro_XML/Elements/Elements.html
Also, Premiere itself will happily output subtly incorrect XML files that don't
come back the way they started.
"""
PIXEL_ASPECT_RATIO = "square"
COLORDEPTH = "24"
ANAMORPHIC = "FALSE"
DEPTH = "16"
def fix_url(path: str) -> str:
if system() == "Windows":
return "file://localhost/" + quote(abspath(path)).replace("%5C", "/")
return f"file://localhost{abspath(path)}"
def speedup(speed: float) -> str:
return indent(
6,
"<filter>",
"\t<effect>",
"\t\t<name>Time Remap</name>",
"\t\t<effectid>timeremap</effectid>",
"\t\t<effectcategory>motion</effectcategory>",
"\t\t<effecttype>motion</effecttype>",
"\t\t<mediatype>video</mediatype>",
'\t\t<parameter authoringApp="PremierePro">',
"\t\t\t<parameterid>variablespeed</parameterid>",
"\t\t\t<name>variablespeed</name>",
"\t\t\t<valuemin>0</valuemin>",
"\t\t\t<valuemax>1</valuemax>",
"\t\t\t<value>0</value>",
"\t\t</parameter>",
'\t\t<parameter authoringApp="PremierePro">',
"\t\t\t<parameterid>speed</parameterid>",
"\t\t\t<name>speed</name>",
"\t\t\t<valuemin>-100000</valuemin>",
"\t\t\t<valuemax>100000</valuemax>",
f"\t\t\t<value>{speed}</value>",
"\t\t</parameter>",
'\t\t<parameter authoringApp="PremierePro">',
"\t\t\t<parameterid>reverse</parameterid>",
"\t\t\t<name>reverse</name>",
"\t\t\t<value>FALSE</value>",
"\t\t</parameter>",
'\t\t<parameter authoringApp="PremierePro">',
"\t\t\t<parameterid>frameblending</parameterid>",
"\t\t\t<name>frameblending</name>",
"\t\t\t<value>FALSE</value>",
"\t\t</parameter>",
"\t</effect>",
"</filter>",
)
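# Note (added for clarity, not in the original file): the speed argument is a
# percentage, which is why the call sites below pass clip[2] * 100 -- e.g.
# speedup(200.0) emits a Time Remap filter whose <value> is 200.0, i.e. 2x playback.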
def premiere_xml(
temp: str,
output: str,
timeline: Timeline,
) -> None:
inp = timeline.inp
chunks = timeline.chunks
if chunks is None:
raise ValueError("Timeline too complex")
fps = timeline.fps
samplerate = timeline.samplerate
audio_file = len(inp.videos) == 0 and len(inp.audios) == 1
# This is not at all how timebase works in actual media but that's how it works here.
timebase = int(fps)
if fps == 23.98 or fps == 23.97602397 or fps == 23.976:
timebase = 24
ntsc = "TRUE"
elif fps == 29.97 or fps == 29.97002997:
timebase = 30
ntsc = "TRUE"
elif fps == 59.94 or fps == 59.94005994:
timebase = 60
ntsc = "TRUE"
else:
ntsc = "FALSE"
duration = chunks[-1][1]
clips = []
for chunk in chunks:
if chunk[2] != 99999:
clips.append(chunk)
pathurls = [fix_url(inp.path)]
tracks = len(inp.audios)
if tracks > 1:
name_without_extension = inp.basename[: inp.basename.rfind(".")]
fold = safe_mkdir(os.path.join(inp.dirname, f"{name_without_extension}_tracks"))
for i in range(1, tracks):
newtrack = os.path.join(fold, f"{i}.wav")
move(os.path.join(temp, f"0-{i}.wav"), newtrack)
pathurls.append(fix_url(newtrack))
width, height = timeline.res
group_name = f"Auto-Editor {'Audio' if audio_file else 'Video'} Group"
with open(output, "w", encoding="utf-8") as outfile:
outfile.write('<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
outfile.write('<xmeml version="4">\n')
outfile.write("\t<sequence>\n")
outfile.write(f"\t\t<name>{group_name}</name>\n")
outfile.write(f"\t\t<duration>{duration}</duration>\n")
outfile.write("\t\t<rate>\n")
outfile.write(f"\t\t\t<timebase>{timebase}</timebase>\n")
outfile.write(f"\t\t\t<ntsc>{ntsc}</ntsc>\n")
outfile.write("\t\t</rate>\n")
outfile.write("\t\t<media>\n")
outfile.write(
indent(
3,
"<video>",
"\t<format>",
"\t\t<samplecharacteristics>",
)
)
if len(inp.videos) > 0:
outfile.write(
indent(
3,
"\t\t\t<rate>",
f"\t\t\t\t<timebase>{timebase}</timebase>",
f"\t\t\t\t<ntsc>{ntsc}</ntsc>",
"\t\t\t</rate>",
)
)
outfile.write(
indent(
3,
f"\t\t\t<width>{width}</width>",
f"\t\t\t<height>{height}</height>",
f"\t\t\t<pixelaspectratio>{PIXEL_ASPECT_RATIO}</pixelaspectratio>",
)
)
if len(inp.videos) > 0:
outfile.write(
indent(
3,
"\t\t\t<fielddominance>none</fielddominance>",
f"\t\t\t<colordepth>{COLORDEPTH}</colordepth>",
)
)
outfile.write(
indent(
3,
"\t\t</samplecharacteristics>",
"\t</format>",
"</video>" if len(inp.videos) == 0 else "\t<track>",
)
)
if len(inp.videos) > 0:
# Handle video clips
total = 0.0
for j, clip in enumerate(clips):
clip_duration = (clip[1] - clip[0] + 1) / clip[2]
_start = int(total)
_end = int(total) + int(clip_duration)
_in = int(clip[0] / clip[2])
_out = int(clip[1] / clip[2])
total += clip_duration
outfile.write(
indent(
5,
f'<clipitem id="clipitem-{j+1}">',
"\t<masterclipid>masterclip-2</masterclipid>",
f"\t<name>{inp.basename}</name>",
f"\t<start>{_start}</start>",
f"\t<end>{_end}</end>",
f"\t<in>{_in}</in>",
f"\t<out>{_out}</out>",
)
)
if j == 0:
outfile.write(
indent(
6,
'<file id="file-1">',
f"\t<name>{inp.basename}</name>",
f"\t<pathurl>{pathurls[0]}</pathurl>",
"\t<rate>",
f"\t\t<timebase>{timebase}</timebase>",
f"\t\t<ntsc>{ntsc}</ntsc>",
"\t</rate>",
f"\t<duration>{duration}</duration>",
"\t<media>",
"\t\t<video>",
"\t\t\t<samplecharacteristics>",
"\t\t\t\t<rate>",
f"\t\t\t\t\t<timebase>{timebase}</timebase>",
f"\t\t\t\t\t<ntsc>{ntsc}</ntsc>",
"\t\t\t\t</rate>",
f"\t\t\t\t<width>{width}</width>",
f"\t\t\t\t<height>{height}</height>",
f"\t\t\t\t<anamorphic>{ANAMORPHIC}</anamorphic>",
f"\t\t\t\t<pixelaspectratio>{PIXEL_ASPECT_RATIO}</pixelaspectratio>",
"\t\t\t\t<fielddominance>none</fielddominance>",
"\t\t\t</samplecharacteristics>",
"\t\t</video>",
"\t\t<audio>",
"\t\t\t<samplecharacteristics>",
f"\t\t\t\t<depth>{DEPTH}</depth>",
f"\t\t\t\t<samplerate>{samplerate}</samplerate>",
"\t\t\t</samplecharacteristics>",
"\t\t\t<channelcount>2</channelcount>",
"\t\t</audio>",
"\t</media>",
"</file>",
)
)
else:
outfile.write('\t\t\t\t\t\t<file id="file-1"/>\n')
if clip[2] != 1:
outfile.write(speedup(clip[2] * 100))
# Linking for video blocks
for i in range(max(3, tracks + 1)):
outfile.write("\t\t\t\t\t\t<link>\n")
outfile.write(
f"\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)))+j+1}</linkclipref>\n"
)
if i == 0:
outfile.write("\t\t\t\t\t\t\t<mediatype>video</mediatype>\n")
else:
outfile.write("\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n")
if i == 2:
outfile.write("\t\t\t\t\t\t\t<trackindex>2</trackindex>\n")
else:
outfile.write("\t\t\t\t\t\t\t<trackindex>1</trackindex>\n")
outfile.write(f"\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n")
if i > 0:
outfile.write("\t\t\t\t\t\t\t<groupindex>1</groupindex>\n")
outfile.write("\t\t\t\t\t\t</link>\n")
outfile.write("\t\t\t\t\t</clipitem>\n")
outfile.write(indent(3, "\t</track>", "</video>"))
# Audio Clips
outfile.write(
indent(
3,
"<audio>",
"\t<numOutputChannels>2</numOutputChannels>",
"\t<format>",
"\t\t<samplecharacteristics>",
f"\t\t\t<depth>{DEPTH}</depth>",
f"\t\t\t<samplerate>{samplerate}</samplerate>",
"\t\t</samplecharacteristics>",
"\t</format>",
)
)
for t in range(tracks):
outfile.write(
'\t\t\t\t<track currentExplodedTrackIndex="0" premiereTrackType="Stereo">\n'
)
total = 0
for j, clip in enumerate(clips):
clip_duration = (clip[1] - clip[0] + 1) / clip[2]
_start = int(total)
_end = int(total) + int(clip_duration)
_in = int(clip[0] / clip[2])
_out = int(clip[1] / clip[2])
total += clip_duration
if audio_file:
clip_item_num = j + 1
master_id = "1"
else:
clip_item_num = len(clips) + 1 + j + (t * len(clips))
master_id = "2"
outfile.write(
indent(
5,
f'<clipitem id="clipitem-{clip_item_num}" premiereChannelType="stereo">',
f"\t<masterclipid>masterclip-{master_id}</masterclipid>",
f"\t<name>{inp.basename}</name>",
f"\t<start>{_start}</start>",
f"\t<end>{_end}</end>",
f"\t<in>{_in}</in>",
f"\t<out>{_out}</out>",
)
)
if j == 0 and (audio_file or t > 0):
outfile.write(
indent(
6,
f'<file id="file-{t+1}">',
f"\t<name>{inp.basename}</name>",
f"\t<pathurl>{pathurls[t]}</pathurl>",
"\t<rate>",
f"\t\t<timebase>{timebase}</timebase>",
f"\t\t<ntsc>{ntsc}</ntsc>",
"\t</rate>",
"\t<media>",
"\t\t<audio>",
"\t\t\t<samplecharacteristics>",
f"\t\t\t\t<depth>{DEPTH}</depth>",
f"\t\t\t\t<samplerate>{samplerate}</samplerate>",
"\t\t\t</samplecharacteristics>",
"\t\t\t<channelcount>2</channelcount>",
"\t\t</audio>",
"\t</media>",
"</file>",
)
)
else:
outfile.write(f'\t\t\t\t\t\t<file id="file-{t+1}"/>\n')
outfile.write(
indent(
6,
"<sourcetrack>",
"\t<mediatype>audio</mediatype>",
"\t<trackindex>1</trackindex>",
"</sourcetrack>",
"<labels>",
"\t<label2>Iris</label2>",
"</labels>",
)
)
if clip[2] != 1:
outfile.write(speedup(clip[2] * 100))
outfile.write("\t\t\t\t\t</clipitem>\n")
if not audio_file:
outfile.write("\t\t\t\t\t<outputchannelindex>1</outputchannelindex>\n")
outfile.write("\t\t\t\t</track>\n")
outfile.write("\t\t\t</audio>\n")
outfile.write("\t\t</media>\n")
outfile.write("\t</sequence>\n")
outfile.write("</xmeml>\n")
| 36.034031 | 124 | 0.427388 |
8f222c6572a78b6e4841c768d209136f6f2087fe | 10,832 | py | Python | tests/test_postgres_storage.py | kapilratnani/pgfire | 8f225f55d8e28f4b12cef33d05534b28f68a9081 | ["MIT"] | 2 | 2020-06-14T06:19:57.000Z | 2020-08-17T12:46:21.000Z | tests/test_postgres_storage.py | kapilratnani/pgfire | 8f225f55d8e28f4b12cef33d05534b28f68a9081 | ["MIT"] | null | null | null | tests/test_postgres_storage.py | kapilratnani/pgfire | 8f225f55d8e28f4b12cef33d05534b28f68a9081 | ["MIT"] | null | null | null |
import threading
from contextlib import contextmanager
import sqlalchemy as sa
from sqlalchemy import exc
from pgfire.engine.storage.postgres import PostgresJsonStorage, BaseJsonDb
TEST_DB_NAME = 'test_pgfire'
def get_test_db_settings():
return {
"db": TEST_DB_NAME,
"username": "postgres",
"port": 5432,
"password": "123456",
"host": "localhost"
}
@contextmanager
def db_connection(db_name=''):
# init module variables
db_props = get_test_db_settings()
db_host = db_props.get("host")
db_port = db_props.get("port")
db_user = db_props.get("username")
db_password = db_props.get("password")
connection_string = 'postgresql+psycopg2://{}:{}@{}:{}/{}'.format(db_user,
db_password,
db_host,
db_port,
db_name)
engine = sa.create_engine(connection_string)
conn = engine.connect()
yield conn
conn.close()
engine.dispose()
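# Usage sketch (illustrative only): the context manager yields a raw SQLAlchemy
# connection and cleans up the engine on exit, e.g.
#     with db_connection(TEST_DB_NAME) as conn:
#         conn.execute("SELECT 1")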
def setup_module(module):
with db_connection() as conn:
conn = conn.execution_options(autocommit=False)
conn.execute("ROLLBACK")
try:
conn.execute("DROP DATABASE %s" % TEST_DB_NAME)
except sa.exc.ProgrammingError as e:
# Could not drop the database, probably does not exist
conn.execute("ROLLBACK")
except sa.exc.OperationalError as e:
# Could not drop database because it's being accessed by other users (psql prompt open?)
conn.execute("ROLLBACK")
conn.execute("CREATE DATABASE %s" % TEST_DB_NAME)
def teardown_module(module):
pass
def test_pgstorage_init():
"""
After storage init, the db should have a table named storage_meta
:return:
"""
meta_table_name = "storage_meta"
db_settings = get_test_db_settings()
pgstorage = PostgresJsonStorage(db_settings)
data = None
with db_connection(TEST_DB_NAME) as con:
result = con.execute("select * from information_schema.tables where table_name='%s'"
% meta_table_name)
data = result.fetchone()
result.close()
pgstorage.close()
assert data
def test_create_db():
"""
storage will create a table for every Json db
:return:
"""
test_table_name = "test_db1"
db_settings = get_test_db_settings()
pgstorage = PostgresJsonStorage(db_settings)
json_db = pgstorage.create_db(test_table_name)
assert json_db
assert isinstance(json_db, BaseJsonDb)
data = None
with db_connection(TEST_DB_NAME) as con:
# TODO also check for functions patch_json_data_notify and update_json_data_notify
result = con.execute("select * from information_schema.tables where table_name='%s'"
% test_table_name)
data = result.fetchone()
result.close()
pgstorage.close()
assert data
def test_delete_db():
"""
delete the db with name
:return:
"""
test_table_name = "test_db2"
db_settings = get_test_db_settings()
pgstorage = PostgresJsonStorage(db_settings)
json_db = pgstorage.create_db(test_table_name)
assert json_db and isinstance(json_db, BaseJsonDb)
with db_connection(TEST_DB_NAME) as con:
result = con.execute("select * from information_schema.tables where table_name='%s'"
% test_table_name)
db_existed = True if result.fetchone() else False
result.close()
delete_return = pgstorage.delete_db(test_table_name)
result = con.execute("select * from information_schema.tables where table_name='%s'"
% test_table_name)
db_deleted = True if result.fetchone() is None else False
result.close()
pgstorage.close()
assert delete_return
assert db_existed
assert db_deleted
def test_get_all_dbs():
"""
return all created dbs
:return:
"""
test_table_name1 = "test_db3"
test_table_name2 = "test_db4"
db_settings = get_test_db_settings()
pgstorage = PostgresJsonStorage(db_settings)
with pgstorage:
json_db1 = pgstorage.create_db(test_table_name1)
assert json_db1 and isinstance(json_db1, BaseJsonDb)
json_db2 = pgstorage.create_db(test_table_name2)
assert json_db2 and isinstance(json_db2, BaseJsonDb)
dbs = pgstorage.get_all_dbs()
assert json_db1.db_name in dbs
assert json_db2.db_name in dbs
def test_simple_get_put_data_at_path():
"""
basic data manipulation
:return:
"""
test_table_name1 = "test_db5"
db_settings = get_test_db_settings()
pgstorage = PostgresJsonStorage(db_settings)
with pgstorage:
json_db1 = pgstorage.create_db(test_table_name1)
# {"a":{"b":{"c":{"d":1}}}}
json_db1.put('a/b/c', {'d': 1})
assert json_db1.get('a/b/c') == {'d': 1}
assert json_db1.get('a/b') == {'c': {'d': 1}}
# {"d": 1}
json_db1.put("d", 1)
assert json_db1.get("d") == 1
# {"e": True}
json_db1.put("e", True)
assert json_db1.get("e")
# {"f":0.01}
json_db1.put("f", 0.01)
assert json_db1.get("f") == 0.01
# {"f":{"b":{"c":1.05}}}
json_db1.put("f/b/c", 1.05)
assert json_db1.get("f/b") == {"c": 1.05}
# {"f":{"b":{"c":1.05}, "d":1.05}}
json_db1.put("f/d", 1.05)
assert json_db1.get("f/d") == 1.05
# {"f":{"b":1.05, "d":1.05}}
json_db1.put("f/b", 1.05)
assert json_db1.get("f/b") == 1.05
assert json_db1.get("f/d") == 1.05
# {"a":{"b":{"c":{"d":1}}}, "d":1, "e":True, "f":{"d":1.05,"b":1.05}}
assert json_db1.get(None) == {"a": {"b": {"c": {"d": 1}}}, "d": 1, "e": True, "f": {"d": 1.05, "b": 1.05}}
def test_get_put_post_patch_delete():
test_db_name = "test_db_fb"
db_settings = get_test_db_settings()
with PostgresJsonStorage(db_settings) as pg_storage:
json_db = pg_storage.create_db(test_db_name)
json_db.put("rest/saving-data/fireblog/users", {
"alanisawesome": {
"name": "Alan Turing",
"birthday": "June 23, 1912"
}
})
assert json_db.get("rest/saving-data/fireblog/users/alanisawesome") == {
"name": "Alan Turing", "birthday": "June 23, 1912"}
json_db.patch("rest/saving-data/fireblog/users/alanisawesome", {"nickname": "Alan The Machine"})
assert json_db.get("rest/saving-data/fireblog/users/alanisawesome") == {
"name": "Alan Turing", "birthday": "June 23, 1912", "nickname": "Alan The Machine"}
posted_data = json_db.post(
"rest/saving-data/fireblog/posts",
{"author": "alanisawesome", "title": "The Turing Machine"}
)
assert json_db.get("rest/saving-data/fireblog/posts/%s" % list(posted_data.keys())[0]) == {
"author": "alanisawesome",
"title": "The Turing Machine"
}
posted_data = json_db.post(
"rest/saving-data/fireblog/posts",
{"author": "gracehopper", "title": "The nano-stick"}
)
assert json_db.get("rest/saving-data/fireblog/posts/%s" % list(posted_data.keys())[0]) == {
"author": "gracehopper",
"title": "The nano-stick"
}
assert json_db.delete("rest/saving-data/fireblog/users/alanisawesome")
assert json_db.get("rest/saving-data/fireblog/users/alanisawesome") is None
def test_get_db():
test_db_name = "test_db_fb"
db_settings = get_test_db_settings()
with PostgresJsonStorage(db_settings) as pg_storage:
# db exists
assert pg_storage.get_db(test_db_name)
# db does not exists
assert pg_storage.get_db("doesnot_exists") is None
data_received_count1 = 0
data_received_count2 = 0
def test_change_notification():
"""
set a change notifier at a path and expect a notification on change
:return:
"""
test_db_name = "test_db_fb"
db_settings = get_test_db_settings()
with PostgresJsonStorage(db_settings) as pg_storage:
notifier = pg_storage.get_notifier(test_db_name, 'rest/saving-data/fireblog1/posts')
message_stream = notifier.listen()
post_data1 = {"t": 1}
post_data2 = {"t": 2}
def message_listener():
global data_received_count1
for data in message_stream:
if data is None:
continue
data_received_count1 += 1
assert data['event'] == 'put'
assert data['path'].startswith("rest/saving-data/fireblog1/posts")
assert data['data'] == post_data1 or data['data'] == post_data2
thr = threading.Thread(target=message_listener)
thr.setDaemon(True)
thr.start()
json_db = pg_storage.get_db(test_db_name)
import time
json_db.post('rest/saving-data/fireblog1/posts', post_data1)
json_db.post('rest/saving-data/fireblog1/posts', post_data2)
time.sleep(1)
notifier.cleanup()
assert data_received_count1 == 2
def test_change_notification2():
"""
set a change notifier at a path and expect a notification on change
:return:
"""
test_db_name = "test_db_fb"
db_settings = get_test_db_settings()
with PostgresJsonStorage(db_settings) as pg_storage:
notifier = pg_storage.get_notifier(test_db_name, 'rest/saving-data/fireblog2/posts')
message_stream = notifier.listen()
post_data1 = {"t": 1}
post_data2 = {"t": 2}
def message_listener():
global data_received_count2
for data in message_stream:
if data is None:
continue
data_received_count2 += 1
assert data['event'] == 'put'
assert data['path'].startswith("rest/saving-data/fireblog2/posts")
assert data['data'] == post_data1
thr = threading.Thread(target=message_listener)
thr.setDaemon(True)
thr.start()
import time
json_db = pg_storage.get_db(test_db_name)
json_db.post('rest/saving-data/fireblog2/posts', post_data1)
json_db.post('rest/saving-data/fireblog2/messages', post_data2)
time.sleep(2)
notifier.cleanup()
assert data_received_count2 == 1
def test_create_index():
"""
create an index on a path in json document, for faster access on those paths.
:return:
"""
pass
| 31.858824 | 114 | 0.598781 |
8947f9e8c246b5985568ed8f4354acf950e79161 | 12,577 | py | Python | qiskit/aqua/components/oracles/truth_table_oracle.py | Sahar2/qiskit-aqua | a228fbe6b9613cff43e47796a7e4843deba2b051 | ["Apache-2.0"] | null | null | null | qiskit/aqua/components/oracles/truth_table_oracle.py | Sahar2/qiskit-aqua | a228fbe6b9613cff43e47796a7e4843deba2b051 | ["Apache-2.0"] | null | null | null | qiskit/aqua/components/oracles/truth_table_oracle.py | Sahar2/qiskit-aqua | a228fbe6b9613cff43e47796a7e4843deba2b051 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Truth Table-based Quantum Oracle.
"""
import logging
import operator
import math
from functools import reduce
import numpy as np
from dlx import DLX
from sympy import symbols
from sympy.logic.boolalg import Xor, And
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.aqua import AquaError
from qiskit.aqua.circuits import ESOP
from qiskit.aqua.components.oracles import Oracle
from qiskit.aqua.utils.arithmetic import is_power_of_2
from .ast_utils import get_ast
logger = logging.getLogger(__name__)
def get_prime_implicants(ones=None, dcs=None):
"""
Compute all prime implicants for a truth table using the Quine-McCluskey Algorithm
Args:
ones (list of int): The list of integers corresponding to '1' outputs
dcs (list of int): The list of integers corresponding to don't-cares
Return:
list of lists of int, representing all prime implicants
"""
def combine_terms(terms, num1s_dict=None):
if num1s_dict is None:
num1s_dict = {}
for num in terms:
num1s = bin(num).count('1')
if num1s not in num1s_dict:
num1s_dict[num1s] = [num]
else:
num1s_dict[num1s].append(num)
new_implicants = {}
new_num1s_dict = {}
prime_dict = {mt: True for mt in sorted(terms)}
cur_num1s, max_num1s = min(num1s_dict.keys()), max(num1s_dict.keys())
while cur_num1s < max_num1s:
if cur_num1s in num1s_dict and (cur_num1s + 1) in num1s_dict:
for cur_term in sorted(num1s_dict[cur_num1s]):
for next_term in sorted(num1s_dict[cur_num1s + 1]):
if isinstance(cur_term, int):
diff_mask = dc_mask = cur_term ^ next_term
implicant_mask = cur_term & next_term
elif isinstance(cur_term, tuple):
if terms[cur_term][1] == terms[next_term][1]:
diff_mask = terms[cur_term][0] ^ terms[next_term][0]
dc_mask = diff_mask | terms[cur_term][1]
implicant_mask = terms[cur_term][0] & terms[next_term][0]
else:
continue
else:
raise AquaError('Unexpected type: {}.'.format(type(cur_term)))
if bin(diff_mask).count('1') == 1:
prime_dict[cur_term] = False
prime_dict[next_term] = False
if isinstance(cur_term, int):
cur_implicant = (cur_term, next_term)
elif isinstance(cur_term, tuple):
cur_implicant = tuple(sorted((*cur_term, *next_term)))
else:
raise AquaError('Unexpected type: {}.'.format(type(cur_term)))
new_implicants[cur_implicant] = (
implicant_mask,
dc_mask
)
num1s = bin(implicant_mask).count('1')
if num1s not in new_num1s_dict:
new_num1s_dict[num1s] = [cur_implicant]
else:
if cur_implicant not in new_num1s_dict[num1s]:
new_num1s_dict[num1s].append(cur_implicant)
cur_num1s += 1
return new_implicants, new_num1s_dict, prime_dict
terms = ones + dcs
cur_num1s_dict = None
prime_implicants = []
while True:
next_implicants, next_num1s_dict, cur_prime_dict = combine_terms(terms, num1s_dict=cur_num1s_dict)
for implicant in cur_prime_dict:
if cur_prime_dict[implicant]:
if isinstance(implicant, int):
if implicant not in dcs:
prime_implicants.append((implicant,))
else:
if not set.issubset(set(implicant), dcs):
prime_implicants.append(implicant)
if next_implicants:
terms = next_implicants
cur_num1s_dict = next_num1s_dict
else:
break
return prime_implicants
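# Worked example (added for illustration; the classic Quine-McCluskey textbook case,
# not taken from this file): for ones=[4, 8, 10, 11, 12, 15] and dcs=[9, 14] one
# would expect the prime implicants (4, 12), (8, 9, 10, 11), (8, 10, 12, 14) and
# (10, 11, 14, 15), each tuple listing the minterms that the implicant covers.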
def get_exact_covers(cols, rows, num_cols=None):
"""
Use Algorithm X to get all solutions to the exact cover problem
https://en.wikipedia.org/wiki/Knuth%27s_Algorithm_X
Args:
cols (list of int): A list of integers representing the columns to be covered
rows (list of list of int): A list of lists of integers representing the rows
num_cols (int): The total number of columns
Returns:
All exact covers
"""
if num_cols is None:
num_cols = max(cols) + 1
ec = DLX([(c, 0 if c in cols else 1) for c in range(num_cols)])
ec.appendRows([[c] for c in cols])
ec.appendRows(rows)
exact_covers = []
for s in ec.solve():
cover = []
for i in s:
cover.append(ec.getRowList(i))
exact_covers.append(cover)
return exact_covers
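# Illustration (hypothetical inputs): get_exact_covers([0, 1, 2, 3], [[0, 1], [2, 3], [1, 2]])
# enumerates every way to cover columns 0-3 exactly once using the supplied rows
# (plus the single-column rows the helper adds); [[0, 1], [2, 3]] is one such cover.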
class TruthTableOracle(Oracle):
CONFIGURATION = {
'name': 'TruthTableOracle',
'description': 'Truth Table Oracle',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'truth_table_oracle_schema',
'type': 'object',
'properties': {
'bitmaps': {
"type": "array",
"default": [],
"items": {
"type": "string"
}
},
"optimization": {
"type": "boolean",
"default": False,
},
'mct_mode': {
'type': 'string',
'default': 'basic',
'enum': [
'basic',
'basic-dirty-ancilla',
'advanced',
'noancilla',
]
},
},
'additionalProperties': False
}
}
def __init__(self, bitmaps, optimization=False, mct_mode='basic'):
"""
Constructor for Truth Table-based Oracle
Args:
bitmaps (str or [str]): A single binary string or a list of binary strings representing the desired
single- and multi-value truth table.
optimization (bool): Boolean flag for attempting circuit optimization.
When set, the Quine-McCluskey algorithm is used to compute the prime implicants of the truth table,
and then its exact cover is computed to try to reduce the circuit.
mct_mode (str): The mode to use when constructing multiple-control Toffoli.
"""
if isinstance(bitmaps, str):
bitmaps = [bitmaps]
self.validate(locals())
super().__init__()
self._mct_mode = mct_mode.strip().lower()
self._optimization = optimization
self._bitmaps = bitmaps
# check that the input bitmaps length is a power of 2
if not is_power_of_2(len(bitmaps[0])):
raise AquaError('Length of any bitmap must be a power of 2.')
for bitmap in bitmaps[1:]:
if not len(bitmap) == len(bitmaps[0]):
raise AquaError('Length of all bitmaps must be the same.')
self._nbits = int(math.log(len(bitmaps[0]), 2))
self._num_outputs = len(bitmaps)
self._lit_to_var = None
self._var_to_lit = None
esop_exprs = []
for bitmap in bitmaps:
esop_expr = self._get_esop_ast(bitmap)
esop_exprs.append(esop_expr)
self._esops = [
ESOP(esop_expr, num_vars=self._nbits) for esop_expr in esop_exprs
] if esop_exprs else None
self.construct_circuit()
def _get_esop_ast(self, bitmap):
v = symbols('v:{}'.format(self._nbits))
if self._lit_to_var is None:
self._lit_to_var = [None] + sorted(v, key=str)
if self._var_to_lit is None:
self._var_to_lit = {v: l for v, l in zip(self._lit_to_var[1:], range(1, self._nbits + 1))}
def binstr_to_vars(binstr):
return [
(~v[x[1] - 1] if x[0] == '0' else v[x[1] - 1])
for x in zip(binstr, reversed(range(1, self._nbits + 1)))
][::-1]
if not self._optimization:
expression = Xor(*[
And(*binstr_to_vars(term)) for term in
[np.binary_repr(idx, self._nbits) for idx, v in enumerate(bitmap) if v == '1']
])
else:
ones = [i for i, v in enumerate(bitmap) if v == '1']
if not ones:
return 'const', 0
dcs = [i for i, v in enumerate(bitmap) if v == '*' or v == '-' or v.lower() == 'x']
pis = get_prime_implicants(ones=ones, dcs=dcs)
cover = get_exact_covers(ones, pis)[-1]
clauses = []
for c in cover:
if len(c) == 1:
term = np.binary_repr(c[0], self._nbits)
clause = And(*[
v for i, v in enumerate(binstr_to_vars(term))
])
elif len(c) > 1:
c_or = reduce(operator.or_, c)
c_and = reduce(operator.and_, c)
_ = np.binary_repr(c_and ^ c_or, self._nbits)[::-1]
clause = And(*[
v for i, v in enumerate(binstr_to_vars(np.binary_repr(c_and, self._nbits))) if _[i] == '0'
])
else:
raise AquaError('Unexpected cover term size {}.'.format(len(c)))
if clause:
clauses.append(clause)
expression = Xor(*clauses)
ast = get_ast(self._var_to_lit, expression)
if ast is not None:
return ast
else:
return 'const', 0
@property
def variable_register(self):
return self._variable_register
@property
def ancillary_register(self):
return self._ancillary_register
@property
def output_register(self):
return self._output_register
def construct_circuit(self):
if self._circuit is not None:
return self._circuit
self._circuit = QuantumCircuit()
self._output_register = QuantumRegister(self._num_outputs, name='o')
if self._esops:
for i, e in enumerate(self._esops):
if e is not None:
ci = e.construct_circuit(
output_register=self._output_register,
output_idx=i,
mct_mode=self._mct_mode
)
self._circuit += ci
self._variable_register = self._ancillary_register = None
for qreg in self._circuit.qregs:
if qreg.name == 'v':
self._variable_register = qreg
elif qreg.name == 'a':
self._ancillary_register = qreg
else:
self._variable_register = QuantumRegister(self._nbits, name='v')
self._ancillary_register = None
self._circuit.add_register(self._variable_register, self._output_register)
return self._circuit
def evaluate_classically(self, measurement):
assignment = [(var + 1) * (int(tf) * 2 - 1) for tf, var in zip(measurement[::-1], range(len(measurement)))]
ret = [bitmap[int(measurement, 2)] == '1' for bitmap in self._bitmaps]
if self._num_outputs == 1:
return ret[0], assignment
else:
return ret, assignment
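# Usage sketch (added, not part of the original file): a bitmap of length 8 such as
# '00000001' describes a 3-variable truth table whose only '1' output is input 7
# (binary 111); TruthTableOracle('00000001') builds the corresponding ESOP-based
# circuit, and evaluate_classically('111') should report True for that assignment.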
| 37.88253 | 115 | 0.534309 |
4c47e64506489f95cae8002e9bddc3b1a09d1ba6 | 676 | py | Python | test-examples/add_surface.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | ["BSD-3-Clause"] | null | null | null | test-examples/add_surface.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | ["BSD-3-Clause"] | null | null | null | test-examples/add_surface.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | ["BSD-3-Clause"] | null | null | null |
"""Test converting an image to a multiscale.
"""
import numpy as np
import napari
with napari.gui_qt():
#viewer = napari.Viewer()
np.random.seed(0)
# vertices = np.random.random((10, 3))
# faces = np.random.randint(10, size=(6, 3))
# values = np.random.random(10)
vertices = np.random.random((10, 3))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
#viewer.add_surface(data)
# vertices = np.array([[0, 0], [0, 20], [10, 0], [10, 10]])
# faces = np.array([[0, 1, 2], [1, 2, 3]])
# values = np.linspace(0, 1, len(vertices))
data = (vertices, faces, values)
viewer = napari.view_surface(data)
| 27.04 | 63 | 0.597633 |
835da965a11111e45d2248a5d82f93d91a8d7828 | 4,331 | py | Python | setup.py | rmill040/mlsopt | 5cd729bf92354c35e0e425f350e7a93f4d2d965a | ["MIT"] | null | null | null | setup.py | rmill040/mlsopt | 5cd729bf92354c35e0e425f350e7a93f4d2d965a | ["MIT"] | null | null | null | setup.py | rmill040/mlsopt | 5cd729bf92354c35e0e425f350e7a93f4d2d965a | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import dirname, join
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
import versioneer
def list_reqs(fname="requirements.txt"):
"""Gather requirements from the requirements.txt file.
"""
return open(fname).read().splitlines()
def read_file(fname="README.md"):
"""Get contents of file from the module's directory.
"""
return open(join(dirname(__file__), fname), encoding='utf-8').read()
class BuildExt(build_ext):
"""build_ext command for use when numpy headers are needed.
SEE tutorial: https://stackoverflow.com/questions/2379898
SEE fix: https://stackoverflow.com/questions/19919905
"""
def finalize_options(self):
build_ext.finalize_options(self)
import numpy
self.include_dirs.append(numpy.get_include())
# Package meta-data
MODULE_NAME = "mlsopt"
AUTHORS = ",".join(["Robert Milletich", "Anthony Asmar"])
AUTHOR_EMAIL = "rmill040@gmail.com"
KEYWORDS = "stochastic optimization machine learning"
SHORT_DESCRIPTION = "Stochastic optimization of machine learning pipelines"
LONG_DESCRIPTION = read_file()
CONTENT_TYPE = "text/markdown"
MODULE_URL = "https://github.com/rmill040/mlsopt"
PLATFORMS = "any"
TEST_SUITE = "pytest"
SETUP_REQS = ["numpy", "cython"]
INSTALL_REQS = list_reqs()
PACKAGES = find_packages(exclude=['tests'])
CMDCLASS = {"build_ext": BuildExt}
MIN_PYTHON_VERSION = ">=3.6.*"
VERSION = versioneer.get_version()
PACKAGE_DATA = {}
# setup(
# package_data = {
# 'my_package': ['*.pxd'],
# 'my_package/sub_package': ['*.pxd'],
# },
# ...
# )
INCLUDE_PACKAGE_DATA = True
EXTRAS_REQUIRE = {}
SCRIPTS = []
LICENSE = "MIT"
ZIP_SAFE = False
CLASSIFIERS = ['Programming Language :: Python :: 3',
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering',
'Topic :: Software Development']
# Define Cython extensions
EXTENSIONS = []
# EXTENSIONS = [Extension('mlsopt.base.optimizers',
# sources=['mlsopt/base/optimizers.pyx']),
# Extension('mlsopt.base.samplers',
# sources=['mlsopt/base/samplers.pyx'])
# ]
# Define Cython compiler directives
COMPILER_DIRECTIVES = {
'boundscheck' : False,
'wraparound' : False,
}
for e in EXTENSIONS:
e.cython_directives = COMPILER_DIRECTIVES
# Run setup
setup(
name = MODULE_NAME,
url = MODULE_URL,
author = AUTHORS,
author_email = AUTHOR_EMAIL,
python_requires = MIN_PYTHON_VERSION,
version = VERSION,
cmdclass = CMDCLASS,
ext_modules = EXTENSIONS,
test_suite = TEST_SUITE,
setup_requires = SETUP_REQS,
keywords = KEYWORDS,
description = SHORT_DESCRIPTION,
long_description = LONG_DESCRIPTION,
long_description_content_type = CONTENT_TYPE,
packages = PACKAGES,
platforms = PLATFORMS,
scripts = SCRIPTS,
package_data = PACKAGE_DATA,
include_package_data = INCLUDE_PACKAGE_DATA,
install_requires = INSTALL_REQS,
extras_require = EXTRAS_REQUIRE,
license = LICENSE,
classifiers = CLASSIFIERS,
zip_safe = ZIP_SAFE
)
| 37.017094 | 85 | 0.553452 |
e5c48c60e077f86a55a73f2efdd1a43ffbc7cf66 | 6,021 | py | Python | api/migrations/0088_auto_20201119_0809.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | ["MIT"] | 11 | 2018-06-11T06:05:12.000Z | 2022-03-25T09:31:44.000Z | api/migrations/0088_auto_20201119_0809.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | ["MIT"] | 498 | 2017-11-07T21:20:13.000Z | 2022-03-31T14:37:18.000Z | api/migrations/0088_auto_20201119_0809.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | ["MIT"] | 6 | 2018-04-11T13:29:50.000Z | 2020-07-16T16:52:11.000Z |
# Generated by Django 2.2.13 on 2020-11-19 08:09
import api.models
from django.db import migrations, models
import django.db.models.deletion
import enumfields.fields
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('api', '0087_auto_20200918_0922'),
]
operations = [
migrations.AddField(
model_name='country',
name='additional_tab_name',
field=models.CharField(blank=True, max_length=100, verbose_name='Label for Extra Tab'),
),
migrations.AddField(
model_name='region',
name='additional_tab_name',
field=models.CharField(blank=True, max_length=100, verbose_name='Label for Additional Tab'),
),
migrations.AlterField(
model_name='event',
name='num_displaced',
field=models.IntegerField(blank=True, null=True, verbose_name='number of displaced'),
),
migrations.CreateModel(
name='RegionProfileSnippet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=255)),
('snippet', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_en', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_es', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_fr', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_ar', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('visibility', enumfields.fields.EnumIntegerField(default=3, enum=api.models.VisibilityChoices, verbose_name='visibility')),
('position', enumfields.fields.EnumIntegerField(default=3, enum=api.models.PositionType, verbose_name='position')),
('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile_snippets', to='api.Region', verbose_name='region')),
],
options={
'verbose_name': 'region profile snippet',
'verbose_name_plural': 'region profile snippets',
'ordering': ('position', 'id'),
},
),
migrations.CreateModel(
name='RegionPreparednessSnippet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=255)),
('snippet', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_en', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_es', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_fr', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_ar', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('visibility', enumfields.fields.EnumIntegerField(default=3, enum=api.models.VisibilityChoices, verbose_name='visibility')),
('position', enumfields.fields.EnumIntegerField(default=3, enum=api.models.PositionType, verbose_name='position')),
('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='preparedness_snippets', to='api.Region', verbose_name='region')),
],
options={
'verbose_name': 'region preparedness snippet',
'verbose_name_plural': 'region preparedness snippets',
'ordering': ('position', 'id'),
},
),
migrations.CreateModel(
name='RegionEmergencySnippet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=255)),
('snippet', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_en', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_es', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_fr', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('snippet_ar', tinymce.models.HTMLField(blank=True, null=True, verbose_name='snippet')),
('visibility', enumfields.fields.EnumIntegerField(default=3, enum=api.models.VisibilityChoices, verbose_name='visibility')),
('position', enumfields.fields.EnumIntegerField(default=3, enum=api.models.PositionType, verbose_name='position')),
('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emergency_snippets', to='api.Region', verbose_name='region')),
],
options={
'verbose_name': 'region emergencies snippet',
'verbose_name_plural': 'region emergencies snippets',
'ordering': ('position', 'id'),
},
),
migrations.CreateModel(
name='RegionAdditionalLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('url', models.URLField()),
('show_in_go', models.BooleanField(default=False, help_text='Show link contents within GO')),
('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='additional_links', to='api.Region')),
],
),
]
| 58.456311 | 169 | 0.621325 |
87436243c7900b91dc76f11e3517d532e66f0fc0 | 2,885 | py | Python | reframe/core/launchers/mpi.py | smoors/reframe | 5163a4a77c8b440b725e18ffbf19f19445d01f9e | ["BSD-3-Clause"] | 1 | 2018-08-02T07:34:10.000Z | 2018-08-02T07:34:10.000Z | reframe/core/launchers/mpi.py | smoors/reframe | 5163a4a77c8b440b725e18ffbf19f19445d01f9e | ["BSD-3-Clause"] | null | null | null | reframe/core/launchers/mpi.py | smoors/reframe | 5163a4a77c8b440b725e18ffbf19f19445d01f9e | ["BSD-3-Clause"] | null | null | null |
from reframe.core.launchers import JobLauncher
from reframe.core.launchers.registry import register_launcher
@register_launcher('srun')
class SrunLauncher(JobLauncher):
def command(self, job):
return ['srun']
@register_launcher('ibrun')
class IbrunLauncher(JobLauncher):
'''TACC's custom parallel job launcher.'''
def command(self, job):
return ['ibrun']
@register_launcher('alps')
class AlpsLauncher(JobLauncher):
def command(self, job):
cmd = ['aprun', '-n', str(job.num_tasks)]
if job.num_tasks_per_node:
cmd += ['-N', str(job.num_tasks_per_node)]
if job.num_cpus_per_task:
cmd += ['-d', str(job.num_cpus_per_task)]
if job.use_smt:
cmd += ['-j', '0']
return cmd
@register_launcher('mpirun')
class MpirunLauncher(JobLauncher):
def command(self, job):
return ['mpirun', '-np', str(job.num_tasks)]
@register_launcher('mpiexec')
class MpiexecLauncher(JobLauncher):
def command(self, job):
return ['mpiexec', '-n', str(job.num_tasks)]
@register_launcher('srunalloc')
class SrunAllocationLauncher(JobLauncher):
def command(self, job):
ret = ['srun']
if job.name:
ret += ['--job-name=%s' % job.name]
if job.time_limit:
ret += ['--time=%d:%d:%d' % job.time_limit]
if job.stdout:
ret += ['--output=%s' % job.stdout]
if job.stderr:
ret += ['--error=%s' % job.stderr]
if job.num_tasks:
ret += ['--ntasks=%s' % str(job.num_tasks)]
if job.num_tasks_per_node:
ret += ['--ntasks-per-node=%s' % str(job.num_tasks_per_node)]
if job.num_tasks_per_core:
ret += ['--ntasks-per-core=%s' % str(job.num_tasks_per_core)]
if job.num_tasks_per_socket:
ret += ['--ntasks-per-socket=%s' % str(job.num_tasks_per_socket)]
if job.num_cpus_per_task:
ret += ['--cpus-per-task=%s' % str(job.num_cpus_per_task)]
if job.sched_partition:
ret += ['--partition=%s' % str(job.sched_partition)]
if job.sched_exclusive_access:
ret += ['--exclusive']
if job.use_smt is not None:
hint = 'multithread' if job.use_smt else 'nomultithread'
ret += ['--hint=%s' % hint]
if job.sched_partition:
ret += ['--partition=%s' % str(job.sched_partition)]
if job.sched_account:
ret += ['--account=%s' % str(job.sched_account)]
if job.sched_nodelist:
ret += ['--nodelist=%s' % str(job.sched_nodelist)]
if job.sched_exclude_nodelist:
ret += ['--exclude=%s' % str(job.sched_exclude_nodelist)]
for opt in job.options:
if opt.startswith('#'):
continue
ret.append(opt)
return ret
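# Illustration (hypothetical job attributes, not from the original file): for a job
# with name='check', num_tasks=4 and num_tasks_per_node=2, command(job) would build
# something like ['srun', '--job-name=check', '--ntasks=4', '--ntasks-per-node=2', ...].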
| 26.962617 | 77 | 0.57435 |
7ac93ce4c356912e853016664be53b1a699f721a | 296 | py | Python | samples/retrieve_transactions.py | shadinaif/iyzipay-python | a150418dba6d0170ff7ae3772d16ac83654cb6a9 | ["MIT"] | 66 | 2016-03-24T14:27:41.000Z | 2022-01-18T22:14:20.000Z | samples/retrieve_transactions.py | shadinaif/iyzipay-python | a150418dba6d0170ff7ae3772d16ac83654cb6a9 | ["MIT"] | 41 | 2016-03-25T16:12:43.000Z | 2022-01-18T22:57:03.000Z | samples/retrieve_transactions.py | shadinaif/iyzipay-python | a150418dba6d0170ff7ae3772d16ac83654cb6a9 | ["MIT"] | 40 | 2016-04-27T18:26:47.000Z | 2021-12-12T11:59:40.000Z |
import iyzipay
options = {
'api_key': iyzipay.api_key,
'secret_key': iyzipay.secret_key,
'base_url': iyzipay.base_url
}
request = {
'transactionDate': '2019-06-25',
'page': '1',
}
report = iyzipay.RetrieveTransactions().retrieve(request, options)
print(report.read().decode('utf-8'))
| 19.733333 | 66 | 0.695946 |
5da44e88b8e59c67e6a0c7c690305e7b9f3f603e | 1,176 | py | Python | src/structlog/_generic.py | carsonip/structlog | 0d20303e1b2ce8d801f2081c5dbb65ea7d26b830 | ["Apache-2.0", "MIT"] | null | null | null | src/structlog/_generic.py | carsonip/structlog | 0d20303e1b2ce8d801f2081c5dbb65ea7d26b830 | ["Apache-2.0", "MIT"] | 1 | 2019-04-02T23:35:13.000Z | 2019-04-02T23:35:13.000Z | src/structlog/_generic.py | carsonip/structlog | 0d20303e1b2ce8d801f2081c5dbb65ea7d26b830 | ["Apache-2.0", "MIT"] | null | null | null |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the MIT License. See the LICENSE file in the root of this
# repository for complete details.
"""
Generic bound logger that can wrap anything.
"""
from __future__ import absolute_import, division, print_function
from functools import partial
from structlog._base import BoundLoggerBase
class BoundLogger(BoundLoggerBase):
"""
A generic BoundLogger that can wrap anything.
Every unknown method will be passed to the wrapped logger. If that's too
much magic for you, try :class:`structlog.stdlib.BoundLogger` or
:class:`structlog.twisted.BoundLogger` which also take advantage of
knowing the wrapped class which generally results in better performance.
Not intended to be instantiated by yourself. See
:func:`~structlog.wrap_logger` and :func:`~structlog.get_logger`.
"""
def __getattr__(self, method_name):
"""
If not done so yet, wrap the desired logger method & cache the result.
"""
wrapped = partial(self._proxy_to_logger, method_name)
setattr(self, method_name, wrapped)
return wrapped
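# Usage sketch (illustrative; assumes the public structlog API): wrapping any object,
# e.g. log = structlog.wrap_logger(structlog.PrintLogger(), wrapper_class=BoundLogger),
# lets log.msg("hello", key="value") be proxied to PrintLogger.msg after the
# processor chain has run, with the wrapped method cached by __getattr__ above.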
| 32.666667 | 78 | 0.72534 |
379441666ee3f6b45437ea966953589e6f032522 | 174 | py | Python | cursodepython/ex006.py | yagowill/python | 06f39d6c05bd1cdd2ebdb853cc5ce534f8406e57 | ["MIT"] | null | null | null | cursodepython/ex006.py | yagowill/python | 06f39d6c05bd1cdd2ebdb853cc5ce534f8406e57 | ["MIT"] | null | null | null | cursodepython/ex006.py | yagowill/python | 06f39d6c05bd1cdd2ebdb853cc5ce534f8406e57 | ["MIT"] | null | null | null |
n = int(input('Digite um número: '))
n2 = n * 2
n3 = n * 3
nr = n ** (1 / 2)  # parentheses needed: ** binds tighter than /, so n ** 1/2 would compute n/2, not the square root
print('Você digitou {}, o dobro de é {}, o triplo é {} e a raiz quadrada é {}'.format(n, n2, n3, nr))
| 34.8 | 101 | 0.557471 |
cf9bf4d0f397fa23e70a5ba16e51f4f273efe6e8 | 1,836 | py | Python | tests/data/hrrr/test_file_handler.py | USDA-ARS-NWRC/weather_forecast_retrieval | d1df41a0b06b6484b4a74e7fd0a364505bc1cfc1 | ["CC0-1.0"] | 6 | 2017-12-20T22:42:24.000Z | 2021-08-07T03:32:27.000Z | tests/data/hrrr/test_file_handler.py | USDA-ARS-NWRC/weather_forecast_retrieval | d1df41a0b06b6484b4a74e7fd0a364505bc1cfc1 | ["CC0-1.0"] | 26 | 2019-03-07T17:47:13.000Z | 2021-06-25T15:43:27.000Z | tests/data/hrrr/test_file_handler.py | USDA-ARS-NWRC/weather_forecast_retrieval | d1df41a0b06b6484b4a74e7fd0a364505bc1cfc1 | ["CC0-1.0"] | 3 | 2019-03-08T07:28:59.000Z | 2021-02-12T21:59:12.000Z |
import unittest
import pandas as pd
from weather_forecast_retrieval.data.hrrr import FileHandler
class TestHRRRFileHandler(unittest.TestCase):
def test_file_date(self):
file_time = pd.to_datetime('2018-02-08 05:00')
forecast_hour = 1
day, file_hour = FileHandler.file_date(
file_time, forecast_hour
)
self.assertEqual('2018-02-08', str(day))
self.assertEqual(4, file_hour)
forecast_hour = 3
day, file_hour = FileHandler.file_date(
file_time, forecast_hour
)
self.assertEqual('2018-02-08', str(day))
self.assertEqual(2, file_hour)
forecast_hour = 8
day, file_hour = FileHandler.file_date(
file_time, forecast_hour
)
self.assertEqual('2018-02-07', str(day))
self.assertEqual(21, file_hour)
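    # The assertions above restate the HRRR naming convention: the file valid at
    # 2018-02-08 05:00 UTC with a 1-hour forecast comes from the 04z run of the same
    # day, while an 8-hour forecast reaches back to the 21z run of the previous day.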
def test_file_name(self):
self.assertEqual(
'hrrr.t04z.wrfsfcf01.grib2',
FileHandler.file_name(4, 1)
)
self.assertEqual(
'hrrr.t04z.wrfsfcf01.nc',
FileHandler.file_name(4, 1, 'netcdf')
)
def test_folder_name(self):
self.assertEqual(
'hrrr.20180208',
FileHandler.folder_name(pd.to_datetime('2018-02-08'))
)
def test_folder_and_file(self):
folder_name, file_name = FileHandler.folder_and_file(
pd.to_datetime('2018-02-08 05:00'), 1
)
self.assertEqual('hrrr.20180208', folder_name)
self.assertEqual('hrrr.t04z.wrfsfcf01.grib2', file_name)
def test_folder_to_date(self):
file_date = FileHandler.folder_to_date(
'hrrr.20210615',
'hrrr.t04z.wrfsfcf01.grib2'
)
self.assertEqual(pd.to_datetime('2021-06-15 04:00').tz_localize(tz='UTC'), file_date)
| 29.142857 | 93 | 0.6122 |
0a7881308bd048754dce09ee574a8ac475444809 | 2,446 | py | Python | test/ut/retiarii/test_engine.py | stjordanis/nni | 30361a2e87ca37f397c0f0ebd6779b6600c001f9 | ["MIT"] | 2 | 2020-10-27T06:53:53.000Z | 2021-02-22T22:11:15.000Z | test/ut/retiarii/test_engine.py | stjordanis/nni | 30361a2e87ca37f397c0f0ebd6779b6600c001f9 | ["MIT"] | null | null | null | test/ut/retiarii/test_engine.py | stjordanis/nni | 30361a2e87ca37f397c0f0ebd6779b6600c001f9 | ["MIT"] | null | null | null |
import json
import os
import unittest
from pathlib import Path
import nni.retiarii
from nni.retiarii import Model, submit_models
from nni.retiarii.codegen import model_to_pytorch_script
from nni.retiarii.execution import set_execution_engine
from nni.retiarii.execution.base import BaseExecutionEngine
from nni.retiarii.execution.python import PurePythonExecutionEngine
from nni.retiarii.integration import RetiariiAdvisor
class EngineTest(unittest.TestCase):
def test_codegen(self):
with open(self.enclosing_dir / 'mnist_pytorch.json') as f:
model = Model._load(json.load(f))
script = model_to_pytorch_script(model)
with open(self.enclosing_dir / 'debug_mnist_pytorch.py') as f:
reference_script = f.read()
self.assertEqual(script.strip(), reference_script.strip())
def test_base_execution_engine(self):
advisor = RetiariiAdvisor()
set_execution_engine(BaseExecutionEngine())
with open(self.enclosing_dir / 'mnist_pytorch.json') as f:
model = Model._load(json.load(f))
submit_models(model, model)
advisor.stopping = True
advisor.default_worker.join()
advisor.assessor_worker.join()
def test_py_execution_engine(self):
advisor = RetiariiAdvisor()
set_execution_engine(PurePythonExecutionEngine())
model = Model._load({
'_model': {
'inputs': None,
'outputs': None,
'nodes': {
'layerchoice_1': {
'operation': {'type': 'LayerChoice', 'parameters': {'candidates': ['0', '1']}}
}
},
'edges': []
}
})
model.python_class = object
submit_models(model, model)
advisor.stopping = True
advisor.default_worker.join()
advisor.assessor_worker.join()
def setUp(self) -> None:
self.enclosing_dir = Path(__file__).parent
os.makedirs(self.enclosing_dir / 'generated', exist_ok=True)
from nni.runtime import protocol
protocol._out_file = open(self.enclosing_dir / 'generated/debug_protocol_out_file.py', 'wb')
def tearDown(self) -> None:
from nni.runtime import protocol
protocol._out_file.close()
nni.retiarii.execution.api._execution_engine = None
nni.retiarii.integration_api._advisor = None
| 35.449275 | 102 | 0.6435 |
4fdd1e5fd1421c074243b716dbb31d6a1ebb4b2d | 31,884 | py | Python | tools/tensorflow_docs/api_generator/generate_lib.py | armando-fandango/tensorflow-docs | 5c4bd64cc6466af361dfd1f7e19625e7de52fc37 | ["Apache-2.0"] | null | null | null | tools/tensorflow_docs/api_generator/generate_lib.py | armando-fandango/tensorflow-docs | 5c4bd64cc6466af361dfd1f7e19625e7de52fc37 | ["Apache-2.0"] | null | null | null | tools/tensorflow_docs/api_generator/generate_lib.py | armando-fandango/tensorflow-docs | 5c4bd64cc6466af361dfd1f7e19625e7de52fc37 | ["Apache-2.0"] | null | null | null |
# Lint as: python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tensorflow.org style API Reference docs for a Python module."""
import collections
import fnmatch
import inspect
import os
import pathlib
import shutil
import tempfile
from typing import List
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import doc_generator_visitor
from tensorflow_docs.api_generator import parser
from tensorflow_docs.api_generator import pretty_docs
from tensorflow_docs.api_generator import public_api
from tensorflow_docs.api_generator import py_guide_parser
from tensorflow_docs.api_generator import traverse
import yaml
# Used to add a collections.OrderedDict representer to yaml so that the
# dump doesn't contain !!OrderedDict yaml tags.
# Reference: https://stackoverflow.com/a/21048064
# Using a normal dict doesn't preserve the order of the input dictionary.
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
return collections.OrderedDict(loader.construct_pairs(node))
yaml.add_representer(collections.OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
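# Illustration (added for clarity): with these hooks registered, yaml.dump() of an
# OrderedDict such as OrderedDict([('title', 'Overview'), ('path', '/tf/app')]) is
# expected to emit plain "title: Overview" / "path: /tf/app" lines rather than an
# !!python/object/apply:collections.OrderedDict tag, and loading keeps the key order.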
class TocNode(object):
"""Represents a node in the TOC.
Attributes:
full_name: Name of the module.
short_name: The last path component.
py_object: Python object of the module.
path: Path to the module's page on tensorflow.org relative to
tensorflow.org.
experimental: Whether the module is experimental or not.
deprecated: Whether the module is deprecated or not.
"""
def __init__(self, module, py_object, path):
self._module = module
self._py_object = py_object
self._path = path
@property
def full_name(self):
return self._module
@property
def short_name(self):
return self.full_name.split('.')[-1]
@property
def py_object(self):
return self._py_object
@property
def path(self):
return self._path
@property
def experimental(self):
return 'experimental' in self.short_name
_DEPRECATED_STRING = 'THIS FUNCTION IS DEPRECATED'
@property
def deprecated(self):
"""Checks if the module is deprecated or not.
Special case is `tf.contrib`. It doesn't have the _tf_decorator attribute
but that module should be marked as deprecated.
Each deprecated function has a `_tf_decorator.decorator_name` attribute.
Check the docstring of that function to confirm if the function was
indeed deprecated. If a different deprecation setting was used on the
function, then "THIS FUNCTION IS DEPRECATED" substring won't be inserted
into the docstring of that function by the decorator.
Returns:
      True if deprecated else False.
"""
if 'tf.contrib' in self.full_name:
return True
try:
# Instead of only checking the docstring, checking for the decorator
      # provides an additional level of certainty about the correctness of
      # the application of `status: deprecated`.
decorator_list = parser.extract_decorators(self.py_object)
if any('deprecat' in dec for dec in decorator_list):
return self._check_docstring()
except AttributeError:
pass
return False
def _check_docstring(self):
# Only add the deprecated status if the function is deprecated. There are
# other settings that should be ignored like deprecate_args, etc.
docstring = self.py_object.__doc__
return docstring is not None and self._DEPRECATED_STRING in docstring
class Module(TocNode):
"""Represents a single module and its children and submodules.
Attributes:
full_name: Name of the module.
short_name: The last path component.
py_object: Python object of the module.
title: Title of the module in _toc.yaml
path: Path to the module's page on tensorflow.org relative to
tensorflow.org.
children: List of attributes on the module.
submodules: List of submodules in the module.
experimental: Whether the module is experimental or not.
deprecated: Whether the module is deprecated or not.
"""
def __init__(self, module, py_object, path):
super(Module, self).__init__(module, py_object, path)
self._children = []
self._submodules = []
@property
def title(self):
if self.full_name.count('.') > 1:
title = self.full_name.split('.')[-1]
else:
title = self.full_name
return title
@property
def children(self):
return sorted(
self._children, key=lambda x: (x.full_name.upper(), x.full_name))
@property
def submodules(self):
return self._submodules
def add_children(self, children):
if not isinstance(children, list):
children = [children]
self._children.extend(children)
def add_submodule(self, sub_mod):
self._submodules.append(sub_mod)
class ModuleChild(TocNode):
"""Represents a child of a module.
Attributes:
full_name: Name of the child.
short_name: The last path component.
py_object: Python object of the child.
title: Title of the module in _toc.yaml
path: Path to the module's page on tensorflow.org relative to
tensorflow.org.
experimental: Whether the module child is experimental or not.
deprecated: Whether the module is deprecated or not.
"""
def __init__(self, name, py_object, parent, path):
self._parent = parent
super(ModuleChild, self).__init__(name, py_object, path)
@property
def title(self):
return self.full_name[len(self._parent) + 1:]
class GenerateToc(object):
"""Generates a data structure that defines the structure of _toc.yaml."""
def __init__(self, modules):
self._modules = modules
def _create_graph(self):
"""Creates a graph to allow a dfs traversal on it to generate the toc.
Each graph key contains a module and its value is an object of `Module`
class. That module object contains a list of submodules.
Example low-level structure of the graph is as follows:
{
'module1': [submodule1, submodule2],
'submodule1': [sub1-submodule1],
'sub1-submodule1': [],
'submodule2': [],
'module2': [],
'module3': [submodule4],
'submodule4': [sub1-submodule4],
'sub1-submodule4': [sub1-sub1-submodule4],
'sub1-sub1-submodule4': []
}
Returns:
A tuple of (graph, base_modules). Base modules is returned because while
creating a nested list of dictionaries, the top level should only contain
the base modules.
"""
# Sort the modules in case-insensitive alphabetical order.
sorted_modules = sorted(self._modules.keys(), key=lambda a: a.lower())
toc_base_modules = []
toc_graph = {}
for module in sorted_modules:
mod = self._modules[module]
# Add the module to the graph.
toc_graph[module] = mod
# If the module's name contains more than one dot, it is not a base level
# module. Hence, add it to its parents submodules list.
if module.count('.') > 1:
# For example, if module is `tf.keras.applications.densenet` then its
# parent is `tf.keras.applications`.
parent_module = '.'.join(module.split('.')[:-1])
parent_mod_obj = toc_graph.get(parent_module, None)
if parent_mod_obj is not None:
parent_mod_obj.add_submodule(mod)
else:
toc_base_modules.append(module)
return toc_graph, toc_base_modules
def _generate_children(self, mod, is_parent_deprecated):
"""Creates a list of dictionaries containing child's title and path.
    For example, the dictionary created will look like this in _toc.yaml.
```
children_list = [{'title': 'Overview', 'path': '/tf/app'},
{'title': 'run', 'path': '/tf/app/run'}]
```
The above list will get converted to the following yaml syntax.
```
- title: Overview
path: /tf/app
- title: run
path: /tf/app/run
```
Args:
mod: A module object.
is_parent_deprecated: Bool, Whether the parent is deprecated or not.
Returns:
A list of dictionaries containing child's title and path.
"""
children_list = []
children_list.append(
collections.OrderedDict([('title', 'Overview'), ('path', mod.path)]))
for child in mod.children:
child_yaml_content = [('title', child.title), ('path', child.path)]
# Set `status: deprecated` only if the parent's status is not
# deprecated.
if child.deprecated and not is_parent_deprecated:
child_yaml_content.insert(1, ('status', 'deprecated'))
elif child.experimental:
child_yaml_content.insert(1, ('status', 'experimental'))
children_list.append(collections.OrderedDict(child_yaml_content))
return children_list
def _dfs(self, mod, visited, is_parent_deprecated):
"""Does a dfs traversal on the graph generated.
This creates a nested dictionary structure which is then dumped as .yaml
file. Each submodule's dictionary of title and path is nested under its
parent module.
For example, `tf.keras.app.net` will be nested under `tf.keras.app` which
    will be nested under `tf.keras`. Here's how the nested dictionaries will
    look when it's dumped as .yaml.
```
- title: tf.keras
section:
- title: Overview
path: /tf/keras
- title: app
section:
- title: Overview
path: /tf/keras/app
- title: net
section:
- title: Overview
path: /tf/keras/app/net
```
    The above nested structure is what the dfs traversal will create in the
    form of lists of dictionaries.
Args:
mod: A module object.
visited: A dictionary of modules visited by the dfs traversal.
is_parent_deprecated: Bool, Whether any parent is deprecated or not.
Returns:
A dictionary containing the nested data structure.
"""
visited[mod.full_name] = True
    # The deprecation status passed down here is the current module's, because
    # the current module is the parent of its children.
children_list = self._generate_children(
mod, is_parent_deprecated or mod.deprecated)
    # Recurse to generate entries for this module's submodules.
for submod in mod.submodules:
if not visited[submod.full_name]:
sub_mod_dict = self._dfs(submod, visited, is_parent_deprecated or
mod.deprecated)
children_list.append(sub_mod_dict)
# If the parent module is not experimental, then add the experimental
# status to the submodule.
submod_yaml_content = [('title', mod.title), ('section', children_list)]
# If the parent module is not deprecated, then add the deprecated
# status to the submodule. If the parent is deprecated, then setting its
# status to deprecated in _toc.yaml propagates to all its children and
# submodules.
if mod.deprecated and not is_parent_deprecated:
submod_yaml_content.insert(1, ('status', 'deprecated'))
elif mod.experimental:
submod_yaml_content.insert(1, ('status', 'experimental'))
return collections.OrderedDict(submod_yaml_content)
def generate(self):
"""Generates the final toc.
Returns:
A list of dictionaries which will be dumped into .yaml file.
"""
toc = []
toc_graph, toc_base_modules = self._create_graph()
visited = {node: False for node in toc_graph.keys()}
# Sort in alphabetical case-insensitive order.
toc_base_modules = sorted(toc_base_modules, key=lambda a: a.lower())
for module in toc_base_modules:
module_obj = toc_graph[module]
# Generate children of the base module.
section = self._generate_children(
module_obj, is_parent_deprecated=module_obj.deprecated)
# DFS traversal on the submodules.
for sub_mod in module_obj.submodules:
sub_mod_list = self._dfs(
sub_mod, visited, is_parent_deprecated=module_obj.deprecated)
section.append(sub_mod_list)
module_yaml_content = [('title', module_obj.title), ('section', section)]
if module_obj.deprecated:
module_yaml_content.insert(1, ('status', 'deprecated'))
elif module_obj.experimental:
module_yaml_content.insert(1, ('status', 'experimental'))
toc.append(collections.OrderedDict(module_yaml_content))
return {'toc': toc}
def write_docs(output_dir,
parser_config,
yaml_toc,
root_title='TensorFlow',
search_hints=True,
site_path='api_docs/python',
gen_redirects=True,
table_view=True):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
a tree of docs at `output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `parser.ParserConfig` object, containing all the necessary
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
root_title: The title name for the root level index.md.
search_hints: (bool) include meta-data search hints at the top of each
output file.
site_path: The output path relative to the site root. Used in the
`_toc.yaml` and `_redirects.yaml` files.
gen_redirects: Bool which decides whether to generate _redirects.yaml
file or not.
table_view: If True, `Args`, `Returns`, `Raises` or `Attributes` will be
converted to a tabular format while generating markdown.
If False, they will be converted to a markdown List view.
Raises:
ValueError: if `output_dir` is not an absolute path
"""
output_dir = pathlib.Path(output_dir)
site_path = pathlib.Path('/', site_path)
# Make output_dir.
if not output_dir.is_absolute():
raise ValueError("'output_dir' must be an absolute path.\n"
f" output_dir='{output_dir}'")
output_dir.mkdir(parents=True, exist_ok=True)
  # These dictionaries are used for table-of-contents generation below.
  # After the for-loop below, they will contain:
  #   - module name (string): classes and functions the module contains (list)
module_children = {}
# Collect redirects for an api _redirects.yaml file.
redirects = []
# Parse and write Markdown pages, resolving cross-links (`tf.symbol`).
for full_name in sorted(parser_config.index.keys(), key=lambda k: k.lower()):
py_object = parser_config.index[full_name]
if full_name in parser_config.duplicate_of:
continue
    # Methods and constants are documented only as part of their parent's page.
if parser_config.reference_resolver.is_fragment(full_name):
continue
# Remove the extension from the path.
docpath, _ = os.path.splitext(parser.documentation_path(full_name))
# For a module, remember the module for the table-of-contents
if inspect.ismodule(py_object):
if full_name in parser_config.tree:
mod_obj = Module(
module=full_name,
py_object=py_object,
path=str(site_path / docpath))
module_children[full_name] = mod_obj
# For something else that's documented,
# figure out what module it lives in
else:
subname = str(full_name)
while True:
subname = subname[:subname.rindex('.')]
if inspect.ismodule(parser_config.index[subname]):
module_name = parser_config.duplicate_of.get(subname, subname)
child_mod = ModuleChild(
name=full_name,
py_object=py_object,
parent=module_name,
path=str(site_path / docpath))
module_children[module_name].add_children(child_mod)
break
# Generate docs for `py_object`, resolving references.
try:
page_info = parser.docs_for_object(full_name, py_object, parser_config)
except:
raise ValueError(f'Failed to generate docs for symbol: `{full_name}`')
path = output_dir / parser.documentation_path(full_name)
try:
path.parent.mkdir(exist_ok=True, parents=True)
# This function returns unicode in PY3.
hidden = doc_controls.should_hide_from_search(page_info.py_object)
if search_hints and not hidden:
content = [page_info.get_metadata_html()]
else:
content = ['robots: noindex\n']
content.append(pretty_docs.build_md_page(page_info, table_view))
text = '\n'.join(content)
path.write_text(text, encoding='utf-8')
except OSError:
raise OSError('Cannot write documentation for '
f'{full_name} to {path.parent}')
duplicates = parser_config.duplicates.get(full_name, [])
if not duplicates:
continue
duplicates = [item for item in duplicates if item != full_name]
if gen_redirects:
for dup in duplicates:
from_path = site_path / dup.replace('.', '/')
to_path = site_path / full_name.replace('.', '/')
redirects.append({'from': str(from_path), 'to': str(to_path)})
if yaml_toc:
toc_gen = GenerateToc(module_children)
toc_dict = toc_gen.generate()
# Replace the overview path *only* for 'TensorFlow' to
# `/api_docs/python/tf_overview`. This will be redirected to
# `/api_docs/python/tf`.
toc_values = toc_dict['toc'][0]
if toc_values['title'] == 'tf':
section = toc_values['section'][0]
section['path'] = str(site_path / 'tf_overview')
leftnav_toc = output_dir / '_toc.yaml'
with open(leftnav_toc, 'w') as toc_file:
yaml.dump(toc_dict, toc_file, default_flow_style=False)
if redirects and gen_redirects:
if yaml_toc and toc_values['title'] == 'tf':
redirects.append({
'from': str(site_path / 'tf_overview'),
'to': str(site_path / 'tf'),
})
redirects_dict = {
'redirects': sorted(redirects, key=lambda redirect: redirect['from'])
}
api_redirects_path = output_dir / '_redirects.yaml'
with open(api_redirects_path, 'w') as redirect_file:
yaml.dump(redirects_dict, redirect_file, default_flow_style=False)
# Write a global index containing all full names with links.
with open(output_dir / 'index.md', 'w') as f:
global_index = parser.generate_global_index(
root_title, parser_config.index, parser_config.reference_resolver)
if not search_hints:
global_index = 'robots: noindex\n' + global_index
f.write(global_index)
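# Hedged usage sketch (not part of the original module): given a
# `parser.ParserConfig` built from an extraction pass (see `extract` and
# `DocGenerator` further below), this writes the Markdown pages plus the
# `_toc.yaml` and `_redirects.yaml` files under an absolute `output_dir`.
def _example_write_docs(output_dir, parser_config):
  write_docs(
      output_dir=output_dir,
      parser_config=parser_config,
      yaml_toc=True,
      root_title='Example Project',
      search_hints=True,
      site_path='api_docs/python',
      gen_redirects=True,
      table_view=True)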
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
def extract(py_modules,
base_dir,
private_map,
do_not_descend_map,
visitor_cls=doc_generator_visitor.DocGeneratorVisitor,
callbacks=None):
"""Walks the module contents, returns an index of all visited objects.
The return value is an instance of `self._visitor_cls`, usually:
`doc_generator_visitor.DocGeneratorVisitor`
Args:
py_modules: A list containing a single (short_name, module_object) pair.
like `[('tf',tf)]`.
base_dir: The package root directory. Nothing defined outside of this
directory is documented.
private_map: A {'path':["name"]} dictionary listing particular object
locations that should be ignored in the doc generator.
do_not_descend_map: A {'path':["name"]} dictionary listing particular object
locations where the children should not be listed.
visitor_cls: A class, typically a subclass of
      `doc_generator_visitor.DocGeneratorVisitor` that accumulates the indexes of
objects to document.
callbacks: Additional callbacks passed to `traverse`. Executed between the
`PublicApiFilter` and the accumulator (`DocGeneratorVisitor`). The
      primary use case for these is to filter the list of children (see:
`public_api.local_definitions_filter`)
Returns:
The accumulator (`DocGeneratorVisitor`)
"""
if callbacks is None:
callbacks = []
if len(py_modules) != 1:
raise ValueError("only pass one [('name',module)] pair in py_modules")
short_name, py_module = py_modules[0]
api_filter = public_api.PublicAPIFilter(
base_dir=base_dir,
do_not_descend_map=do_not_descend_map,
private_map=private_map)
accumulator = visitor_cls()
# The objects found during traversal, and their children are passed to each
# of these visitors in sequence. Each visitor returns the list of children
# to be passed to the next visitor.
visitors = [api_filter, public_api.ignore_typing] + callbacks + [accumulator]
traverse.traverse(py_module, visitors, short_name)
return accumulator
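# Illustrative sketch (not in the original module): `extract` takes exactly one
# (short_name, module) pair and returns the accumulated visitor, whose `index`,
# `tree`, `duplicates`, etc. feed `parser.ParserConfig`. The short name
# 'example' is a placeholder.
def _example_extract(package_module, base_dir):
  return extract(
      py_modules=[('example', package_module)],
      base_dir=base_dir,
      private_map={},
      do_not_descend_map={})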
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
"""Extract the title from a .md file."""
def __init__(self):
self.title = None
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def replace_refs(
src_dir: str,
output_dir: str,
reference_resolvers: List[parser.ReferenceResolver],
api_docs_relpath: List[str],
file_pattern: str = '*.md',
):
"""Link `tf.symbol` references found in files matching `file_pattern`.
A matching directory structure, with the modified files is
written to `output_dir`.
`{"__init__.py","OWNERS","README.txt"}` are skipped.
Files not matching `file_pattern` (using `fnmatch`) are copied with no change.
Also, files in the `api_guides/python` directory get explicit ids set on all
heading-2s to ensure back-links work.
Args:
src_dir: The directory to convert files from.
output_dir: The root directory to write the resulting files to.
reference_resolvers: A list of `parser.ReferenceResolver` to make the
replacements.
api_docs_relpath: List of relative-path strings to the api_docs
from the src_dir for each reference_resolver.
    file_pattern: Only replace references in files matching `file_pattern`, using
`fnmatch`. Non-matching files are copied unchanged.
"""
# Iterate through all the source files and process them.
for dirpath, _, filenames in os.walk(src_dir):
depth = os.path.relpath(src_dir, start=dirpath)
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
if not os.path.exists(new_dir):
os.makedirs(new_dir)
for base_name in filenames:
if base_name in EXCLUDED:
continue
full_in_path = os.path.join(dirpath, base_name)
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
# Copy files that do not match the file_pattern, unmodified.
if not fnmatch.fnmatch(base_name, file_pattern):
if full_in_path != full_out_path:
shutil.copyfile(full_in_path, full_out_path)
continue
with open(full_in_path, 'rb') as f:
content = f.read().decode('utf-8')
for resolver, rel_path in zip(reference_resolvers, api_docs_relpath):
# TODO(b/163055387): delete when all api_cache files have been updated.
if not rel_path.endswith('python'):
rel_path = os.path.join(rel_path, 'python')
# If `rel_path` is an absolute path, `depth` is just discarded.
relative_path_to_root = os.path.join(depth, rel_path)
content = resolver.replace_references(content, relative_path_to_root)
with open(full_out_path, 'wb') as f:
f.write((content + '\n').encode('utf-8'))
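# Hedged usage sketch (not part of the original source): rewrite back-ticked
# `tf.symbol`-style references in a tree of guide files. The directory names
# below are placeholders.
def _example_replace_refs(resolver):
  replace_refs(
      src_dir='/tmp/guides_src',
      output_dir='/tmp/guides_out',
      reference_resolvers=[resolver],
      api_docs_relpath=['api_docs'],
      file_pattern='*.md')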
class DocGenerator(object):
"""Main entry point for generating docs."""
def __init__(self,
root_title,
py_modules,
base_dir=None,
code_url_prefix=(),
search_hints=True,
site_path='api_docs/python',
private_map=None,
do_not_descend_map=None,
visitor_cls=doc_generator_visitor.DocGeneratorVisitor,
api_cache=True,
callbacks=None,
yaml_toc=True,
gen_redirects=True,
table_view=True):
"""Creates a doc-generator.
Args:
root_title: A string. The main title for the project. Like "TensorFlow"
py_modules: The python module to document.
base_dir: String or tuple of strings. Directories that "Defined in" links
are generated relative to. Modules outside one of these directories are
not documented. No `base_dir` should be inside another.
code_url_prefix: String or tuple of strings. The prefix to add to "Defined
in" paths. These are zipped with `base-dir`, to set the `defined_in`
path for each file. The defined in link for `{base_dir}/path/to/file` is
set to `{code_url_prefix}/path/to/file`.
search_hints: Bool. Include metadata search hints at the top of each file.
site_path: Path prefix in the "_toc.yaml"
private_map: A {"module.path.to.object": ["names"]} dictionary. Specific
aliases that should not be shown in the resulting docs.
do_not_descend_map: A {"module.path.to.object": ["names"]} dictionary.
Specific aliases that will be shown, but not expanded.
visitor_cls: An option to override the default visitor class
`doc_generator_visitor.DocGeneratorVisitor`.
api_cache: Bool. Generate an api_cache file. This is used to easily add
api links for backticked symbols (like `tf.add`) in other docs.
callbacks: Additional callbacks passed to `traverse`. Executed between the
`PublicApiFilter` and the accumulator (`DocGeneratorVisitor`). The
        primary use case for these is to filter the list of children (see:
`public_api.local_definitions_filter`)
yaml_toc: Bool which decides whether to generate _toc.yaml file or not.
gen_redirects: Bool which decides whether to generate _redirects.yaml
file or not.
table_view: If True, `Args`, `Returns`, `Raises` or `Attributes` will be
converted to a tabular format while generating markdown.
If False, they will be converted to a markdown List view.
"""
self._root_title = root_title
self._py_modules = py_modules
self._short_name = py_modules[0][0]
self._py_module = py_modules[0][1]
if base_dir is None:
# If the user passes a single-file module, only document code defined in
# that file.
base_dir = self._py_module.__file__
if base_dir.endswith('__init__.py'):
# If they passed a package, document anything defined in that directory.
base_dir = os.path.dirname(base_dir)
if isinstance(base_dir, str):
base_dir = (base_dir,)
self._base_dir = tuple(base_dir)
assert self._base_dir, '`base_dir` cannot be empty'
if isinstance(code_url_prefix, str):
code_url_prefix = (code_url_prefix,)
self._code_url_prefix = tuple(code_url_prefix)
if not self._code_url_prefix:
raise ValueError('`code_url_prefix` cannot be empty')
if len(self._code_url_prefix) != len(base_dir):
raise ValueError('The `base_dir` list should have the same number of '
'elements as the `code_url_prefix` list (they get '
'zipped together).')
self._search_hints = search_hints
self._site_path = site_path
self._private_map = private_map or {}
self._do_not_descend_map = do_not_descend_map or {}
self._visitor_cls = visitor_cls
self.api_cache = api_cache
if callbacks is None:
callbacks = []
self._callbacks = callbacks
self._yaml_toc = yaml_toc
self._gen_redirects = gen_redirects
self._table_view = table_view
def make_reference_resolver(self, visitor):
return parser.ReferenceResolver.from_visitor(
visitor, py_module_names=[self._short_name])
def make_parser_config(self, visitor, reference_resolver):
return parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
duplicate_of=visitor.duplicate_of,
tree=visitor.tree,
index=visitor.index,
reverse_index=visitor.reverse_index,
base_dir=self._base_dir,
code_url_prefix=self._code_url_prefix)
def run_extraction(self):
"""Walks the module contents, returns an index of all visited objects.
The return value is an instance of `self._visitor_cls`, usually:
`doc_generator_visitor.DocGeneratorVisitor`
    Returns:
      The accumulator (an instance of `self._visitor_cls`, usually
      `doc_generator_visitor.DocGeneratorVisitor`).
    """
return extract(
py_modules=self._py_modules,
base_dir=self._base_dir,
private_map=self._private_map,
do_not_descend_map=self._do_not_descend_map,
visitor_cls=self._visitor_cls,
callbacks=self._callbacks)
def build(self, output_dir):
"""Build all the docs.
This produces python api docs:
* generated from `py_module`.
* written to '{output_dir}/api_docs/python/'
Args:
output_dir: Where to write the resulting docs.
"""
workdir = pathlib.Path(tempfile.mkdtemp())
# Extract the python api from the _py_modules
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor)
# Replace all the `tf.symbol` references in the workdir.
replace_refs(
src_dir=str(workdir),
output_dir=str(workdir),
reference_resolvers=[reference_resolver],
api_docs_relpath=['api_docs'],
file_pattern='*.md',
)
# Write the api docs.
parser_config = self.make_parser_config(visitor, reference_resolver)
work_py_dir = workdir / 'api_docs/python'
write_docs(
output_dir=str(work_py_dir),
parser_config=parser_config,
yaml_toc=self._yaml_toc,
root_title=self._root_title,
search_hints=self._search_hints,
site_path=self._site_path,
gen_redirects=self._gen_redirects,
table_view=self._table_view)
if self.api_cache:
reference_resolver.to_json_file(
str(work_py_dir / self._short_name / '_api_cache.json'))
    # Ensure the output directory exists; `exist_ok` avoids the fragile,
    # locale-dependent `strerror` comparison.
    os.makedirs(output_dir, exist_ok=True)
# Typical results are something like:
#
# out_dir/
# {short_name}/
# _redirects.yaml
# _toc.yaml
# index.md
# {short_name}.md
#
# Copy the top level files to the `{output_dir}/`, delete and replace the
# `{output_dir}/{short_name}/` directory.
for work_path in work_py_dir.glob('*'):
out_path = pathlib.Path(output_dir) / work_path.name
out_path.parent.mkdir(exist_ok=True, parents=True)
if work_path.is_file():
shutil.copy2(work_path, out_path)
elif work_path.is_dir():
shutil.rmtree(out_path, ignore_errors=True)
shutil.copytree(work_path, out_path)
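# End-to-end sketch (not part of the original module). `package_module` is the
# already-imported package to document; the title, URL prefix, and site path
# below are placeholder values.
def _example_build_docs(package_module, output_dir):
  doc_generator = DocGenerator(
      root_title='Example Project',
      py_modules=[('example', package_module)],
      code_url_prefix='https://example.com/src',
      site_path='api_docs/python')
  doc_generator.build(output_dir)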
| 34.884026 | 80 | 0.685767 |
75af222d646a2075e37bf272d3392dd649936dca | 1,371 | py | Python | pages/migrations/0001_initial.py | cagataysarioglu/TanTradeWebsite | 8a095f0a1958a3314544f2cff1b203818f071d05 | ["MIT"] | 2 | 2021-01-24T12:41:05.000Z | 2021-01-24T14:06:29.000Z | pages/migrations/0001_initial.py | cagataysarioglu/djangoTanTrade | 8a095f0a1958a3314544f2cff1b203818f071d05 | ["MIT"] | null | null | null | pages/migrations/0001_initial.py | cagataysarioglu/djangoTanTrade | 8a095f0a1958a3314544f2cff1b203818f071d05 | ["MIT"] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-27 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Carousel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Görsel Adı')),
('photo', models.ImageField(null=True, upload_to='', verbose_name='Fotoğraf')),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('namesurname', models.CharField(max_length=30, verbose_name='Ad-Soyad')),
('email', models.EmailField(max_length=254, verbose_name='e-Posta')),
('subject', models.CharField(max_length=70, verbose_name='Konu')),
('image', models.ImageField(blank=True, null=True, upload_to='img/uploads/', verbose_name='Görsel')),
('message', models.TextField(verbose_name='İleti')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Tarih')),
],
),
]
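# For reference only (added, not generated by Django): a sketch of the models
# this initial migration corresponds to. Field names and options mirror the
# CreateModel operations above; the exact models.py layout is an assumption.
#
# from django.db import models
#
# class Carousel(models.Model):
#     name = models.CharField(max_length=50, verbose_name='Görsel Adı')
#     photo = models.ImageField(null=True, upload_to='', verbose_name='Fotoğraf')
#
# class Contact(models.Model):
#     namesurname = models.CharField(max_length=30, verbose_name='Ad-Soyad')
#     email = models.EmailField(max_length=254, verbose_name='e-Posta')
#     subject = models.CharField(max_length=70, verbose_name='Konu')
#     image = models.ImageField(blank=True, null=True, upload_to='img/uploads/', verbose_name='Görsel')
#     message = models.TextField(verbose_name='İleti')
#     created = models.DateTimeField(auto_now_add=True, verbose_name='Tarih')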
| 39.171429 | 117 | 0.587163 |
f3dc0768fd19cd37860f56270577def6a83e983a | 7,533 | py | Python | sdk/python/pulumi_azure_native/healthcareapis/v20210111/private_endpoint_connection.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/healthcareapis/v20210111/private_endpoint_connection.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/healthcareapis/v20210111/private_endpoint_connection.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointConnection']
class PrivateEndpointConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The Private Endpoint Connection resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the service instance.
:param pulumi.Input[str] resource_name_: The name of the service instance.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['private_endpoint_connection_name'] = private_endpoint_connection_name
if private_link_service_connection_state is None and not opts.urn:
raise TypeError("Missing required property 'private_link_service_connection_state'")
__props__['private_link_service_connection_state'] = private_link_service_connection_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__['resource_name'] = resource_name_
__props__['name'] = None
__props__['private_endpoint'] = None
__props__['provisioning_state'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:healthcareapis/v20210111:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:healthcareapis:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:healthcareapis:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:healthcareapis/latest:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:healthcareapis/latest:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:healthcareapis/v20200330:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:healthcareapis/v20200330:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:healthcareapis/v20210111:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["name"] = None
__props__["private_endpoint"] = None
__props__["private_link_service_connection_state"] = None
__props__["provisioning_state"] = None
__props__["system_data"] = None
__props__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
System metadata for this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
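# Hedged usage sketch (added, not part of the generated SDK file): creating this
# resource from a Pulumi program. The argument values and the connection-state
# fields below are placeholders.
#
# import pulumi_azure_native as azure_native
#
# connection = azure_native.healthcareapis.v20210111.PrivateEndpointConnection(
#     "exampleConnection",
#     resource_group_name="example-rg",
#     resource_name_="example-service",
#     private_link_service_connection_state=azure_native.healthcareapis.v20210111.PrivateLinkServiceConnectionStateArgs(
#         status="Approved",
#         description="Approved by admin"))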
| 48.288462 | 640 | 0.687774 |
82dce90e5ed639dbdbe5a1f16648622ea90bc71c | 1,555 | py | Python | sdk/schemaregistry/azure-schemaregistry-avroencoder/azure/schemaregistry/encoder/avroencoder/__init__.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | ["MIT"] | 1 | 2022-02-18T01:17:27.000Z | 2022-02-18T01:17:27.000Z | sdk/schemaregistry/azure-schemaregistry-avroencoder/azure/schemaregistry/encoder/avroencoder/__init__.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | ["MIT"] | null | null | null | sdk/schemaregistry/azure-schemaregistry-avroencoder/azure/schemaregistry/encoder/avroencoder/__init__.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | ["MIT"] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from ._version import VERSION
__version__ = VERSION
from ._schema_registry_avro_encoder import AvroEncoder
from ._message_protocol import MessageType, MessageMetadataDict
__all__ = [
"AvroEncoder",
"MessageType",
"MessageMetadataDict"
]
| 40.921053 | 78 | 0.699035 |
5672cf5cf7451baa9b9c1b258094a4df538b94a8 | 6,878 | py | Python | uncertainty_baselines/models/wide_resnet_dropout.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | ["Apache-2.0"] | null | null | null | uncertainty_baselines/models/wide_resnet_dropout.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | ["Apache-2.0"] | null | null | null | uncertainty_baselines/models/wide_resnet_dropout.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wide ResNet with dropout."""
import functools
import tensorflow as tf
BatchNormalization = functools.partial( # pylint: disable=invalid-name
tf.keras.layers.BatchNormalization,
epsilon=1e-5, # using epsilon and momentum defaults from Torch
momentum=0.9)
Conv2D = functools.partial( # pylint: disable=invalid-name
tf.keras.layers.Conv2D,
kernel_size=3,
padding='same',
use_bias=False,
kernel_initializer='he_normal')
def apply_dropout(inputs, dropout_rate, filterwise_dropout):
"""Apply a dropout layer to the inputs."""
if filterwise_dropout:
return tf.keras.layers.Dropout(
dropout_rate, noise_shape=[inputs.shape[0], 1, 1, inputs.shape[3]
])(inputs, training=True)
else:
return tf.keras.layers.Dropout(dropout_rate)(inputs, training=True)
def basic_block(inputs, filters, strides, l2, dropout_rate, residual_dropout,
filterwise_dropout):
"""Basic residual block of two 3x3 convs.
Args:
inputs: tf.Tensor.
filters: Number of filters for Conv2D.
strides: Stride dimensions for Conv2D.
l2: L2 regularization coefficient.
dropout_rate: Dropout rate.
residual_dropout: Apply dropout only to the residual connections.
filterwise_dropout: Dropout whole convolutional filters instead of
individual values in the feature map.
Returns:
tf.Tensor.
"""
x = inputs
y = inputs
y = BatchNormalization(beta_regularizer=tf.keras.regularizers.l2(l2),
gamma_regularizer=tf.keras.regularizers.l2(l2))(y)
y = tf.keras.layers.Activation('relu')(y)
if not residual_dropout:
y = apply_dropout(y, dropout_rate, filterwise_dropout)
y = Conv2D(filters,
strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(l2))(y)
if residual_dropout:
y = apply_dropout(y, dropout_rate, filterwise_dropout)
y = BatchNormalization(beta_regularizer=tf.keras.regularizers.l2(l2),
gamma_regularizer=tf.keras.regularizers.l2(l2))(y)
y = tf.keras.layers.Activation('relu')(y)
if not residual_dropout:
y = apply_dropout(y, dropout_rate, filterwise_dropout)
y = Conv2D(filters,
strides=1,
kernel_regularizer=tf.keras.regularizers.l2(l2))(y)
if not residual_dropout:
y = apply_dropout(y, dropout_rate, filterwise_dropout)
if not x.shape.is_compatible_with(y.shape):
x = Conv2D(filters,
kernel_size=1,
strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(l2))(x)
if not residual_dropout:
x = apply_dropout(x, dropout_rate, filterwise_dropout)
x = tf.keras.layers.add([x, y])
return x
def group(inputs, filters, strides, num_blocks, l2, dropout_rate,
residual_dropout, filterwise_dropout):
"""Group of residual blocks."""
x = basic_block(inputs,
filters=filters,
strides=strides,
l2=l2,
dropout_rate=dropout_rate,
residual_dropout=residual_dropout,
filterwise_dropout=filterwise_dropout)
for _ in range(num_blocks - 1):
x = basic_block(x,
filters=filters,
strides=1,
l2=l2,
dropout_rate=dropout_rate,
residual_dropout=residual_dropout,
filterwise_dropout=filterwise_dropout)
return x
def wide_resnet_dropout(input_shape, depth, width_multiplier, num_classes, l2,
dropout_rate, residual_dropout, filterwise_dropout):
"""Builds Wide ResNet.
Following Zagoruyko and Komodakis (2016), it accepts a width multiplier on the
number of filters. Using three groups of residual blocks, the network maps
spatial features of size 32x32 -> 16x16 -> 8x8.
Args:
input_shape: tf.Tensor.
depth: Total number of convolutional layers. "n" in WRN-n-k. It differs from
He et al. (2015)'s notation which uses the maximum depth of the network
counting non-conv layers like dense.
width_multiplier: Integer to multiply the number of typical filters by. "k"
in WRN-n-k.
num_classes: Number of output classes.
l2: L2 regularization coefficient.
dropout_rate: Dropout rate.
residual_dropout: Apply dropout only to the residual connections.
filterwise_dropout: Dropout whole convolutional filters instead of
individual values in the feature map.
Returns:
tf.keras.Model.
"""
if (depth - 4) % 6 != 0:
raise ValueError('depth should be 6n+4 (e.g., 16, 22, 28, 40).')
num_blocks = (depth - 4) // 6
inputs = tf.keras.layers.Input(shape=input_shape)
x = Conv2D(16,
strides=1,
kernel_regularizer=tf.keras.regularizers.l2(l2))(inputs)
if not residual_dropout:
x = apply_dropout(x, dropout_rate, filterwise_dropout)
x = group(x,
filters=16 * width_multiplier,
strides=1,
num_blocks=num_blocks,
l2=l2,
dropout_rate=dropout_rate,
residual_dropout=residual_dropout,
filterwise_dropout=filterwise_dropout)
x = group(x,
filters=32 * width_multiplier,
strides=2,
num_blocks=num_blocks,
l2=l2,
dropout_rate=dropout_rate,
residual_dropout=residual_dropout,
filterwise_dropout=filterwise_dropout)
x = group(x,
filters=64 * width_multiplier,
strides=2,
num_blocks=num_blocks,
l2=l2,
dropout_rate=dropout_rate,
residual_dropout=residual_dropout,
filterwise_dropout=filterwise_dropout)
x = BatchNormalization(beta_regularizer=tf.keras.regularizers.l2(l2),
gamma_regularizer=tf.keras.regularizers.l2(l2))(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.AveragePooling2D(pool_size=8)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(
num_classes,
kernel_initializer='he_normal',
kernel_regularizer=tf.keras.regularizers.l2(l2),
bias_regularizer=tf.keras.regularizers.l2(l2))(x)
return tf.keras.Model(inputs=inputs, outputs=x)
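# Illustrative usage (not in the original module): build a WRN-28-10 with MC
# dropout for CIFAR-sized inputs. The hyperparameter values are placeholders.
def _example_wide_resnet_dropout():
  return wide_resnet_dropout(
      input_shape=(32, 32, 3),
      depth=28,
      width_multiplier=10,
      num_classes=10,
      l2=2e-4,
      dropout_rate=0.1,
      residual_dropout=True,
      filterwise_dropout=False)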
| 36.391534 | 80 | 0.667345 |
3a45826a34c63a24eb03ff0a9a63d2d948548ea8 | 8,734 | py | Python | lib/behavior_model.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | ["MIT"] | 3 | 2021-04-28T07:19:39.000Z | 2022-03-07T09:34:19.000Z | lib/behavior_model.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | ["MIT"] | 18 | 2020-08-24T12:40:38.000Z | 2022-03-12T00:47:14.000Z | lib/behavior_model.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | ["MIT"] | 1 | 2020-10-15T10:09:20.000Z | 2020-10-15T10:09:20.000Z |
"""
@author: Haeyong Kang
"""
import torch.nn as nn
import torch
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision.ops import roi_align
from Yolo_v2_pytorch.src.utils import *
from Yolo_v2_pytorch.src.yolo_net import Yolo
from Yolo_v2_pytorch.src.yolo_tunning import YoloD
from Yolo_v2_pytorch.src.rois_utils import anchorboxes
from Yolo_v2_pytorch.src.anotherMissOh_dataset import PersonCLS, PBeHavCLS_21
from lib.person_model import person_model
import numpy as np
class behavior_model(nn.Module):
def __init__(self, num_persons, num_behaviors, opt, device):
super(behavior_model, self).__init__()
# just for reference (anchor information)
num_objects_cls = 47
num_relations = 13
num_face_cls = 20
self.person_model = person_model(num_persons, device)
self.detector = self.person_model.detector
self.num_persons = num_persons
# define behavior
self.behavior_conv = nn.Sequential(
nn.Conv2d(1024, 512, 3, 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1, inplace=True),
nn.Conv2d(512, 256, 3, 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.1, inplace=True))
self.behavior_fc = nn.Sequential(
nn.Linear(256 * 3 * 3, 1024),
nn.LeakyReLU(0.1, inplace=True),nn.Dropout(0.1),
nn.Linear(1024, num_behaviors))
self.behavior_conv1d = nn.Sequential(
nn.Conv1d(2304, 2304, 3, stride=1, padding=1),
nn.LeakyReLU(0.1, inplace=True),nn.Dropout(0.1),
nn.Conv1d(2304, 2304, 3, stride=1, padding=1),
nn.LeakyReLU(0.1, inplace=True),nn.Dropout(0.1),
)
self.num_behaviors = num_behaviors
self.img_size = opt.image_size
self.conf_threshold = opt.conf_threshold
self.nms_threshold = opt.nms_threshold
self.device=device
self.gt_boxes = True
def is_not_blank(self, s):
return bool(s and s.strip())
def label_array(self, batch, label, behavior_label):
# define label array
label_array = np.zeros((batch,
self.num_persons,
self.num_behaviors))
for idx, box in enumerate(label):
for jdx, p_box in enumerate(box):
b_label = behavior_label[idx][jdx]
if b_label :
label_array[idx, int(p_box[4]), int(b_label)] = 1
elif idx > 0:
# label smoothing
label_array[idx, :, :] = label_array[idx-1, :, :]
return label_array
def ex_global_feat(self, fmap):
box_g = torch.from_numpy(
np.array([0,0,self.fmap_size,self.fmap_size])).to(
self.device).detach()
g_box = Variable(
torch.zeros(1, 5).to(self.device)).detach()
g_box[:,1:] = box_g
g_fmap = roi_align(fmap[None],
g_box.float(),
(self.fmap_size//4,
self.fmap_size//4))
g_fmap = self.behavior_conv(g_fmap)
return g_fmap
def forward(self, image, label, behavior_label):
# person detector
logits, fmap = self.person_model(image)
batch = logits.size(0)
fmap = fmap.detach()
# fmap [b, 1024, 14, 14]
self.fmap_size = fmap.size(2)
# define behavior_tensor
behavior_tensor = Variable(
torch.zeros(batch, self.num_persons,
256 * 3 * 3).to(self.device))
# persons boxes
b_logits = []
g_features = []
b_labels = []
# testing
if not self.training:
boxes = post_processing(logits, self.img_size, PersonCLS,
self.detector.anchors,
self.conf_threshold,
self.nms_threshold)
#if self.gt_boxes:
boxes_gt = []
for idx, box in enumerate(label):
b_boxes = []
for jdx, p_box in enumerate(box):
p_box_ = p_box[0:4].tolist()
p_conf_ = [1.0]
p_cls_ = [PersonCLS[int(p_box[4])]]
p_box = np.concatenate([p_box_, p_conf_, p_cls_])
b_boxes.append(p_box)
boxes_gt.append(b_boxes)
if len(boxes) == 0 :
boxes = boxes_gt
if len(boxes) > 0 :
for idx, box in enumerate(boxes):
num_box = len(box)
g_fmap = self.ex_global_feat(fmap[idx])
behavior_tensor[idx] = g_fmap.view(-1)
if num_box == 0 :
continue
with torch.no_grad():
box_ = np.clip(
np.stack(box)[:,:4].astype('float32'),
0.0, self.img_size)
box_ = Variable(torch.from_numpy(box_)).to(
self.device).detach() / self.img_size * self.fmap_size
b_box = Variable(
torch.zeros(num_box, 5).to(self.device)).detach()
b_box[:,1:] = box_
i_fmap = roi_align(fmap[idx][None],
b_box.float(),
(self.fmap_size//4,
self.fmap_size//4))
i_fmap = self.behavior_conv(i_fmap)
for jdx, p_box in enumerate(box):
p_idx = PersonCLS.index(p_box[5])
behavior_tensor[idx, p_idx] += i_fmap[jdx].view(-1)
for idx, box in enumerate(boxes):
i_logit_list = []
                for jdx, p_box in enumerate(box):
                    p_idx = PersonCLS.index(p_box[5])
p_feat = behavior_tensor[:,p_idx][None,:,:].transpose(1,2)
p_feat = self.behavior_conv1d(p_feat)[0]
#cur_b = behavior_tensor[idx, p_idx]
i_logit = self.behavior_fc(p_feat[:,idx])
i_logit_list.append(i_logit)
b_logits.append(i_logit_list)
return boxes, b_logits
# training
#label_array = self.label_array(batch, label, behavior_label)
if len(behavior_label) > 0 and self.training:
for idx, box in enumerate(label):
num_box = len(box)
g_fmap = self.ex_global_feat(fmap[idx])
behavior_tensor[idx] = g_fmap.view(-1)
if num_box == 0 :
continue
with torch.no_grad():
box_ = np.clip(
np.stack(box)[:,:4].astype('float32')/self.img_size,
0.0, self.fmap_size) * self.fmap_size
box_ = torch.from_numpy(box_).to(self.device).detach()
b_box = Variable(
torch.zeros(num_box, 5).to(self.device)).detach()
b_box[:,1:] = torch.clamp(box_ + torch.randn(box_.shape).to(
self.device), 0, self.fmap_size)
i_fmap = roi_align(fmap[idx][None],
b_box.float(),
(self.fmap_size//4,
self.fmap_size//4))
# local feature
i_fmap = self.behavior_conv(i_fmap)
for jdx, p_box in enumerate(box):
behavior_tensor[idx, int(p_box[4])] += i_fmap[jdx].view(-1)
if len(behavior_label[idx]) > 0:
b_labels.append(behavior_label[idx])
for idx, box in enumerate(label):
for jdx, p_box in enumerate(box):
p_feat = behavior_tensor[:,int(p_box[4])][None,:,:].transpose(1,2)
p_feat = self.behavior_conv1d(p_feat)[0]
#cur_b = behavior_tensor[idx, int(p_box[4])]
i_logit = self.behavior_fc(p_feat[:,idx])
b_logits.append(i_logit)
return logits, b_logits, b_labels
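# Hedged construction sketch (added, not part of the original file): `opt` only
# needs the attributes read in `__init__` (image_size, conf_threshold,
# nms_threshold), so a simple namespace can stand in for the project's argparse
# options. The class counts and thresholds below are placeholders.
def _example_build_behavior_model(device):
    from types import SimpleNamespace
    opt = SimpleNamespace(image_size=448, conf_threshold=0.35, nms_threshold=0.5)
    return behavior_model(num_persons=20, num_behaviors=21, opt=opt, device=device)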
| 37.809524 | 87 | 0.490039 |
5e2b79134b4eff3c448b67911c205497115eceec | 624 | py | Python | configs/bc/mani_skill_point_cloud_transformer11.py | Zed-Wu/ManiSkill-Learn | 8056fe327752cd0863f8730672fe62bd85a0ec12 | ["Apache-2.0"] | null | null | null | configs/bc/mani_skill_point_cloud_transformer11.py | Zed-Wu/ManiSkill-Learn | 8056fe327752cd0863f8730672fe62bd85a0ec12 | ["Apache-2.0"] | null | null | null | configs/bc/mani_skill_point_cloud_transformer11.py | Zed-Wu/ManiSkill-Learn | 8056fe327752cd0863f8730672fe62bd85a0ec12 | ["Apache-2.0"] | null | null | null |
_base_ = ['../_base_/bc/bc_mani_skill_pointnet_transformer11.py']
env_cfg = dict(
type='gym',
env_name='OpenCabinetDrawer_1045_link_0-v0',
)
replay_cfg = dict(
type='ReplayMemory',
capacity=1000000,
)
train_mfrl_cfg = dict(
total_steps=50000,
warm_steps=0,
n_steps=0,
n_updates=500,
n_eval=50000,
n_checkpoint=5000,
init_replay_buffers='./example_mani_skill_data/OpenCabinetDrawer_1045_link_0-v0_pcd.h5',
)
eval_cfg = dict(
num=10,
num_procs=1,
use_hidden_state=False,
start_state=None,
save_traj=False, #True,
save_video=False,
use_log=False,
)
| 18.352941 | 92 | 0.69391 |
02061ffbe4d527235136d4357c1b5ac5b441348f | 768 | py | Python | bureau/personnel/migrations/0011_auto_20190120_0308.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | ["Apache-2.0"] | 1 | 2019-02-15T09:05:35.000Z | 2019-02-15T09:05:35.000Z | bureau/personnel/migrations/0011_auto_20190120_0308.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | ["Apache-2.0"] | null | null | null | bureau/personnel/migrations/0011_auto_20190120_0308.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | ["Apache-2.0"] | null | null | null |
# Generated by Django 2.0.9 on 2019-01-20 03:08
from django.db import migrations, models
import partial_date.fields
class Migration(migrations.Migration):
dependencies = [
('personnel', '0010_auto_20190112_1825'),
]
operations = [
migrations.AddField(
model_name='employee',
name='colored',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='employee',
name='confederate',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='employee',
name='date_of_birth',
field=partial_date.fields.PartialDateField(blank=True, null=True),
),
]
| 25.6 | 78 | 0.59375 |
6c8f7fe9c26d8e676549c02ce99a0e1529ae259e | 119 | py | Python | exercises/is_leap.py | spyingcyclops/excercism | ebc75561e8dc2cc510faf21fd823460db5604067 | ["Apache-2.0"] | null | null | null | exercises/is_leap.py | spyingcyclops/excercism | ebc75561e8dc2cc510faf21fd823460db5604067 | ["Apache-2.0"] | null | null | null | exercises/is_leap.py | spyingcyclops/excercism | ebc75561e8dc2cc510faf21fd823460db5604067 | ["Apache-2.0"] | null | null | null |
def leap_year(year):
    return (year % 4 == 0) and (year % 100 != 0 or year % 400 == 0)
print(leap_year(2000))
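# Additional checks added for illustration: century years are leap years only
# when they are also divisible by 400.
print(leap_year(1900))  # False
print(leap_year(2024))  # True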
| 19.833333 | 72 | 0.579832 |
d2babb8c011dfc59f70a10c488a780c5d138a7ef | 3,886 | py | Python | docs/source/conf.py | aaronspring/xskillscore | a3fe8230ecaf83e2a8c7fa3cf2c2b448600d5331 | ["Apache-2.0"] | 81 | 2018-07-03T06:58:28.000Z | 2021-12-15T17:24:45.000Z | docs/source/conf.py | aaronspring/xskillscore | a3fe8230ecaf83e2a8c7fa3cf2c2b448600d5331 | ["Apache-2.0"] | 200 | 2020-09-28T16:00:43.000Z | 2022-03-28T18:41:01.000Z | docs/source/conf.py | aaronspring/xskillscore | a3fe8230ecaf83e2a8c7fa3cf2c2b448600d5331 | ["Apache-2.0"] | 36 | 2020-10-07T22:46:20.000Z | 2021-11-25T11:34:56.000Z |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import datetime
import subprocess
import sys
import sphinx_autosummary_accessors
import xskillscore
print("python exec:", sys.executable)
print("sys.path:", sys.path)
if "conda" in sys.executable:
print("conda environment:")
subprocess.run(["conda", "list"])
else:
print("pip environment:")
subprocess.run(["pip", "list"])
print("xskillscore: %s, %s" % (xskillscore.__version__, xskillscore.__file__))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.extlinks",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"nbsphinx",
"sphinx_autosummary_accessors",
]
extlinks = {
"issue": ("https://github.com/xarray-contrib/xskillscore/issues/%s", "GH#"),
"pr": ("https://github.com/xarray-contrib/xskillscore/pull/%s", "GH#"),
}
autodoc_typehints = "none"
nbsphinx_timeout = 60
nbsphinx_execute = "always"
autosummary_generate = True
napoleon_use_param = True
napoleon_use_rtype = True
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates", sphinx_autosummary_accessors.templates_path]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "xskillscore"
copyright = "2018-%s, xskillscore Developers" % datetime.datetime.now().year
# The full version, including alpha/beta/rc tags
version = xskillscore.__version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%Y-%m-%d"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["build", "**.ipynb_checkpoints", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Can add below once we have a logo.
# html_logo = 'images/esmtools-logo.png'
# html_theme_options = {'logo_only': True, 'style_nav_header_background': '#fcfcfc'}
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = today_fmt
# Output file base name for HTML help builder.
htmlhelp_basename = "xskillscoredoc"
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"cftime": ("https://unidata.github.io/cftime", None),
"dask": ("https://docs.dask.org/en/latest", None),
"numpy": ("https://numpy.org/doc/stable", None),
"python": ("https://docs.python.org/3/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"sklearn": ("https://scikit-learn.org/stable", None),
"xarray": ("https://xarray.pydata.org/en/stable", None),
}
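# Typical invocation (added for illustration, not part of the original conf.py);
# the exact source and build directories depend on the repository layout:
#
#   sphinx-build -b html docs/source docs/build/html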
| 31.33871 | 84 | 0.707669 |
1c38df0326d8b746f40fd4e79b8d48711c60488c | 15,455 | py | Python | oscar/lib/python2.7/site-packages/dns/rdata.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/dns/rdata.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/dns/rdata.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null |
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
"""Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
"""
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
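# Illustrative sketch (added, not part of dnspython): the helpers above produce
# the presentation form of raw rdata bytes, chunking hex/base64 output and
# escaping quoted strings.
def _example_presentation_helpers():
    assert _hexify(b'\x01\x02\x03') == '010203'
    assert _escapify(b'a"b\\c') == 'a\\"b\\\\c'
    # 48 zero bytes encode to 64 base64 characters, split into two 32-char chunks.
    return _base64ify(b'\x00' * 48)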
def _truncate_bitmap(what):
"""Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
"""
for i in xrange(len(what) - 1, -1, -1):
if what[i] != 0:
return what[0: i + 1]
return what[0:1]
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
@rtype: string
"""
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
"""Convert an rdata to wire format.
@rtype: string
"""
raise NotImplementedError
def to_digestable(self, origin=None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
pass
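# Illustrative note (not part of the original module): for an RRSIG rdata that
# covers NS records, covers() returns dns.rdatatype.NS, so
#   extended_rdatatype() == (dns.rdatatype.NS << 16) | dns.rdatatype.RRSIG
#                        == (2 << 16) | 46 == 131118
# whereas a plain A rdata (covers() == dns.rdatatype.NONE) yields just its
# ordinary type value, 1.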
class GenericRdata(Rdata):
"""Generate Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
"""
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
token = tok.get()
        if not token.is_identifier() or token.value != r'\#':
raise dns.exception.SyntaxError(
r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value.encode())
hex = b''.join(chunks)
data = binascii.unhexlify(hex)
if len(data) != length:
raise dns.exception.SyntaxError(
'generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
def to_wire(self, file, compress=None, origin=None):
file.write(self.data)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
return cls(rdclass, rdtype, wire[current: current + rdlen])
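# Illustrative example (not part of the original module): GenericRdata speaks
# the RFC 3597 "unknown RRs" text format, '\# <length> <hex data>'.  Here
# 65280 is an arbitrary private-use rdata type carrying four octets of data.
#
#   >>> tok = dns.tokenizer.Tokenizer(r'\# 4 0a000001')
#   >>> rd = GenericRdata.from_text(dns.rdataclass.IN, 65280, tok)
#   >>> rd.to_text()
#   '\\# 4 0a000001'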
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
If I{tok} is a string, then a tokenizer is created and the string
is used as its input.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer or input text
@type tok: dns.tokenizer.Tokenizer or string
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance"""
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
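# Illustrative usage (not part of the original module): building an IN A rdata
# from presentation format.  from_text() imports dns.rdtypes.IN.A behind the
# scenes and delegates to its from_text() classmethod.
#
#   >>> import dns.rdata, dns.rdataclass, dns.rdatatype
#   >>> rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '10.0.0.1')
#   >>> rd.to_text()
#   '10.0.0.1'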
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance"""
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
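# Illustrative round trip (not part of the original module): the wire form of
# the A rdata above is just its four address octets, and parsing it back gives
# an equal rdata.
#
#   >>> wire = rd.to_digestable()
#   >>> dns.rdata.from_wire(dns.rdataclass.IN, dns.rdatatype.A,
#   ...                     wire, 0, len(wire)) == rd
#   True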
| 33.671024
| 79
| 0.604465
|
d492c55abb642be59b7e5be644311b901c89f3a6
| 554
|
py
|
Python
|
recipes/migrations/0015_auto_20210325_1148.py
|
PavelYasukevich/foodgram-project
|
d03af25d8fd0cbf1eec03467a95620b89993c9fd
|
[
"MIT"
] | null | null | null |
recipes/migrations/0015_auto_20210325_1148.py
|
PavelYasukevich/foodgram-project
|
d03af25d8fd0cbf1eec03467a95620b89993c9fd
|
[
"MIT"
] | null | null | null |
recipes/migrations/0015_auto_20210325_1148.py
|
PavelYasukevich/foodgram-project
|
d03af25d8fd0cbf1eec03467a95620b89993c9fd
|
[
"MIT"
] | 1
|
2021-03-27T16:34:07.000Z
|
2021-03-27T16:34:07.000Z
|
# Generated by Django 3.1.7 on 2021-03-25 11:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('recipes', '0014_auto_20210324_1332'),
]
operations = [
migrations.AlterField(
model_name='amount',
name='ingredient',
field=models.ForeignKey(help_text='Ингредиент', on_delete=django.db.models.deletion.CASCADE, related_name='amounts', to='recipes.ingredient', verbose_name='Ингредиент'),
),
]
| 27.7
| 181
| 0.666065
|
7233eff429fd7a098a04439f7cd1f798d63155ce
| 6,234
|
py
|
Python
|
tech_project/lib/python2.7/site-packages/parler/tests/test_model_construction.py
|
priyamshah112/Project-Descripton-Blog
|
8e01016c6be79776c4f5ca75563fa3daa839e39e
|
[
"MIT"
] | null | null | null |
tech_project/lib/python2.7/site-packages/parler/tests/test_model_construction.py
|
priyamshah112/Project-Descripton-Blog
|
8e01016c6be79776c4f5ca75563fa3daa839e39e
|
[
"MIT"
] | null | null | null |
tech_project/lib/python2.7/site-packages/parler/tests/test_model_construction.py
|
priyamshah112/Project-Descripton-Blog
|
8e01016c6be79776c4f5ca75563fa3daa839e39e
|
[
"MIT"
] | null | null | null |
from functools import wraps
import django
from django.db import models
from django.db.models import Manager
from django.utils import six
from parler.models import TranslatableModel
from parler.models import TranslatedFields
try:
from unittest import expectedFailure, skipIf
except ImportError:
# python<2.7
from django.utils.unittest import expectedFailure, skipIf
from .utils import AppTestCase
from .testapp.models import ManualModel, ManualModelTranslations, SimpleModel, Level1, Level2, ProxyBase, ProxyModel, DoubleModel, RegularModel, CharModel
def clear_app_registry(func):
"""
Decorator for unit tests that corrupt the global app registry, and therefore need a reset.
"""
@wraps(func)
def _clearing_dec(*args, **kwargs):
from django.apps import apps
try:
func(*args, **kwargs)
finally:
            # TODO: this doesn't yet work.
apps.clear_cache()
if django.VERSION >= (1, 10):
return _clearing_dec
else:
return func
class ModelConstructionTests(AppTestCase):
"""
Test model construction
"""
def test_manual_model(self):
"""
Test the metaclass of the model.
"""
# Test whether the link has taken place
self.assertIsInstance(ManualModel().translations, Manager) # RelatedManager class
self.assertIs(ManualModel().translations.model, ManualModelTranslations)
self.assertIs(ManualModel._parler_meta.root_model, ManualModelTranslations)
def test_simple_model(self):
"""
Test the simple model syntax.
"""
self.assertIs(SimpleModel().translations.model, SimpleModel._parler_meta.root_model)
def test_inherited_model(self):
"""
Test the inherited model syntax.
"""
# First level has 1 ParlerMeta object
self.assertEqual(Level1._parler_meta.root.rel_name, 'l1_translations')
self.assertEqual(Level1._parler_meta.root.model.__name__, 'Level1Translation')
self.assertEqual(len(Level1._parler_meta), 1)
# Second level has 2 ParlerMeta objects.
self.assertEqual(len(Level2._parler_meta), 2)
self.assertEqual(Level2._parler_meta[0].rel_name, 'l1_translations')
self.assertEqual(Level2._parler_meta[1].rel_name, 'l2_translations')
self.assertEqual(Level2._parler_meta[1].model.__name__, 'Level2Translation')
# Level 2 root attributes should point to the top-level object (Level1)
self.assertEqual(Level2._parler_meta.root_model.__name__, 'Level1Translation')
self.assertEqual(Level2._parler_meta.root_rel_name, 'l1_translations')
self.assertEqual(Level2._parler_meta.root, Level1._parler_meta.root)
def test_proxy_model(self):
"""
Test whether proxy models can get new translations
"""
# First level has 1 ParlerMeta object
self.assertEqual(ProxyBase._parler_meta.root.rel_name, 'base_translations')
self.assertEqual(len(ProxyBase._parler_meta), 1)
# Second level has 2 ParlerMeta objects
self.assertEqual(len(ProxyModel._parler_meta), 2)
self.assertEqual(ProxyModel._parler_meta[0].rel_name, 'base_translations')
self.assertEqual(ProxyModel._parler_meta[1].rel_name, 'proxy_translations')
self.assertEqual(ProxyModel._parler_meta[0].model.__name__, 'ProxyBaseTranslation')
self.assertEqual(ProxyModel._parler_meta[1].model.__name__, 'ProxyModelTranslation')
# Second inheritance level attributes should point to the top-level object (ProxyBase)
self.assertEqual(ProxyModel._parler_meta.root_model.__name__, 'ProxyBaseTranslation')
self.assertEqual(ProxyModel._parler_meta.root_rel_name, 'base_translations')
self.assertEqual(ProxyModel._parler_meta.root, ProxyBase._parler_meta.root)
def test_double_translation_table(self):
"""
Test how assigning two translation tables works.
"""
self.assertIsNone(DoubleModel._parler_meta.base) # Should call .add_meta() instead of overwriting/chaining it.
self.assertEqual(len(DoubleModel._parler_meta), 2)
self.assertEqual(DoubleModel._parler_meta[0].rel_name, "base_translations")
self.assertEqual(DoubleModel._parler_meta[1].rel_name, "more_translations")
@skipIf(django.VERSION >= (1, 10), "This breaks the Django 1.10 app registry")
@clear_app_registry
def test_overlapping_proxy_model(self):
"""
Test the simple model syntax.
"""
from parler.tests.testapp.invalid_models import RegularModelProxy
# Create an object without translations
RegularModel.objects.create(id=98, original_field='untranslated')
self.assertEqual(RegularModelProxy.objects.count(), 1)
# Refetch from db, should raise an error.
self.assertRaises(RuntimeError, lambda: RegularModelProxy.objects.all()[0])
def test_model_with_different_pks(self):
"""
Test that TranslatableModels works with different types of pks
"""
self.assertIsInstance(SimpleModel.objects.create(tr_title='Test'), SimpleModel)
self.assertIsInstance(CharModel.objects.create(pk='test', tr_title='Test'), CharModel)
@skipIf(django.VERSION >= (1, 10), "This breaks the Django 1.10 app registry")
@clear_app_registry
@expectedFailure
def test_model_metaclass_create_order(self):
"""
For some reason, having a custom ModelBase metaclass breaks
the ``pk`` field detection when ``TranslatableModel`` is the first model in an inheritance chain.
Using ``Book(Product, TranslatableModel)`` does work.
"""
from django.db.models.base import ModelBase
class FooModelBase(ModelBase):
pass
class FooModel(six.with_metaclass(FooModelBase, models.Model)):
class Meta:
abstract = True
class Product(FooModel):
pass
class Book(TranslatableModel, Product):
translations = TranslatedFields(
slug=models.SlugField(blank=False, default='', max_length=128)
)
self.assertTrue(Book._meta.pk)
| 39.455696
| 154
| 0.695059
|
c1be61c35eaadb72e6ba4c41a2563f964533a390
| 601
|
py
|
Python
|
setup.py
|
bread22/GetSubtitles
|
d2e0895a0bd6956698fc4afa7f29e5ba285b2d3e
|
[
"MIT"
] | null | null | null |
setup.py
|
bread22/GetSubtitles
|
d2e0895a0bd6956698fc4afa7f29e5ba285b2d3e
|
[
"MIT"
] | null | null | null |
setup.py
|
bread22/GetSubtitles
|
d2e0895a0bd6956698fc4afa7f29e5ba285b2d3e
|
[
"MIT"
] | null | null | null |
# coding: utf8
from setuptools import setup, find_packages
from getsub.__version__ import __version__
setup(
author="gyh1621",
author_email="guoyh01@gmail.com",
description="download subtitles easily",
license="MIT",
name="getsub",
version=__version__,
packages=find_packages(),
    install_requires=[  # dependency list
"requests>=2.0",
"beautifulsoup4>=4.4.0",
"guessit==3.1.0",
"rarfile>=3.0",
"pylzma>=0.5.0",
],
entry_points={"console_scripts": ["getsub = getsub.main: main"]},
zip_safe=False,
long_description=__doc__,
)
| 25.041667
| 69
| 0.635607
|
5a5a78b9928f3e5d19c948db67c9b0302f24bb5e
| 6,310
|
py
|
Python
|
py/StlToGif.py
|
hannesknutsson/3D-STL-Boii
|
d379a21c1bf35fcb67a8474a320a0e00006d1435
|
[
"MIT"
] | null | null | null |
py/StlToGif.py
|
hannesknutsson/3D-STL-Boii
|
d379a21c1bf35fcb67a8474a320a0e00006d1435
|
[
"MIT"
] | null | null | null |
py/StlToGif.py
|
hannesknutsson/3D-STL-Boii
|
d379a21c1bf35fcb67a8474a320a0e00006d1435
|
[
"MIT"
] | null | null | null |
# -*- coding: cp1252 -*-
################################
# #
# Created by: Daniel Aguirre #
# Date: 2019/05/13 #
# #
# Modified by: Hannes Knutsson #
# Date: 2020/05/09 #
# #
################################
# Imports
import os, re, math, sys
import getopt, shutil
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from stl import mesh
import imageio
# USERs VARIABLES
# General parameters
inputfile = None
outputfile = None
# GIFs parameters
frames = 25
duration_frame = 0.1
# Visualization parameters
init_angle = 0
elevation = 0
rotation_axises = [1.0, 0.0, 0.0]
rotation_angle = 0
x_offset = 0
y_offset = 0
z_offset = 0
path = "frames" + os.sep
# Checks that input parameters are correct
def initialize():
global frames, duration_frame, outputfile
if (frames<=0):
print("Setting default of frames to 25")
frames = 25
if (duration_frame<=0):
print("Setting default duration to 0.1")
duration_frame = 0.1
if inputfile == None:
print("Error: Inputfile not specified")
sys.exit(2)
if outputfile == None:
outputfile = "output.gif"
# Loads the STL file
def loadSTL():
global stl_mesh
stl_mesh = mesh.Mesh.from_file(inputfile)
# Rotate the STL
def rotateSTL():
stl_mesh.rotate(rotation_axises, math.radians(rotation_angle))
# Creates frames for the gif
def createFrames():
# Center the STL
x_min = stl_mesh.vectors[:,:,0].min()
x_max = stl_mesh.vectors[:,:,0].max()
y_min = stl_mesh.vectors[:,:,1].min()
y_max = stl_mesh.vectors[:,:,1].max()
z_min = stl_mesh.vectors[:,:,2].min()
z_max = stl_mesh.vectors[:,:,2].max()
x_center_offset = (x_max + x_min)/2.0
y_center_offset = (y_max + y_min)/2.0
z_center_offset = (z_max + z_min)/2.0
stl_mesh.vectors[:,:,0] = stl_mesh.vectors[:,:,0] - x_center_offset - x_offset
stl_mesh.vectors[:,:,1] = stl_mesh.vectors[:,:,1] - y_center_offset - y_offset
stl_mesh.vectors[:,:,2] = stl_mesh.vectors[:,:,2] - z_center_offset - z_offset
# Create a new plot
figure = plt.figure()
axes = mplot3d.Axes3D(figure)
axes.set_facecolor("#36393F")
# Add STL vectors to the plot
axes.add_collection3d(mplot3d.art3d.Poly3DCollection(stl_mesh.vectors,color="blue"))
axes.add_collection3d(mplot3d.art3d.Line3DCollection(stl_mesh.vectors,color="black",linewidth=0.5))
axes.view_init(elev=35., azim=-45)
# Auto scale to the mesh size
scale = stl_mesh.points.flatten('K')
axes.auto_scale_xyz(scale, scale, scale)
# Deactivate Axes
plt.axis('off')
# Delete folder containing frames from previous runs
if os.path.exists(path):
shutil.rmtree(path)
# Create a folder to contain the frames
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
for i in range(frames):
# Rotate the view
axes.view_init(elev=elevation, azim=init_angle + 360/frames*i)
# Save frame
frame_i = "frame_" + str(i)
plt.savefig(path + frame_i + ".png")
# Loads frames and creates gif
def createGif():
images = []
files = os.listdir(path)
    ordered_files = sorted(files, key=lambda x: (int(re.sub(r'\D', '', x)), x))
for file_name in ordered_files:
if file_name.endswith('.png'):
file_path = os.path.join(path, file_name)
images.append(imageio.imread(file_path))
imageio.mimsave(outputfile, images, duration = duration_frame)
# Separate the string into a list of floats
def getList(strlist,separator=","):
try:
valueList = list(map(float,strlist.split(separator)))
except:
print("Error: Input the values only separated by a comma (,) . I.e: 1,0,0")
sys.exit(2)
return list(map(float,strlist.split(separator)))
# MAIN
def main(argv):
# Main variables
global inputfile, outputfile, path
# GIFs parameters
global frames, duration_frame
# Visualization parameters
global init_angle, elevation, rotation_axises, rotation_angle, x_offset, y_offset, z_offset
try:
        opts, args = getopt.getopt(argv, "hi:o:p:n:t:a:e:d:r:", ["help", "ifile=", "ofile=", "nframes=", "duration=", "initangle=", "elevation=", "rotation_angle=", "rotation_axis=", "offset=", "path="])
except getopt.GetoptError:
print('Error')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print("Usage: GCode_to_Robtargets [-h | -i <inputfile> -o <outputfile>] ")
print('Options and arguments:')
print("-h : Print this help message and exit")
print("-i arg : Input the file to be get the frames for the gif (also --ifile)")
print("-o arg : Output filename of the gif (also --ofile)")
print("-p arg : Folder in where the frames will be saved (also --path). Default: frames/")
print("-n arg : Amount of frames to generate (also --nframes). Default: 25")
print("-t arg : Duration of display of each frame (also --duration). Default: 0.1")
print("-a arg : Starting angle of the first frame (also --initangle). Default: 0")
print("-e arg : Elevation of the STL (also --elevation). Default: 0")
print("-d arg : Degrees to rotate the stl (also --rotation_angle). Default: 0")
print("-r arg : Specify the rotation axis of the STL (also --rotation_axis). Default: [1,0,0]")
print("--offset arg : Displaces the center from which the STL will revolve. Default: [0,0,0]")
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
elif opt in ("-p", "--path"):
path = arg
elif opt in ("-n", "--nframes"):
frames = int(arg)
elif opt in ("-t", "--duration"):
duration_frame = float(arg)
elif opt in ("-a", "--initangle"):
init_angle = float(arg)
elif opt in ("-e", "--elevation"):
elevation = float(arg)
elif opt in ("-d", "--rotation_angle"):
rotation_angle = float(arg)
elif opt in ("-r", "--rotation_axis"):
rotation_axises = getList(arg)
elif opt in ("--offset"):
offsets = getList(arg)
x_offset = offsets[0]
y_offset = offsets[1]
z_offset = offsets[2]
initialize()
print("Loading STL")
loadSTL()
rotateSTL()
print("Creating frames")
createFrames()
print("Creating gif")
createGif()
print("Finished")
if __name__ == "__main__":
print("Started")
main(sys.argv[1:])
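# Example invocation (illustrative only; the file names are hypothetical):
#
#   python StlToGif.py -i model.stl -o model.gif -n 36 -t 0.08 -e 30 \
#       -d 90 -r 1,0,0 --offset 0,0,5
#
# This renders 36 frames of model.stl, each displayed for 0.08 s, viewed from
# an elevation of 30 degrees, after rotating the mesh 90 degrees about the
# x axis and shifting the pivot 5 units along z.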
| 26.182573
| 187
| 0.647385
|
cfbae3c8fe2b21eadeb2b67a08b54c441d45ddd0
| 9,419
|
py
|
Python
|
rfsoc_qpsk/qpsk_overlay.py
|
baileyji/rfsoc_qpsk
|
ecca0b3bd1f17c45cc21b0df50b04a70e00c8ee5
|
[
"BSD-3-Clause"
] | null | null | null |
rfsoc_qpsk/qpsk_overlay.py
|
baileyji/rfsoc_qpsk
|
ecca0b3bd1f17c45cc21b0df50b04a70e00c8ee5
|
[
"BSD-3-Clause"
] | null | null | null |
rfsoc_qpsk/qpsk_overlay.py
|
baileyji/rfsoc_qpsk
|
ecca0b3bd1f17c45cc21b0df50b04a70e00c8ee5
|
[
"BSD-3-Clause"
] | null | null | null |
from pynq import Overlay
from pynq import Xlnk
import xrfclk
import xrfdc
import os
import numpy as np
import ipywidgets as ipw
from rfsoc_qpsk import dma_timer, sdr_plots, qpsk_rx, qpsk_tx
class TimerRegistry():
"""Helper class to track active timer threads.
This can be used to help safely stop any orphaned DMA timers.
Orphans appear when a cell is re-run while its DMA timer is active.
"""
def __init__(self):
self.registry = dict()
def register_timers(self, key, timers):
"""Register a list of timers with the registry.
This will safely stop any timers that were previously registered with
the same key.
key: String name for this timer group
timers: List of DmaTimer objects
"""
if key in self.registry:
[timer.stop() for timer in self.registry[key]]
self.registry[key] = timers
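# Illustrative sketch (not part of the original file): a notebook cell that is
# re-run can simply re-register its DMA timers under the same key, and any
# timers left over from the previous run are stopped first.  The plot object
# and data callback below are hypothetical.
#
#   registry = TimerRegistry()
#   timer = dma_timer.DmaTimer(plot.add_data, get_time_data, 0.05)
#   registry.register_timers('rx-time-plot', [timer])  # stops older 'rx-time-plot' timers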
class QpskOverlay(Overlay):
"""Overlay subclass for rfsoc-qpsk.
Performs initialisation (including RF components) and exposes them with
more friendly names in a flatter hierarchy. Less typing for everyone.
"""
def __init__(self, bitfile_name=None, init_rf_clks=True, presentation_mode=False, **kwargs):
"""Construct a new QpskOverlay
bitfile_name: Optional. If left None, the 'rfsoc_qpsk.bit' bundled with this
rfsoc-qpsk package will be used.
init_rf_clks: If true (default), the reference clocks are configured
for all tiles. If the clocks are already configured, set
to false for faster execution.
presentation_mode: Flag to enable a dark theme with thick lines and
bigger font
"""
# Generate default bitfile name
if bitfile_name is None:
this_dir = os.path.dirname(__file__)
bitfile_name = os.path.join(this_dir, 'bitstream', 'rfsoc_qpsk.bit')
# Set optional theming for presentation mode
if presentation_mode:
from IPython.display import display, HTML
import plotly.io as pio
# Apply plotly theming
pio.templates.default = 'plotly_dark+presentation'
# Force dark style for ipywidget tab background
display(HTML("""
<style>
.jupyter-widgets.widget-tab > .widget-tab-contents {
background: inherit !important;
}
</style>
"""))
# Set FPD and LPD interface widths
from pynq import MMIO
fpd_cfg = MMIO(0xfd615000, 4)
fpd_cfg.write(0, 0x00000A00)
lpd_cfg = MMIO(0xff419000, 4)
lpd_cfg.write(0, 0x00000000)
# Create Overlay
super().__init__(bitfile_name, **kwargs)
        # Extract in-use dataconverter objects with friendly names
self.rf = self.usp_rf_data_converter_0
self.dac_tile = self.rf.dac_tiles[1]
self.dac_block = self.dac_tile.blocks[2]
self.adc_tile = self.rf.adc_tiles[0]
self.adc_block = self.adc_tile.blocks[0]
# Start up LMX clock
if init_rf_clks:
xrfclk.set_all_ref_clks(409.6)
# Set sane DAC defaults
self.dac_tile.DynamicPLLConfig(1, 409.6, 1228.8)
self.dac_block.NyquistZone = 2
self.dac_block.MixerSettings = {
'CoarseMixFreq': xrfdc.COARSE_MIX_BYPASS,
'EventSource': xrfdc.EVNT_SRC_IMMEDIATE,
'FineMixerScale': xrfdc.MIXER_SCALE_1P0,
'Freq': 1000,
'MixerMode': xrfdc.MIXER_MODE_C2R,
'MixerType': xrfdc.MIXER_TYPE_FINE,
'PhaseOffset': 0.0
}
self.dac_block.UpdateEvent(xrfdc.EVENT_MIXER)
self.dac_tile.SetupFIFO(True)
# Set sane ADC defaults
self.adc_tile.DynamicPLLConfig(1, 409.6, 1228.8)
self.adc_block.NyquistZone = 2
self.adc_block.MixerSettings = {
'CoarseMixFreq': xrfdc.COARSE_MIX_BYPASS,
'EventSource': xrfdc.EVNT_SRC_TILE,
'FineMixerScale': xrfdc.MIXER_SCALE_1P0,
'Freq': 1000,
'MixerMode': xrfdc.MIXER_MODE_R2C,
'MixerType': xrfdc.MIXER_TYPE_FINE,
'PhaseOffset': 0.0
}
self.adc_block.UpdateEvent(xrfdc.EVENT_MIXER)
self.adc_tile.SetupFIFO(True)
# Touch RX and TX drivers for strict evaluation
self.qpsk_tx.qpsk_tx.enable=1
self.qpsk_rx.qpsk_rx_dec.enable=1
self.qpsk_rx.qpsk_rx_csync.enable=1
self.qpsk_rx.qpsk_rx_rrc.enable=1
self.qpsk_rx.qpsk_rx_tsync.enable=1
self.timers = TimerRegistry()
def plot_group(self, group_name, domains, get_time_data, fs, get_freq_data=None, get_const_data=None):
"""Create a group of plots for a given set of data generators.
group_name: String name for plot group (used to register timers with
the TimerRegistry)
domains: List of plot types to generate. Select from:
['time','time-binary','frequency','constellation'].
fs: Sampling frequency. Used for time axis scaling
get_time_data: Callback function that returns a buffer of time domain
samples
get_freq_data: Optional callback that returns a buffer of frequency
domain samples. When not specified, a software FFT will
be performed on the get_time_data callback instead.
get_const_data: Optional callback that returns a buffer of time-domain
data for any constellation plots. When not specified,
the get_time_data callback will be used.
"""
plots = []
def many(f, n=4):
return np.concatenate([f() for _ in range(n)])
for domain in domains:
if domain=='frequency':
# HW accelerated FFT
if get_freq_data != None:
f_plot = sdr_plots.HWFreqPlot(
[get_freq_data() for _ in range(4)],
fs, animation_period=100, w=700)
f_dt = dma_timer.DmaTimer(f_plot.add_frame, get_freq_data, 0.3)
# SW FFT
else:
f_plot = sdr_plots.IQFreqPlot(
[many(get_time_data) for _ in range(4)],
fs, x_range=(-2000,2000), animation_period=100, w=700)
f_dt = dma_timer.DmaTimer(f_plot.add_frame, lambda:many(get_time_data), 0.3)
plots.append(dict(title='Frequency domain', plot=f_plot, control=f_dt))
elif domain=='time' or domain=='time-binary':
if domain=='time-binary':
iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700, scaling=1, ylabel='Symbol value')
iq_plot.set_line_mode(lines=True, markers=True, shape='hvh')
iq_plot.get_widget().layout.yaxis.dtick=1
else:
iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700)
iq_plot.set_line_mode(markers=False)
iq_dt = dma_timer.DmaTimer(iq_plot.add_data, get_time_data, 0.05)
plots.append(dict(title='Time domain', plot=iq_plot, control=iq_dt))
elif domain=='constellation':
c_plot = sdr_plots.IQConstellationPlot(many(get_const_data or get_time_data, n=10), h=550, fade=True)
c_dt = dma_timer.DmaTimer(c_plot.add_data, get_const_data or get_time_data, 0.05)
plots.append(dict(title='Constellation', plot=c_plot, control=c_dt,
layout=ipw.Layout(width='550px', margin='auto')))
self.timers.register_timers(group_name, list(map(lambda tab: tab['control'], plots)))
QpskOverlay.tab_plots(plots)
@staticmethod
def tab_plots(tabs):
"""Helper function to generate a Tab widget given a list of definitions.
tabs: A list of dicts describing a single tab. Each element needs three
keys: 'plot' with a SdrPlot object, 'control' with a DmaTimer
object, and 'title' with a string.
"""
widgets = []
titles = []
for tab in tabs:
widgets.append(ipw.VBox([
tab['plot'].get_widget(),tab['control'].get_widget()
],layout=tab.get('layout',ipw.Layout())))
titles.append(tab['title'])
tab_widget = ipw.Tab(widgets)
for i, title in enumerate(titles):
tab_widget.set_title(i, title)
QpskOverlay._tab_load_resizer_callback(tab_widget)
@staticmethod
def _tab_load_resizer_callback(tabs):
"""Helper function to handle relative widths for plots in hidden tabs"""
display(tabs)
out = ipw.Output()
display(out)
@out.capture()
def callback(change):
plot = tabs.children[change['new']].children[0]
plot.layout.autosize = False
plot.layout.autosize = True
tabs.observe(callback, names='selected_index')
Overlay = QpskOverlay
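# Illustrative usage (not part of the original file): build time- and
# frequency-domain tabs for a stream of IQ samples.  The data callback is
# hypothetical; fs is just the sampling frequency used for axis scaling.
#
#   ol = QpskOverlay()
#   get_samples = ...   # hypothetical callback returning a buffer of IQ samples
#   ol.plot_group('rx-decimated', ['time', 'frequency'], get_samples, fs=1024e3)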
| 38.288618
| 117
| 0.591039
|
e3c863b296bde7d5a334a6e6c09ebd36551abeaf
| 1,425
|
py
|
Python
|
alipay/aop/api/domain/AlipayOpenServicemarketCommodityQueryModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayOpenServicemarketCommodityQueryModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayOpenServicemarketCommodityQueryModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenServicemarketCommodityQueryModel(object):
def __init__(self):
self._commodity_id = None
self._user_id = None
@property
def commodity_id(self):
return self._commodity_id
@commodity_id.setter
def commodity_id(self, value):
self._commodity_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.commodity_id:
if hasattr(self.commodity_id, 'to_alipay_dict'):
params['commodity_id'] = self.commodity_id.to_alipay_dict()
else:
params['commodity_id'] = self.commodity_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenServicemarketCommodityQueryModel()
if 'commodity_id' in d:
o.commodity_id = d['commodity_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
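# Illustrative round trip (not part of the generated file); the ID values are
# made up:
#
#   m = AlipayOpenServicemarketCommodityQueryModel()
#   m.commodity_id = '2088000000000001'
#   m.user_id = '2088000000000002'
#   d = m.to_alipay_dict()   # {'commodity_id': '2088...', 'user_id': '2088...'}
#   m2 = AlipayOpenServicemarketCommodityQueryModel.from_alipay_dict(d)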
| 25.446429
| 75
| 0.6
|
db63dc1d50d1f099940eb8b5fe141bef576cf8ba
| 10,332
|
py
|
Python
|
h2o-py/h2o/estimators/kmeans.py
|
Judahh/h2o-3
|
e3d1770b2c9b7a66af6fe1eee0c472879771a9a9
|
[
"Apache-2.0"
] | 1
|
2017-03-28T09:10:12.000Z
|
2017-03-28T09:10:12.000Z
|
h2o-py/h2o/estimators/kmeans.py
|
Judahh/h2o-3
|
e3d1770b2c9b7a66af6fe1eee0c472879771a9a9
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/h2o/estimators/kmeans.py
|
Judahh/h2o-3
|
e3d1770b2c9b7a66af6fe1eee0c472879771a9a9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OKMeansEstimator(H2OEstimator):
"""
K-means
Performs k-means clustering on an H2O dataset.
"""
algo = "kmeans"
def __init__(self, **kwargs):
super(H2OKMeansEstimator, self).__init__()
self._parms = {}
names_list = {"model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_predictions",
"keep_cross_validation_fold_assignment", "fold_assignment", "fold_column", "ignored_columns",
"ignore_const_cols", "score_each_iteration", "k", "estimate_k", "user_points", "max_iterations",
"standardize", "seed", "init", "max_runtime_secs", "categorical_encoding"}
if "Lambda" in kwargs: kwargs["lambda_"] = kwargs.pop("Lambda")
for pname, pvalue in kwargs.items():
if pname == 'model_id':
self._id = pvalue
self._parms["model_id"] = pvalue
elif pname in names_list:
# Using setattr(...) will invoke type-checking of the arguments
setattr(self, pname, pvalue)
else:
raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue))
@property
def training_frame(self):
"""
Id of the training data frame (Not required, to allow initial validation of model parameters).
Type: ``H2OFrame``.
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
assert_is_type(training_frame, None, H2OFrame)
self._parms["training_frame"] = training_frame
@property
def validation_frame(self):
"""
Id of the validation data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("validation_frame")
@validation_frame.setter
def validation_frame(self, validation_frame):
assert_is_type(validation_frame, None, H2OFrame)
self._parms["validation_frame"] = validation_frame
@property
def nfolds(self):
"""
Number of folds for N-fold cross-validation (0 to disable or >= 2).
Type: ``int`` (default: ``0``).
"""
return self._parms.get("nfolds")
@nfolds.setter
def nfolds(self, nfolds):
assert_is_type(nfolds, None, int)
self._parms["nfolds"] = nfolds
@property
def keep_cross_validation_predictions(self):
"""
Whether to keep the predictions of the cross-validation models.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("keep_cross_validation_predictions")
@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, keep_cross_validation_predictions):
assert_is_type(keep_cross_validation_predictions, None, bool)
self._parms["keep_cross_validation_predictions"] = keep_cross_validation_predictions
@property
def keep_cross_validation_fold_assignment(self):
"""
Whether to keep the cross-validation fold assignment.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("keep_cross_validation_fold_assignment")
@keep_cross_validation_fold_assignment.setter
def keep_cross_validation_fold_assignment(self, keep_cross_validation_fold_assignment):
assert_is_type(keep_cross_validation_fold_assignment, None, bool)
self._parms["keep_cross_validation_fold_assignment"] = keep_cross_validation_fold_assignment
@property
def fold_assignment(self):
"""
Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify
the folds based on the response variable, for classification problems.
One of: ``"auto"``, ``"random"``, ``"modulo"``, ``"stratified"`` (default: ``"auto"``).
"""
return self._parms.get("fold_assignment")
@fold_assignment.setter
def fold_assignment(self, fold_assignment):
assert_is_type(fold_assignment, None, Enum("auto", "random", "modulo", "stratified"))
self._parms["fold_assignment"] = fold_assignment
@property
def fold_column(self):
"""
Column with cross-validation fold index assignment per observation.
Type: ``str``.
"""
return self._parms.get("fold_column")
@fold_column.setter
def fold_column(self, fold_column):
assert_is_type(fold_column, None, str)
self._parms["fold_column"] = fold_column
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def k(self):
"""
The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it will find
up to k centroids.
Type: ``int`` (default: ``1``).
"""
return self._parms.get("k")
@k.setter
def k(self, k):
assert_is_type(k, None, int)
self._parms["k"] = k
@property
def estimate_k(self):
"""
Whether to estimate the number of clusters (<=k) iteratively and deterministically.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("estimate_k")
@estimate_k.setter
def estimate_k(self, estimate_k):
assert_is_type(estimate_k, None, bool)
self._parms["estimate_k"] = estimate_k
@property
def user_points(self):
"""
This option allows you to specify a dataframe, where each row represents an initial cluster center. The user-
specified points must have the same number of columns as the training observations. The number of rows must
equal the number of clusters
Type: ``H2OFrame``.
"""
return self._parms.get("user_points")
@user_points.setter
def user_points(self, user_points):
assert_is_type(user_points, None, H2OFrame)
self._parms["user_points"] = user_points
@property
def max_iterations(self):
"""
Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds iteration)
Type: ``int`` (default: ``10``).
"""
return self._parms.get("max_iterations")
@max_iterations.setter
def max_iterations(self, max_iterations):
assert_is_type(max_iterations, None, int)
self._parms["max_iterations"] = max_iterations
@property
def standardize(self):
"""
Standardize columns before computing distances
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("standardize")
@standardize.setter
def standardize(self, standardize):
assert_is_type(standardize, None, bool)
self._parms["standardize"] = standardize
@property
def seed(self):
"""
RNG Seed
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def init(self):
"""
Initialization mode
One of: ``"random"``, ``"plus_plus"``, ``"furthest"``, ``"user"`` (default: ``"furthest"``).
"""
return self._parms.get("init")
@init.setter
def init(self, init):
assert_is_type(init, None, Enum("random", "plus_plus", "furthest", "user"))
self._parms["init"] = init
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def categorical_encoding(self):
"""
Encoding scheme for categorical features
One of: ``"auto"``, ``"enum"``, ``"one_hot_internal"``, ``"one_hot_explicit"``, ``"binary"``, ``"eigen"``,
``"label_encoder"``, ``"sort_by_response"``, ``"enum_limited"`` (default: ``"auto"``).
"""
return self._parms.get("categorical_encoding")
@categorical_encoding.setter
def categorical_encoding(self, categorical_encoding):
assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"))
self._parms["categorical_encoding"] = categorical_encoding
| 31.120482
| 184
| 0.636179
|
dedac9c0b93f92b251a2c0ad2e57ce227b45bbe4
| 7,991
|
py
|
Python
|
depletion_scheme.py
|
rabieomar92/pynuctran
|
6a86c578992ee3f1d6079cbbefaa7bcdc14646e8
|
[
"MIT"
] | null | null | null |
depletion_scheme.py
|
rabieomar92/pynuctran
|
6a86c578992ee3f1d6079cbbefaa7bcdc14646e8
|
[
"MIT"
] | null | null | null |
depletion_scheme.py
|
rabieomar92/pynuctran
|
6a86c578992ee3f1d6079cbbefaa7bcdc14646e8
|
[
"MIT"
] | 1
|
2022-01-21T07:35:38.000Z
|
2022-01-21T07:35:38.000Z
|
import numpy as np
import decimal as dc
import xml.etree.ElementTree as ET
import time as tm
from pynuctran.solver import *
'''
SECTION III: DEPLETION DATA PRE-PROCESSING ................................................ SEC. III
*******************************************************************************************
THIS SECTION ENABLES THE RETRIEVAL OF ENDFB71 NUCLIDES DATA FROM XML STORAGE.
THE NUCLIDE DATA ARE STORED IN chain_endfb71.xml, an XML file created by MIT-CPRG.
The original source of the XML file can be retrieved here:
https://github.com/mit-crpg/opendeplete/blob/master/chains/chain_endfb71.xml
*******************************************************************************************
'''
class depletion_scheme:
'''
    Defines the depletion scheme in the code, based on ENDFB71 data. The nuclide data are
stored in an xml file 'chains_endfb71.xml'. Here, the depletion chains are created
based on the user specified reaction rates and species.
Parameters:
xml_data_location: A string specifying the location of chains_endfb71.xml on the disk.
rxn_rates : A 2D python dictionary containing the reaction rates of various
removal events. For example,
rxn_rates = {
'U238' : {'(n,gamma)': 1E-4, 'fission': 1E-5},
'Pu239': {'(n,gamma)': 1E-5},
}
'''
@staticmethod
def build_chains(solver: solver, rxn_rates: dict, xml_data_location: str = 'chain_endfb71.xml'):
t0 = tm.process_time()
species_names = solver.species_names
tree = ET.parse(xml_data_location)
root = tree.getroot()
max_rate = 0.0
for species in root:
species_name = species.attrib['name']
if not species_name in species_names:
continue
if 'half_life' in species.attrib:
hl = np.float64(species.attrib['half_life'])
decay_rate = np.log(2) / hl
# Records the maximum rate.
if decay_rate > max_rate:
max_rate = decay_rate
else:
decay_rate = 0.0
removals = list(species)
for removal in removals:
if removal.tag == 'decay_type':
decay_rate_adjusted = np.float64(removal.attrib['branching_ratio']) * decay_rate
parent = species_name
daughter = removal.attrib['target']
parent_id = species_names.index(parent)
if daughter in species_names:
daughter_id = species_names.index(daughter)
solver.add_removal(parent_id, decay_rate_adjusted, [daughter_id])
# Records the maximum rate.
if decay_rate_adjusted > max_rate:
max_rate = decay_rate_adjusted
else:
solver.add_removal(parent_id, decay_rate_adjusted, [solver.__no_product__])
# Records the maximum rate.
if decay_rate_adjusted > max_rate:
max_rate = decay_rate_adjusted
# If reaction rates are not provided then we skip this.
if not rxn_rates is None:
if species_name in rxn_rates.keys():
# Process all absorption reactions, except fission.
if removal.tag == 'reaction_type' and 'target' in removal.attrib:
parent = species_name
parent_id = species_names.index(parent)
if removal.attrib['type'] in rxn_rates[parent].keys() and \
not removal.attrib['type'] == 'fission':
daughter = removal.attrib['target']
removal_rate = dc.Decimal('%.15g' % rxn_rates[parent][removal.attrib['type']])
if daughter in species_names:
daughter_id = species_names.index(daughter)
solver.add_removal(parent_id, removal_rate, [daughter_id])
else:
solver.add_removal(parent_id, removal_rate, [solver.__no_product__])
# Process fission reaction.
if removal.tag == 'neutron_fission_yields':
parent = species_name
parent_id = species_names.index(parent)
yield_data = list(removal)
energy = 0.0
products = []
yields = []
if 'fission' in rxn_rates[parent].keys():
for data in yield_data:
if data.tag == 'energies':
energy = sorted([np.float64(e) for e in data.text.split()])[0]
if data.tag == 'fission_yields':
if float(data.attrib['energy']) == energy:
for param in list(data):
if param.tag == 'products':
products = param.text.split()
if param.tag == 'data':
yields = [dc.Decimal(y) for y in param.text.split()]
total_fission_rate = rxn_rates[parent]['fission']
yields_to_add = []
daughters_id_to_add = []
for product in products:
if product in species_names:
daughters_id_to_add.append(species_names.index(product))
yields_to_add.append(yields[products.index(product)])
parent_id = species_names.index(species_name)
solver.add_removal(parent_id, total_fission_rate, daughters_id_to_add, yields_to_add)
# Report the data processing time.
t1 = tm.process_time()
print('Done building chains. CPU time = %.10g secs' % (t1-t0))
return
'''
Gets the list of species available in the nuclides data.
'''
@staticmethod
def get_all_species_names(xml_data_location: str) -> list:
tree = ET.parse(xml_data_location)
root = tree.getroot()
species_names = []
for species in root:
species_names.append(species.attrib['name'])
return species_names
@staticmethod
def get_names(xml_data_location: str, AMin: int = -1, AMax: int = -1):
tree = ET.parse(xml_data_location)
root = tree.getroot()
species_names = []
for species in root:
name = species.attrib['name']
name = name.split('_')[0]
x = ''
for c in name:
if c.isnumeric():
x += c
if AMin == AMax == -1:
species_names.append(species.attrib['name'])
else:
A = int(x)
if A >= AMin and A <= AMax:
species_names.append(name)
return species_names
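# Illustrative sketch (not part of the original file): building a small
# actinide chain.  The pynuctran solver constructor is assumed to take the
# species-name list; the reaction rates follow the layout shown in the
# build_chains docstring.
#
#   names = depletion_scheme.get_names('chain_endfb71.xml', AMin=230, AMax=250)
#   sim = solver(species_names=names)          # assumed pynuctran constructor
#   rates = {
#       'U238': {'(n,gamma)': 1E-4, 'fission': 1E-5},
#       'Pu239': {'(n,gamma)': 1E-5},
#   }
#   depletion_scheme.build_chains(sim, rates, 'chain_endfb71.xml')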
| 48.138554
| 118
| 0.463772
|
096a7eb2c94be9b7e70256c20f0dffd314efd3d2
| 3,864
|
py
|
Python
|
scripts/Python/PreprocessPDBs.py
|
joy13975/elfin-old
|
e1a727c084f616815b3d746bf7edd250997fa820
|
[
"MIT"
] | null | null | null |
scripts/Python/PreprocessPDBs.py
|
joy13975/elfin-old
|
e1a727c084f616815b3d746bf7edd250997fa820
|
[
"MIT"
] | null | null | null |
scripts/Python/PreprocessPDBs.py
|
joy13975/elfin-old
|
e1a727c084f616815b3d746bf7edd250997fa820
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse, sys
from utils import *
def mergeChainsAndCleanse(pdb):
for model in pdb:
newChain = Bio.PDB.Chain.Chain('A')
rid = 1
for chain in model:
for r in chain:
badAtoms = []
for a in r:
if a.name == '1H' or a.name == '2H' or a.name == '3H' or a.name == 'OXT':
badAtoms.append(a.name)
for ba in badAtoms:
r.detach_child(ba)
r.id = (r.id[0], rid, r.id[2])
newChain.add(r)
rid += 1
# Remove old chains from model
ocIds = []
for chain in model:
ocIds.append(chain.id)
for ocId in ocIds:
model.detach_child(ocId)
model.add(newChain)
def main():
ap = argparse.ArgumentParser(description='Template Python script');
ap.add_argument('input')
ap.add_argument('outputDir')
args = ap.parse_args()
if len(sys.argv) == 1 or args.input is None or args.outputDir is None:
ap.print_help()
sys.exit(1)
# Extract name
pairFile = args.input
pairName = pairFile[pairFile.rfind('/')+1:].replace('.pdb', '')
underscores = [pairName.find('_'), pairName.rfind('_')]
    print('Working on {}'.format(args.input))
    if underscores[0] == -1:
        print('Input {} is a simple pair and does not need loop replacement'.format(args.input))
pair = readPdb('pair', pairFile)
mergeChainsAndCleanse(pair)
savePdb(pair, args.outputDir + '/' + pairName + '.pdb')
exit(0)
dashIdx = pairName.rfind('-')
spairFirst = dashIdx < underscores[0] and dashIdx < underscores[1]
pairNameHalves = [pairName[:dashIdx], pairName[dashIdx+1:]]
spairName = ''
spairName += pairNameHalves[0][pairNameHalves[0].rfind('_')+1:] \
if pairNameHalves[0].rfind('_') != -1 else pairNameHalves[0]
spairName += '-'
spairName += pairNameHalves[1][:pairNameHalves[1].find('_')] \
if pairNameHalves[1].find('_') != -1 else pairNameHalves[1]
spairFile = pairFile[:pairFile.rfind('/')+1] + spairName + '.pdb'
# Load PDBs
pair = readPdb('pair', pairFile)
pairChains = pair.child_list[0].child_list
assert(len(pairChains) == 2)
spair = readPdb('spair', spairFile)
spairChains = spair.child_list[0].child_list
assert(len(spairChains) == 2)
# Get residue counts
pairRCount = getResidueCount(pair)
spairRCount = getResidueCount(spair)
spairStartIdx = int(np.ceil(spairRCount*0.375))+1
spairEndIdx = int(np.floor(spairRCount*0.625))-1
spairEndOffset = spairEndIdx - len(spairChains[0].child_list)
# Find simple pair middle residues
spairChainLens = [len(c.child_list) for c in spairChains]
spairMidRes = spairChains[0].child_list[spairStartIdx:] + \
spairChains[1].child_list[:spairEndOffset]
spairAtoms = [a for r in spairMidRes for a in r.child_list if a.name == 'CA']
# Find first half of the residues
pairChainLens = [len(c.child_list) for c in pairChains]
pairStartOffset = pairChainLens[0]-(spairChainLens[0]-spairStartIdx)
pairMidRes = \
(pairChains[0].child_list[spairStartIdx:] if spairFirst else \
pairChains[0].child_list[pairStartOffset:]) + \
pairChains[1].child_list[:spairEndOffset]
pairAtoms = [a for r in pairMidRes for a in r.child_list if a.name == 'CA']
# Move pair to spair
si = Bio.PDB.Superimposer()
si.set_atoms(spairAtoms, pairAtoms)
rot, tran = si.rotran
pair.transform(rot, tran)
# Merge chains and remove bad atoms
mergeChainsAndCleanse(pair)
mergeChainsAndCleanse(spair)
# Replace pair residues where it should be spair residues
pairNewChain = pair.child_list[0].child_list[0]
spairNewChain = spair.child_list[0].child_list[0]
    for rIdx in range(spairStartIdx, spairEndIdx):
offsetRIdx = rIdx + (0 if spairFirst else pairChainLens[0]-spairChainLens[0])
oldRId = pairNewChain.child_list[offsetRIdx].id
pairNewChain.child_list[offsetRIdx] = spairNewChain.child_list[rIdx]
pairNewChain.child_list[offsetRIdx].id = oldRId
savePdb(pair, args.outputDir + '/' + pairName + '.pdb')
if __name__ == '__main__':
main()
| 31.16129
| 89
| 0.704451
|
4b2fcc765e8284b75df6d1fdbfd71428021e0cd4
| 2,882
|
py
|
Python
|
anpr_webapp.py
|
shiv2110/ANPR_Stage-1
|
6da0898383efdf17e8efa74e431db54fd2f29b37
|
[
"Net-SNMP",
"Xnet"
] | 1
|
2021-08-02T10:25:37.000Z
|
2021-08-02T10:25:37.000Z
|
anpr_webapp.py
|
shiv2110/ANPR_Stage-1
|
6da0898383efdf17e8efa74e431db54fd2f29b37
|
[
"Net-SNMP",
"Xnet"
] | null | null | null |
anpr_webapp.py
|
shiv2110/ANPR_Stage-1
|
6da0898383efdf17e8efa74e431db54fd2f29b37
|
[
"Net-SNMP",
"Xnet"
] | null | null | null |
import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
import lp_detection
import lp_char_seg
import lp_char_recog
import lp_tesseract
from PIL import Image
st.set_page_config(page_title='License Plate Recognition', page_icon='🚗')
st.markdown(""" <style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """, unsafe_allow_html=True)
padding = 0
st.markdown(f""" <style>
.reportview-container .main .block-container{{
padding-top: {padding}rem;
padding-right: {padding}rem;
padding-left: {padding}rem;
padding-bottom: {padding}rem;
}} </style> """, unsafe_allow_html=True)
st.title('License Plate Recognition Model Test')
dmax = st.sidebar.slider('Dmax value', 288, 700, 608, step=30, key='dmax')
dmin = st.sidebar.slider('Dmin value', 128, 608, 288, step=30, key='dmin')
digit_w = st.sidebar.slider('Digit width for characters', 10, 60, 30, key='digit_w')
digit_h = st.sidebar.slider('Digit height for characters', 10, 150, 80, key='digit_h')
ratio_up = st.sidebar.selectbox('Upper limit for character bounding rectangle ratio',
options=np.arange(2, 12), index=8, key='ratio_up')
hp = st.sidebar.slider('Bounding box height by plate height', 0.22, 0.79, 0.5, key='hp')
uploaded_img = st.file_uploader('Upload an image',
type='jpg',
help='an image file with .jpg extension to be uploaded from your local FS')
if uploaded_img:
st.image(uploaded_img, caption='license plate', width=500)
test_image = Image.open(uploaded_img)
test_image = np.array(test_image)
if st.button('Get License Plate Number'):
vehicle, LpImg, cor = lp_detection.get_license_plate(test_image, dmax, dmin)
st.subheader('License Plate Detection')
fig1, ax1 = plt.subplots()
ax1.axis(False)
ax1.imshow(LpImg[0])
st.pyplot(fig1)
plate, binary, dilated, blur, gray = lp_char_seg.plate_preprocessing(LpImg)
cont = lp_char_seg.find_contours(binary)
test_roi, crop_characters = lp_char_seg.store_chars(hp, digit_w, digit_h, ratio_up, plate, dilated, cont, binary)
st.subheader('Character Segmentation')
fig3, ax3 = plt.subplots()
ax3.axis(False)
ax3.imshow(test_roi)
st.pyplot(fig3)
plate_number = lp_char_recog.get_plate_number(crop_characters)
st.subheader('License Plate Number - MobileNetV2 Result')
st.header(plate_number)
text = lp_tesseract.OCR(blur)
st.subheader('License Plate Number - Pytesseract Result')
st.header(text)
st.write('Is the prediction wrong? Try tuning the parameters for a better result')
st.balloons()
| 29.408163
| 121
| 0.659264
|
75f3492348a6077e7d71394b19268a7584700c44
| 875
|
py
|
Python
|
tests/test_service.py
|
bclau/ceilometer
|
90ad86c08494596dfa03c8cbfcea2c2be58fc8dc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_service.py
|
bclau/ceilometer
|
90ad86c08494596dfa03c8cbfcea2c2be58fc8dc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_service.py
|
bclau/ceilometer
|
90ad86c08494596dfa03c8cbfcea2c2be58fc8dc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <licensing@enovance.com>
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer import service
from ceilometer.tests import base
class ServiceTestCase(base.TestCase):
def test_prepare_service(self):
service.prepare_service([])
| 32.407407
| 75
| 0.748571
|
d34a8cb50afcbd434c7dc5c888881118a2522cbc
| 5,653
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20171101/get_application_security_group.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20171101/get_application_security_group.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20171101/get_application_security_group.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetApplicationSecurityGroupResult',
'AwaitableGetApplicationSecurityGroupResult',
'get_application_security_group',
]
@pulumi.output_type
class GetApplicationSecurityGroupResult:
"""
An application security group in a resource group.
"""
def __init__(__self__, etag=None, id=None, location=None, name=None, provisioning_state=None, resource_guid=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the application security group resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the application security group resource. It uniquely identifies a resource, even if the user changes its name or migrate the resource across subscriptions or resource groups.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetApplicationSecurityGroupResult(GetApplicationSecurityGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApplicationSecurityGroupResult(
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_application_security_group(application_security_group_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationSecurityGroupResult:
"""
An application security group in a resource group.
:param str application_security_group_name: The name of the application security group.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['applicationSecurityGroupName'] = application_security_group_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20171101:getApplicationSecurityGroup', __args__, opts=opts, typ=GetApplicationSecurityGroupResult).value
return AwaitableGetApplicationSecurityGroupResult(
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type)
avg_line_length: 36.006369 | max_line_length: 212 | alphanum_fraction: 0.650097

hexsha: a5d36d23da5512a1d512b0f6c1e939709cafa86b | size: 784 | ext: py | lang: Python
max_stars:  molecule/default/tests/test_role.py | istvano/ansible_role_kubectx | 1f452d73feb2da09300084aeb81af3cc849fb2bf | ["MIT"] | null | null | null
max_issues: molecule/default/tests/test_role.py | istvano/ansible_role_kubectx | 1f452d73feb2da09300084aeb81af3cc849fb2bf | ["MIT"] | null | null | null
max_forks:  molecule/default/tests/test_role.py | istvano/ansible_role_kubectx | 1f452d73feb2da09300084aeb81af3cc849fb2bf | ["MIT"] | null | null | null
import os
import testinfra.utils.ansible_runner
import re
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_dir(host):
dir = host.file('/usr/local/bin')
assert dir.exists
assert dir.is_directory
assert dir.user == 'root'
def test_file(host):
installed_file = host.file('/usr/local/bin/kubectx')
assert installed_file.exists
assert installed_file.is_file
assert installed_file.user == 'root'
assert installed_file.group == 'root'
def test_file2(host):
installed_file = host.file('/usr/local/bin/kubens')
assert installed_file.exists
assert installed_file.is_file
assert installed_file.user == 'root'
assert installed_file.group == 'root'
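# Illustrative extension (a sketch, not one of the original role tests): the same
# testinfra File API can also assert permissions. The 0o755 mode below is an assumed
# install mode for the two binaries, not something the role above guarantees.
def test_file_is_executable(host):
    for path in ('/usr/local/bin/kubectx', '/usr/local/bin/kubens'):
        installed_file = host.file(path)
        assert installed_file.mode == 0o755  # assumed executable mode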
avg_line_length: 25.290323 | max_line_length: 63 | alphanum_fraction: 0.729592

hexsha: 19b1841c6d2fd53b962370fa481a1fa83d74bbfe | size: 2,761 | ext: py | lang: Python
max_stars:  tests/test_rand_spatial_crop.py | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 | ["Apache-2.0"] | 2,971 | 2019-10-16T23:53:16.000Z | 2022-03-31T20:58:24.000Z
max_issues: tests/test_rand_spatial_crop.py | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 | ["Apache-2.0"] | 2,851 | 2020-01-10T16:23:44.000Z | 2022-03-31T22:14:53.000Z
max_forks:  tests/test_rand_spatial_crop.py | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 | ["Apache-2.0"] | 614 | 2020-01-14T19:18:01.000Z | 2022-03-31T14:06:14.000Z
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms import RandSpatialCrop
from tests.utils import TEST_NDARRAYS, assert_allclose
TEST_CASE_0 = [
{"roi_size": [3, 3, -1], "random_center": True},
np.random.randint(0, 2, size=[3, 3, 3, 4]),
(3, 3, 3, 4),
]
TEST_CASE_1 = [{"roi_size": [3, 3, 3], "random_center": True}, np.random.randint(0, 2, size=[3, 3, 3, 3]), (3, 3, 3, 3)]
TEST_CASE_2 = [
{"roi_size": [3, 3, 3], "random_center": False},
np.random.randint(0, 2, size=[3, 3, 3, 3]),
(3, 3, 3, 3),
]
TEST_CASE_3 = [
{"roi_size": [3, 3], "random_center": False},
np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]]),
]
TEST_CASE_4 = [
{"roi_size": [3, 3, 3], "max_roi_size": [5, -1, 4], "random_center": True, "random_size": True},
np.random.randint(0, 2, size=[1, 4, 5, 6]),
(1, 4, 4, 3),
]
TEST_CASE_5 = [
{"roi_size": 3, "max_roi_size": 4, "random_center": True, "random_size": True},
np.random.randint(0, 2, size=[1, 4, 5, 6]),
(1, 3, 4, 3),
]
class TestRandSpatialCrop(unittest.TestCase):
@parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2])
def test_shape(self, input_param, input_data, expected_shape):
result = RandSpatialCrop(**input_param)(input_data)
self.assertTupleEqual(result.shape, expected_shape)
@parameterized.expand([TEST_CASE_3])
def test_value(self, input_param, input_data):
for p in TEST_NDARRAYS:
cropper = RandSpatialCrop(**input_param)
result = cropper(p(input_data))
roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size]
assert_allclose(result, input_data[:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]], type_test=False)
@parameterized.expand([TEST_CASE_4, TEST_CASE_5])
def test_random_shape(self, input_param, input_data, expected_shape):
cropper = RandSpatialCrop(**input_param)
cropper.set_random_state(seed=123)
result = cropper(input_data)
self.assertTupleEqual(result.shape, expected_shape)
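# Minimal usage sketch (illustrative, separate from the parameterized cases above):
# RandSpatialCrop crops a channel-first array; with random_size=False the output spatial
# size is exactly roi_size, which is the behaviour test_shape checks above.
def _example_crop():
    img = np.random.rand(1, 10, 10, 10)  # (channel, H, W, D)
    cropper = RandSpatialCrop(roi_size=(4, 4, 4), random_center=True, random_size=False)
    cropper.set_random_state(seed=0)
    return cropper(img).shape  # expected (1, 4, 4, 4)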
if __name__ == "__main__":
unittest.main()
avg_line_length: 36.328947 | max_line_length: 120 | alphanum_fraction: 0.651213

hexsha: f63cc23c5291046114edef840d88f03959eb2145 | size: 766 | ext: py | lang: Python
max_stars:  setup.py | yanndegat/ironic | 8857ec76443dea7778bb9c0d66568304e52495e5 | ["Apache-2.0"] | 350 | 2015-01-02T09:35:49.000Z | 2022-03-28T09:25:59.000Z
max_issues: setup.py | yanndegat/ironic | 8857ec76443dea7778bb9c0d66568304e52495e5 | ["Apache-2.0"] | 83 | 2021-03-24T08:31:24.000Z | 2022-03-31T12:04:52.000Z
max_forks:  setup.py | yanndegat/ironic | 8857ec76443dea7778bb9c0d66568304e52495e5 | ["Apache-2.0"] | 409 | 2015-01-01T11:28:26.000Z | 2022-03-29T14:56:41.000Z
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)
avg_line_length: 34.818182 | max_line_length: 69 | alphanum_fraction: 0.750653

hexsha: 3adaae241d9ce04e0cabe6eed39250135ea224f6 | size: 5,073 | ext: py | lang: Python
max_stars:  onnx/backend/test/case/node/slice.py | cnheider/onnx | 8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933 | ["MIT"] | 137 | 2020-04-28T12:28:32.000Z | 2022-03-18T10:48:25.000Z
max_issues: onnx/backend/test/case/node/slice.py | cnheider/onnx | 8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933 | ["MIT"] | 24 | 2020-05-06T08:06:42.000Z | 2021-12-31T07:46:13.000Z
max_forks:  onnx/backend/test/case/node/slice.py | cnheider/onnx | 8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933 | ["MIT"] | 24 | 2020-05-06T11:43:22.000Z | 2022-03-18T10:50:35.000Z
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Slice(Base):
@staticmethod
def export_slice(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
y = x[0:3, 0:10]
starts = np.array([0, 0], dtype=np.int64)
ends = np.array([3, 10], dtype=np.int64)
axes = np.array([0, 1], dtype=np.int64)
steps = np.array([1, 1], dtype=np.int64)
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice')
@staticmethod
def export_slice_neg(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0], dtype=np.int64)
ends = np.array([-1], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 0:-1]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_neg')
@staticmethod
def export_slice_start_out_of_bounds(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([1000], dtype=np.int64)
ends = np.array([1000], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 1000:1000]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_start_out_of_bounds')
@staticmethod
def export_slice_end_out_of_bounds(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([1], dtype=np.int64)
ends = np.array([1000], dtype=np.int64)
axes = np.array([1], dtype=np.int64)
steps = np.array([1], dtype=np.int64)
y = x[:, 1:1000]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_end_out_of_bounds')
@staticmethod
def export_slice_default_axes(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0, 0, 3], dtype=np.int64)
ends = np.array([20, 10, 4], dtype=np.int64)
y = x[:, :, 3:4]
expect(node, inputs=[x, starts, ends], outputs=[y],
name='test_slice_default_axes')
@staticmethod
def export_slice_default_steps(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0, 0, 3], dtype=np.int64)
ends = np.array([20, 10, 4], dtype=np.int64)
axes = np.array([0, 1, 2], dtype=np.int64)
y = x[:, :, 3:4]
expect(node, inputs=[x, starts, ends, axes], outputs=[y],
name='test_slice_default_steps')
@staticmethod
def export_slice_neg_steps(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes', 'steps'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([20, 10, 4], dtype=np.int64)
ends = np.array([0, 0, 1], dtype=np.int64)
axes = np.array([0, 1, 2], dtype=np.int64)
steps = np.array([-1, -3, -2])
y = x[20:0:-1, 10:0:-3, 4:1:-2]
expect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],
name='test_slice_neg_steps')
@staticmethod
def export_slice_negative_axes(): # type: () -> None
node = onnx.helper.make_node(
'Slice',
inputs=['x', 'starts', 'ends', 'axes'],
outputs=['y'],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
starts = np.array([0, 0, 3], dtype=np.int64)
ends = np.array([20, 10, 4], dtype=np.int64)
axes = np.array([0, -2, -1], dtype=np.int64)
y = x[:, :, 3:4]
expect(node, inputs=[x, starts, ends, axes], outputs=[y],
name='test_slice_negative_axes')
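# Illustrative check (a sketch, not one of the exported ONNX test cases above): each
# export pairs the Slice node with the numpy basic-slicing expression that defines its
# expected output, so the reference for the negative-step case can be reproduced with
# numpy alone.
def _numpy_reference_for_neg_steps():
    x = np.random.randn(20, 10, 5).astype(np.float32)
    # same starts/ends/axes/steps used by export_slice_neg_steps above
    return x[20:0:-1, 10:0:-3, 4:1:-2].shape  # (19, 3, 2) for this input shape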
avg_line_length: 32.941558 | max_line_length: 72 | alphanum_fraction: 0.523556

hexsha: 3a0e2f89a954dc2a466f438a2eeb62b8ec583d3e | size: 10,313 | ext: py | lang: Python
max_stars:  src/sentry/integrations/vsts/client.py | fictional-tribble-2/getsentry--sentry | 4b0fa548a91b9119a309a53234c4542963714050 | ["BSD-3-Clause"] | 1 | 2019-05-28T06:18:03.000Z | 2019-05-28T06:18:03.000Z
max_issues: src/sentry/integrations/vsts/client.py | fictional-tribble/getsentry--sentry | 4b0fa548a91b9119a309a53234c4542963714050 | ["BSD-3-Clause"] | 6 | 2018-10-19T10:04:23.000Z | 2019-12-09T20:29:12.000Z
max_forks:  src/sentry/integrations/vsts/client.py | fictional-tribble-2/getsentry--sentry | 4b0fa548a91b9119a309a53234c4542963714050 | ["BSD-3-Clause"] | null | null | null
from __future__ import absolute_import
from sentry.integrations.client import ApiClient, OAuth2RefreshMixin
from sentry.utils.http import absolute_uri
UNSET = object()
FIELD_MAP = {
'title': '/fields/System.Title',
'description': '/fields/System.Description',
'comment': '/fields/System.History',
'link': '/relations/-',
'assigned_to': '/fields/System.AssignedTo',
'state': '/fields/System.State',
}
INVALID_ACCESS_TOKEN = 'HTTP 400 (invalid_request): The access token is not valid'
class VstsApiPath(object):
commits = u'https://{account_name}/DefaultCollection/_apis/git/repositories/{repo_id}/commits'
commits_batch = u'https://{account_name}/DefaultCollection/_apis/git/repositories/{repo_id}/commitsBatch'
commits_changes = u'https://{account_name}/DefaultCollection/_apis/git/repositories/{repo_id}/commits/{commit_id}/changes'
projects = u'https://{account_name}/DefaultCollection/_apis/projects'
repository = u'https://{account_name}/DefaultCollection/{project}_apis/git/repositories/{repo_id}'
repositories = u'https://{account_name}/{project}_apis/git/repositories'
subscription = 'https://{account_name}/_apis/hooks/subscriptions/{subscription_id}'
subscriptions = u'https://{account_name}/_apis/hooks/subscriptions'
work_items = u'https://{account_name}/DefaultCollection/_apis/wit/workitems/{id}'
work_items_create = u'https://{account_name}/{project}/_apis/wit/workitems/${type}'
work_item_search = u'https://{account_name}.almsearch.visualstudio.com/_apis/search/workitemsearchresults'
work_item_states = u'https://{account_name}/{project}/_apis/wit/workitemtypes/{type}/states'
users = u'https://{account_name}.vssps.visualstudio.com/_apis/graph/users'
class VstsApiClient(ApiClient, OAuth2RefreshMixin):
api_version = '4.1'
api_version_preview = '-preview.1'
def __init__(self, identity, oauth_redirect_url, *args, **kwargs):
super(VstsApiClient, self).__init__(*args, **kwargs)
self.identity = identity
self.oauth_redirect_url = oauth_redirect_url
if 'access_token' not in self.identity.data:
raise ValueError('Vsts Identity missing access token')
def request(self, method, path, data=None, params=None, api_preview=False):
self.check_auth(redirect_url=self.oauth_redirect_url)
headers = {
'Accept': 'application/json; api-version={}{}'.format(self.api_version, self.api_version_preview if api_preview else ''),
'Content-Type': 'application/json-patch+json' if method == 'PATCH' else 'application/json',
'X-HTTP-Method-Override': method,
'X-TFS-FedAuthRedirect': 'Suppress',
'Authorization': 'Bearer {}'.format(self.identity.data['access_token'])
}
return self._request(method, path, headers=headers, data=data, params=params)
def create_work_item(self, instance, project, title=None,
description=None, comment=None, link=None):
data = []
if title:
data.append({
'op': 'add',
'path': FIELD_MAP['title'],
'value': title,
})
if description:
data.append({
'op': 'add',
'path': FIELD_MAP['description'],
'value': description
})
if comment:
data.append({
'op': 'add',
'path': FIELD_MAP['comment'],
'value': comment,
})
# XXX: Link is not yet used, as we can't explicitly bind it to Sentry.
# if link:
# data.append({
# 'op': 'add',
# 'path': FIELD_MAP['link'],
# 'value': {
# 'rel': 'Hyperlink',
# 'url': link,
# }
# })
return self.patch(
VstsApiPath.work_items_create.format(
account_name=instance,
project=project,
type='Bug'
),
data=data,
)
def update_work_item(self, instance, id, title=UNSET, description=UNSET, link=UNSET,
comment=UNSET, assigned_to=UNSET, state=UNSET):
data = []
for f_name, f_value in (('title', title), ('description', description),
('link', link), ('assigned_to', assigned_to), ('state', state)):
if f_name == 'link':
# XXX: Link is not yet used, as we can't explicitly bind it to Sentry.
continue
elif f_value is None:
data.append({
'op': 'remove',
'path': FIELD_MAP[f_name],
})
elif f_value is not UNSET:
data.append({
# TODO(dcramer): this is problematic when the link already exists
'op': 'replace' if f_name != 'link' else 'add',
'path': FIELD_MAP[f_name],
'value': {
'rel': 'Hyperlink',
'url': f_value,
} if f_name == 'link' else f_value,
})
if comment is not UNSET and comment:
data.append({
'op': 'add',
'path': FIELD_MAP['comment'],
'value': comment,
})
return self.patch(
VstsApiPath.work_items.format(
account_name=instance,
id=id,
),
data=data,
)
def get_work_item(self, instance, id):
return self.get(
VstsApiPath.work_items.format(
account_name=instance,
id=id,
),
)
def get_work_item_states(self, instance, project):
return self.get(
VstsApiPath.work_item_states.format(
account_name=instance,
project=project,
# TODO(lb): might want to make this custom like jira at some point
type='Bug',
),
api_preview=True,
)
def get_work_item_types(self, instance, process_id):
return self.get(
VstsApiPath.work_item_types.format(
account_name=instance,
process_id=process_id,
),
api_preview=True,
)
def get_repo(self, instance, name_or_id, project=None):
return self.get(
VstsApiPath.repository.format(
account_name=instance,
project='{}/'.format(project) if project else '',
repo_id=name_or_id,
),
)
def get_repos(self, instance, project=None):
return self.get(
VstsApiPath.repositories.format(
account_name=instance,
project='{}/'.format(project) if project else '',
),
)
def get_commits(self, instance, repo_id, commit, limit=100):
return self.get(
VstsApiPath.commits.format(
account_name=instance,
repo_id=repo_id,
),
params={
'commit': commit,
'$top': limit,
},
)
def get_commit_filechanges(self, instance, repo_id, commit):
resp = self.get(
VstsApiPath.commits_changes.format(
account_name=instance,
repo_id=repo_id,
commit_id=commit,
)
)
changes = resp['changes']
return changes
def get_commit_range(self, instance, repo_id, start_sha, end_sha):
return self.post(
VstsApiPath.commits_batch.format(
account_name=instance,
repo_id=repo_id,
),
data={
'itemVersion': {
'versionType': 'commit',
'version': start_sha,
},
'compareVersion': {
'versionType': 'commit',
'version': end_sha
}
}
)
def get_projects(self, instance):
# TODO(dcramer): VSTS doesn't provide a way to search, so we're
# making the assumption that a user has 100 or less projects today.
return self.get(
VstsApiPath.projects.format(
account_name=instance,
),
params={'stateFilter': 'WellFormed'}
)
def get_users(self, account_name):
return self.get(
VstsApiPath.users.format(
account_name=account_name,
),
api_preview=True,
)
def create_subscription(self, instance, external_id, shared_secret):
return self.post(
VstsApiPath.subscriptions.format(
account_name=instance
),
data={
'publisherId': 'tfs',
'eventType': 'workitem.updated',
'resourceVersion': '1.0',
'consumerId': 'webHooks',
'consumerActionId': 'httpRequest',
'consumerInputs': {
'url': absolute_uri('/extensions/vsts/issue-updated/'),
'resourceDetailsToSend': 'all',
'httpHeaders': 'shared-secret:%s' % shared_secret,
}
},
)
def delete_subscription(self, instance, subscription_id):
self.delete(
VstsApiPath.subscription.format(
account_name=instance,
subscription_id=subscription_id,
)
)
def search_issues(self, account_name, query=None):
return self.post(
VstsApiPath.work_item_search.format(
account_name=account_name,
),
data={
'searchText': query,
'$top': 1000,
'filters': {
'System.WorkItemType': [
'Bug',
'User Story',
'Feature',
'Task'
],
}
},
api_preview=True,
)
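# Usage sketch (illustrative; identity handling is simplified and the account / work
# item values are hypothetical). VstsApiClient expects an identity whose .data carries
# an OAuth access token; each helper then builds a request against the VstsApiPath
# templates, and update_work_item assembles a JSON-Patch body from FIELD_MAP.
def _example_update(identity):
    client = VstsApiClient(identity, oauth_redirect_url='https://example.invalid/redirect')
    return client.update_work_item(
        instance='myorg.visualstudio.com',  # hypothetical VSTS account
        id=309,                             # hypothetical work item id
        title='Sentry issue title',
        comment='Synced from Sentry')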
avg_line_length: 35.685121 | max_line_length: 133 | alphanum_fraction: 0.528071

hexsha: 9ec18f4c4724ebba1bbd55bd3891ab384c6824ca | size: 465 | ext: py | lang: Python
max_stars:  cicu/forms.py | V-Clean/vclean | 556a93299ff4b8a85fddc476fd8ffa9e3a2634ac | ["BSD-3-Clause"] | 21 | 2015-01-04T05:14:34.000Z | 2019-04-27T20:17:50.000Z
max_issues: cicu/forms.py | V-Clean/vclean | 556a93299ff4b8a85fddc476fd8ffa9e3a2634ac | ["BSD-3-Clause"] | 5 | 2015-07-05T08:33:44.000Z | 2021-04-20T09:57:15.000Z
max_forks:  cicu/forms.py | V-Clean/vclean | 556a93299ff4b8a85fddc476fd8ffa9e3a2634ac | ["BSD-3-Clause"] | 20 | 2015-01-15T04:51:48.000Z | 2018-03-04T20:11:08.000Z
import uuid
from django import forms
from .models import UploadedFile
class UploadedFileForm(forms.ModelForm):
class Meta:
model = UploadedFile
fields = ('file',)
def clean_file(self):
data = self.cleaned_data['file']
# Change the name of the file to something unguessable
# Construct the new name as <unique-hex>-<original>.<ext>
data.name = u'%s-%s' % (uuid.uuid4().hex, data.name)
return data
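# Usage sketch (illustrative, not part of the original module): in a Django view the
# form validates the upload, and clean_file() above prefixes the stored name with a
# uuid4 hex, so the saved file name cannot be guessed from the original one.
def _example_handle_upload(request):
    form = UploadedFileForm(request.POST, request.FILES)
    if form.is_valid():
        return form.save()  # UploadedFile instance with the randomized file name
    return None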
avg_line_length: 24.473684 | max_line_length: 65 | alphanum_fraction: 0.63871

hexsha: 664bac1da2a47035be97ecbf034d8494c3bac92e | size: 445 | ext: py | lang: Python
max_stars:  agileLibrary/agileLibrary/main/models/rooms.py | DimOps/library-app | ed5e832c877957548d19b2ce38fa3ab5058d6797 | ["MIT"] | null | null | null
max_issues: agileLibrary/agileLibrary/main/models/rooms.py | DimOps/library-app | ed5e832c877957548d19b2ce38fa3ab5058d6797 | ["MIT"] | null | null | null
max_forks:  agileLibrary/agileLibrary/main/models/rooms.py | DimOps/library-app | ed5e832c877957548d19b2ce38fa3ab5058d6797 | ["MIT"] | null | null | null
from django.db import models
class StudySpace(models.Model):
room_number = models.CharField(
max_length=5,
)
sector = models.CharField(
max_length=10,
)
level = models.IntegerField(
default=1,
)
capacity = models.IntegerField(
default=4,
)
smartBoard = models.BooleanField(
default=False,
)
is_booked = models.BooleanField(
default=False,
)
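# Illustrative sketch (not part of the original model module): creating a StudySpace row
# only needs the non-defaulted fields; level, capacity, smartBoard and is_booked fall
# back to the defaults declared above. The room values are hypothetical.
def _example_create_room():
    return StudySpace.objects.create(room_number='B12', sector='East')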
avg_line_length: 15.344828 | max_line_length: 37 | alphanum_fraction: 0.593258

hexsha: a44c7c7f53a640b7ddffcb5a34393586bf2ce524 | size: 3,049 | ext: py | lang: Python
max_stars:  src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2_grpc.py | jaipradeesh/sagemaker-python-sdk | ef842108ccaa324d2be15978aa678926dd1c21ea | ["Apache-2.0"] | 267 | 2018-10-24T05:28:04.000Z | 2019-12-24T19:06:14.000Z
max_issues: src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2_grpc.py | jaipradeesh/sagemaker-python-sdk | ef842108ccaa324d2be15978aa678926dd1c21ea | ["Apache-2.0"] | 4 | 2019-01-03T14:54:02.000Z | 2019-12-29T14:31:37.000Z
max_forks:  src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2_grpc.py | jaipradeesh/sagemaker-python-sdk | ef842108ccaa324d2be15978aa678926dd1c21ea | ["Apache-2.0"] | 83 | 2018-10-23T15:37:54.000Z | 2019-12-24T19:06:21.000Z
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/model_service.proto
# To regenerate run
# python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/model_service.proto
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import tensorflow_serving.apis.get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2
class ModelServiceStub(object):
"""ModelService provides access to information about model versions
that have been handled by the model server.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetModelStatus = channel.unary_unary(
'/tensorflow.serving.ModelService/GetModelStatus',
request_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.SerializeToString,
response_deserializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.FromString,
)
class ModelServiceServicer(object):
"""ModelService provides access to information about model versions
that have been handled by the model server.
"""
def GetModelStatus(self, request, context):
"""Gets status of model. If the ModelSpec in the request does not specify
version, information about all versions of the model will be returned. If
the ModelSpec in the request does specify a version, the status of only
that version will be returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ModelServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetModelStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetModelStatus,
request_deserializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.FromString,
response_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorflow.serving.ModelService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
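# Usage sketch (illustrative, not part of the generated module): querying a running
# TensorFlow Serving instance for model status via the stub above. The target address
# and model name are hypothetical placeholders.
def _example_get_model_status(model_name='my_model', target='localhost:8500'):
    channel = grpc.insecure_channel(target)
    stub = ModelServiceStub(channel)
    request = tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest()
    request.model_spec.name = model_name
    return stub.GetModelStatus(request, timeout=10.0)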
avg_line_length: 42.347222 | max_line_length: 128 | alphanum_fraction: 0.763529

hexsha: 9df507043b9ddc18ce1692deab041bc369293272 | size: 1,103 | ext: py | lang: Python
max_stars:  py/test/envar.py | mpaulweeks/fgc | 2fa77896a59f55be0a68a7dffef76e3dc29f2497 | ["MIT"] | 20 | 2016-06-30T05:48:30.000Z | 2021-06-05T21:42:41.000Z
max_issues: py/test/envar.py | mpaulweeks/fgc | 2fa77896a59f55be0a68a7dffef76e3dc29f2497 | ["MIT"] | null | null | null
max_forks:  py/test/envar.py | mpaulweeks/fgc | 2fa77896a59f55be0a68a7dffef76e3dc29f2497 | ["MIT"] | 9 | 2016-07-04T04:44:07.000Z | 2019-10-12T21:20:35.000Z
from py.test.tools import (
BaseTestCase,
)
from py.src.settings.envar import (
_ENVARS,
)
class EnvarTest(BaseTestCase):
def test_is_web_server(self):
sut = _ENVARS({})
self.assertTrue(sut.is_web_server())
sut = _ENVARS({"instance_type": "dev"})
self.assertTrue(sut.is_web_server())
sut = _ENVARS({"instance_type": "web"})
self.assertTrue(sut.is_web_server())
sut = _ENVARS({"instance_type": "api"})
self.assertFalse(sut.is_web_server())
sut = _ENVARS({"instance_type": "scraper"})
self.assertFalse(sut.is_web_server())
def test_is_api_server(self):
sut = _ENVARS({})
self.assertFalse(sut.is_api_server())
sut = _ENVARS({"instance_type": "dev"})
self.assertFalse(sut.is_api_server())
sut = _ENVARS({"instance_type": "web"})
self.assertFalse(sut.is_api_server())
sut = _ENVARS({"instance_type": "api"})
self.assertTrue(sut.is_api_server())
sut = _ENVARS({"instance_type": "scraper"})
self.assertFalse(sut.is_api_server())
avg_line_length: 31.514286 | max_line_length: 51 | alphanum_fraction: 0.62194

hexsha: 731ecfc5e5e603f3d029933ef2e7bde475ba6c78 | size: 119,829 | ext: py | lang: Python
max_stars:  sympy/combinatorics/perm_groups.py | vprusso/sympy | d5aa27ec88bb076f59087aada97d99bfff8b2f4c | ["BSD-3-Clause"] | null | null | null
max_issues: sympy/combinatorics/perm_groups.py | vprusso/sympy | d5aa27ec88bb076f59087aada97d99bfff8b2f4c | ["BSD-3-Clause"] | null | null | null
max_forks:  sympy/combinatorics/perm_groups.py | vprusso/sympy | d5aa27ec88bb076f59087aada97d99bfff8b2f4c | ["BSD-3-Clause"] | null | null | null
from __future__ import print_function, division
from random import randrange, choice
from math import log
from sympy.core import Basic
from sympy.combinatorics import Permutation
from sympy.combinatorics.permutations import (_af_commutes_with, _af_invert,
_af_rmul, _af_rmuln, _af_pow, Cycle)
from sympy.combinatorics.util import (_check_cycles_alt_sym,
_distribute_gens_by_base, _orbits_transversals_from_bsgs,
_handle_precomputed_bsgs, _base_ordering, _strong_gens_from_distr,
_strip, _strip_af)
from sympy.functions.combinatorial.factorials import factorial
from sympy.ntheory import sieve
from sympy.utilities.iterables import has_variety, is_sequence, uniq
from sympy.utilities.randtest import _randrange
rmul = Permutation.rmul_with_af
_af_new = Permutation._af_new
class PermutationGroup(Basic):
"""The class defining a Permutation group.
PermutationGroup([p1, p2, ..., pn]) returns the permutation group
generated by the list of permutations. This group can be supplied
to Polyhedron if one desires to decorate the elements to which the
indices of the permutation refer.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.permutations import Cycle
>>> from sympy.combinatorics.polyhedron import Polyhedron
>>> from sympy.combinatorics.perm_groups import PermutationGroup
The permutations corresponding to motion of the front, right and
bottom face of a 2x2 Rubik's cube are defined:
>>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5)
>>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9)
>>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21)
These are passed as permutations to PermutationGroup:
>>> G = PermutationGroup(F, R, D)
>>> G.order()
3674160
The group can be supplied to a Polyhedron in order to track the
objects being moved. An example involving the 2x2 Rubik's cube is
given there, but here is a simple demonstration:
>>> a = Permutation(2, 1)
>>> b = Permutation(1, 0)
>>> G = PermutationGroup(a, b)
>>> P = Polyhedron(list('ABC'), pgroup=G)
>>> P.corners
(A, B, C)
>>> P.rotate(0) # apply permutation 0
>>> P.corners
(A, C, B)
>>> P.reset()
>>> P.corners
(A, B, C)
Or one can make a permutation as a product of selected permutations
and apply them to an iterable directly:
>>> P10 = G.make_perm([0, 1])
>>> P10('ABC')
['C', 'A', 'B']
See Also
========
sympy.combinatorics.polyhedron.Polyhedron,
sympy.combinatorics.permutations.Permutation
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
[2] Seress, A.
"Permutation Group Algorithms"
[3] http://en.wikipedia.org/wiki/Schreier_vector
[4] http://en.wikipedia.org/wiki/Nielsen_transformation
#Product_replacement_algorithm
[5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray,
Alice C.Niemeyer, and E.A.O'Brien. "Generating Random
Elements of a Finite Group"
[6] http://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
[7] http://www.algorithmist.com/index.php/Union_Find
[8] http://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
[9] http://en.wikipedia.org/wiki/Center_%28group_theory%29
[10] http://en.wikipedia.org/wiki/Centralizer_and_normalizer
[11] http://groupprops.subwiki.org/wiki/Derived_subgroup
[12] http://en.wikipedia.org/wiki/Nilpotent_group
[13] http://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf
"""
def __new__(cls, *args, **kwargs):
"""The default constructor. Accepts Cycle and Permutation forms.
Removes duplicates unless ``dups`` keyword is False.
"""
args = list(args[0] if is_sequence(args[0]) else args)
if not args:
raise ValueError('must supply one or more permutations '
'to define the group')
if any(isinstance(a, Cycle) for a in args):
args = [Permutation(a) for a in args]
if has_variety(a.size for a in args):
degree = kwargs.pop('degree', None)
if degree is None:
degree = max(a.size for a in args)
for i in range(len(args)):
if args[i].size != degree:
args[i] = Permutation(args[i], size=degree)
if kwargs.pop('dups', True):
args = list(uniq([_af_new(list(a)) for a in args]))
obj = Basic.__new__(cls, *args, **kwargs)
obj._generators = args
obj._order = None
obj._center = []
obj._is_abelian = None
obj._is_transitive = None
obj._is_sym = None
obj._is_alt = None
obj._is_primitive = None
obj._is_nilpotent = None
obj._is_solvable = None
obj._is_trivial = None
obj._transitivity_degree = None
obj._max_div = None
obj._r = len(obj._generators)
obj._degree = obj._generators[0].size
# these attributes are assigned after running schreier_sims
obj._base = []
obj._strong_gens = []
obj._basic_orbits = []
obj._transversals = []
# these attributes are assigned after running _random_pr_init
obj._random_gens = []
return obj
def __getitem__(self, i):
return self._generators[i]
def __len__(self):
return len(self._generators)
def __eq__(self, other):
"""Return True if self and other have the same generators.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G = PermutationGroup([p, p**2])
>>> H = PermutationGroup([p**2, p])
>>> G.generators == H.generators
False
>>> G == H
True
"""
if not isinstance(other, PermutationGroup):
return False
return set(self.generators) == set(other.generators)
def __hash__(self):
return super(PermutationGroup, self).__hash__()
def __mul__(self, other):
"""Return the direct product of two permutation groups as a permutation
group.
This implementation realizes the direct product by shifting
the index set for the generators of the second group: so if we have
G acting on n1 points and H acting on n2 points, G*H acts on n1 + n2
points.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> G = CyclicGroup(5)
>>> H = G*G
>>> H
PermutationGroup([
Permutation(9)(0, 1, 2, 3, 4),
Permutation(5, 6, 7, 8, 9)])
>>> H.order()
25
"""
gens1 = [perm._array_form for perm in self.generators]
gens2 = [perm._array_form for perm in other.generators]
n1 = self._degree
n2 = other._degree
start = list(range(n1))
end = list(range(n1, n1 + n2))
for i in range(len(gens2)):
gens2[i] = [x + n1 for x in gens2[i]]
gens2 = [start + gen for gen in gens2]
gens1 = [gen + end for gen in gens1]
together = gens1 + gens2
gens = [_af_new(x) for x in together]
return PermutationGroup(gens)
def _random_pr_init(self, r, n, _random_prec_n=None):
r"""Initialize random generators for the product replacement algorithm.
The implementation uses a modification of the original product
replacement algorithm due to Leedham-Green, as described in [1],
pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical
analysis of the original product replacement algorithm, and [4].
The product replacement algorithm is used for producing random,
uniformly distributed elements of a group ``G`` with a set of generators
``S``. For the initialization ``_random_pr_init``, a list ``R`` of
``\max\{r, |S|\}`` group generators is created as the attribute
``G._random_gens``, repeating elements of ``S`` if necessary, and the
identity element of ``G`` is appended to ``R`` - we shall refer to this
last element as the accumulator. Then the function ``random_pr()``
is called ``n`` times, randomizing the list ``R`` while preserving
the generation of ``G`` by ``R``. The function ``random_pr()`` itself
takes two random elements ``g, h`` among all elements of ``R`` but
the accumulator and replaces ``g`` with a randomly chosen element
from ``\{gh, g(~h), hg, (~h)g\}``. Then the accumulator is multiplied
by whatever ``g`` was replaced by. The new value of the accumulator is
then returned by ``random_pr()``.
The elements returned will eventually (for ``n`` large enough) become
uniformly distributed across ``G`` ([5]). For practical purposes however,
the values ``n = 50, r = 11`` are suggested in [1].
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
self._random_gens
See Also
========
random_pr
"""
deg = self.degree
random_gens = [x._array_form for x in self.generators]
k = len(random_gens)
if k < r:
for i in range(k, r):
random_gens.append(random_gens[i - k])
acc = list(range(deg))
random_gens.append(acc)
self._random_gens = random_gens
# handle randomized input for testing purposes
if _random_prec_n is None:
for i in range(n):
self.random_pr()
else:
for i in range(n):
self.random_pr(_random_prec=_random_prec_n[i])
def _union_find_merge(self, first, second, ranks, parents, not_rep):
"""Merges two classes in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. The class merging process uses union by rank as an
optimization. ([7])
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, the list of class sizes, ``ranks``, and the list of
elements that are not representatives, ``not_rep``, are changed due to
class merging.
See Also
========
minimal_block, _union_find_rep
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] http://www.algorithmist.com/index.php/Union_Find
"""
rep_first = self._union_find_rep(first, parents)
rep_second = self._union_find_rep(second, parents)
if rep_first != rep_second:
# union by rank
if ranks[rep_first] >= ranks[rep_second]:
new_1, new_2 = rep_first, rep_second
else:
new_1, new_2 = rep_second, rep_first
total_rank = ranks[new_1] + ranks[new_2]
if total_rank > self.max_div:
return -1
parents[new_2] = new_1
ranks[new_1] = total_rank
not_rep.append(new_2)
return 1
return 0
def _union_find_rep(self, num, parents):
"""Find representative of a class in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. After the representative of the class to which ``num``
belongs is found, path compression is performed as an optimization
([7]).
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, is altered due to path compression.
See Also
========
minimal_block, _union_find_merge
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] http://www.algorithmist.com/index.php/Union_Find
"""
rep, parent = num, parents[num]
while parent != rep:
rep = parent
parent = parents[rep]
# path compression
temp, parent = num, parents[num]
while parent != rep:
parents[temp] = rep
temp = parent
parent = parents[temp]
return rep
@property
def base(self):
"""Return a base from the Schreier-Sims algorithm.
For a permutation group ``G``, a base is a sequence of points
``B = (b_1, b_2, ..., b_k)`` such that no element of ``G`` apart
from the identity fixes all the points in ``B``. The concepts of
a base and strong generating set and their applications are
discussed in depth in [1], pp. 87-89 and [2], pp. 55-57.
An alternative way to think of ``B`` is that it gives the
indices of the stabilizer cosets that contain more than the
identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)])
>>> G.base
[0, 2]
See Also
========
strong_gens, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._base == []:
self.schreier_sims()
return self._base
def baseswap(self, base, strong_gens, pos, randomized=False,
transversals=None, basic_orbits=None, strong_gens_distr=None):
r"""Swap two consecutive base points in base and strong generating set.
If a base for a group ``G`` is given by ``(b_1, b_2, ..., b_k)``, this
function returns a base ``(b_1, b_2, ..., b_{i+1}, b_i, ..., b_k)``,
where ``i`` is given by ``pos``, and a strong generating set relative
to that base. The original base and strong generating set are not
modified.
The randomized version (default) is of Las Vegas type.
Parameters
==========
base, strong_gens
The base and strong generating set.
pos
The position at which swapping is performed.
randomized
A switch between randomized and deterministic version.
transversals
The transversals for the basic orbits, if known.
basic_orbits
The basic orbits, if known.
strong_gens_distr
The strong generators distributed by basic stabilizers, if known.
Returns
=======
(base, strong_gens)
``base`` is the new base, and ``strong_gens`` is a generating set
relative to it.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> S.base
[0, 1, 2]
>>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False)
>>> base, gens
([0, 2, 1],
[Permutation(0, 1, 2, 3), Permutation(3)(0, 1), Permutation(1, 3, 2),
Permutation(2, 3), Permutation(1, 3)])
check that base, gens is a BSGS
>>> S1 = PermutationGroup(gens)
>>> _verify_bsgs(S1, base, gens)
True
See Also
========
schreier_sims
Notes
=====
The deterministic version of the algorithm is discussed in
[1], pp. 102-103; the randomized version is discussed in [1], p.103, and
[2], p.98. It is of Las Vegas type.
Notice that [1] contains a mistake in the pseudocode and
discussion of BASESWAP: on line 3 of the pseudocode,
``|\beta_{i+1}^{\left\langle T\right\rangle}|`` should be replaced by
``|\beta_{i}^{\left\langle T\right\rangle}|``, and the same for the
discussion of the algorithm.
"""
# construct the basic orbits, generators for the stabilizer chain
# and transversal elements from whatever was provided
transversals, basic_orbits, strong_gens_distr = \
_handle_precomputed_bsgs(base, strong_gens, transversals,
basic_orbits, strong_gens_distr)
base_len = len(base)
degree = self.degree
# size of orbit of base[pos] under the stabilizer we seek to insert
# in the stabilizer chain at position pos + 1
size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \
//len(_orbit(degree, strong_gens_distr[pos], base[pos + 1]))
# initialize the wanted stabilizer by a subgroup
if pos + 2 > base_len - 1:
T = []
else:
T = strong_gens_distr[pos + 2][:]
# randomized version
if randomized is True:
stab_pos = PermutationGroup(strong_gens_distr[pos])
schreier_vector = stab_pos.schreier_vector(base[pos + 1])
# add random elements of the stabilizer until they generate it
while len(_orbit(degree, T, base[pos])) != size:
new = stab_pos.random_stab(base[pos + 1],
schreier_vector=schreier_vector)
T.append(new)
# deterministic version
else:
Gamma = set(basic_orbits[pos])
Gamma.remove(base[pos])
if base[pos + 1] in Gamma:
Gamma.remove(base[pos + 1])
# add elements of the stabilizer until they generate it by
# ruling out member of the basic orbit of base[pos] along the way
while len(_orbit(degree, T, base[pos])) != size:
gamma = next(iter(Gamma))
x = transversals[pos][gamma]
temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1])
if temp not in basic_orbits[pos + 1]:
Gamma = Gamma - _orbit(degree, T, gamma)
else:
y = transversals[pos + 1][temp]
el = rmul(x, y)
if el(base[pos]) not in _orbit(degree, T, base[pos]):
T.append(el)
Gamma = Gamma - _orbit(degree, T, base[pos])
# build the new base and strong generating set
strong_gens_new_distr = strong_gens_distr[:]
strong_gens_new_distr[pos + 1] = T
base_new = base[:]
base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos]
strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr)
for gen in T:
if gen not in strong_gens_new:
strong_gens_new.append(gen)
return base_new, strong_gens_new
@property
def basic_orbits(self):
"""
Return the basic orbits relative to a base and strong generating set.
If ``(b_1, b_2, ..., b_k)`` is a base for a group ``G``, and
``G^{(i)} = G_{b_1, b_2, ..., b_{i-1}}`` is the ``i``-th basic stabilizer
(so that ``G^{(1)} = G``), the ``i``-th basic orbit relative to this base
is the orbit of ``b_i`` under ``G^{(i)}``. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.basic_orbits
[[0, 1, 2, 3], [1, 2, 3], [2, 3]]
See Also
========
base, strong_gens, basic_transversals, basic_stabilizers
"""
if self._basic_orbits == []:
self.schreier_sims()
return self._basic_orbits
@property
def basic_stabilizers(self):
"""
Return a chain of stabilizers relative to a base and strong generating
set.
The ``i``-th basic stabilizer ``G^{(i)}`` relative to a base
``(b_1, b_2, ..., b_k)`` is ``G_{b_1, b_2, ..., b_{i-1}}``. For more
information, see [1], pp. 87-89.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> A.base
[0, 1]
>>> for g in A.basic_stabilizers:
... print(g)
...
PermutationGroup([
Permutation(3)(0, 1, 2),
Permutation(1, 2, 3)])
PermutationGroup([
Permutation(1, 2, 3)])
See Also
========
base, strong_gens, basic_orbits, basic_transversals
"""
if self._transversals == []:
self.schreier_sims()
strong_gens = self._strong_gens
base = self._base
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_stabilizers = []
for gens in strong_gens_distr:
basic_stabilizers.append(PermutationGroup(gens))
return basic_stabilizers
@property
def basic_transversals(self):
"""
Return basic transversals relative to a base and strong generating set.
The basic transversals are transversals of the basic orbits. They
are provided as a list of dictionaries, each dictionary having
keys - the elements of one of the basic orbits, and values - the
corresponding transversal elements. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.basic_transversals
[{0: Permutation(3),
1: Permutation(3)(0, 1, 2),
2: Permutation(3)(0, 2, 1),
3: Permutation(0, 3, 1)},
{1: Permutation(3),
2: Permutation(1, 2, 3),
3: Permutation(1, 3, 2)}]
See Also
========
strong_gens, base, basic_orbits, basic_stabilizers
"""
if self._transversals == []:
self.schreier_sims()
return self._transversals
def center(self):
r"""
Return the center of a permutation group.
The center for a group ``G`` is defined as
``Z(G) = \{z\in G | \forall g\in G, zg = gz \}``,
the set of elements of ``G`` that commute with all elements of ``G``.
It is equal to the centralizer of ``G`` inside ``G``, and is naturally a
subgroup of ``G`` ([9]).
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> G = D.center()
>>> G.order()
2
See Also
========
centralizer
Notes
=====
This is a naive implementation that is a straightforward application
of ``.centralizer()``
"""
return self.centralizer(self)
def centralizer(self, other):
r"""
Return the centralizer of a group/set/element.
The centralizer of a set of permutations ``S`` inside
a group ``G`` is the set of elements of ``G`` that commute with all
elements of ``S``::
``C_G(S) = \{ g \in G | gs = sg \forall s \in S\}`` ([10])
Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of
the full symmetric group, we allow for ``S`` to have elements outside
``G``.
It is naturally a subgroup of ``G``; the centralizer of a permutation
group is equal to the centralizer of any set of generators for that
group, since any element commuting with the generators commutes with
any product of the generators.
Parameters
==========
other
a permutation group/list of permutations/single permutation
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> S = SymmetricGroup(6)
>>> C = CyclicGroup(6)
>>> H = S.centralizer(C)
>>> H.is_subgroup(C)
True
See Also
========
subgroup_search
Notes
=====
The implementation is an application of ``.subgroup_search()`` with
tests using a specific base for the group ``G``.
"""
if hasattr(other, 'generators'):
if other.is_trivial or self.is_trivial:
return self
degree = self.degree
identity = _af_new(list(range(degree)))
orbits = other.orbits()
num_orbits = len(orbits)
orbits.sort(key=lambda x: -len(x))
long_base = []
orbit_reps = [None]*num_orbits
orbit_reps_indices = [None]*num_orbits
orbit_descr = [None]*degree
for i in range(num_orbits):
orbit = list(orbits[i])
orbit_reps[i] = orbit[0]
orbit_reps_indices[i] = len(long_base)
for point in orbit:
orbit_descr[point] = i
long_base = long_base + orbit
base, strong_gens = self.schreier_sims_incremental(base=long_base)
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
i = 0
for i in range(len(base)):
if strong_gens_distr[i] == [identity]:
break
base = base[:i]
base_len = i
for j in range(num_orbits):
if base[base_len - 1] in orbits[j]:
break
rel_orbits = orbits[: j + 1]
num_rel_orbits = len(rel_orbits)
transversals = [None]*num_rel_orbits
for j in range(num_rel_orbits):
rep = orbit_reps[j]
transversals[j] = dict(
other.orbit_transversal(rep, pairs=True))
trivial_test = lambda x: True
tests = [None]*base_len
for l in range(base_len):
if base[l] in orbit_reps:
tests[l] = trivial_test
else:
def test(computed_words, l=l):
g = computed_words[l]
rep_orb_index = orbit_descr[base[l]]
rep = orbit_reps[rep_orb_index]
im = g._array_form[base[l]]
im_rep = g._array_form[rep]
tr_el = transversals[rep_orb_index][base[l]]
# using the definition of transversal,
# base[l]^g = rep^(tr_el*g);
# if g belongs to the centralizer, then
# base[l]^g = (rep^g)^tr_el
return im == tr_el._array_form[im_rep]
tests[l] = test
def prop(g):
return [rmul(g, gen) for gen in other.generators] == \
[rmul(gen, g) for gen in other.generators]
return self.subgroup_search(prop, base=base,
strong_gens=strong_gens, tests=tests)
elif hasattr(other, '__getitem__'):
gens = list(other)
return self.centralizer(PermutationGroup(gens))
elif hasattr(other, 'array_form'):
return self.centralizer(PermutationGroup([other]))
def commutator(self, G, H):
"""
Return the commutator of two subgroups.
For a permutation group ``K`` and subgroups ``G``, ``H``, the
commutator of ``G`` and ``H`` is defined as the group generated
by all the commutators ``[g, h] = hgh^{-1}g^{-1}`` for ``g`` in ``G`` and
``h`` in ``H``. It is naturally a subgroup of ``K`` ([1], p.27).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> G = S.commutator(S, A)
>>> G.is_subgroup(A)
True
See Also
========
derived_subgroup
Notes
=====
The commutator of two subgroups ``H, G`` is equal to the normal closure
of the commutators of all the generators, i.e. ``hgh^{-1}g^{-1}`` for ``h``
a generator of ``H`` and ``g`` a generator of ``G`` ([1], p.28)
"""
ggens = G.generators
hgens = H.generators
commutators = []
for ggen in ggens:
for hgen in hgens:
commutator = rmul(hgen, ggen, ~hgen, ~ggen)
if commutator not in commutators:
commutators.append(commutator)
res = self.normal_closure(commutators)
return res
def coset_factor(self, g, factor_index=False):
"""Return ``G``'s (self's) coset factorization of ``g``
If ``g`` is an element of ``G`` then it can be written as the product
of permutations drawn from the Schreier-Sims coset decomposition,
The permutations returned in ``f`` are those for which
the product gives ``g``: ``g = f[n]*...f[1]*f[0]`` where ``n = len(B)``
and ``B = G.base``. f[i] is one of the permutations in
``self._basic_orbits[i]``.
If factor_index==True,
returns a tuple ``[b[0],..,b[n]]``, where ``b[i]``
belongs to ``self._basic_orbits[i]``
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> Permutation.print_cyclic = True
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
Define g:
>>> g = Permutation(7)(1, 2, 4)(3, 6, 5)
Confirm that it is an element of G:
>>> G.contains(g)
True
Thus, it can be written as a product of factors (up to
3) drawn from u. See below that a factor from u1 and u2
and the Identity permutation have been used:
>>> f = G.coset_factor(g)
>>> f[2]*f[1]*f[0] == g
True
>>> f1 = G.coset_factor(g, True); f1
[0, 4, 4]
>>> tr = G.basic_transversals
>>> f[0] == tr[0][f1[0]]
True
If g is not an element of G then [] is returned:
>>> c = Permutation(5, 6, 7)
>>> G.coset_factor(c)
[]
see util._strip
"""
if isinstance(g, (Cycle, Permutation)):
g = g.list()
if len(g) != self._degree:
# this could either adjust the size or return [] immediately
# but we don't choose between the two and just signal a possible
# error
raise ValueError('g should be the same size as permutations of G')
I = list(range(self._degree))
basic_orbits = self.basic_orbits
transversals = self._transversals
factors = []
base = self.base
h = g
for i in range(len(base)):
beta = h[base[i]]
if beta == base[i]:
factors.append(beta)
continue
if beta not in basic_orbits[i]:
return []
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
factors.append(beta)
if h != I:
return []
if factor_index:
return factors
tr = self.basic_transversals
factors = [tr[i][factors[i]] for i in range(len(base))]
return factors
def coset_rank(self, g):
"""rank using Schreier-Sims representation
The coset rank of ``g`` is the ordering number in which
it appears in the lexicographic listing according to the
coset decomposition
The ordering is the same as in G.generate(method='coset').
If ``g`` does not belong to the group it returns None.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
>>> c = Permutation(7)(2, 4)(3, 5)
>>> G.coset_rank(c)
16
>>> G.coset_unrank(16)
Permutation(7)(2, 4)(3, 5)
See Also
========
coset_factor
"""
factors = self.coset_factor(g, True)
if not factors:
return None
rank = 0
b = 1
transversals = self._transversals
base = self._base
basic_orbits = self._basic_orbits
for i in range(len(base)):
k = factors[i]
j = basic_orbits[i].index(k)
rank += b*j
b = b*len(transversals[i])
return rank
def coset_unrank(self, rank, af=False):
"""unrank using Schreier-Sims representation
coset_unrank is the inverse operation of coset_rank
if 0 <= rank < order; otherwise it returns None.
"""
if rank < 0 or rank >= self.order():
return None
base = self._base
transversals = self._transversals
basic_orbits = self._basic_orbits
m = len(base)
v = [0]*m
for i in range(m):
rank, c = divmod(rank, len(transversals[i]))
v[i] = basic_orbits[i][c]
a = [transversals[i][v[i]]._array_form for i in range(m)]
h = _af_rmuln(*a)
if af:
return h
else:
return _af_new(h)
@property
def degree(self):
"""Returns the size of the permutations in the group.
The number of permutations comprising the group is given by
len(group); the number of permutations that can be generated
by the group is given by group.order().
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[Permutation(2), Permutation(2)(0, 1)]
See Also
========
order
"""
return self._degree
def derived_series(self):
r"""Return the derived series for the group.
The derived series for a group ``G`` is defined as
``G = G_0 > G_1 > G_2 > \ldots`` where ``G_i = [G_{i-1}, G_{i-1}]``,
i.e. ``G_i`` is the derived subgroup of ``G_{i-1}``, for
``i\in\mathbb{N}``. When we have ``G_k = G_{k-1}`` for some
``k\in\mathbb{N}``, the series terminates.
Returns
=======
A list of permutation groups containing the members of the derived
series in the order ``G = G_0, G_1, G_2, \ldots``.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup, DihedralGroup)
>>> A = AlternatingGroup(5)
>>> len(A.derived_series())
1
>>> S = SymmetricGroup(4)
>>> len(S.derived_series())
4
>>> S.derived_series()[1].is_subgroup(AlternatingGroup(4))
True
>>> S.derived_series()[2].is_subgroup(DihedralGroup(2))
True
See Also
========
derived_subgroup
"""
res = [self]
current = self
next = self.derived_subgroup()
while not current.is_subgroup(next):
res.append(next)
current = next
next = next.derived_subgroup()
return res
def derived_subgroup(self):
"""Compute the derived subgroup.
The derived subgroup, or commutator subgroup is the subgroup generated
by all commutators ``[g, h] = hgh^{-1}g^{-1}`` for ``g, h\in G`` ; it is
equal to the normal closure of the set of commutators of the generators
([1], p.28, [11]).
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2, 4, 3])
>>> b = Permutation([0, 1, 3, 2, 4])
>>> G = PermutationGroup([a, b])
>>> C = G.derived_subgroup()
>>> list(C.generate(af=True))
[[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]]
See Also
========
derived_series
"""
r = self._r
gens = [p._array_form for p in self.generators]
gens_inv = [_af_invert(p) for p in gens]
set_commutators = set()
degree = self._degree
rng = list(range(degree))
for i in range(r):
for j in range(r):
p1 = gens[i]
p2 = gens[j]
c = list(range(degree))
for k in rng:
c[p2[p1[k]]] = p1[p2[k]]
ct = tuple(c)
if ct not in set_commutators:
set_commutators.add(ct)
cms = [_af_new(p) for p in set_commutators]
G2 = self.normal_closure(cms)
return G2
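# Illustrative sketch (not part of the original source): the derived subgroup
# of the symmetric group S4 is the alternating group A4, mirroring the
# ``derived_series`` example above.
#
#   >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
#   ...     AlternatingGroup)
#   >>> SymmetricGroup(4).derived_subgroup().is_subgroup(AlternatingGroup(4))
#   True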
def generate(self, method="coset", af=False):
"""Return iterator to generate the elements of the group
Iteration is done with one of these methods::
method='coset' using the Schreier-Sims coset representation
method='dimino' using the Dimino method
If af = True it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics import PermutationGroup
>>> from sympy.combinatorics.polyhedron import tetrahedron
The permutation group given in the tetrahedron object is not
a true group:
>>> G = tetrahedron.pgroup
>>> G.is_group()
False
But the group generated by the permutations in the tetrahedron
pgroup -- even the first two -- is a proper group:
>>> H = PermutationGroup(G[0], G[1])
>>> J = PermutationGroup(list(H.generate())); J
PermutationGroup([
Permutation(0, 1)(2, 3),
Permutation(3),
Permutation(1, 2, 3),
Permutation(1, 3, 2),
Permutation(0, 3, 1),
Permutation(0, 2, 3),
Permutation(0, 3)(1, 2),
Permutation(0, 1, 3),
Permutation(3)(0, 2, 1),
Permutation(0, 3, 2),
Permutation(3)(0, 1, 2),
Permutation(0, 2)(1, 3)])
>>> _.is_group()
True
"""
if method == "coset":
return self.generate_schreier_sims(af)
elif method == "dimino":
return self.generate_dimino(af)
else:
raise NotImplementedError('No generation defined for %s' % method)
def generate_dimino(self, af=False):
"""Yield group elements using Dimino's algorithm
If af == True it yields the array form of the permutations
References
==========
[1] The Implementation of Various Algorithms for Permutation Groups in
the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_dimino(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1],
[0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]
"""
idn = list(range(self.degree))
order = 0
element_list = [idn]
set_element_list = set([tuple(idn)])
if af:
yield idn
else:
yield _af_new(idn)
gens = [p._array_form for p in self.generators]
for i in range(len(gens)):
# D elements of the subgroup G_i generated by gens[:i]
D = element_list[:]
N = [idn]
while N:
A = N
N = []
for a in A:
for g in gens[:i + 1]:
ag = _af_rmul(a, g)
if tuple(ag) not in set_element_list:
# produce G_i*g
for d in D:
order += 1
ap = _af_rmul(d, ag)
if af:
yield ap
else:
p = _af_new(ap)
yield p
element_list.append(ap)
set_element_list.add(tuple(ap))
N.append(ap)
self._order = len(element_list)
def generate_schreier_sims(self, af=False):
"""Yield group elements using the Schreier-Sims representation
in coset_rank order
If af = True it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1],
[0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]]
"""
n = self._degree
u = self.basic_transversals
basic_orbits = self._basic_orbits
if len(u) == 0:
for x in self.generators:
if af:
yield x._array_form
else:
yield x
return  # end the generator (PEP 479 forbids raising StopIteration here)
if len(u) == 1:
for i in basic_orbits[0]:
if af:
yield u[0][i]._array_form
else:
yield u[0][i]
return  # end the generator (PEP 479 forbids raising StopIteration here)
u = list(reversed(u))
basic_orbits = basic_orbits[::-1]
# stg stack of group elements
stg = [list(range(n))]
posmax = [len(x) for x in u]
n1 = len(posmax) - 1
pos = [0]*n1
h = 0
while 1:
# backtrack when finished iterating over coset
if pos[h] >= posmax[h]:
if h == 0:
return  # end the generator (PEP 479 forbids raising StopIteration here)
pos[h] = 0
h -= 1
stg.pop()
continue
p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])
pos[h] += 1
stg.append(p)
h += 1
if h == n1:
if af:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
yield p
else:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
p1 = _af_new(p)
yield p1
stg.pop()
h -= 1
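# Illustrative sketch (not part of the original source): both generation
# methods enumerate the same elements, only in a different order, as can be
# seen by comparing the two docstring examples above.
#
#   >>> from sympy.combinatorics import Permutation
#   >>> from sympy.combinatorics.perm_groups import PermutationGroup
#   >>> a = Permutation([0, 2, 1, 3])
#   >>> b = Permutation([0, 2, 3, 1])
#   >>> g = PermutationGroup([a, b])
#   >>> sorted(g.generate_dimino(af=True)) == sorted(g.generate_schreier_sims(af=True))
#   True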
@property
def generators(self):
"""Returns the generators of the group.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.generators
[Permutation(1, 2), Permutation(2)(0, 1)]
"""
return self._generators
def contains(self, g, strict=True):
"""Test if permutation ``g`` belong to self, ``G``.
If ``g`` is an element of ``G`` it can be written as a product
of factors drawn from the cosets of ``G``'s stabilizers. To see
if ``g`` is one of the actual generators defining the group use
``G.has(g)``.
If ``strict`` is not True, ``g`` will be resized, if necessary,
to match the size of permutations in ``self``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(1, 2)
>>> b = Permutation(2, 3, 1)
>>> G = PermutationGroup(a, b, degree=5)
>>> G.contains(G[0]) # trivial check
True
>>> elem = Permutation([[2, 3]], size=5)
>>> G.contains(elem)
True
>>> G.contains(Permutation(4)(0, 1, 2, 3))
False
If strict is False, a permutation will be resized, if
necessary:
>>> H = PermutationGroup(Permutation(5))
>>> H.contains(Permutation(3))
False
>>> H.contains(Permutation(3), strict=False)
True
To test if a given permutation is present in the group:
>>> elem in G.generators
False
>>> G.has(elem)
False
See Also
========
coset_factor, has, in
"""
if not isinstance(g, Permutation):
return False
if g.size != self.degree:
if strict:
return False
g = Permutation(g, size=self.degree)
if g in self.generators:
return True
return bool(self.coset_factor(g.array_form, True))
@property
def is_abelian(self):
"""Test if the group is Abelian.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.is_abelian
False
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_abelian
True
"""
if self._is_abelian is not None:
return self._is_abelian
self._is_abelian = True
gens = [p._array_form for p in self.generators]
for x in gens:
for y in gens:
if y <= x:
continue
if not _af_commutes_with(x, y):
self._is_abelian = False
return False
return True
def is_alt_sym(self, eps=0.05, _random_prec=None):
r"""Monte Carlo test for the symmetric/alternating group for degrees
>= 8.
More specifically, it is one-sided Monte Carlo with the
answer True (i.e., G is symmetric/alternating) guaranteed to be
correct, and the answer False being incorrect with probability eps.
Notes
=====
The algorithm itself uses some nontrivial results from group theory and
number theory:
1) If a transitive group ``G`` of degree ``n`` contains an element
with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the
symmetric or alternating group ([1], pp. 81-82)
2) The proportion of elements in the symmetric/alternating group having
the property described in 1) is approximately ``\log(2)/\log(n)``
([1], p.82; [2], pp. 226-227).
The helper function ``_check_cycles_alt_sym`` is used to
go over the cycles in a permutation and look for ones satisfying 1).
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_alt_sym()
False
See Also
========
_check_cycles_alt_sym
"""
if _random_prec is None:
n = self.degree
if n < 8:
return False
if not self.is_transitive():
return False
if n < 17:
c_n = 0.34
else:
c_n = 0.57
d_n = (c_n*log(2))/log(n)
N_eps = int(-log(eps)/d_n)
for i in range(N_eps):
perm = self.random_pr()
if _check_cycles_alt_sym(perm):
return True
return False
else:
for i in range(_random_prec['N_eps']):
perm = _random_prec[i]
if _check_cycles_alt_sym(perm):
return True
return False
@property
def is_nilpotent(self):
"""Test if the group is nilpotent.
A group ``G`` is nilpotent if it has a central series of finite length.
Alternatively, ``G`` is nilpotent if its lower central series terminates
with the trivial group. Every nilpotent group is also solvable
([1], p.29, [12]).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> C = CyclicGroup(6)
>>> C.is_nilpotent
True
>>> S = SymmetricGroup(5)
>>> S.is_nilpotent
False
See Also
========
lower_central_series, is_solvable
"""
if self._is_nilpotent is None:
lcs = self.lower_central_series()
terminator = lcs[len(lcs) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
self._is_nilpotent = True
return True
else:
self._is_nilpotent = False
return False
else:
return self._is_nilpotent
def is_normal(self, gr):
"""Test if G=self is a normal subgroup of gr.
G is normal in gr if
for each g2 in G, g1 in gr, g = g1*g2*g1**-1 belongs to G
It is sufficient to check this for each g1 in gr.generators and
g2 in G.generators
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G1 = PermutationGroup([a, Permutation([2, 0, 1])])
>>> G1.is_normal(G)
True
"""
gens2 = [p._array_form for p in self.generators]
gens1 = [p._array_form for p in gr.generators]
for g1 in gens1:
for g2 in gens2:
p = _af_rmuln(g1, g2, _af_invert(g1))
if not self.coset_factor(p, True):
return False
return True
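# Illustrative sketch (not part of the original source): the alternating
# group is normal in the symmetric group of the same degree.
#
#   >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
#   ...     AlternatingGroup)
#   >>> AlternatingGroup(4).is_normal(SymmetricGroup(4))
#   True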
def is_primitive(self, randomized=True):
"""Test if a group is primitive.
A permutation group ``G`` acting on a set ``S`` is called primitive if
``S`` contains no nontrivial block under the action of ``G``
(a block is nontrivial if its cardinality is more than ``1``).
Notes
=====
The algorithm is described in [1], p.83, and uses the function
minimal_block to search for blocks of the form ``\{0, k\}`` for ``k``
ranging over representatives for the orbits of ``G_0``, the stabilizer of
``0``. This algorithm has complexity ``O(n^2)`` where ``n`` is the degree
of the group, and will perform badly if ``G_0`` is small.
There are two implementations offered: one finds ``G_0``
deterministically using the function ``stabilizer``, and the other
(default) produces random elements of ``G_0`` using ``random_stab``,
hoping that they generate a subgroup of ``G_0`` with not too many more
orbits than G_0 (this is suggested in [1], p.83). Behavior is changed
by the ``randomized`` flag.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_primitive()
False
See Also
========
minimal_block, random_stab
"""
if self._is_primitive is not None:
return self._is_primitive
n = self.degree
if randomized:
random_stab_gens = []
v = self.schreier_vector(0)
for i in range(len(self)):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0 and self.minimal_block([0, x]) != [0]*n:
self._is_primitive = False
return False
self._is_primitive = True
return True
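# Illustrative sketch (not part of the original source): the natural action
# of a symmetric group is primitive, in contrast to the dihedral example in
# the docstring above.
#
#   >>> from sympy.combinatorics.named_groups import SymmetricGroup
#   >>> SymmetricGroup(5).is_primitive()
#   True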
@property
def is_solvable(self):
"""Test if the group is solvable.
``G`` is solvable if its derived series terminates with the trivial
group ([1], p.29).
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(3)
>>> S.is_solvable
True
See Also
========
is_nilpotent, derived_series
"""
if self._is_solvable is None:
ds = self.derived_series()
terminator = ds[len(ds) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
return True
else:
self._is_solvable = False
return False
else:
return self._is_solvable
def is_subgroup(self, G, strict=True):
"""Return True if all elements of self belong to G.
If ``strict`` is False then if ``self``'s degree is smaller
than ``G``'s, the elements will be resized to have the same degree.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
Testing is strict by default: the degree of each group must be the
same:
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)])
>>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)])
>>> G3 = PermutationGroup([p, p**2])
>>> assert G1.order() == G2.order() == G3.order() == 6
>>> G1.is_subgroup(G2)
True
>>> G1.is_subgroup(G3)
False
>>> G3.is_subgroup(PermutationGroup(G3[1]))
False
>>> G3.is_subgroup(PermutationGroup(G3[0]))
True
To ignore the size, set ``strict`` to False:
>>> S3 = SymmetricGroup(3)
>>> S5 = SymmetricGroup(5)
>>> S3.is_subgroup(S5, strict=False)
True
>>> C7 = CyclicGroup(7)
>>> G = S5*C7
>>> S5.is_subgroup(G, False)
True
>>> C7.is_subgroup(G, 0)
False
"""
if not isinstance(G, PermutationGroup):
return False
if self == G:
return True
if G.order() % self.order() != 0:
return False
if self.degree == G.degree or \
(self.degree < G.degree and not strict):
gens = self.generators
else:
return False
return all(G.contains(g, strict=strict) for g in gens)
def is_transitive(self, strict=True):
"""Test if the group is transitive.
A group is transitive if it has a single orbit.
If ``strict`` is False the group is transitive if it has
a single orbit of length different from 1.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G1 = PermutationGroup([a, b])
>>> G1.is_transitive()
False
>>> G1.is_transitive(strict=False)
True
>>> c = Permutation([2, 3, 0, 1])
>>> G2 = PermutationGroup([a, c])
>>> G2.is_transitive()
True
>>> d = Permutation([1,0,2,3])
>>> e = Permutation([0,1,3,2])
>>> G3 = PermutationGroup([d, e])
>>> G3.is_transitive() or G3.is_transitive(strict=False)
False
"""
if self._is_transitive: # strict or not, if True then True
return self._is_transitive
if strict:
if self._is_transitive is not None: # we only store strict=True
return self._is_transitive
ans = len(self.orbit(0)) == self.degree
self._is_transitive = ans
return ans
got_orb = False
for x in self.orbits():
if len(x) > 1:
if got_orb:
return False
got_orb = True
return got_orb
@property
def is_trivial(self):
"""Test if the group is the trivial group.
This is true if the group contains only the identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> G = PermutationGroup([Permutation([0, 1, 2])])
>>> G.is_trivial
True
"""
if self._is_trivial is None:
self._is_trivial = len(self) == 1 and self[0].is_Identity
return self._is_trivial
def lower_central_series(self):
r"""Return the lower central series for the group.
The lower central series for a group ``G`` is the series
``G = G_0 > G_1 > G_2 > \ldots`` where
``G_k = [G, G_{k-1}]``, i.e. every term after the first is equal to the
commutator of ``G`` and the previous term in ``G1`` ([1], p.29).
Returns
=======
A list of permutation groups in the order
``G = G_0, G_1, G_2, \ldots``
Examples
========
>>> from sympy.combinatorics.named_groups import (AlternatingGroup,
... DihedralGroup)
>>> A = AlternatingGroup(4)
>>> len(A.lower_central_series())
2
>>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2))
True
See Also
========
commutator, derived_series
"""
res = [self]
current = self
next = self.commutator(self, current)
while not current.is_subgroup(next):
res.append(next)
current = next
next = self.commutator(self, current)
return res
@property
def max_div(self):
"""Maximum proper divisor of the degree of a permutation group.
Notes
=====
Obviously, this is the degree divided by its minimal proper divisor
(larger than ``1``, if one exists). As it is guaranteed to be prime,
the ``sieve`` from ``sympy.ntheory`` is used.
This function is also used as an optimization tool for the functions
``minimal_block`` and ``_union_find_merge``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> G = PermutationGroup([Permutation([0,2,1,3])])
>>> G.max_div
2
See Also
========
minimal_block, _union_find_merge
"""
if self._max_div is not None:
return self._max_div
n = self.degree
if n == 1:
return 1
for x in sieve:
if n % x == 0:
d = n//x
self._max_div = d
return d
def minimal_block(self, points):
r"""For a transitive group, finds the block system generated by
``points``.
If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S``
is called a block under the action of ``G`` if for all ``g`` in ``G``
we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no
common points (``g`` moves ``B`` entirely). ([1], p.23; [6]).
The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G``
partition the set ``S`` and this set of translates is known as a block
system. Moreover, we obviously have that all blocks in the partition
have the same size, hence the block size divides ``|S|`` ([1], p.23).
A ``G``-congruence is an equivalence relation ``~`` on the set ``S``
such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``.
For a transitive group, the equivalence classes of a ``G``-congruence
and the blocks of a block system are the same thing ([1], p.23).
The algorithm below checks the group for transitivity, and then finds
the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2),
..., (p_0,p_{k-1})`` which is the same as finding the maximal block
system (i.e., the one with minimum block size) such that
``p_0, ..., p_{k-1}`` are in the same block ([1], p.83).
It is an implementation of Atkinson's algorithm, as suggested in [1],
and manipulates an equivalence relation on the set ``S`` using a
union-find data structure. The running time is just above
``O(|points||S|)``. ([1], pp. 83-87; [7]).
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.minimal_block([0,5])
[0, 6, 2, 8, 4, 0, 6, 2, 8, 4]
>>> D.minimal_block([0,1])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
See Also
========
_union_find_rep, _union_find_merge, is_transitive, is_primitive
"""
if not self.is_transitive():
return False
n = self.degree
gens = self.generators
# initialize the list of equivalence class representatives
parents = list(range(n))
ranks = [1]*n
not_rep = []
k = len(points)
# the block size must divide the degree of the group
if k > self.max_div:
return [0]*n
for i in range(k - 1):
parents[points[i + 1]] = points[0]
not_rep.append(points[i + 1])
ranks[points[0]] = k
i = 0
len_not_rep = k - 1
while i < len_not_rep:
temp = not_rep[i]
i += 1
for gen in gens:
# find has side effects: performs path compression on the list
# of representatives
delta = self._union_find_rep(temp, parents)
# union has side effects: performs union by rank on the list
# of representatives
temp = self._union_find_merge(gen(temp), gen(delta), ranks,
parents, not_rep)
if temp == -1:
return [0]*n
len_not_rep += temp
for i in range(n):
# force path compression to get the final state of the equivalence
# relation
self._union_find_rep(i, parents)
return parents
def normal_closure(self, other, k=10):
r"""Return the normal closure of a subgroup/set of permutations.
If ``S`` is a subset of a group ``G``, the normal closure of ``S`` in ``G``
is defined as the intersection of all normal subgroups of ``G`` that
contain ``S`` ([1], p.14). Alternatively, it is the group generated by
the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a
generator of the subgroup ``\left\langle S\right\rangle`` generated by
``S`` (for some chosen generating set for ``\left\langle S\right\rangle``)
([1], p.73).
Parameters
==========
other
a subgroup/list of permutations/single permutation
k
an implementation-specific parameter that determines the number
of conjugates that are adjoined to ``other`` at once
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup, AlternatingGroup)
>>> S = SymmetricGroup(5)
>>> C = CyclicGroup(5)
>>> G = S.normal_closure(C)
>>> G.order()
60
>>> G.is_subgroup(AlternatingGroup(5))
True
See Also
========
commutator, derived_subgroup, random_pr
Notes
=====
The algorithm is described in [1], pp. 73-74; it makes use of the
generation of random elements for permutation groups by the product
replacement algorithm.
"""
if hasattr(other, 'generators'):
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in other.generators):
return other
Z = PermutationGroup(other.generators[:])
base, strong_gens = Z.schreier_sims_incremental()
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
self._random_pr_init(r=10, n=20)
_loop = True
while _loop:
Z._random_pr_init(r=10, n=10)
for i in range(k):
g = self.random_pr()
h = Z.random_pr()
conj = h^g
res = _strip(conj, base, basic_orbits, basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
gens = Z.generators
gens.append(conj)
Z = PermutationGroup(gens)
strong_gens.append(conj)
temp_base, temp_strong_gens = \
Z.schreier_sims_incremental(base, strong_gens)
base, strong_gens = temp_base, temp_strong_gens
strong_gens_distr = \
_distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base,
strong_gens_distr)
_loop = False
for g in self.generators:
for h in Z.generators:
conj = h^g
res = _strip(conj, base, basic_orbits,
basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
_loop = True
break
if _loop:
break
return Z
elif hasattr(other, '__getitem__'):
return self.normal_closure(PermutationGroup(other))
elif hasattr(other, 'array_form'):
return self.normal_closure(PermutationGroup([other]))
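# Illustrative sketch (not part of the original source): the normal closure
# of a single transposition in the symmetric group is the whole group, since
# all transpositions are conjugate and together they generate it.
#
#   >>> from sympy.combinatorics import Permutation
#   >>> from sympy.combinatorics.named_groups import SymmetricGroup
#   >>> S = SymmetricGroup(4)
#   >>> S.normal_closure(Permutation(3)(0, 1)).order()
#   24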
def orbit(self, alpha, action='tuples'):
r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set.
The time complexity of the algorithm used here is ``O(|Orb|*r)`` where
``|Orb|`` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
If alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as sets
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1,2,0,4,5,6,3])
>>> G = PermutationGroup([a])
>>> G.orbit(0)
set([0, 1, 2])
>>> G.orbit([0,4], 'union')
set([0, 1, 2, 3, 4, 5, 6])
See Also
========
orbit_transversal
"""
return _orbit(self.degree, self.generators, alpha, action)
def orbit_rep(self, alpha, beta, schreier_vector=None):
"""Return a group element which sends ``alpha`` to ``beta``.
If ``beta`` is not in the orbit of ``alpha``, the function returns
``False``. This implementation makes use of the schreier vector.
For a proof of correctness, see [1], p.80
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> G = AlternatingGroup(5)
>>> G.orbit_rep(0, 4)
Permutation(0, 4, 1, 2, 3)
See Also
========
schreier_vector
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if schreier_vector[beta] is None:
return False
k = schreier_vector[beta]
gens = [x._array_form for x in self.generators]
a = []
while k != -1:
a.append(gens[k])
beta = gens[k].index(beta) # beta = (~gens[k])(beta)
k = schreier_vector[beta]
if a:
return _af_new(_af_rmuln(*a))
else:
return _af_new(list(range(self._degree)))
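# Illustrative sketch (not part of the original source): the element returned
# by ``orbit_rep`` indeed maps ``alpha`` to ``beta``.
#
#   >>> from sympy.combinatorics.named_groups import AlternatingGroup
#   >>> G = AlternatingGroup(5)
#   >>> G.orbit_rep(0, 4)(0)
#   4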
def orbit_transversal(self, alpha, pairs=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
For a permutation group ``G``, a transversal for the orbit
``Orb = \{g(\alpha) | g \in G\}`` is a set
``\{g_\beta | g_\beta(\alpha) = \beta\}`` for ``\beta \in Orb``.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
``(\beta, g_\beta)``. For a proof of correctness, see [1], p.79
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.orbit_transversal(0)
[Permutation(5),
Permutation(0, 1, 2, 3, 4, 5),
Permutation(0, 5)(1, 4)(2, 3),
Permutation(0, 2, 4)(1, 3, 5),
Permutation(5)(0, 4)(1, 3),
Permutation(0, 3)(1, 4)(2, 5)]
See Also
========
orbit
"""
return _orbit_transversal(self._degree, self.generators, alpha, pairs)
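# Illustrative sketch (not part of the original source): applying each
# transversal element to ``alpha`` reaches every point of the orbit exactly
# once.
#
#   >>> from sympy.combinatorics.named_groups import DihedralGroup
#   >>> G = DihedralGroup(6)
#   >>> sorted(t(0) for t in G.orbit_transversal(0))
#   [0, 1, 2, 3, 4, 5]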
def orbits(self, rep=False):
"""Return the orbits of self, ordered according to lowest element
in each orbit.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(1,5)(2,3)(4,0,6)
>>> b = Permutation(1,5)(3,4)(2,6,0)
>>> G = PermutationGroup([a, b])
>>> G.orbits()
[set([0, 2, 3, 4, 6]), set([1, 5])]
"""
return _orbits(self._degree, self._generators)
def order(self):
"""Return the order of the group: the number of permutations that
can be generated from elements of the group.
The number of permutations comprising the group is given by
len(group); the length of each permutation in the group is
given by group.size.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[Permutation(2), Permutation(2)(0, 1)]
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.order()
6
See Also
========
degree
"""
if self._order is not None:
return self._order
if self._is_sym:
n = self._degree
self._order = factorial(n)
return self._order
if self._is_alt:
n = self._degree
self._order = factorial(n)/2
return self._order
basic_transversals = self.basic_transversals
m = 1
for x in basic_transversals:
m *= len(x)
self._order = m
return m
def pointwise_stabilizer(self, points, incremental=True):
r"""Return the pointwise stabilizer for a set of points.
For a permutation group ``G`` and a set of points
``\{p_1, p_2,\ldots, p_k\}``, the pointwise stabilizer of
``p_1, p_2, \ldots, p_k`` is defined as
``G_{p_1,\ldots, p_k} =
\{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}`` ([1], p.20).
It is a subgroup of ``G``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(7)
>>> Stab = S.pointwise_stabilizer([2, 3, 5])
>>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5))
True
See Also
========
stabilizer, schreier_sims_incremental
Notes
=====
When incremental == True,
rather than the obvious implementation using successive calls to
.stabilizer(), this uses the incremental Schreier-Sims algorithm
to obtain a base with starting segment - the given points.
"""
if incremental:
base, strong_gens = self.schreier_sims_incremental(base=points)
stab_gens = []
degree = self.degree
for gen in strong_gens:
if [gen(point) for point in points] == points:
stab_gens.append(gen)
if not stab_gens:
stab_gens = _af_new(list(range(degree)))
return PermutationGroup(stab_gens)
else:
gens = self._generators
degree = self.degree
for x in points:
gens = _stabilizer(degree, gens, x)
return PermutationGroup(gens)
def make_perm(self, n, seed=None):
"""
Multiply ``n`` randomly selected permutations from
pgroup together, starting with the identity
permutation. If ``n`` is a list of integers, those
integers will be used to select the permutations and they
will be applied in L to R order: make_perm((A, B, C)) will
give CBA(I) where I is the identity permutation.
``seed`` is used to set the seed for the random selection
of permutations from pgroup. If this is a list of integers,
the corresponding permutations from pgroup will be selected
in the order given. This is mainly used for testing purposes.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])]
>>> G = PermutationGroup([a, b])
>>> G.make_perm(1, [0])
Permutation(0, 1)(2, 3)
>>> G.make_perm(3, [0, 1, 0])
Permutation(0, 2, 3, 1)
>>> G.make_perm([0, 1, 0])
Permutation(0, 2, 3, 1)
See Also
========
random
"""
if is_sequence(n):
if seed is not None:
raise ValueError('If n is a sequence, seed should be None')
n, seed = len(n), n
else:
try:
n = int(n)
except TypeError:
raise ValueError('n must be an integer or a sequence.')
randrange = _randrange(seed)
# start with the identity permutation
result = Permutation(list(range(self.degree)))
m = len(self)
for i in range(n):
p = self[randrange(m)]
result = rmul(result, p)
return result
def random(self, af=False):
"""Return a random group element
"""
rank = randrange(self.order())
return self.coset_unrank(rank, af)
def random_pr(self, gen_count=11, iterations=50, _random_prec=None):
"""Return a random group element using product replacement.
For the details of the product replacement algorithm, see
``_random_pr_init`` In ``random_pr`` the actual 'product replacement'
is performed. Notice that if the attribute ``_random_gens``
is empty, it needs to be initialized by ``_random_pr_init``.
See Also
========
_random_pr_init
"""
if self._random_gens == []:
self._random_pr_init(gen_count, iterations)
random_gens = self._random_gens
r = len(random_gens) - 1
# handle randomized input for testing purposes
if _random_prec is None:
s = randrange(r)
t = randrange(r - 1)
if t == s:
t = r - 1
x = choice([1, 2])
e = choice([-1, 1])
else:
s = _random_prec['s']
t = _random_prec['t']
if t == s:
t = r - 1
x = _random_prec['x']
e = _random_prec['e']
if x == 1:
random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e))
random_gens[r] = _af_rmul(random_gens[r], random_gens[s])
else:
random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s])
random_gens[r] = _af_rmul(random_gens[s], random_gens[r])
return _af_new(random_gens[r])
def random_stab(self, alpha, schreier_vector=None, _random_prec=None):
"""Random element from the stabilizer of ``alpha``.
The schreier vector for ``alpha`` is an optional argument used
for speeding up repeated calls. The algorithm is described in [1], p.81
See Also
========
random_pr, orbit_rep
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if _random_prec is None:
rand = self.random_pr()
else:
rand = _random_prec['rand']
beta = rand(alpha)
h = self.orbit_rep(alpha, beta, schreier_vector)
return rmul(~h, rand)
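# Illustrative sketch (not part of the original source): whatever random
# element is produced, it fixes ``alpha`` by construction.
#
#   >>> from sympy.combinatorics.named_groups import DihedralGroup
#   >>> D = DihedralGroup(8)
#   >>> D.random_stab(2)(2)
#   2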
def schreier_sims(self):
"""Schreier-Sims algorithm.
It computes the generators of the chain of stabilizers
G > G_{b_1} > .. > G_{b_1,..,b_r} > 1
in which G_{b_1,..,b_i} stabilizes b_1,..,b_i,
and the corresponding ``s`` cosets.
An element of the group can be written as the product
h_1*..*h_s.
We use the incremental Schreier-Sims algorithm.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_sims()
>>> G.basic_transversals
[{0: Permutation(2)(0, 1), 1: Permutation(2), 2: Permutation(1, 2)},
{0: Permutation(2), 2: Permutation(0, 2)}]
"""
if self._transversals:
return
base, strong_gens = self.schreier_sims_incremental()
self._base = base
self._strong_gens = strong_gens
if not base:
self._transversals = []
self._basic_orbits = []
return
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,\
strong_gens_distr)
self._transversals = transversals
self._basic_orbits = [sorted(x) for x in basic_orbits]
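# Illustrative sketch (not part of the original source): the product of the
# basic transversal sizes equals the group order, which is exactly how
# ``order`` computes it for generic groups.
#
#   >>> from sympy.combinatorics.permutations import Permutation
#   >>> from sympy.combinatorics.perm_groups import PermutationGroup
#   >>> a = Permutation([0, 2, 1])
#   >>> b = Permutation([1, 0, 2])
#   >>> G = PermutationGroup([a, b])
#   >>> G.schreier_sims()
#   >>> [len(t) for t in G.basic_transversals]
#   [3, 2]
#   >>> G.order()
#   6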
def schreier_sims_incremental(self, base=None, gens=None):
"""Extend a sequence of points and generating set to a base and strong
generating set.
Parameters
==========
base
The sequence of points to be extended to a base. Optional
parameter with default value ``[]``.
gens
The generating set to be extended to a strong generating set
relative to the base obtained. Optional parameter with default
value ``self.generators``.
Returns
=======
(base, strong_gens)
``base`` is the base obtained, and ``strong_gens`` is the strong
generating set relative to it. The original parameters ``base``,
``gens`` remain unchanged.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(7)
>>> base = [2, 3]
>>> seq = [2, 3]
>>> base, strong_gens = A.schreier_sims_incremental(base=seq)
>>> _verify_bsgs(A, base, strong_gens)
True
>>> base[:2]
[2, 3]
Notes
=====
This version of the Schreier-Sims algorithm runs in polynomial time.
There are certain assumptions in the implementation - if the trivial
group is provided, ``base`` and ``gens`` are returned immediately,
as any sequence of points is a base for the trivial group. If the
identity is present in the generators ``gens``, it is removed as
it is a redundant generator.
The implementation is described in [1], pp. 90-93.
See Also
========
schreier_sims, schreier_sims_random
"""
if base is None:
base = []
if gens is None:
gens = self.generators[:]
degree = self.degree
id_af = list(range(degree))
# handle the trivial group
if len(gens) == 1 and gens[0].is_Identity:
return base, gens
# prevent side effects
_base, _gens = base[:], gens[:]
# remove the identity as a generator
_gens = [x for x in _gens if not x.is_Identity]
# make sure no generator fixes all base points
for gen in _gens:
if all(x == gen._array_form[x] for x in _base):
for new in id_af:
if gen._array_form[new] != new:
break
else:
assert None # can this ever happen?
_base.append(new)
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(_base, _gens)
# initialize the basic stabilizers, basic orbits and basic transversals
orbs = {}
transversals = {}
base_len = len(_base)
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(degree, strong_gens_distr[i],
_base[i], pairs=True, af=True))
orbs[i] = list(transversals[i].keys())
# main loop: amend the stabilizer chain until we have generators
# for all stabilizers
i = base_len - 1
while i >= 0:
# this flag is used to continue with the main loop from inside
# a nested loop
continue_i = False
# test the generators for being a strong generating set
db = {}
for beta, u_beta in list(transversals[i].items()):
for gen in strong_gens_distr[i]:
gb = gen._array_form[beta]
u1 = transversals[i][gb]
g1 = _af_rmul(gen._array_form, u_beta)
if g1 != u1:
# test if the schreier generator is in the i+1-th
# would-be basic stabilizer
y = True
try:
u1_inv = db[gb]
except KeyError:
u1_inv = db[gb] = _af_invert(u1)
schreier_gen = _af_rmul(u1_inv, g1)
h, j = _strip_af(schreier_gen, _base, orbs, transversals, i)
if j <= base_len:
# new strong generator h at level j
y = False
elif h:
# h fixes all base points
y = False
moved = 0
while h[moved] == moved:
moved += 1
_base.append(moved)
base_len += 1
strong_gens_distr.append([])
if y is False:
# if a new strong generator is found, update the
# data structures and start over
h = _af_new(h)
for l in range(i + 1, j):
strong_gens_distr[l].append(h)
transversals[l] =\
dict(_orbit_transversal(degree, strong_gens_distr[l],
_base[l], pairs=True, af=True))
orbs[l] = list(transversals[l].keys())
i = j - 1
# continue main loop using the flag
continue_i = True
if continue_i is True:
break
if continue_i is True:
break
if continue_i is True:
continue
i -= 1
# build the strong generating set
strong_gens = []
for gens in strong_gens_distr:
for gen in gens:
if gen not in strong_gens:
strong_gens.append(gen)
return _base, strong_gens
def schreier_sims_random(self, base=None, gens=None, consec_succ=10,
_random_prec=None):
r"""Randomized Schreier-Sims algorithm.
The randomized Schreier-Sims algorithm takes the sequence ``base``
and the generating set ``gens``, and extends ``base`` to a base, and
``gens`` to a strong generating set relative to that base with
probability of a wrong answer at most ``2^{-consec\_succ}``,
provided the random generators are sufficiently random.
Parameters
==========
base
The sequence to be extended to a base.
gens
The generating set to be extended to a strong generating set.
consec_succ
The parameter defining the probability of a wrong answer.
_random_prec
An internal parameter used for testing purposes.
Returns
=======
(base, strong_gens)
``base`` is the base and ``strong_gens`` is the strong generating
set relative to it.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(5)
>>> base, strong_gens = S.schreier_sims_random(consec_succ=5)
>>> _verify_bsgs(S, base, strong_gens) #doctest: +SKIP
True
Notes
=====
The algorithm is described in detail in [1], pp. 97-98. It extends
the orbits ``orbs`` and the permutation groups ``stabs`` to
basic orbits and basic stabilizers for the base and strong generating
set produced in the end.
The idea of the extension process
is to "sift" random group elements through the stabilizer chain
and amend the stabilizers/orbits along the way when a sift
is not successful.
The helper function ``_strip`` is used to attempt
to decompose a random group element according to the current
state of the stabilizer chain and report whether the element was
fully decomposed (successful sift) or not (unsuccessful sift). In
the latter case, the level at which the sift failed is reported and
used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly.
The halting condition is for ``consec_succ`` consecutive successful
sifts to pass. This makes sure that the current ``base`` and ``gens``
form a BSGS with probability at least ``1 - 2^{-consec\_succ}``.
See Also
========
schreier_sims
"""
if base is None:
base = []
if gens is None:
gens = self.generators
base_len = len(base)
n = self.degree
# make sure no generator fixes all base points
for gen in gens:
if all(gen(x) == x for x in base):
new = 0
while gen._array_form[new] == new:
new += 1
base.append(new)
base_len += 1
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(base, gens)
# initialize the basic stabilizers, basic transversals and basic orbits
transversals = {}
orbs = {}
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i],
base[i], pairs=True))
orbs[i] = list(transversals[i].keys())
# initialize the number of consecutive elements sifted
c = 0
# start sifting random elements while the number of consecutive sifts
# is less than consec_succ
while c < consec_succ:
if _random_prec is None:
g = self.random_pr()
else:
g = _random_prec['g'].pop()
h, j = _strip(g, base, orbs, transversals)
y = True
# determine whether a new base point is needed
if j <= base_len:
y = False
elif not h.is_Identity:
y = False
moved = 0
while h(moved) == moved:
moved += 1
base.append(moved)
base_len += 1
strong_gens_distr.append([])
# if the element doesn't sift, amend the strong generators and
# associated stabilizers and orbits
if y is False:
for l in range(1, j):
strong_gens_distr[l].append(h)
transversals[l] = dict(_orbit_transversal(n,
strong_gens_distr[l], base[l], pairs=True))
orbs[l] = list(transversals[l].keys())
c = 0
else:
c += 1
# build the strong generating set
strong_gens = strong_gens_distr[0][:]
for gen in strong_gens_distr[1]:
if gen not in strong_gens:
strong_gens.append(gen)
return base, strong_gens
def schreier_vector(self, alpha):
"""Computes the schreier vector for ``alpha``.
The Schreier vector efficiently stores information
about the orbit of ``alpha``. It can later be used to quickly obtain
elements of the group that send ``alpha`` to a particular element
in the orbit. Notice that the Schreier vector depends on the order
in which the group generators are listed. For a definition, see [3].
Since list indices start from zero, we adopt the convention to use
"None" instead of 0 to signify that an element doesn't belong
to the orbit.
For the algorithm and its correctness, see [2], pp.78-80.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([2,4,6,3,1,5,0])
>>> b = Permutation([0,1,3,5,4,6,2])
>>> G = PermutationGroup([a,b])
>>> G.schreier_vector(0)
[-1, None, 0, 1, None, 1, 0]
See Also
========
orbit
"""
n = self.degree
v = [None]*n
v[alpha] = -1
orb = [alpha]
used = [False]*n
used[alpha] = True
gens = self.generators
r = len(gens)
for b in orb:
for i in range(r):
temp = gens[i]._array_form[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
v[temp] = i
return v
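# Illustrative sketch (not part of the original source): the positions that
# are not ``None`` in the Schreier vector are precisely the orbit of
# ``alpha``.
#
#   >>> from sympy.combinatorics.perm_groups import PermutationGroup
#   >>> from sympy.combinatorics.permutations import Permutation
#   >>> a = Permutation([2, 4, 6, 3, 1, 5, 0])
#   >>> b = Permutation([0, 1, 3, 5, 4, 6, 2])
#   >>> G = PermutationGroup([a, b])
#   >>> v = G.schreier_vector(0)
#   >>> set(i for i in range(G.degree) if v[i] is not None) == G.orbit(0)
#   True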
def stabilizer(self, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
The stabilizer of ``\alpha`` is the group ``G_\alpha =
\{g \in G | g(\alpha) = \alpha\}``.
For a proof of correctness, see [1], p.79.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.stabilizer(5)
PermutationGroup([
Permutation(5)(0, 4)(1, 3),
Permutation(5)])
See Also
========
orbit
"""
return PermGroup(_stabilizer(self._degree, self._generators, alpha))
@property
def strong_gens(self):
"""Return a strong generating set from the Schreier-Sims algorithm.
A generating set ``S = \{g_1, g_2, ..., g_t\}`` for a permutation group
``G`` is a strong generating set relative to the sequence of points
(referred to as a "base") ``(b_1, b_2, ..., b_k)`` if, for
``1 \leq i \leq k`` we have that the intersection of the pointwise
stabilizer ``G^{(i+1)} := G_{b_1, b_2, ..., b_i}`` with ``S`` generates
the pointwise stabilizer ``G^{(i+1)}``. The concepts of a base and
strong generating set and their applications are discussed in depth
in [1], pp. 87-89 and [2], pp. 55-57.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> D.strong_gens
[Permutation(0, 1, 2, 3), Permutation(0, 3)(1, 2), Permutation(1, 3)]
>>> D.base
[0, 1]
See Also
========
base, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._strong_gens == []:
self.schreier_sims()
return self._strong_gens
def subgroup_search(self, prop, base=None, strong_gens=None, tests=None,
init_subgroup=None):
"""Find the subgroup of all elements satisfying the property ``prop``.
This is done by a depth-first search with respect to base images that
uses several tests to prune the search tree.
Parameters
==========
prop
The property to be used. Has to be callable on group elements
and always return ``True`` or ``False``. It is assumed that
all group elements satisfying ``prop`` indeed form a subgroup.
base
A base for the supergroup.
strong_gens
A strong generating set for the supergroup.
tests
A list of callables of length equal to the length of ``base``.
These are used to rule out group elements by partial base images,
so that ``tests[l](g)`` returns False if the element ``g`` is known
not to satisfy ``prop`` based on where ``g`` sends the first ``l + 1`` base
points.
init_subgroup
if a subgroup of the sought group is
known in advance, it can be passed to the function as this
parameter.
Returns
=======
res
The subgroup of all elements satisfying ``prop``. The generating
set for this group is guaranteed to be a strong generating set
relative to the base ``base``.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(7)
>>> prop_even = lambda x: x.is_even
>>> base, strong_gens = S.schreier_sims_incremental()
>>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens)
>>> G.is_subgroup(AlternatingGroup(7))
True
>>> _verify_bsgs(G, base, G.generators)
True
Notes
=====
This function is extremely lengthy and complicated and will require
some careful attention. The implementation is described in
[1], pp. 114-117, and the comments for the code here follow the lines
of the pseudocode in the book for clarity.
The complexity is exponential in general, since the search process by
itself visits all members of the supergroup. However, there are a lot
of tests which are used to prune the search tree, and users can define
their own tests via the ``tests`` parameter, so in practice, and for
some computations, it's not terrible.
A crucial part in the procedure is the frequent base change performed
(this is line 11 in the pseudocode) in order to obtain a new basic
stabilizer. The book mentions that this can be done by using
``.baseswap(...)``, however the current implementation uses a more
straightforward way to find the next basic stabilizer - calling the
function ``.stabilizer(...)`` on the previous basic stabilizer.
"""
# initialize BSGS and basic group properties
def get_reps(orbits):
# get the minimal element in the base ordering
return [min(orbit, key = lambda x: base_ordering[x]) \
for orbit in orbits]
def update_nu(l):
temp_index = len(basic_orbits[l]) + 1 -\
len(res_basic_orbits_init_base[l])
# this corresponds to the element larger than all points
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
if base is None:
base, strong_gens = self.schreier_sims_incremental()
base_len = len(base)
degree = self.degree
identity = _af_new(list(range(degree)))
base_ordering = _base_ordering(base, degree)
# add an element larger than all points
base_ordering.append(degree)
# add an element smaller than all points
base_ordering.append(-1)
# compute BSGS-related structures
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
strong_gens_distr)
# handle subgroup initialization and tests
if init_subgroup is None:
init_subgroup = PermutationGroup([identity])
if tests is None:
trivial_test = lambda x: True
tests = []
for i in range(base_len):
tests.append(trivial_test)
# line 1: more initializations.
res = init_subgroup
f = base_len - 1
l = base_len - 1
# line 2: set the base for K to the base for G
res_base = base[:]
# line 3: compute BSGS and related structures for K
res_base, res_strong_gens = res.schreier_sims_incremental(
base=res_base)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_generators = res.generators
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i])\
for i in range(base_len)]
# initialize orbit representatives
orbit_reps = [None]*base_len
# line 4: orbit representatives for f-th basic stabilizer of K
orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(orbits)
# line 5: remove the base point from the representatives to avoid
# getting the identity element as a generator for K
orbit_reps[f].remove(base[f])
# line 6: more initializations
c = [0]*base_len
u = [identity]*base_len
sorted_orbits = [None]*base_len
for i in range(base_len):
sorted_orbits[i] = basic_orbits[i][:]
sorted_orbits[i].sort(key=lambda point: base_ordering[point])
# line 7: initializations
mu = [None]*base_len
nu = [None]*base_len
# this corresponds to the element smaller than all points
mu[l] = degree + 1
update_nu(l)
# initialize computed words
computed_words = [identity]*base_len
# line 8: main loop
while True:
# apply all the tests
while l < base_len - 1 and \
computed_words[l](base[l]) in orbit_reps[l] and \
base_ordering[mu[l]] < \
base_ordering[computed_words[l](base[l])] < \
base_ordering[nu[l]] and \
tests[l](computed_words):
# line 11: change the (partial) base of K
new_point = computed_words[l](base[l])
res_base[l] = new_point
new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l],
new_point)
res_strong_gens_distr[l + 1] = new_stab_gens
# line 12: calculate minimal orbit representatives for the
# l+1-th basic stabilizer
orbits = _orbits(degree, new_stab_gens)
orbit_reps[l + 1] = get_reps(orbits)
# line 13: amend sorted orbits
l += 1
temp_orbit = [computed_words[l - 1](point) for point
in basic_orbits[l]]
temp_orbit.sort(key=lambda point: base_ordering[point])
sorted_orbits[l] = temp_orbit
# lines 14 and 15: update variables used in minimality tests
new_mu = degree + 1
for i in range(l):
if base[l] in res_basic_orbits_init_base[i]:
candidate = computed_words[i](base[i])
if base_ordering[candidate] > base_ordering[new_mu]:
new_mu = candidate
mu[l] = new_mu
update_nu(l)
# line 16: determine the new transversal element
c[l] = 0
temp_point = sorted_orbits[l][c[l]]
gamma = computed_words[l - 1]._array_form.index(temp_point)
u[l] = transversals[l][gamma]
# update computed words
computed_words[l] = rmul(computed_words[l - 1], u[l])
# lines 17 & 18: apply the tests to the group element found
g = computed_words[l]
temp_point = g(base[l])
if l == base_len - 1 and \
base_ordering[mu[l]] < \
base_ordering[temp_point] < base_ordering[nu[l]] and \
temp_point in orbit_reps[l] and \
tests[l](computed_words) and \
prop(g):
# line 19: reset the base of K
res_generators.append(g)
res_base = base[:]
# line 20: recalculate basic orbits (and transversals)
res_strong_gens.append(g)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i]) \
for i in range(base_len)]
# line 21: recalculate orbit representatives
# line 22: reset the search depth
orbit_reps[f] = get_reps(orbits)
l = f
# line 23: go up the tree until in the first branch not fully
# searched
while l >= 0 and c[l] == len(basic_orbits[l]) - 1:
l = l - 1
# line 24: if the entire tree is traversed, return K
if l == -1:
return PermutationGroup(res_generators)
# lines 25-27: update orbit representatives
if l < f:
# line 26
f = l
c[l] = 0
# line 27
temp_orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(temp_orbits)
# line 28: update variables used for minimality testing
mu[l] = degree + 1
temp_index = len(basic_orbits[l]) + 1 - \
len(res_basic_orbits_init_base[l])
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
# line 29: set the next element from the current branch and update
# accordingly
c[l] += 1
if l == 0:
gamma = sorted_orbits[l][c[l]]
else:
gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]])
u[l] = transversals[l][gamma]
if l == 0:
computed_words[l] = u[l]
else:
computed_words[l] = rmul(computed_words[l - 1], u[l])
@property
def transitivity_degree(self):
"""Compute the degree of transitivity of the group.
A permutation group ``G`` acting on ``\Omega = \{0, 1, ..., n-1\}`` is
``k``-fold transitive, if, for any k points
``(a_1, a_2, ..., a_k)\in\Omega`` and any k points
``(b_1, b_2, ..., b_k)\in\Omega`` there exists ``g\in G`` such that
``g(a_1)=b_1, g(a_2)=b_2, ..., g(a_k)=b_k``
The degree of transitivity of ``G`` is the maximum ``k`` such that
``G`` is ``k``-fold transitive. ([8])
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a,b])
>>> G.transitivity_degree
3
See Also
========
is_transitive, orbit
"""
if self._transitivity_degree is None:
n = self.degree
G = self
# if G is k-transitive, a tuple (a_0,..,a_k)
# can be brought to (b_0,...,b_(k-1), b_k)
# where b_0,...,b_(k-1) are fixed points;
# consider the group G_k which stabilizes b_0,...,b_(k-1)
# if G_k is transitive on the subset excluding b_0,...,b_(k-1)
# then G is (k+1)-transitive
for i in range(n):
orb = G.orbit(i)
if len(orb) != n - i:
self._transitivity_degree = i
return i
G = G.stabilizer(i)
self._transitivity_degree = n
return n
else:
return self._transitivity_degree
def is_group(self):
"""Return True if the group meets three criteria: identity is present,
the inverse of every element is also an element, and the product of
any two elements is also an element. If any of the tests fail, False
is returned.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics import PermutationGroup
>>> from sympy.combinatorics.polyhedron import tetrahedron
The permutation group given in the tetrahedron object is not
a true group:
>>> G = tetrahedron.pgroup
>>> G.is_group()
False
But the group generated by the permutations in the tetrahedron
pgroup is a proper group:
>>> H = PermutationGroup(list(G.generate()))
>>> H.is_group()
True
The identity permutation is present:
>>> H.has(Permutation(G.degree - 1))
True
The product of any two elements from the group is also in the group:
>>> from sympy import TableForm
>>> g = list(H)
>>> n = len(g)
>>> m = []
>>> for i in g:
... m.append([g.index(i*H) for H in g])
...
>>> TableForm(m, headings=[range(n), range(n)], wipe_zeros=False)
| 0 1 2 3 4 5 6 7 8 9 10 11
----------------------------------------
0 | 11 0 8 10 6 2 7 4 5 3 9 1
1 | 0 1 2 3 4 5 6 7 8 9 10 11
2 | 6 2 7 4 5 3 9 1 11 0 8 10
3 | 5 3 9 1 11 0 8 10 6 2 7 4
4 | 3 4 0 2 10 6 11 8 9 7 1 5
5 | 4 5 6 7 8 9 10 11 0 1 2 3
6 | 10 6 11 8 9 7 1 5 3 4 0 2
7 | 9 7 1 5 3 4 0 2 10 6 11 8
8 | 7 8 4 6 2 10 3 0 1 11 5 9
9 | 8 9 10 11 0 1 2 3 4 5 6 7
10 | 2 10 3 0 1 11 5 9 7 8 4 6
11 | 1 11 5 9 7 8 4 6 2 10 3 0
>>>
The entries in the table give the element in the group corresponding
to the product of a given column element and row element:
>>> g[3]*g[2] == g[9]
True
The inverse of every element is also in the group:
>>> TableForm([[g.index(~gi) for gi in g]], headings=[[], range(n)],
... wipe_zeros=False)
0 1 2 3 4 5 6 7 8 9 10 11
---------------------------
11 1 7 3 10 9 6 2 8 5 4 0
So we see that g[1] and g[3] are equivalent to their inverse while
g[7] == ~g[2].
"""
# identity present
I = Permutation(size=self.degree)
for g in self:
if g == I:
break
else:
return False
# associativity already holds: a*(b*c) == (a*b)*c for permutations
# inverse of each is present
if not all(self.has(~a) for a in self):
return False
# closure
for a in self:
for b in self:
if not self.has(a*b):
return False
return True
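# Added sketch, not from the original file: the same three criteria can be
# re-checked by hand on the element list generated from two permutations;
# all names below are local to this illustration.
from sympy.combinatorics import Permutation, PermutationGroup
a = Permutation([1, 2, 0])
b = Permutation([1, 0, 2])
elements = list(PermutationGroup([a, b]).generate())
identity = Permutation(size=3)
has_identity = identity in elements
has_inverses = all((~g) in elements for g in elements)
is_closed = all((g*h) in elements for g in elements for h in elements)
print(has_identity and has_inverses and is_closed)  # expected: True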
def _orbit(degree, generators, alpha, action='tuples'):
r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set.
The time complexity of the algorithm used here is ``O(|Orb|*r)`` where
``|Orb|`` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
If alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as an unordered set
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup, _orbit
>>> a = Permutation([1,2,0,4,5,6,3])
>>> G = PermutationGroup([a])
>>> _orbit(G.degree, G.generators, 0)
set([0, 1, 2])
>>> _orbit(G.degree, G.generators, [0, 4], 'union')
set([0, 1, 2, 3, 4, 5, 6])
See Also
========
orbit, orbit_transversal
"""
if not hasattr(alpha, '__getitem__'):
alpha = [alpha]
gens = [x._array_form for x in generators]
if len(alpha) == 1 or action == 'union':
orb = alpha
used = [False]*degree
for el in alpha:
used[el] = True
for b in orb:
for gen in gens:
temp = gen[b]
if not used[temp]:
orb.append(temp)
used[temp] = True
return set(orb)
elif action == 'tuples':
alpha = tuple(alpha)
orb = [alpha]
used = set([alpha])
for b in orb:
for gen in gens:
temp = tuple([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set(orb)
elif action == 'sets':
alpha = frozenset(alpha)
orb = [alpha]
used = set([alpha])
for b in orb:
for gen in gens:
temp = frozenset([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set([tuple(x) for x in orb])
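# Illustrative sketch of the 'tuples' and 'sets' actions described above
# (the doctest only covers the default and 'union' cases); this snippet is
# an addition, not part of the original module.
from sympy.combinatorics import Permutation
from sympy.combinatorics.perm_groups import PermutationGroup, _orbit
a = Permutation([1, 2, 0, 4, 5, 6, 3])
G = PermutationGroup([a])
# Ordered tuples: (0, 1) is moved as a whole, giving (g(0), g(1)) at each step.
print(_orbit(G.degree, G.generators, [0, 1], 'tuples'))
# expected (in some order): {(0, 1), (1, 2), (2, 0)}
# Unordered sets: the same points, but each orbit element ignores ordering.
print(_orbit(G.degree, G.generators, [0, 1], 'sets'))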
def _orbits(degree, generators):
"""Compute the orbits of G.
Returns a list of sets, one set of points for each orbit of
``{0, 1, ..., degree-1}`` under the group generated by ``generators``.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup, _orbits
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> _orbits(a.size, [a, b])
[set([0, 1, 2])]
"""
seen = set() # elements that have already appeared in orbits
orbs = []
sorted_I = list(range(degree))
I = set(sorted_I)
while I:
i = sorted_I[0]
orb = _orbit(degree, generators, i)
orbs.append(orb)
# remove all indices that are in this orbit
I -= orb
sorted_I = [i for i in sorted_I if i not in orb]
return orbs
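# Small added sketch showing a case with several orbits (the doctest above
# only shows a single-orbit case); the generator chosen here is illustrative.
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.perm_groups import _orbits
a = Permutation([1, 0, 2, 3])   # a transposition on 4 points; 2 and 3 stay fixed
print(_orbits(a.size, [a]))     # expected: [{0, 1}, {2}, {3}]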
def _orbit_transversal(degree, generators, alpha, pairs, af=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
generators generators of the group ``G``
For a permutation group ``G``, a transversal for the orbit
``Orb = \{g(\alpha) | g \in G\}`` is a set
``\{g_\beta | g_\beta(\alpha) = \beta\}`` for ``\beta \in Orb``.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
``(\beta, g_\beta)``. For a proof of correctness, see [1], p.79
If ``af`` is ``True``, the transversal elements are given in array form.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.perm_groups import _orbit_transversal
>>> G = DihedralGroup(6)
>>> _orbit_transversal(G.degree, G.generators, 0, False)
[Permutation(5),
Permutation(0, 1, 2, 3, 4, 5),
Permutation(0, 5)(1, 4)(2, 3),
Permutation(0, 2, 4)(1, 3, 5),
Permutation(5)(0, 4)(1, 3),
Permutation(0, 3)(1, 4)(2, 5)]
"""
tr = [(alpha, list(range(degree)))]
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
for x, px in tr:
for gen in gens:
temp = gen[x]
if not used[temp]:
tr.append((temp, _af_rmul(gen, px)))
used[temp] = True
if pairs:
if not af:
tr = [(x, _af_new(y)) for x, y in tr]
return tr
if af:
return [y for _, y in tr]
return [_af_new(y) for _, y in tr]
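# Added verification sketch (assumes Permutation.array_form, as used elsewhere
# in this module): with pairs=True, each transversal element g_beta should
# send alpha to its orbit point beta.
from sympy.combinatorics.named_groups import DihedralGroup
from sympy.combinatorics.perm_groups import _orbit_transversal
G = DihedralGroup(6)
pairs = _orbit_transversal(G.degree, G.generators, 0, True)
print(all(g.array_form[0] == beta for beta, g in pairs))  # expected: True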
def _stabilizer(degree, generators, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
The stabilizer of ``\alpha`` is the group ``G_\alpha =
\{g \in G | g(\alpha) = \alpha\}``.
For a proof of correctness, see [1], p.79.
degree degree of G
generators generators of G
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import _stabilizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> _stabilizer(G.degree, G.generators, 5)
[Permutation(5)(0, 4)(1, 3), Permutation(5)]
See Also
========
orbit
"""
orb = [alpha]
table = {alpha: list(range(degree))}
table_inv = {alpha: list(range(degree))}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
stab_gens = []
for b in orb:
for gen in gens:
temp = gen[b]
if not used[temp]:
gen_temp = _af_rmul(gen, table[b])
orb.append(temp)
table[temp] = gen_temp
table_inv[temp] = _af_invert(gen_temp)
used[temp] = True
else:
schreier_gen = _af_rmuln(table_inv[temp], gen, table[b])
if schreier_gen not in stab_gens:
stab_gens.append(schreier_gen)
return [_af_new(x) for x in stab_gens]
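# Added sanity sketch: by definition, every Schreier generator returned by
# _stabilizer must leave the chosen point fixed.
from sympy.combinatorics.named_groups import DihedralGroup
from sympy.combinatorics.perm_groups import _stabilizer
G = DihedralGroup(6)
print(all(g.array_form[5] == 5 for g in _stabilizer(G.degree, G.generators, 5)))
# expected: True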
PermGroup = PermutationGroup
| 35.037719
| 89
| 0.54279
|
b69202db9249f46e5d7919f7469fedb966efd798
| 2,753
|
py
|
Python
|
keystonemiddleware/tests/unit/test_fixtures.py
|
jrbalderrama/keystonemiddleware
|
4bc09580070c5f6afa9ef39a3d9d1641de557589
|
[
"Apache-1.1"
] | 55
|
2015-01-29T20:10:42.000Z
|
2022-03-11T04:02:22.000Z
|
keystonemiddleware/tests/unit/test_fixtures.py
|
jrbalderrama/keystonemiddleware
|
4bc09580070c5f6afa9ef39a3d9d1641de557589
|
[
"Apache-1.1"
] | 1
|
2019-02-18T10:31:04.000Z
|
2019-02-18T10:31:04.000Z
|
keystonemiddleware/tests/unit/test_fixtures.py
|
jrbalderrama/keystonemiddleware
|
4bc09580070c5f6afa9ef39a3d9d1641de557589
|
[
"Apache-1.1"
] | 49
|
2015-02-02T23:57:09.000Z
|
2021-12-17T19:01:53.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_utils import timeutils
from keystonemiddleware import fixture
from keystonemiddleware.tests.unit.auth_token import test_auth_token_middleware
class AuthTokenFixtureTest(
test_auth_token_middleware.BaseAuthTokenMiddlewareTest):
def setUp(self):
self.token_id = uuid.uuid4().hex
self.user_id = uuid.uuid4().hex
self.username = uuid.uuid4().hex
self.project_id = uuid.uuid4().hex
self.project_name = uuid.uuid4().hex
self.role_list = [uuid.uuid4().hex, uuid.uuid4().hex]
super(AuthTokenFixtureTest, self).setUp()
self.atm_fixture = self.useFixture(fixture.AuthTokenFixture())
self.atm_fixture.add_token_data(token_id=self.token_id,
user_id=self.user_id,
user_name=self.username,
role_list=self.role_list,
project_id=self.project_id,
project_name=self.project_name)
self.set_middleware()
self.middleware._app.expected_env = {
'HTTP_X_USER_ID': self.user_id,
'HTTP_X_USER_NAME': self.username,
'HTTP_X_PROJECT_ID': self.project_id,
'HTTP_X_PROJECT_NAME': self.project_name,
'HTTP_X_ROLES': ','.join(self.role_list)}
def test_auth_token_fixture_valid_token(self):
resp = self.call_middleware(headers={'X-Auth-Token': self.token_id})
self.assertIn('keystone.token_info', resp.request.environ)
def test_auth_token_fixture_invalid_token(self):
self.call_middleware(
headers={'X-Auth-Token': uuid.uuid4().hex}, expected_status=401)
def test_auth_token_fixture_expired_token(self):
expired_token_id = uuid.uuid4().hex
self.atm_fixture.add_token_data(
token_id=expired_token_id,
user_id=self.user_id,
role_list=self.role_list,
expires=(timeutils.utcnow() - datetime.timedelta(seconds=86400)))
self.call_middleware(
headers={'X-Auth-Token': expired_token_id}, expected_status=401)
| 41.712121
| 79
| 0.657828
|
a271f263d53aa5d4b5a4f8465ef093c8b5f37283
| 7,093
|
py
|
Python
|
web3/_utils/module_testing/personal_module.py
|
y19818/web3.py
|
32a85a287ab63220d1e0c06d77be74de595ff02f
|
[
"MIT"
] | null | null | null |
web3/_utils/module_testing/personal_module.py
|
y19818/web3.py
|
32a85a287ab63220d1e0c06d77be74de595ff02f
|
[
"MIT"
] | null | null | null |
web3/_utils/module_testing/personal_module.py
|
y19818/web3.py
|
32a85a287ab63220d1e0c06d77be74de595ff02f
|
[
"MIT"
] | null | null | null |
import pytest
from vns_utils import (
is_checksum_address,
is_list_like,
is_same_address,
)
PRIVATE_KEY_HEX = '0x56ebb41875ceedd42e395f730e03b5c44989393c9f0484ee6bc05f933673458f'
PASSWORD = 'web3-testing'
ADDRESS = '0x844B417c0C58B02c2224306047B9fb0D3264fE8c'
PRIVATE_KEY_FOR_UNLOCK = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
ACCOUNT_FOR_UNLOCK = '0x12efDc31B1a8FA1A1e756DFD8A1601055C971E13'
class GoEthereumPersonalModuleTest:
def test_personal_importRawKey(self, web3):
actual = web3.geth.personal.importRawKey(PRIVATE_KEY_HEX, PASSWORD)
assert actual == ADDRESS
def test_personal_listAccounts(self, web3):
accounts = web3.geth.personal.listAccounts()
assert is_list_like(accounts)
assert len(accounts) > 0
assert all((
is_checksum_address(item)
for item
in accounts
))
def test_personal_lockAccount(self, web3, unlockable_account_dual_type):
# TODO: how do we test this better?
web3.geth.personal.lockAccount(unlockable_account_dual_type)
def test_personal_unlockAccount_success(self,
web3,
unlockable_account_dual_type,
unlockable_account_pw):
result = web3.geth.personal.unlockAccount(
unlockable_account_dual_type,
unlockable_account_pw
)
assert result is True
def test_personal_unlockAccount_failure(self,
web3,
unlockable_account_dual_type):
with pytest.raises(ValueError):
web3.geth.personal.unlockAccount(unlockable_account_dual_type, 'bad-password')
def test_personal_newAccount(self, web3):
new_account = web3.geth.personal.newAccount(PASSWORD)
assert is_checksum_address(new_account)
def test_personal_sendTransaction(self,
web3,
unlockable_account_dual_type,
unlockable_account_pw):
assert web3.vns.getBalance(unlockable_account_dual_type) > web3.toWei(1, 'ether')
txn_params = {
'from': unlockable_account_dual_type,
'to': unlockable_account_dual_type,
'gas': 21000,
'value': 1,
'gasPrice': web3.toWei(1, 'gwei'),
}
txn_hash = web3.geth.personal.sendTransaction(txn_params, unlockable_account_pw)
assert txn_hash
transaction = web3.vns.getTransaction(txn_hash)
assert is_same_address(transaction['from'], txn_params['from'])
assert is_same_address(transaction['to'], txn_params['to'])
assert transaction['gas'] == txn_params['gas']
assert transaction['value'] == txn_params['value']
assert transaction['gasPrice'] == txn_params['gasPrice']
def test_personal_sign_and_ecrecover(self,
web3,
unlockable_account_dual_type,
unlockable_account_pw):
message = 'test-web3-geth-personal-sign'
signature = web3.geth.personal.sign(
message,
unlockable_account_dual_type,
unlockable_account_pw
)
signer = web3.geth.personal.ecRecover(message, signature)
assert is_same_address(signer, unlockable_account_dual_type)
class ParityPersonalModuleTest():
def test_personal_listAccounts(self, web3):
accounts = web3.parity.personal.listAccounts()
assert is_list_like(accounts)
assert len(accounts) > 0
assert all((
is_checksum_address(item)
for item
in accounts
))
def test_personal_unlockAccount_success(self,
web3,
unlockable_account_dual_type,
unlockable_account_pw):
result = web3.parity.personal.unlockAccount(
unlockable_account_dual_type,
unlockable_account_pw,
None
)
assert result is True
# Seems to be an issue with Parity since this should return False
def test_personal_unlockAccount_failure(self,
web3,
unlockable_account_dual_type):
result = web3.parity.personal.unlockAccount(
unlockable_account_dual_type,
'bad-password',
None
)
assert result is True
def test_personal_newAccount(self, web3):
new_account = web3.parity.personal.newAccount(PASSWORD)
assert is_checksum_address(new_account)
def test_personal_lockAccount(self, web3, unlocked_account):
pytest.xfail('this non-standard json-rpc method is not implemented on parity')
super().test_personal_lockAccount(web3, unlocked_account)
def test_personal_importRawKey(self, web3):
pytest.xfail('this non-standard json-rpc method is not implemented on parity')
super().test_personal_importRawKey(web3)
def test_personal_sendTransaction(self,
web3,
unlockable_account_dual_type,
unlockable_account_pw):
assert web3.vns.getBalance(unlockable_account_dual_type) > web3.toWei(1, 'ether')
txn_params = {
'from': unlockable_account_dual_type,
'to': unlockable_account_dual_type,
'gas': 21000,
'value': 1,
'gasPrice': web3.toWei(1, 'gwei'),
}
txn_hash = web3.parity.personal.sendTransaction(txn_params, unlockable_account_pw)
assert txn_hash
transaction = web3.vns.getTransaction(txn_hash)
assert is_same_address(transaction['from'], txn_params['from'])
assert is_same_address(transaction['to'], txn_params['to'])
assert transaction['gas'] == txn_params['gas']
assert transaction['value'] == txn_params['value']
assert transaction['gasPrice'] == txn_params['gasPrice']
def test_personal_sign_and_ecrecover(self,
web3,
unlockable_account_dual_type,
unlockable_account_pw):
message = 'test-web3-parity-personal-sign'
signature = web3.parity.personal.sign(
message,
unlockable_account_dual_type,
unlockable_account_pw
)
signer = web3.parity.personal.ecRecover(message, signature)
assert is_same_address(signer, unlockable_account_dual_type)
| 41.238372
| 94
| 0.588327
|
a325e6ee8c8cd7079a82d457a5ac65b72d161c1a
| 750
|
py
|
Python
|
codepost/urls.py
|
zachary-berdell-elliott/code-post
|
0b94a3bab55ff2d8787f2924ca2d7f906683bb13
|
[
"MIT"
] | null | null | null |
codepost/urls.py
|
zachary-berdell-elliott/code-post
|
0b94a3bab55ff2d8787f2924ca2d7f906683bb13
|
[
"MIT"
] | null | null | null |
codepost/urls.py
|
zachary-berdell-elliott/code-post
|
0b94a3bab55ff2d8787f2924ca2d7f906683bb13
|
[
"MIT"
] | null | null | null |
"""codepost URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
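# Hedged sketch of the "Including another URLconf" pattern described in the
# docstring above; 'blog' and 'blog.urls' are hypothetical names used only for
# illustration, so this variant only applies in a project that defines such an app.
# from django.urls import include, path
# urlpatterns = [
#     path('admin/', admin.site.urls),
#     path('blog/', include('blog.urls')),
# ]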
| 34.090909
| 77
| 0.709333
|
a26f855634696d2f04427717d8695afad0392dd9
| 37,593
|
py
|
Python
|
tests/test_platypus_tables.py
|
nakagami/reportlab
|
0514ba380fe66c76746725cc0c1b7a3fee51c833
|
[
"BSD-3-Clause"
] | 9
|
2016-12-21T02:19:24.000Z
|
2021-08-07T11:39:47.000Z
|
tests/test_platypus_tables.py
|
nakagami/reportlab
|
0514ba380fe66c76746725cc0c1b7a3fee51c833
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_platypus_tables.py
|
nakagami/reportlab
|
0514ba380fe66c76746725cc0c1b7a3fee51c833
|
[
"BSD-3-Clause"
] | 4
|
2018-08-24T14:50:14.000Z
|
2022-03-01T08:46:40.000Z
|
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__=''' $Id: test_platypus_tables.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__='Test script for reportlab.tables'
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os,unittest
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch, cm
from reportlab.lib import colors
from reportlab.graphics.charts.linecharts import HorizontalLineChart
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin
from reportlab.graphics.charts.barcharts import VerticalBarChart
styleSheet = getSampleStyleSheet()
def getTable():
t = Table((('','North','South','East','West'),
('Quarter 1',100,200,300,400),
('Quarter 2',100,400,600,800),
('Total',300,600,900,'1,200')),
(72,36,36,36,36),
(24, 16,16,18)
)
return t
def makeStyles():
styles = []
for i in range(5):
styles.append(TableStyle([('ALIGN', (1,1), (-1,-1), 'RIGHT'),
('ALIGN', (0,0), (-1,0), 'CENTRE'),
('HREF', (0,0), (0,0), 'www.google.com'),
]))
for style in styles[1:]:
style.add('GRID', (0,0), (-1,-1), 0.25, colors.black)
for style in styles[2:]:
style.add('LINEBELOW', (0,0), (-1,0), 2, colors.black)
for style in styles[3:]:
style.add('LINEABOVE', (0, -1), (-1,-1), 2, colors.black)
styles[-1].add('LINEBELOW',(1,-1), (-1, -1), 2, (0.5, 0.5, 0.5))
return styles
def run():
doc = SimpleDocTemplate(outputfile('test_platypus_tables.pdf'), pagesize=(8.5*inch, 11*inch), showBoundary=1)
lst = []
from reportlab import Version
styNormal = styleSheet['Normal']
styBackground = ParagraphStyle('background', parent=styNormal, backColor=colors.pink)
styH1 = styleSheet['Heading1']
lst.append(Paragraph("First, a test of how tables align their content...", styH1))
lst.append(Paragraph("""Generated with version %s""" % Version,
styNormal))
lst.append(Paragraph("""In release 2.3, cells with plain text positioned their
text differently to cells with Paragraphs using the
same font. Hopefully now they are back on the same baseline""",
styNormal))
ts1 = TableStyle([
('ALIGN', (0,0), (-1,0), 'RIGHT'),
('BACKGROUND', (0,0), (-1,0), colors.lightgrey),
('VALIGN', (0,0), (-1,-1), 'TOP'),
('GRID', (0,0), (-1,-1), 0.25, colors.black),
])
t1 = Table([
('plain text','plain text','shortpara','plain text', 'long para'),
('Text','more text', Paragraph('Is this para level?', styBackground), 'Back to text', Paragraph('Short para again', styBackground)),
('Text',
'more text',
Paragraph('Is this level?', styBackground),
'This is plain\ntext with line breaks\nto compare against\nthe para on right',
Paragraph('Long paragraph we expect to wrap over several lines accurately', styBackground)),
])
t1.setStyle(ts1)
lst.append(t1)
lst.append(Spacer(0,10))
lst.append(Paragraph("Now we make a table with just one cell containing a string...note how the text sits low", styNormal))
tsGrid = TableStyle([
('GRID', (0,0), (-1,-1), 0.25, colors.black),
])
lst.append(Table([['One cell of plain text']], style=tsGrid, colWidths=[200]))
lst.append(Spacer(0,10))
lst.append(Paragraph("Now we make a table with just one cell containing a para...should be same position. Note that the overall bounding box is an approximation and lies - it always did.", styNormal))
lst.append(Table([[Paragraph('One cell containing a paragraph. ÄÉ∫', styBackground)]], style=tsGrid, colWidths=[200]))
lst.append(Spacer(0,10))
lst.append(Paragraph("Paragraphs jumped up post 2.1. Ideally they should align the same.", styNormal))
lst.append(Spacer(0,30))
lst.append(Paragraph("Now for all the tests we had before. See also the much longer test_platypus_tables_2.pdf, which for reasons unknown was split into a separate file generated by the same script", styNormal))
styles = makeStyles()
for style in styles:
t = getTable()
t.setStyle(style)
## print '--------------'
## for rowstyle in t._cellstyles:
## for s in rowstyle:
## print s.alignment
lst.append(t)
lst.append(Spacer(0,12))
doc.build(lst)
class TableBarChart(_DrawingEditorMixin,Drawing):
def __init__(self,width=400,height=200,*args,**kw):
Drawing.__init__(self,width,height,*args,**kw)
self.width = 136
self.height = 140
self._add(self,VerticalBarChart(),name='chart',validate=None,desc=None)
self.chart.y = 20
self.chart.width = self.width - 21
self.chart.height = self.height - 24
self.chart.categoryAxis.categoryNames = ['Spring','Summer','Autumn','Winter']
self.chart.categoryAxis.labels.fontSize = 7
def old_tables_test():
from reportlab.lib.units import inch, cm
from reportlab.platypus.flowables import Image, PageBreak, Spacer, XBox
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.flowables import Preformatted
from reportlab.platypus.doctemplate import SimpleDocTemplate
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.tables import GRID_STYLE, BOX_STYLE, LABELED_GRID_STYLE, COLORED_GRID_STYLE, LIST_STYLE, LongTable
rowheights = (24, 16, 16, 16, 16)
rowheights2 = (24, 16, 16, 16, 30)
colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
data = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
data2 = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats\nLarge', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
lst = []
lst.append(Paragraph("Tables", styleSheet['Heading1']))
lst.append(Paragraph(__doc__, styleSheet['BodyText']))
lst.append(Paragraph("The Tables (shown in different styles below) were created using the following code:", styleSheet['BodyText']))
lst.append(Preformatted("""
colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
rowheights = (24, 16, 16, 16, 16)
data = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159,
888, '1,298', 832, 453, '1,344','2,843')
)
t = Table(data, colwidths, rowheights)
""", styleSheet['Code'], dedent=4))
lst.append(Paragraph("""
You can then give the Table a TableStyle object to control its format. The first TableStyle used was
created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
GRID_STYLE = TableStyle(
[('GRID', (0,0), (-1,-1), 0.25, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
lst.append(Paragraph("""
TableStyles are created by passing in a list of commands. There are two types of commands - line commands
and cell formatting commands. In all cases, the first three elements of a command are the command name,
the starting cell and the ending cell.
""", styleSheet['BodyText']))
lst.append(Paragraph("""
Line commands always follow this with the weight and color of the desired lines. Colors can be names,
or they can be specified as a (R,G,B) tuple, where R, G and B are floats and (0,0,0) is black. The line
command names are: GRID, BOX, OUTLINE, INNERGRID, LINEBELOW, LINEABOVE, LINEBEFORE
and LINEAFTER. BOX and OUTLINE are equivalent, and GRID is the equivalent of applying both BOX and
INNERGRID.
""", styleSheet['BodyText']))
lst.append(Paragraph("""
Cell formatting commands are:
""", styleSheet['BodyText']))
lst.append(Paragraph("""
FONT - takes fontname, fontsize and (optional) leading.
""", styleSheet['Definition']))
lst.append(Paragraph("""
TEXTCOLOR - takes a color name or (R,G,B) tuple.
""", styleSheet['Definition']))
lst.append(Paragraph("""
ALIGNMENT (or ALIGN) - takes one of LEFT, RIGHT, CENTRE (or CENTER) or DECIMAL.
""", styleSheet['Definition']))
lst.append(Paragraph("""
LEFTPADDING - defaults to 6.
""", styleSheet['Definition']))
lst.append(Paragraph("""
RIGHTPADDING - defaults to 6.
""", styleSheet['Definition']))
lst.append(Paragraph("""
BOTTOMPADDING - defaults to 3.
""", styleSheet['Definition']))
lst.append(Paragraph("""
A tablestyle is applied to a table by calling Table.setStyle(tablestyle).
""", styleSheet['BodyText']))
t = Table(data, colwidths, rowheights)
t.setStyle(GRID_STYLE)
lst.append(PageBreak())
lst.append(Paragraph("This is GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
t = Table(data, colwidths, rowheights)
t.setStyle(BOX_STYLE)
lst.append(Paragraph("This is BOX_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
BOX_STYLE = TableStyle(
[('BOX', (0,0), (-1,-1), 0.50, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
t.setStyle(LABELED_GRID_STYLE)
lst.append(Paragraph("This is LABELED_GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
t = Table(data2, colwidths, rowheights2)
t.setStyle(LABELED_GRID_STYLE)
lst.append(Paragraph("This is LABELED_GRID_STYLE ILLUSTRATES EXPLICIT LINE SPLITTING WITH NEWLINE (different heights and data)\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
LABELED_GRID_STYLE = TableStyle(
[('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 2, colors.black),
('LINEBELOW', (0,0), (-1,0), 2, colors.black),
('LINEAFTER', (0,0), (0,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
lst.append(PageBreak())
t = Table(data, colwidths, rowheights)
t.setStyle(COLORED_GRID_STYLE)
lst.append(Paragraph("This is COLORED_GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
COLORED_GRID_STYLE = TableStyle(
[('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 2, colors.red),
('LINEBELOW', (0,0), (-1,0), 2, colors.black),
('LINEAFTER', (0,0), (0,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
t.setStyle(LIST_STYLE)
lst.append(Paragraph("This is LIST_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
LIST_STYLE = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
ts = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 3, colors.green,'butt'),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.white,'butt'),
('ALIGN', (1,1), (-1,-1), 'RIGHT'),
('TEXTCOLOR', (0,1), (0,-1), colors.red),
('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))]
)
t.setStyle(ts)
lst.append(Paragraph("This is a custom style\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
ts = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 3, colors.green,'butt'),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.white,'butt'),
('ALIGN', (1,1), (-1,-1), 'RIGHT'),
('TEXTCOLOR', (0,1), (0,-1), colors.red),
('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))]
)
""", styleSheet['Code']))
data = (
('', 'Jan\nCold', 'Feb\n', 'Mar\n','Apr\n','May\n', 'Jun\nHot', 'Jul\n', 'Aug\nThunder', 'Sep\n', 'Oct\n', 'Nov\n', 'Dec\n'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
c = list(colwidths)
c[0] = None
c[8] = None
t = Table(data, c, [None]+list(rowheights[1:]))
t.setStyle(LIST_STYLE)
lst.append(Paragraph("""
This is a LIST_STYLE table with the first rowheight set to None ie automatic.
The top row cells are split at a newline '\\n' character. The first and August
column widths were also set to None.
""", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
This demonstrates a number of features useful in financial statements. The first is decimal alignment;
with ALIGN=DECIMAL the numbers align on the points; and the points are aligned based on
the RIGHTPADDING, which is usually 3 points so you should set it higher. The second is multiple lines;
one can specify double or triple lines and control the separation if desired. Finally, the coloured
negative numbers were (we regret to say) done in the style; we don't have a way to conditionally
format numbers based on value yet.
""", styleSheet['BodyText']))
t = Table([[u'Corporate Assets','Amount'],
['Fixed Assets','1,234,567.89'],
['Company Vehicle','1,234.8901'],
['Petty Cash','42'],
[u'Intellectual Property\u00ae','(42,078,231.56)'],
['Overdraft','(12,345)'],
['Boardroom Flat Screen','60 inches'],
['Net Position','Deep Sh*t.Really']
],
[144,72])
ts = TableStyle(
[#first the top row
('ALIGN', (1,1), (-1,-1), 'CENTER'),
('LINEABOVE', (0,0), (-1,0), 1, colors.purple),
('LINEBELOW', (0,0), (-1,0), 1, colors.purple),
('FONT', (0,0), (-1,0), 'Times-Bold'),
#bottom row has a line above, and two lines below
('LINEABOVE', (0,-1), (-1,-1), 1, colors.purple), #last 2 are count, sep
('LINEBELOW', (0,-1), (-1,-1), 0.5, colors.purple, 1, None, None, 4,1),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.red),
('FONT', (0,-1), (-1,-1), 'Times-Bold'),
#numbers column
('ALIGN', (1,1), (-1,-1), 'DECIMAL'),
('RIGHTPADDING', (1,1), (-1,-1), 36),
('TEXTCOLOR', (1,4), (1,4), colors.red),
#red cell
]
)
t.setStyle(ts)
lst.append(t)
lst.append(Spacer(36,36))
lst.append(Paragraph("""
The red numbers should be aligned LEFT & BOTTOM, the blue RIGHT & TOP
and the green CENTER & MIDDLE.
""", styleSheet['BodyText']))
XY = [['X00y', 'X01y', 'X02y', 'X03y', 'X04y'],
['X10y', 'X11y', 'X12y', 'X13y', 'X14y'],
['X20y', 'X21y', 'X22y', 'X23y', 'X24y'],
['X30y', 'X31y', 'X32y', 'X33y', 'X34y']]
t=Table(XY, 5*[0.6*inch], 4*[0.6*inch])
t.setStyle([('ALIGN',(1,1),(-2,-2),'LEFT'),
('TEXTCOLOR',(1,1),(-2,-2),colors.red),
('VALIGN',(0,0),(1,-1),'TOP'),
('ALIGN',(0,0),(1,-1),'RIGHT'),
('TEXTCOLOR',(0,0),(1,-1),colors.blue),
('ALIGN',(0,-1),(-1,-1),'CENTER'),
('VALIGN',(0,-1),(-1,-1),'MIDDLE'),
('TEXTCOLOR',(0,-1),(-1,-1),colors.green),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
])
lst.append(t)
data = [('alignment', 'align\012alignment'),
('bulletColor', 'bulletcolor\012bcolor'),
('bulletFontName', 'bfont\012bulletfontname'),
('bulletFontSize', 'bfontsize\012bulletfontsize'),
('bulletIndent', 'bindent\012bulletindent'),
('firstLineIndent', 'findent\012firstlineindent'),
('fontName', 'face\012fontname\012font'),
('fontSize', 'size\012fontsize'),
('leading', 'leading'),
('leftIndent', 'leftindent\012lindent'),
('rightIndent', 'rightindent\012rindent'),
('spaceAfter', 'spaceafter\012spacea'),
('spaceBefore', 'spacebefore\012spaceb'),
('textColor', 'fg\012textcolor\012color')]
t = Table(data)
t.setStyle([
('VALIGN',(0,0),(-1,-1),'TOP'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
])
lst.append(t)
t = Table([ ('Attribute', 'Synonyms'),
('alignment', 'align, alignment'),
('bulletColor', 'bulletcolor, bcolor'),
('bulletFontName', 'bfont, bulletfontname'),
('bulletFontSize', 'bfontsize, bulletfontsize'),
('bulletIndent', 'bindent, bulletindent'),
('firstLineIndent', 'findent, firstlineindent'),
('fontName', 'face, fontname, font'),
('fontSize', 'size, fontsize'),
('leading', 'leading'),
('leftIndent', 'leftindent, lindent'),
('rightIndent', 'rightindent, rindent'),
('spaceAfter', 'spaceafter, spacea'),
('spaceBefore', 'spacebefore, spaceb'),
('textColor', 'fg, textcolor, color')])
t.repeatRows = 1
t.setStyle([
('FONT',(0,0),(-1,1),'Times-Bold',10,12),
('FONT',(0,1),(-1,-1),'Courier',8,8),
('VALIGN',(0,0),(-1,-1),'MIDDLE'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.green),
('BACKGROUND', (0, 1), (-1, -1), colors.pink),
('ALIGN', (0, 0), (-1, 0), 'CENTER'),
('ALIGN', (0, 1), (0, -1), 'LEFT'),
('ALIGN', (-1, 1), (-1, -1), 'RIGHT'),
('FONT', (0, 0), (-1, 0), 'Times-Bold', 12),
('ALIGN', (1, 1), (1, -1), 'CENTER'),
])
lst.append(t)
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 5,6),
('GRID', (0,0), (-1,-1), 0.25, colors.blue),]))
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 10,12),
('GRID', (0,0), (-1,-1), 0.25, colors.black),]))
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 20,24),
('GRID', (0,0), (-1,-1), 0.25, colors.red),]))
lst.append(PageBreak())
data= [['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('BACKGROUND', (1, 1), (1, 2), colors.lavender),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
])
lst.append(Paragraph("Illustrating splits: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(PageBreak())
data= [['00', '01', '02', '03', '04'],
['', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '', '33', '34']]
sty=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('SPAN',(0,0),(0,1)),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('SPAN',(2,2),(2,3)),
]
t=Table(data,style=sty)
lst.append(Paragraph("Illustrating splits with spans: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
data= [['00', '01', '02', '03', '04'],
['', '11', '12', '13', ''],
['20', '21', '22', '23', '24'],
['30', '31', '', '33', ''],
['40', '41', '', '43', '44']]
sty=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('SPAN',(0,0),(0,1)),
('BACKGROUND',(-2,1),(-1,1),colors.palegreen),
('SPAN',(-2,1),(-1,1)),
('BACKGROUND',(-2,3),(-1,3),colors.yellow),
('SPAN',(-2,3),(-1,3)),
('BACKGROUND', (2, 3), (2, 4), colors.orange),
('SPAN',(2,3),(2,4)),
]
t=Table(data,style=sty,repeatRows=2)
lst.append(Paragraph("Illustrating splits with spans and repeatRows: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
if 1:
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(PageBreak())
from reportlab.lib.testutils import testsFolder
I = Image(os.path.join(os.path.dirname(testsFolder),'src','tools','pythonpoint','demos','leftlogo.gif'))
I.drawHeight = 1.25*inch*I.drawHeight / I.drawWidth
I.drawWidth = 1.25*inch
#I.drawWidth = 9.25*inch #uncomment to see better messaging
P = Paragraph("<para align=center spaceb=3>The <b>ReportLab Left <font color=red>Logo</font></b> Image</para>", styleSheet["BodyText"])
B = TableBarChart()
BP = Paragraph("<para align=center spaceb=3>A bar chart in a cell.</para>", styleSheet["BodyText"])
data= [['A', 'B', 'C', Paragraph("<b>A pa<font color=red>r</font>a<i>graph</i></b><super><font color=yellow>1</font></super>",styleSheet["BodyText"]), 'D'],
['00', '01', '02', [I,P], '04'],
['10', '11', '12', [I,P], '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34'],
['40', '41', '42', [B,BP], '44']]
t=Table(data,style=[('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('BACKGROUND', (1, 1), (1, 2), colors.lavender),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('BOX',(0,0),(-1,-1),2,colors.black),
('GRID',(0,0),(-1,-1),0.5,colors.black),
('VALIGN',(3,0),(3,0),'BOTTOM'),
('BACKGROUND',(3,0),(3,0),colors.limegreen),
('BACKGROUND',(3,1),(3,1),colors.khaki),
('ALIGN',(3,1),(3,1),'CENTER'),
('BACKGROUND',(3,2),(3,2),colors.beige),
('ALIGN',(3,2),(3,2),'LEFT'),
])
t._argW[3]=1.5*inch
lst.append(t)
# now for an attempt at column spanning.
lst.append(PageBreak())
data= [['A', 'BBBBB', 'C', 'D', 'E'],
['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
sty = [
('ALIGN',(0,0),(-1,-1),'CENTER'),
('VALIGN',(0,0),(-1,-1),'TOP'),
('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
#span 'BBBBB' across the middle 3 cells in the top row
('SPAN',(1,0),(3,0)),
#now color the first cell in this range only,
#i.e. the one we want to have spanned. Hopefully
#the range of 3 will come out khaki.
('BACKGROUND',(1,0),(1,0),colors.khaki),
('SPAN',(0,2),(-1,2)),
#span 'A' down the first two rows of the left column
('SPAN',(0,0), (0, 1)),
('BACKGROUND',(0,0),(0,0),colors.cyan),
('LINEBELOW', (0,'splitlast'), (-1,'splitlast'), 1, colors.white,'butt'),
]
t=Table(data,style=sty, colWidths = [20] * 5, rowHeights = [20]*5)
lst.append(t)
# now for an attempt at percentage widths
lst.append(Spacer(18,18))
lst.append(Paragraph("This table has colWidths=5*['14%']!", styleSheet['BodyText']))
t=Table(data,style=sty, colWidths = ['14%'] * 5, rowHeights = [20]*5)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph("This table has colWidths=['14%','10%','19%','22%','*']!", styleSheet['BodyText']))
t=Table(data,style=sty, colWidths = ['14%','10%','19%','22%','*'], rowHeights = [20]*5)
lst.append(t)
# Mike's test example
lst.append(Spacer(18,18))
lst.append(Paragraph('Mike\'s Spanning Example', styleSheet['Heading1']))
data= [[Paragraph('World Domination: The First Five Years', styleSheet['BodyText']), ''],
[Paragraph('World <font color="green">Domination</font>: The First Five Years', styleSheet['BodyText']),''],
[Paragraph('World Domination: The First Five Years', styleSheet['BodyText']), ''],
]
t=Table(data, style=[('SPAN',(0,0),(1,0)),('SPAN',(0,1),(1,1)),('SPAN',(0,2),(1,2)),], colWidths = [3*cm,8*cm], rowHeights = [None]*3)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph('Mike\'s Non-spanning Example', styleSheet['Heading1']))
data= [[Paragraph('World Domination: The First Five Years', styleSheet['BodyText'])],
[Paragraph('World <font color="magenta">Domination</font>: The First Five Years', styleSheet['BodyText'])],
[Paragraph('World Domination: The First Five Years', styleSheet['BodyText'])],
]
t=Table(data, style=[], colWidths = [11*cm], rowHeights = [None]*3)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph('xpre example', styleSheet['Heading1']))
data= [ [
XPreformatted('Account Details', styleSheet['Heading3']),
'', XPreformatted('Client Details', styleSheet['Heading3']),
], #end of row 0
]
t=Table(data, style=[], colWidths = [80,230.0,80], rowHeights = [None]*1)
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph('Trying colour cycling in background', styleSheet['Heading1']))
lst.append(Paragraph("This should alternate pale blue and uncolored by row", styleSheet['BodyText']))
data= [['001', '01', '02', '03', '04', '05'],
['002', '01', '02', '03', '04', '05'],
['003', '01', '02', '03', '04', '05'],
['004', '01', '02', '03', '04', '05'],
['005', '01', '02', '03', '04', '05'],
['006', '01', '02', '03', '04', '05'],
['007', '01', '02', '03', '04', '05'],
['008', '01', '02', '03', '04', '05'],
['009', '01', '02', '03', '04', '05'],
['010', '01', '02', '03', '04', '05'],
]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('ROWBACKGROUNDS', (0, 0), (-1, -1), (0xD0D0FF, None)),
])
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("And this should pale blue, pale pink and None by column", styleSheet['BodyText']))
data= [['001', '01', '02', '03', '04', '05'],
['002', '01', '02', '03', '04', '05'],
['003', '01', '02', '03', '04', '05'],
['004', '01', '02', '03', '04', '05'],
['005', '01', '02', '03', '04', '05'],
['006', '01', '02', '03', '04', '05'],
['007', '01', '02', '03', '04', '05'],
['008', '01', '02', '03', '04', '05'],
['009', '01', '02', '03', '04', '05'],
['010', '01', '02', '03', '04', '05'],
]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('COLBACKGROUNDS', (0, 0), (-1, -1), (0xD0D0FF, 0xFFD0D0, None)),
])
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph("This spanning example illustrates automatic removal of grids and lines in spanned cells!", styleSheet['BodyText']))
lst.append(Spacer(0,6))
data= [['Top\nLeft', '', '02', '03', '04', '05', '06', '07'],
['', '', '12', 'Span (3,1) (6,2)', '','','','17'],
['20', '21', '22', '', '','','','27'],
['30', '31', '32', '33', '34','35','36','37'],
['40', 'In The\nMiddle', '', '', '44','45','46','47'],
['50', '', '', '', '54','55','56','57'],
['60', '', '', '','64', '65', 'Bottom\nRight', ''],
['70', '71', '72', '73','74', '75', '', '']]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('BACKGROUND',(0,0),(1,1),colors.palegreen),
('SPAN',(0,0),(1,1)),
('BACKGROUND',(-2,-2),(-1,-1), colors.pink),
('SPAN',(-2,-2),(-1,-1)),
('SPAN',(1,4),(3,6)),
('BACKGROUND',(1,4),(3,6), colors.lightblue),
('SPAN',(3,1),(6,2)),
('BACKGROUND',(3,1),(6,2), colors.peachpuff),
('VALIGN',(3,1),(6,2),'TOP'),
('LINEABOVE', (0,2),(-1,2), 1, colors.black, 0, None, None, 2, 2),
('LINEBEFORE', (3,0),(3,-1), 1, colors.black, 0, None, None, 2, 2),
])
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph("und jetzt noch eine Tabelle mit 5000 Zeilen:", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
]
data = [[str(i), Paragraph("xx "* (i%10), styleSheet["BodyText"]), Paragraph("blah "*(i%40), styleSheet["BodyText"])] for i in range(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
lst.append(t)
#Yuan Hong's bug tester
lst.append(PageBreak())
lst.append(Paragraph('Yuan Hong\'s Bug Case (should not blow up)', styleSheet['Heading2']))
data = ([['Col1', 'Col2', 'Col3', 'Col4', 'Col5']]+
[['01', Paragraph('This is cell one that contains a paragraph.', styleSheet['Normal']), '02', '03', '04']
for i in range(50)])
t = Table(data, ['20%']*5, repeatRows=1)
t.setStyle(TableStyle([
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
('SPAN', (0,50), (-2,50)),
]))
lst.append(t)
lst.append(PageBreak())
#Volker Haas' example extended
#the optimal row heights are the solution of an LP similar to
#
#Objective function
# min: 3*h0+3*h1+3*h2+2*h3;
#
#constraints
# h0>=12;
# h1>=12;
# h2>=12;
# h3>=12;
# h0+h1+h2>=48;
# h0+h1>=12;
# h2+h3>=60;
#
#the solution H=[12,12,24,36]
def makeTable(x,y):
return Table([
['00', '01', '02', '03', '04', '05\nline2\nline3\nline4'],
['', '11', '12', x, '',''],
['20', '21', y, '23', '24',''],
['30', '31', '', '33', '34','35'],
],
style=[
('TOPPADDING',(0,0),(-1,-1),0),
('BOTTOMPADDING',(0,0),(-1,-1),0),
('RIGHTPADDING',(0,0),(-1,-1),0),
('LEFTPADDING',(0,0),(-1,-1),0),
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('SPAN',(0,0),(0,1)),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('SPAN',(2,2),(2,3)),
('SPAN',(3,1),(4,1)),
('SPAN',(5,0),(5,2)),
])
p_style= ParagraphStyle('Normal')
lst.append(makeTable(
Paragraph('This is a string',p_style),
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
lst.append(Spacer(10,10))
lst.append(makeTable(
XPreformatted('This is a string',p_style),
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
lst.append(Spacer(10,10))
lst.append(makeTable(
'This is a string',
'22\nblub\nasfd\nafd\nasdfs',
))
lst.append(Spacer(10,10))
lst.append(makeTable(
'This is a string',
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
SimpleDocTemplate(outputfile('test_platypus_tables_2.pdf'), showBoundary=1).build(lst)
class TablesTestCase(unittest.TestCase):
"Make documents with tables"
def test0(self):
"Make a document full of tables"
run()
def test1(self):
"Make a document full of tables"
old_tables_test()
def makeSuite():
return makeSuiteForClasses(TablesTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| 43.917056
| 216
| 0.522039
|
b4f668960cb14338a8929912b0c7876e97bd9876
| 677
|
py
|
Python
|
screenpy/__version__.py
|
MohamedRaslan/screenpy
|
94be7caa444ae7ac8a4ac403cd93ad92108237fe
|
[
"MIT"
] | null | null | null |
screenpy/__version__.py
|
MohamedRaslan/screenpy
|
94be7caa444ae7ac8a4ac403cd93ad92108237fe
|
[
"MIT"
] | null | null | null |
screenpy/__version__.py
|
MohamedRaslan/screenpy
|
94be7caa444ae7ac8a4ac403cd93ad92108237fe
|
[
"MIT"
] | null | null | null |
" ____ ____"
# / ___| ___ _ __ ___ ___ _ __ | _ \ _ _
# \___ \ / __| '__/ _ \/ _ \ '_ \| |_) | | | |
# ___) | (__| | | __/ __/ | | | __/| |_| |
# |____/ \___|_| \___|\___|_| |_|_| \__, |
# |___/
__title__ = "screenpy"
__description__ = "Screenplay pattern base for Python automated UI test suites."
__url__ = "https://github.com/perrygoy/screenpy"
__version__ = "3.2.1"
__author__ = "Perry Goy"
__author_email__ = "perry.goy@gmail.com"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2019-2021 Perry Goy"
| 42.3125
| 80
| 0.45938
|
00cac252796787780ec7a54fb50a06197804ca25
| 60,675
|
py
|
Python
|
numpy/polynomial/chebyshev.py
|
yarikoptic/numpy
|
613589e2286b03171829bf4ff8cb5c9c863df4be
|
[
"BSD-3-Clause"
] | 5
|
2019-10-02T13:32:41.000Z
|
2022-01-11T00:36:48.000Z
|
numpy/polynomial/chebyshev.py
|
yarikoptic/numpy
|
613589e2286b03171829bf4ff8cb5c9c863df4be
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/polynomial/chebyshev.py
|
yarikoptic/numpy
|
613589e2286b03171829bf4ff8cb5c9c863df4be
|
[
"BSD-3-Clause"
] | 3
|
2020-06-08T05:14:16.000Z
|
2021-07-06T21:12:50.000Z
|
"""
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to a positive integer power.
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebvander2d` -- Vandermonde-like matrix for 2D power series.
- `chebvander3d` -- Vandermonde-like matrix for 3D power series.
- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
- `chebweight` -- Chebyshev weight function.
- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline',
'chebadd', 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow',
'chebval', 'chebder', 'chebint', 'cheb2poly', 'poly2cheb',
'chebfromroots', 'chebvander', 'chebfit', 'chebtrim', 'chebroots',
'chebpts1', 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d',
'chebgrid2d', 'chebgrid3d', 'chebvander2d','chebvander3d',
'chebcompanion', 'chebgauss', 'chebweight']
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c) :
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = c.size
zs = np.zeros(2*n-1, dtype=c.dtype)
zs[n-1:] = c/2
return zs + zs[::-1]
def _zseries_to_cseries(zs) :
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
c = zs[n-1:].copy()
c[1:n] *= 2
return c
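# Added round-trip sketch for the two private helpers above; it assumes they
# remain importable as numpy.polynomial.chebyshev._cseries_to_zseries and
# _zseries_to_cseries in the installed numpy (private names, subject to change).
import numpy as np
from numpy.polynomial import chebyshev as cheb
c = np.array([1.0, 2.0, 3.0])
zs = cheb._cseries_to_zseries(c)
print(zs)                            # expected: [1.5 1.  1.  1.  1.5]
print(cheb._zseries_to_cseries(zs))  # expected: [1. 2. 3.]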
def _zseries_mul(z1, z2) :
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D but this is not checked.
Returns
-------
product : 1-D ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2) :
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-D ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
len1 = len(z1)
len2 = len(z2)
if len2 == 1 :
z1 /= z2
return z1, z1[:1]*0
elif len1 < len2 :
return z1[:1]*0, z1
else :
dlen = len1 - len2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j :
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
z1[j:j+len2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
quo /= scl
rem = z1[i+1:i-1+len2].copy()
return quo, rem
def _zseries_der(zs) :
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs) :
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol) :
"""
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
>>> P.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1) :
res = chebadd(chebmulx(res), pol[i])
return res
def cheb2poly(c) :
"""
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(range(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
>>> P.cheb2poly(range(4))
array([ -2., -8., 4., 12.])
"""
from polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1) :
tmp = c0
c0 = polysub(c[i - 2], c1)
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1,1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0,1])
def chebline(off, scl) :
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0 :
return np.array([off,scl])
else :
return np.array([off])
def chebfromroots(roots) :
"""
Generate a Chebyshev series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Chebyshev form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Chebyshev form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [chebline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [chebmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = chebmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebmulx(c):
"""Multiply a Chebyshev series by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
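    Examples
    --------
    A small hand-checked case (illustrative; exact repr spacing may differ
    between numpy versions): multiplying ``T_0 + 2*T_1 + 3*T_2`` by x.
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebmulx((1, 2, 3))
    array([ 1. ,  2.5,  1. ,  1.5])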
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
if len(c) > 1:
tmp = c[1:]/2
prd[2:] = tmp
prd[0:-2] += tmp
return prd
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "reproject"
    the product onto said basis set, which typically produces
    "unintuitive" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
prd = _zseries_mul(z1, z2)
ret = _zseries_to_cseries(prd)
return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "reproject" the results onto said basis
set, which typically produces "unintuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([ 0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
quo, rem = _zseries_div(z1, z2)
quo = pu.trimseq(_zseries_to_cseries(quo))
rem = pu.trimseq(_zseries_to_cseries(rem))
return quo, rem
def chebpow(c, pow, maxpower=16) :
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmul, chebdiv
Examples
--------
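    A hand-checked case (illustrative; exact repr spacing may differ):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebpow((1, 2, 3), 2)   # (T_0 + 2*T_1 + 3*T_2)**2
    array([  7.5,  10. ,   8. ,   6. ,   4.5])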
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=c.dtype)
elif power == 1 :
return c
else :
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(c)
prd = zs
for i in range(2, power + 1) :
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
def chebder(c, m=1, scl=1, axis=0) :
"""
Differentiate a Chebyshev series.
Returns the Chebyshev series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c: array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"reprojected" onto the C-series basis set. Thus, typically, the
result of this function is "unintuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
array([ 14., 12., 24.])
>>> C.chebder(c,3)
array([ 96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
array([ 12., 96.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j)*c[j]
c[j - 2] += (j*c[j])/(j - 2)
if n > 1:
der[1] = 4*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Chebyshev series.
Returns the Chebyshev series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3)
>>> C.chebint(c)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
0.00625 ])
>>> C.chebint(c, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,scl=-2)
array([-1., 1., -1., -1.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0 :
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt :
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/4
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def chebval(x, c, tensor=True):
"""
Evaluate a Chebyshev series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
chebval2d, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
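    A quick scalar check (illustrative; T_0(2)=1, T_1(2)=2, T_2(2)=7):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebval(2, (1, 2, 3))   # 1*1 + 2*2 + 3*7
    26.0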
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1 :
c0 = c[0]
c1 = 0
elif len(c) == 2 :
c0 = c[0]
c1 = c[1]
else :
x2 = 2*x
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1) :
tmp = c0
c0 = c[-i] - c1
c1 = tmp + c1*x2
return c0 + c1*x
def chebval2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than 2 the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
chebval, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
    except Exception:
raise ValueError('x, y are incompatible')
c = chebval(x, c)
c = chebval(y, c, tensor=False)
return c
def chebgrid2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points in the
Cartesian product of `x` and `y`.
See Also
--------
chebval, chebval2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = chebval(x, c)
c = chebval(y, c)
return c
def chebval3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
    except Exception:
raise ValueError('x, y, z are incompatible')
c = chebval(x, c)
c = chebval(y, c, tensor=False)
c = chebval(z, c, tensor=False)
return c
def chebgrid3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional Chebyshev series at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
c = chebval(x, c)
c = chebval(y, c)
c = chebval(z, c)
return c
def chebvander(x, deg) :
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = T_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Chebyshev polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
``chebval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Chebyshev series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander: ndarray
The pseudo Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Chebyshev polynomial. The dtype will be the same as
the converted `x`.
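    Examples
    --------
    A small illustrative matrix (columns are T_0, T_1 and T_2 evaluated at
    each point of `x`; exact repr spacing may differ):
    >>> import numpy.polynomial.chebyshev as C
    >>> C.chebvander([-1, 0, 1], 2)
    array([[ 1., -1.,  1.],
           [ 1.,  0., -1.],
           [ 1.,  1.,  1.]])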
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries.
v[0] = x*0 + 1
if ideg > 0 :
x2 = 2*x
v[1] = x
for i in range(2, ideg + 1) :
v[i] = v[i-1]*x2 - v[i-2]
return np.rollaxis(v, 0, v.ndim)
def chebvander2d(x, y, deg) :
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = T_i(x) * T_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Chebyshev polynomials.
If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
v = vx[..., None]*vy[..., None, :]
return v.reshape(v.shape[:-2] + (-1,))
def chebvander3d(x, y, z, deg) :
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Chebyshev polynomials.
If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
vz = chebvander(z, degz)
v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
return v.reshape(v.shape[:-3] + (-1,))
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
    Return the coefficients of a Chebyshev series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
where `n` is `deg`.
Since numpy version 1.7.0, chebfit also supports NA. If any of the
elements of `x`, `y`, or `w` are NA, then the corresponding rows of the
linear least squares problem (see Notes) are set to 0. If `y` is 2-D,
then an NA in any row of `y` invalidates that whole row.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting series
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (M,) or (M, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : present when `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
polyfit, legfit, lagfit, hermfit, hermefit
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
chebweight : Chebyshev weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Chebyshev series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
    the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
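    A round-trip sketch (illustrative): fitting samples of an exact degree-2
    Chebyshev series recovers its coefficients up to roundoff.
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> x = np.linspace(-1, 1, 51)
    >>> y = C.chebval(x, [1, 2, 3])
    >>> np.round(C.chebfit(x, y, 2), 4)
    array([ 1.,  2.,  3.])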
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2 :
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = chebvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# scale the design matrix and solve the least squares equation
scl = np.sqrt((lhs*lhs).sum(1))
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
def chebcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array(-c[0]/c[1])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[0] = np.sqrt(.5)
top[1:] = 1/2
bot[...] = top
mat[:,-1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
return mat
def chebroots(c):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * T_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Chebyshev series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = chebcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
Computes the sample points and weights for Gauss-Chebyshev quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1/\sqrt{1 - x^2}`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. For Gauss-Chebyshev there are closed form solutions for
the sample points and weights. If n = `deg`, then
.. math:: x_i = \cos(\pi (2 i - 1) / (2 n))
.. math:: w_i = \pi / n
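    Examples
    --------
    A two-point rule (illustrative; both weights equal pi/2 and exact repr
    spacing may differ):
    >>> import numpy.polynomial.chebyshev as C
    >>> x, w = C.chebgauss(2)
    >>> x
    array([ 0.70710678, -0.70710678])
    >>> w
    array([ 1.57079633,  1.57079633])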
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
        raise ValueError("deg must be a positive integer")
x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
w = np.ones(ideg)*(np.pi/ideg)
return x, w
def chebweight(x):
"""
The weight function of the Chebyshev polynomials.
The weight function is :math:`1/\sqrt{1 - x^2}` and the interval of
integration is :math:`[-1, 1]`. The Chebyshev polynomials are orthogonal, but
not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
"""
w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
return w
def chebpts1(npts):
"""
Chebyshev points of the first kind.
The Chebyshev points of the first kind are the points ``cos(x)``,
where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the first kind.
See Also
--------
chebpts2
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 1:
raise ValueError("npts must be >= 1")
x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)
return np.cos(x)
def chebpts2(npts):
"""
Chebyshev points of the second kind.
The Chebyshev points of the second kind are the points ``cos(x)``,
where ``x = [pi*k/(npts - 1) for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the second kind.
Notes
-----
.. versionadded:: 1.5.0
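    Examples
    --------
    Four points of the second kind (illustrative; the endpoints -1 and 1 are
    always included, exact repr spacing may differ):
    >>> import numpy.polynomial.chebyshev as C
    >>> C.chebpts2(4)
    array([-1. , -0.5,  0.5,  1. ])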
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 2:
raise ValueError("npts must be >= 2")
x = np.linspace(-np.pi, 0, _npts)
return np.cos(x)
#
# Chebyshev series class
#
exec polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]')
| 30.186567 | 81 | 0.601665 |
4cb4b2f212168934e49e6a888d5875fbb8013de5 | 6,478 | py | Python | call2plink/bin/topbottom.py | lvclark/h3agwas | 5e42e60123b819d3c331a91b25ee50846e55af3b | ["MIT"] | 62 | 2016-08-29T11:27:35.000Z | 2022-03-10T17:16:14.000Z | call2plink/bin/topbottom.py | lvclark/h3agwas | 5e42e60123b819d3c331a91b25ee50846e55af3b | ["MIT"] | 33 | 2016-12-26T13:48:19.000Z | 2021-12-05T13:34:06.000Z | call2plink/bin/topbottom.py | lvclark/h3agwas | 5e42e60123b819d3c331a91b25ee50846e55af3b | ["MIT"] | 50 | 2017-04-15T04:17:43.000Z | 2022-03-30T07:26:01.000Z |
#!/usr/bin/env python3
# Takes as input
# - A file describing an Illumina chip
#   It should have a header line within the first 15 lines with the columns "Name Chr MapInfo deCODE(cM)";
#   the deCODE(cM) column is optional
# - A file with the calls for the chip
#   There should be some header lines
# - the base name of the PLINK output files
# The output is a PLINK .ped file and a matching .map file
# (c) University of the Witwatersrand, Johannesburg on behalf of the H3ABioinformatics Network Consortium
# 2016-2018
# Licensed under the Creative Commons Attribution 4.0 International Licence.
# See the "LICENSE" file for details
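# Example invocation (hypothetical file names, shown only to illustrate the
# positional arguments parsed in parseArguments below):
#    topbottom.py chip_description.csv genotype_report.csv.gz 0 '(.*)' mystudy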
from __future__ import print_function
import sys
import argparse
import re
import gzip
import numpy as np
from shutil import copyfile
import pandas as pd
null_values = [0,"0","",False,"false","FALSE","False"]
def parseArguments():
parser=argparse.ArgumentParser()
parser.add_argument('array', type=str, metavar='array description'),
parser.add_argument('report', type=str, metavar='report',\
help="genotypereport"),
parser.add_argument('samplesize', type=int, metavar='samplesize',\
help="how many indivs in each site, 0=all")
parser.add_argument('idpat', type=str, metavar='idpat',help="id pattern"),
parser.add_argument('output', type=str, metavar='fname',help="output base"),
args = parser.parse_args()
return args
TAB=chr(9)
EOL=chr(10)
# auxiliary defs
chr2chr = list(map(str,range(0,27)))
chr2chr[23]="X"
chr2chr[24]="Y"
chr2chr[25]="XY"
chr2chr[26]="MT"
def conv(x):
try:
num = int(x)
except ValueError:
if x == "X": num=23
elif x == "Y": num=24
elif x == "XY": num =25
elif x == "MT": num=26
else: num = 0
return num
def parseArray(fname):
f = open(fname)
for i in range(15):
line = f.readline()
if ",Name," in line or "Name"+TAB in line: break
else:
sys.exit("Cannot find header line in "+fname)
fields=re.split("[,\t]",line.rstrip())
name_i = fields.index("Name")
indices = [fields.index("Chr"),fields.index("MapInfo")]
if "deCODE(cM)" in fields:
indices.append(fields.index("deCODE(cM)"))
array = {}
snp_elt=[]
i=0
for line in f:
if "[Controls]" in line: break
fields=re.split("[,\t]",line.rstrip())
if len(indices)==3:
cm = fields[indices[2]]
cm = 0.0 if "NA" in cm else float(cm)
else:
cm = 0.0
snp_elt.append([conv(fields[indices[0]]), int(fields[indices[1]]), cm, fields[name_i]])
snp_elt.sort()
for i,content in enumerate(snp_elt):
array[content[-1]]=i
return snp_elt, array
def generate_line(pedf,old_sample_id,output):
pedf.write(TAB.join((old_sample_id,"0","0","0","0")))
pedf.write(TAB)
pedf.write(TAB.join(output)+EOL)
pass
def getReportIndices(line):
    # Expected header fields: SNP Name, Sample ID, Allele1 - Top, Allele2 - Top
fields=re.split("[,\t]",line.rstrip())
#fields = line.rstrip().split(",")
if "Sample Name" in fields:
sample = "Sample Name"
elif "Sample ID" in fields:
sample = "Sample ID"
else:
sys.exit("Can't find sample id as a field in <%s>"%line)
if "Allele1 - Top" in fields:
allele1 = "Allele1 - Top"
allele2 = "Allele2 - Top"
elif "Allele1 - Forward" in fields:
allele1, allele2 = "Allele1 - Forward","Allele2 - Forward"
else:
sys.exit("Can't find field labels to mark alleles in <%s>"%line)
name_i = fields.index("SNP Name")
samp_i = fields.index(sample)
alle_1 = fields.index(allele1)
alle_2 = fields.index(allele2)
return name_i, samp_i, alle_1, alle_2
def getID(idreg,sample_id):
m = idreg.match(sample_id)
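    # NOTE: `replicates` (a dict mapping sample IDs to replicate labels) is assumed
    # to be set up elsewhere (cf. the "$replicates" placeholder in sys.argv below);
    # it is not defined in this file as shown.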
if sample_id in replicates:
rep = "_replicate_"+replicates[sample_id]
else:
rep = ""
if m:
if len(m.groups())==2:
return (m.group(1)+rep, m.group(2))
elif len(m.groups())==1:
return (m.group(1)+rep,m.group(1))
else:
sys.exit("Pattern <%s> has wrong number of groups"%args.idpat)
else:
sys.exit("Sample ID <%s> cannot be parsed by <%s>"%(sample_id,args.idpat))
return sample_id
def checkgzipfile(fname) :
GZIP_MAGIC_NUMBER = "1f8b"
    # Open in binary mode: gzip data is not valid UTF-8, so reading the first
    # two bytes in text mode could raise a decode error before the check runs.
    f = open(fname, "rb")
    isGz = f.read(2).hex() == GZIP_MAGIC_NUMBER
f.close()
return isGz
def parseChipReport(snp_elt,array,fname,output):
# how many lines do we need to skip
# Looks like 10, but let's be sure
idreg = re.compile(args.idpat)
if checkgzipfile(fname) :
f = gzip.open(fname,"rt")
else :
f = open(fname)
head=0
for line in f:
head=head+1
if "[Data]" in line: break
name_i, samp_i, alle_1, alle_2 = getReportIndices(f.readline())
pedf = open ("{}.ped".format(output),"w")
old_sample_id="xxx" # ("xxx","")
num=0
output = np.empty([len(snp_elt)*2],dtype='U1')
output.fill("0")
for line in f:
#fields = line.rstrip().split(",")
fields = re.split("[,\t]",line.rstrip())
sample_id = fields[samp_i].replace(' ','')
snp_name = fields[name_i]
if snp_name not in array:
sys.exit("Unknown SNP name in line "+line)
a1 = fields[alle_1]
a2 = fields[alle_2]
if a1 == "-": a1="0"
if a2 == "-": a2="0"
if sample_id != old_sample_id:
if num > args.samplesize > 0:
pedf.close()
return
if old_sample_id!="xxx":
generate_line(pedf,old_sample_id,output)
output.fill("0")
old_sample_id = sample_id
num=num+1
ind = array.get(snp_name)
output[2*ind]=a1
output[2*ind+1]=a2
generate_line(pedf,old_sample_id,output)
pedf.close()
def outputMap(snp_elt,array,outname):
mapf= open("{}.map".format(outname),"w")
for [chrom,pos,cm,snp] in snp_elt:
mapf.write("{}{}{}{}{}{}{}{}".format(chrom,TAB,snp,TAB,cm,TAB,pos,EOL))
mapf.close()
if len(sys.argv) == 1:
sys.argv=["topbot2plink.py","$array","$report","$samplesize", "$idpat", "$mask", "$replicates", "$output"]
args = parseArguments()
if args.idpat in null_values:
args.idpat=("(.*)")
snp_elt, array = parseArray(args.array)
parseChipReport(snp_elt,array,args.report,args.output)
outputMap(snp_elt,array,args.output)
#copyFam(args.fam,args.output)
| 28.663717 | 109 | 0.601575 |
f067568106abe4f5844f70f9c9cdab10a150cfea | 2,459 | py | Python | numba_dppy/driver/usm_ndarray_type.py | Rubtsowa/numba-dppy | 20f9825b144913ebe1f7635c785b334f3743c4cb | ["Apache-2.0"] | null | null | null | numba_dppy/driver/usm_ndarray_type.py | Rubtsowa/numba-dppy | 20f9825b144913ebe1f7635c785b334f3743c4cb | ["Apache-2.0"] | null | null | null | numba_dppy/driver/usm_ndarray_type.py | Rubtsowa/numba-dppy | 20f9825b144913ebe1f7635c785b334f3743c4cb | ["Apache-2.0"] | null | null | null |
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numba.extending import typeof_impl, register_model
from numba_dppy.dppy_array_type import DPPYArray, DPPYArrayModel
import numba_dppy.target as dppy_target
from dpctl.tensor import usm_ndarray
from numba.np import numpy_support
class USMNdArrayType(DPPYArray):
"""
USMNdArrayType(dtype, ndim, layout, usm_type,
readonly=False, name=None,
aligned=True, addrspace=None)
creates Numba type to represent ``dpctl.tensor.usm_ndarray``.
"""
def __init__(
self,
dtype,
ndim,
layout,
usm_type,
readonly=False,
name=None,
aligned=True,
addrspace=None,
):
self.usm_type = usm_type
# This name defines how this type will be shown in Numba's type dumps.
name = "USM:ndarray(%s, %sd, %s)" % (dtype, ndim, layout)
super(USMNdArrayType, self).__init__(
dtype,
ndim,
layout,
py_type=usm_ndarray,
readonly=readonly,
name=name,
addrspace=addrspace,
)
def copy(self, *args, **kwargs):
return super(USMNdArrayType, self).copy(*args, **kwargs)
# This tells Numba to use the DPPYArray data layout for objects of type USMNdArrayType.
register_model(USMNdArrayType)(DPPYArrayModel)
dppy_target.spirv_data_model_manager.register(USMNdArrayType, DPPYArrayModel)
@typeof_impl.register(usm_ndarray)
def typeof_usm_ndarray(val, c):
"""
This function creates the Numba type (USMNdArrayType) when a usm_ndarray is passed.
"""
try:
dtype = numpy_support.from_dtype(val.dtype)
except NotImplementedError:
raise ValueError("Unsupported array dtype: %s" % (val.dtype,))
layout = "C"
readonly = False
return USMNdArrayType(dtype, val.ndim, layout, val.usm_type, readonly=readonly)
| 32.786667 | 87 | 0.678731 |
faf6c862bdab5723960c4de2ce6d2342ff55a3a2 | 146 | py | Python | pyramide.py | CodeGenie007/python | 42c850999926b697a9b93667ca4d65bf5c643564 | ["MIT"] | null | null | null | pyramide.py | CodeGenie007/python | 42c850999926b697a9b93667ca4d65bf5c643564 | ["MIT"] | null | null | null | pyramide.py | CodeGenie007/python | 42c850999926b697a9b93667ca4d65bf5c643564 | ["MIT"] | null | null | null |
#!/usr/bin/python
#Pyramide Program
for i in [1, 1, 1, 1, 1,]:
print i
for j in range(1,6):
print '{:3d}'.format(j, i),
| 11.230769 | 35 | 0.493151 |
4d984e62a3ac953ce2ce103213fc7219af46111d | 644 | py | Python | jassen/project/blog/migrations/0010_auto_20180418_0158.py | GadinganJayHarley06/intern-blog | b442c6f307da63d8687773df7bcbf28ceab3e6a9 | ["MIT"] | null | null | null | jassen/project/blog/migrations/0010_auto_20180418_0158.py | GadinganJayHarley06/intern-blog | b442c6f307da63d8687773df7bcbf28ceab3e6a9 | ["MIT"] | null | null | null | jassen/project/blog/migrations/0010_auto_20180418_0158.py | GadinganJayHarley06/intern-blog | b442c6f307da63d8687773df7bcbf28ceab3e6a9 | ["MIT"] | null | null | null |
# Generated by Django 2.0.4 on 2018-04-18 01:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_auto_20180418_0143'),
]
operations = [
migrations.AlterField(
model_name='post',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category'),
),
migrations.AlterField(
model_name='post',
name='tags',
field=models.ManyToManyField(related_name='Post', to='blog.Tags'),
),
]
| 25.76 | 101 | 0.607143 |
3fae10e54bc002508f8ef343b5c59d33c94da780 | 113 | py | Python | generic_serializer/__init__.py | Grusinator/django-generic-serializer | 5a44651c64a0d036a532701d90e79aca243152e5 | ["MIT"] | null | null | null | generic_serializer/__init__.py | Grusinator/django-generic-serializer | 5a44651c64a0d036a532701d90e79aca243152e5 | ["MIT"] | 1 | 2020-06-05T20:15:25.000Z | 2020-06-05T20:15:25.000Z | generic_serializer/__init__.py | Grusinator/django-generic-serializer | 5a44651c64a0d036a532701d90e79aca243152e5 | ["MIT"] | null | null | null |
from .serializable_model import SerializableModel
from .serializable_model_filter import SerializableModelFilter
| 37.666667 | 62 | 0.911504 |
b712e88b15f651f177fc67c4ef80aad1e4307ca3 | 112 | py | Python | Mundo 1/Desafio/032.py | LeonardoJosedaSilveira/Curso-de-python | e5b7920ce75a3c7af9b8250e18cabaa173e2478b | ["MIT"] | null | null | null | Mundo 1/Desafio/032.py | LeonardoJosedaSilveira/Curso-de-python | e5b7920ce75a3c7af9b8250e18cabaa173e2478b | ["MIT"] | null | null | null | Mundo 1/Desafio/032.py | LeonardoJosedaSilveira/Curso-de-python | e5b7920ce75a3c7af9b8250e18cabaa173e2478b | ["MIT"] | null | null | null |
ano = int(input('Digite um ano: '))
# Full leap-year rule: divisible by 4, except century years not divisible by 400
bissexto = ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0)
print('É um ano bissexto' if bissexto else 'Não é um ano bissexto')
| 37.333333 | 73 | 0.633929 |
8a70346f8bb257a83cef7e1befa49acf713050e9 | 5,532 | py | Python | sdk/python/pulumi_azure_native/netapp/v20191101/get_snapshot.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/netapp/v20191101/get_snapshot.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/netapp/v20191101/get_snapshot.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetSnapshotResult',
'AwaitableGetSnapshotResult',
'get_snapshot',
]
@pulumi.output_type
class GetSnapshotResult:
"""
Snapshot of a Volume
"""
def __init__(__self__, created=None, file_system_id=None, id=None, location=None, name=None, provisioning_state=None, snapshot_id=None, type=None):
if created and not isinstance(created, str):
raise TypeError("Expected argument 'created' to be a str")
pulumi.set(__self__, "created", created)
if file_system_id and not isinstance(file_system_id, str):
raise TypeError("Expected argument 'file_system_id' to be a str")
pulumi.set(__self__, "file_system_id", file_system_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if snapshot_id and not isinstance(snapshot_id, str):
raise TypeError("Expected argument 'snapshot_id' to be a str")
pulumi.set(__self__, "snapshot_id", snapshot_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def created(self) -> str:
"""
The creation date of the snapshot
"""
return pulumi.get(self, "created")
@property
@pulumi.getter(name="fileSystemId")
def file_system_id(self) -> Optional[str]:
"""
UUID v4 used to identify the FileSystem
"""
return pulumi.get(self, "file_system_id")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> str:
"""
UUID v4 used to identify the Snapshot
"""
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetSnapshotResult(GetSnapshotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSnapshotResult(
created=self.created,
file_system_id=self.file_system_id,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
snapshot_id=self.snapshot_id,
type=self.type)
def get_snapshot(account_name: Optional[str] = None,
pool_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
snapshot_name: Optional[str] = None,
volume_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSnapshotResult:
"""
Snapshot of a Volume
:param str account_name: The name of the NetApp account
:param str pool_name: The name of the capacity pool
:param str resource_group_name: The name of the resource group.
:param str snapshot_name: The name of the snapshot
:param str volume_name: The name of the volume
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['poolName'] = pool_name
__args__['resourceGroupName'] = resource_group_name
__args__['snapshotName'] = snapshot_name
__args__['volumeName'] = volume_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:netapp/v20191101:getSnapshot', __args__, opts=opts, typ=GetSnapshotResult).value
return AwaitableGetSnapshotResult(
created=__ret__.created,
file_system_id=__ret__.file_system_id,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
snapshot_id=__ret__.snapshot_id,
type=__ret__.type)
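# A minimal usage sketch. The resource names below are placeholders (assumptions,
# not values from this module) and running it requires a configured Azure
# environment with valid credentials:
if __name__ == "__main__":
    result = get_snapshot(account_name="account1",
                          pool_name="pool1",
                          resource_group_name="myResourceGroup",
                          snapshot_name="snapshot1",
                          volume_name="volume1")
    # Print a couple of the fields exposed by GetSnapshotResult.
    print(result.snapshot_id, result.provisioning_state)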
| 33.325301
| 151
| 0.63449
|
c2d3a68f62cc7e8ade571d10da33388816f66c7d
| 1,862
|
py
|
Python
|
contrib/devtools/check-doc.py
|
syglee7/ZenaCoin
|
f1f438444e59aca9d4f9950e267f37153afb60c4
|
[
"MIT"
] | 1
|
2019-12-25T17:05:37.000Z
|
2019-12-25T17:05:37.000Z
|
contrib/devtools/check-doc.py
|
syglee7/ZenaCoin
|
f1f438444e59aca9d4f9950e267f37153afb60c4
|
[
"MIT"
] | null | null | null |
contrib/devtools/check-doc.py
|
syglee7/ZenaCoin
|
f1f438444e59aca9d4f9950e267f37153afb60c4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2015-2016 The Zenacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
docd = check_output(CMD_GREP_DOCS, shell=True)
args_used = set(re.findall(REGEX_ARG,used))
args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print "Args used : %s" % len(args_used)
print "Args documented : %s" % len(args_docd)
print "Args undocumented: %s" % len(args_need_doc)
print args_need_doc
print "Args unknown : %s" % len(args_unknown)
print args_unknown
exit(len(args_need_doc))
if __name__ == "__main__":
main()
| 40.478261
| 249
| 0.686896
|
4b842b90995e7f02ba5350cb269f6d1042181043
| 12,816
|
py
|
Python
|
tutorials/dev/use_pass_infra.py
|
jacobpostman/incubator-tvm
|
fdef79d317d455eb5c9e9e86feb97416eb594690
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 4
|
2019-05-08T04:46:07.000Z
|
2019-11-11T19:43:04.000Z
|
tutorials/dev/use_pass_infra.py
|
minminsun/incubator-tvm
|
02643d39798c6ec28348235d36d8da626f50d9dd
|
[
"Apache-2.0"
] | 2
|
2020-09-14T09:18:25.000Z
|
2020-09-24T03:28:18.000Z
|
tutorials/dev/use_pass_infra.py
|
minminsun/incubator-tvm
|
02643d39798c6ec28348235d36d8da626f50d9dd
|
[
"Apache-2.0"
] | 2
|
2019-08-08T01:48:03.000Z
|
2019-09-27T06:49:16.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""
.. _tutorial-use-pass-infra:
How to Use TVM Pass Infra
=========================
**Author**: `Zhi Chen <https://github.com/zhiics>`_
As the number of optimization passes increases in Relay/tir, it becomes intractable to
execute them and maintain their dependencies manually. Therefore, we have
introduced an infrastructure to manage the optimization passes and make it
applicable to different layers of the IR in the TVM stack.
The optimizations of a Relay/tir program could be applied at various levels of granularity,
namely function-level and module-level using :py:class:`tvm.relay.transform.FunctionPass`/
:py:class:`tvm.tir.transform.PrimFuncPass` and :py:class:`tvm.transform.ModulePass`
respectively. Or users can rely on :py:class:`tvm.transform.Sequential` to apply a sequence of passes
on a Relay/tir program where the dependencies between passes can be resolved by the
pass infra. For more details about each type of these passes, please refer to
the :ref:`pass-infra`
This tutorial mainly demonstrates how developers can use the pass infra to perform
a certain optimization and create an optimization pipeline for a Relay program.
The same approach can be used for tir as well.
"""
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
###############################################################################
# Create An Example Relay Program
# -------------------------------
# First of all, we create a simple Relay program for the tutorial. This program
# will be used by various optimizations of the examples in this tutorial.
# Similarly, users can write a tir primitive function and apply the tir passes.
def example():
shape = (1, 64, 54, 54)
c_data = np.empty(shape).astype("float32")
c = relay.const(c_data)
weight = relay.var('weight', shape=(64, 64, 3, 3))
x = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32"))
conv = relay.nn.conv2d(x, weight)
y = relay.add(c, c)
y = relay.multiply(y, relay.const(2, "float32"))
y = relay.add(conv, y)
z = relay.add(y, c)
z1 = relay.add(y, c)
z2 = relay.add(z, z1)
return relay.Function([x, weight], z2)
###############################################################################
# Let us register layout alteration for a conv2d op so that we can apply the
# layout alteration pass on the example. How the alter layout pass works is out of
# the scope of this tutorial.
@relay.op.register_alter_op_layout("nn.conv2d", level=101)
def alter_conv2d(attrs, inputs, tinfos, out_type):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
return relay.nn.conv2d(data, weight, **new_attrs)
###############################################################################
# Optimize the Program
# --------------------
# Now we would like to optimize the program. Relay features a host of
# optimizations. We will select some of them to apply on this example program.
#
# There are multiple ways to optimize a Relay program. Below we will provide
# examples for each of them.
#
# Manually Apply Optimization Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Let's first create a relay Module which contains one or multiple Relay
# functions for optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
# Now we can apply constant folding on the module.
# fold_const here is a callback that doesn't take any parameters.
fold_const = relay.transform.FoldConstant()
# Then, we can invoke the pass on the given module. Note that the constant
# folding pass works at the function level, so the optimization is applied to
# each function in the module. Users don't need to iterate through individual
# functions manually to apply this pass.
mod = fold_const(mod)
# We can see from the updated program that the constants are folded.
print(mod)
###############################################################################
# More optimizations can be applied in a similar manner. For instance, we can
# eliminate the common expressions used by `z` and `z1`.
mod = relay.transform.EliminateCommonSubexpr()(mod)
print(mod)
###############################################################################
# Some optimizations, such as fusion, are parametric as well. For example,
# opt level 0 will not allow operators to be fused together. Users can pass the
# `fuse_opt_level` to enable this.
mod = relay.transform.FuseOps(fuse_opt_level=0)(mod)
# We can observe that the optimized module contains functions that only have
# a single primitive op.
print(mod)
###############################################################################
# Use Sequential to Apply a Sequence of Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Applying passes as above is actually tedious and it may require users to have
# a better understanding of the dependencies between them. For example, fusion
# currently doesn't work well on let bindings. Therefore, we would not be able
# to fuse operators that were fusable if :py:func:`relay.transform.ToANormalForm` is applied before
# fusion, as this pass generates let bindings for each expression to
# canonicalize a Relay program.
#
# Relay, hence, provides :py:class:`tvm.transform.Sequential` to relieve developers from handling
# these issues explicitly by specifying the required passes of each pass and
# packing them as a whole to execute. For example, the same passes can now be
# applied using the sequential style as the following. :py:class:`tvm.transform.Sequential` is
# similar to `torch.nn.sequential <https://pytorch.org/docs/stable/nn.html#torch.nn.Sequential>`_
# and `mxnet.gluon.block <https://mxnet.incubator.apache.org/api/python/docs/_modules/mxnet/gluon/block.html>`_.
# For example, `torch.nn.sequential` is used to contain a sequence of PyTorch
# `Modules` that will be added to build a network. It focuses on the network
# layers. Instead, the :py:class:`tvm.transform.Sequential` in our pass infra works on the optimizing
# pass.
# Now let's execute some passes through :py:class:`tvm.transform.Sequential`
f = example()
mod = tvm.IRModule.from_expr(f)
# Gather the passes we are interested in.
seq = tvm.transform.Sequential([relay.transform.FoldConstant(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.FuseOps(fuse_opt_level=2)])
mod1 = seq(mod)
print(mod1)
###############################################################################
# From the transformed Relay program, we can see that there are still two
# identical addition operations. This is because ``EliminateCommonSubexpr``
# was not actually performed. The reason is that only the passes with an
# optimization level less than or equal to 2 are executed by default under
# :py:class:`tvm.transform.Sequential`. The pass infra,
# however, provides a configuration interface
# for users to customize the optimization level that they want to execute.
with tvm.transform.PassContext(opt_level=3):
mod2 = seq(mod)
print(mod2)
###############################################################################
# Now we can see that only one of the two identical additions is kept.
#
# In addition, users can selectively disable some passes using the
# `disabled_pass` config, which is similar to the `-fno-xxx` option used in
# general purpose compilers, such as Clang and GCC. For example, we can disable
# EliminateCommonSubexpr as follows. The printed module will again show two
# identical addition operations.
with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]):
mod3 = seq(mod)
print(mod3)
###############################################################################
# The passes applied so far are target independent. The pass infra also
# provides a means to make pass target-aware. For example, the layout
# alteration pass falls in such category.
with tvm.transform.PassContext(opt_level=3):
mod4 = seq(mod)
print(mod4)
seq1 = tvm.transform.Sequential([relay.transform.AlterOpLayout()])
with tvm.transform.PassContext(opt_level=3):
with tvm.target.create("llvm"):
mod5 = seq1(mod)
print(mod5)
##############################################################################
# Implement a Pass Using Python Decorator
# ------------------------------------------
# The next example illustrates how we can orchestrate a customized optimization
# pipeline through the pass infra using Python decorators. This functionality
# greatly eases the implementation of passes. For example, users can simply
# define a decorated class to do function-level optimizations as the following
# example shows. `transform_function` wraps a class to replace all constants
# with a multiple of `c`. Later on, each function in a given module will be
# visited and each constant in the function will be replaced when we invoke the
# customized pass.
@relay.transform.function_pass(opt_level=1)
class CustomPipeline:
"""Simple test function to replace one argument to another."""
def __init__(self, multiplier):
self.multiplier = multiplier
# This function can define a pass.
def transform_function(self, func, mod, ctx):
obj = self
class ReplaceConstant(tvm.relay.ExprMutator):
def visit_constant(self, c):
return relay.multiply(obj.multiplier, c)
return ReplaceConstant().visit(func)
f = example()
mod = tvm.IRModule.from_expr(f)
custom_pass = CustomPipeline(multiplier=relay.const(3, "float32"))
assert custom_pass.info.name == "CustomPipeline"
mod3 = custom_pass(mod)
print(mod3)
##############################################################################
# Debug a Pass
# ------------
# TVM provides users a plug-and-play style debugging facility: a special pass
# (``PrintIR``) dumps the IR of the whole module after a certain pass is done.
# A slightly modified version of the sequential pass example
# could be like the following to enable IR dumping for ``FoldConstant`` optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
seq = tvm.transform.Sequential([relay.transform.FoldConstant(),
tvm.transform.PrintIR(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.FuseOps(),
relay.transform.AlterOpLayout()])
# By inserting the ``PrintIR`` pass after ``FoldConstant``, the pass infra will
# dump out the module IR when ``FoldConstant`` is done. Users can plug in this
# pass after any pass they want to debug for viewing the optimization effect.
#
# There is a more flexible debugging mechanism also exposed by the build configuration
# object. One can pass a tracing function which can be used to execute arbitrary code
# before and/or after each pass. A tracing function will receive a :py:class:`tvm.IRModule`,
# a :py:class:`tvm.transform.PassInfo` object,
# and a boolean indicating whether you are executing before, or after a pass.
# An example is below.
def print_ir(mod, info, is_before):
"""Print the name of the pass, the IR, only before passes execute."""
if is_before:
print("Running pass: {}", info)
print(mod)
with tvm.transform.PassContext(opt_level=3, trace=print_ir):
with tvm.target.create("llvm"):
# Perform the optimizations.
mod = seq(mod)
print(mod)
print("done")
##############################################################################
# Summary
# -------
# This tutorial has covered how we can write and invoke passes in TVM more
# conveniently using the pass infra. Different ways of invoking a pass are also
# discussed. Using :py:class:`tvm.transform.Sequential` can largely help
# users to ease the work of handling multiple optimization passes and their
# dependencies. In addition, an example is provided to illustrate
# how we can debug a pass using the ``PrintIR`` and tracing.
| 45.286219
| 112
| 0.675094
|
1a2fd9b881bcb20c6a7d72e836aff2941ba30a0c
| 675
|
py
|
Python
|
GUI/qt/ZetCode/a_small_window_2.py
|
archu2020/python-2
|
19c626ca9fd37168db8a7ac075fd80c8e2971313
|
[
"Apache-2.0"
] | 48
|
2017-12-24T12:19:55.000Z
|
2022-02-26T13:14:27.000Z
|
GUI/qt/ZetCode/a_small_window_2.py
|
17610178081/python
|
3975c678d985c468deecd03560d882e9d316bb63
|
[
"Apache-2.0"
] | 3
|
2018-12-05T08:48:14.000Z
|
2020-07-29T01:56:16.000Z
|
GUI/qt/ZetCode/a_small_window_2.py
|
17610178081/python
|
3975c678d985c468deecd03560d882e9d316bb63
|
[
"Apache-2.0"
] | 113
|
2017-08-09T03:10:04.000Z
|
2022-03-26T16:05:01.000Z
|
"""
ZetCode PyQt5 tutorial
This example shows an icon
in the titlebar of the window.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
"""
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| 17.763158
| 53
| 0.613333
|
e15587e71837974807dae3e70507b83c434413e2
| 494
|
py
|
Python
|
flaskblog/config.py
|
carlba/flask-full-featured-web-app
|
909db8be89c529d7fa2f3b1a839777be4ae85c68
|
[
"MIT"
] | null | null | null |
flaskblog/config.py
|
carlba/flask-full-featured-web-app
|
909db8be89c529d7fa2f3b1a839777be4ae85c68
|
[
"MIT"
] | null | null | null |
flaskblog/config.py
|
carlba/flask-full-featured-web-app
|
909db8be89c529d7fa2f3b1a839777be4ae85c68
|
[
"MIT"
] | null | null | null |
import os
class Config(object):
# noinspection SpellCheckingInspection
# import secrets
# secrets.token_hex(16)
SECRET_KEY = 'bcaa436189daf75374ecebec4a652522'
    # The three slashes mean a relative path, so the database file will be next to the script
SQLALCHEMY_DATABASE_URI = 'sqlite:///site.db'
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
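# A minimal sketch (an assumed application factory, not part of this module)
# showing how the Config class above is typically wired into the Flask app:
def _create_app_sketch():
    from flask import Flask
    app = Flask(__name__)
    # Load every UPPERCASE attribute of Config into app.config.
    app.config.from_object(Config)
    return app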
| 29.058824
| 81
| 0.720648
|
0a3c2cb3da1e5e7bac160d7f41cc078ec74ed324
| 220
|
py
|
Python
|
src2/site01/models/book.py
|
DYS12345/flask
|
a939a0c3c079bbbbc74a7a28440973347fceb2c1
|
[
"Apache-2.0"
] | null | null | null |
src2/site01/models/book.py
|
DYS12345/flask
|
a939a0c3c079bbbbc74a7a28440973347fceb2c1
|
[
"Apache-2.0"
] | null | null | null |
src2/site01/models/book.py
|
DYS12345/flask
|
a939a0c3c079bbbbc74a7a28440973347fceb2c1
|
[
"Apache-2.0"
] | null | null | null |
class Book:
def __init__(self, title, price, author, publisher):
self.title = title
self.price = price
self.author = author
self.publisher = publisher
def __str__(self):
return '<Book {}>'.format(self.title)
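# A short usage sketch (the sample values are illustrative only):
if __name__ == '__main__':
    book = Book('Some Title', 19.99, 'Some Author', 'Some Publisher')
    print(book)  # prints: <Book Some Title>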
| 24.444444
| 53
| 0.7
|
64cba61aa5fd61852d46f346137fd9d329ece7f7
| 866
|
py
|
Python
|
app/demo/documents/migrations/0003_link.py
|
sesostris/django-material-admin
|
f6678e57286bd871a820b235f868873d5f86d649
|
[
"MIT"
] | 270
|
2018-09-14T07:55:04.000Z
|
2022-03-31T13:12:41.000Z
|
app/demo/documents/migrations/0003_link.py
|
sesostris/django-material-admin
|
f6678e57286bd871a820b235f868873d5f86d649
|
[
"MIT"
] | 107
|
2019-03-26T20:35:23.000Z
|
2022-03-15T15:34:38.000Z
|
app/demo/documents/migrations/0003_link.py
|
sesostris/django-material-admin
|
f6678e57286bd871a820b235f868873d5f86d649
|
[
"MIT"
] | 66
|
2018-11-05T13:07:14.000Z
|
2022-03-31T17:17:22.000Z
|
# Generated by Django 2.2.3 on 2019-07-23 14:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('documents', '0002_auto_20190719_1539'),
]
operations = [
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(verbose_name='Url')),
('document', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='documents.Document', verbose_name='Document')),
],
options={
'verbose_name': 'Link',
'verbose_name_plural': 'Links',
'db_table': 'links',
},
),
]
| 30.928571
| 154
| 0.571594
|
7da7689da963fde6e8a3a7ca4868c859cacfffe8
| 1,003
|
py
|
Python
|
mainapp/management/commands/search_tokenize.py
|
cyroxx/meine-stadt-transparent
|
d5a3f03a29a1bb97ce50ac5257d8bbd5208d9218
|
[
"MIT"
] | 34
|
2017-10-04T14:20:41.000Z
|
2022-03-11T18:06:48.000Z
|
mainapp/management/commands/search_tokenize.py
|
cyroxx/meine-stadt-transparent
|
d5a3f03a29a1bb97ce50ac5257d8bbd5208d9218
|
[
"MIT"
] | 588
|
2017-10-14T18:31:17.000Z
|
2022-03-16T13:00:30.000Z
|
mainapp/management/commands/search_tokenize.py
|
codeformuenster/meine-stadt-transparent
|
1458bc6acad40183908e2b7cc98ef92165d1123a
|
[
"MIT"
] | 11
|
2017-11-27T10:12:59.000Z
|
2022-02-09T10:27:11.000Z
|
from django.core.management.base import BaseCommand
from elasticsearch_dsl import Index
from mainapp.documents.index import get_text_analyzer
class Command(BaseCommand):
help = "View the tokenizations of some word with the elasticsearch tokenizer"
def add_arguments(self, parser):
parser.add_argument("words", nargs="+")
def handle(self, *args, **options):
text_analyzer = get_text_analyzer("german")
elastic_index = Index("mst_debug")
if not elastic_index.exists():
elastic_index.create()
elastic_index.close()
elastic_index.analyzer(text_analyzer)
elastic_index.save()
elastic_index.open()
elastic_index.flush()
for word in options["words"]:
analysis = elastic_index.analyze(
body={"analyzer": "text_analyzer", "text": word}
)
tokens = [i["token"] for i in analysis["tokens"]]
self.stdout.write("{} {}\n".format(word, tokens))
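# A usage sketch: like any Django management command, this is invoked by its
# module name with one or more words to tokenize; the sample word below is an
# assumption. Programmatically, with a configured Django project and a running
# Elasticsearch instance:
#
#     from django.core.management import call_command
#     call_command("search_tokenize", "Bürgerbeteiligung")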
| 33.433333
| 81
| 0.643071
|
aa94e5356eb3f17a7993ea52e6ff4d9bb1f374e6
| 1,011
|
py
|
Python
|
src/cogs/config/config.py
|
vcokltfre/hcubed
|
d935d9d91668c85af5dc18aef6becca565d50545
|
[
"MIT"
] | null | null | null |
src/cogs/config/config.py
|
vcokltfre/hcubed
|
d935d9d91668c85af5dc18aef6becca565d50545
|
[
"MIT"
] | 1
|
2022-01-29T17:00:52.000Z
|
2022-01-29T17:09:31.000Z
|
src/cogs/config/config.py
|
vcokltfre/hcubed
|
d935d9d91668c85af5dc18aef6becca565d50545
|
[
"MIT"
] | 2
|
2022-01-01T16:23:57.000Z
|
2022-01-29T17:06:47.000Z
|
from discord.ext import commands
from src.internal.bot import Bot
from src.internal.context import Context
class Config(commands.Cog):
"""Change the bot config."""
def __init__(self, bot: Bot):
self.bot = bot
@commands.command(name="prefix")
@commands.check_any(
commands.is_owner(), commands.has_guild_permissions(manage_guild=True)
)
async def prefix(self, ctx: Context, *, new: str = None):
"""Set or get the guild prefix."""
if not new:
guild = await self.bot.db.fetch_guild(ctx.guild.id, ctx.guild.owner_id)
return await ctx.reply(f"The prefix in this server is `{guild['prefix']}`")
if len(new) > 16:
return await ctx.reply("Prefixes should be 16 characters or fewer.")
await self.bot.db.set_guild_prefix(ctx.guild.id, new)
await ctx.reply(f"Changed this server's prefix to `{new}`")
del self.bot.prefixes[ctx.guild.id]
def setup(bot: Bot):
bot.add_cog(Config(bot))
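# A loading sketch (the dotted extension path below is an assumption based on
# the file location): discord.py calls the setup() hook above when the
# extension is loaded, e.g.:
#
#     bot.load_extension("src.cogs.config.config")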
| 28.885714
| 87
| 0.645895
|